diff options
Diffstat (limited to 'sql')
245 files changed, 113709 insertions, 37276 deletions
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt new file mode 100755 index 00000000000..7e26f62b5f7 --- /dev/null +++ b/sql/CMakeLists.txt @@ -0,0 +1,136 @@ +# Copyright (C) 2006 MySQL AB +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +SET(CMAKE_CXX_FLAGS_DEBUG + "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR /Zi") +SET(CMAKE_C_FLAGS_DEBUG + "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX -DUSE_SYMDIR /Zi") +SET(CMAKE_EXE_LINKER_FLAGS_DEBUG "${CMAKE_EXE_LINKER_FLAGS_DEBUG} /MAP /MAPINFO:EXPORTS") + +INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include + ${CMAKE_SOURCE_DIR}/extra/yassl/include + ${CMAKE_SOURCE_DIR}/sql + ${CMAKE_SOURCE_DIR}/regex + ${CMAKE_SOURCE_DIR}/zlib + ${CMAKE_SOURCE_DIR}/bdb/build_win32 + ${CMAKE_SOURCE_DIR}/bdb/dbinc) + +SET_SOURCE_FILES_PROPERTIES(${CMAKE_SOURCE_DIR}/sql/message.rc + ${CMAKE_SOURCE_DIR}/sql/message.h + ${CMAKE_SOURCE_DIR}/sql/sql_yacc.h + ${CMAKE_SOURCE_DIR}/sql/sql_yacc.cc + ${CMAKE_SOURCE_DIR}/include/mysql_version.h + ${CMAKE_SOURCE_DIR}/sql/lex_hash.h + ${PROJECT_SOURCE_DIR}/include/mysqld_error.h + ${PROJECT_SOURCE_DIR}/include/mysqld_ername.h + ${PROJECT_SOURCE_DIR}/include/sql_state.h + PROPERTIES GENERATED 1) + +ADD_DEFINITIONS(-DHAVE_INNOBASE -DMYSQL_SERVER + -D_CONSOLE -DHAVE_DLOPEN) + +IF(DISABLE_GRANT_OPTIONS) + ADD_DEFINITIONS(-DDISABLE_GRANT_OPTIONS) +ENDIF(DISABLE_GRANT_OPTIONS) + 
+ADD_EXECUTABLE(mysqld ../sql-common/client.c derror.cc des_key_file.cc + discover.cc ../libmysql/errmsg.c field.cc field_conv.cc + filesort.cc gstream.cc ha_blackhole.cc + ha_archive.cc ha_heap.cc ha_myisam.cc ha_myisammrg.cc + ha_innodb.cc ha_federated.cc ha_berkeley.cc ha_blackhole.cc + handler.cc hash_filo.cc hash_filo.h + hostname.cc init.cc item.cc item_buff.cc item_cmpfunc.cc + item_create.cc item_func.cc item_geofunc.cc item_row.cc + item_strfunc.cc item_subselect.cc item_sum.cc item_timefunc.cc + item_uniq.cc key.cc log.cc lock.cc log_event.cc message.rc + message.h mf_iocache.cc my_decimal.cc ../sql-common/my_time.c + ../myisammrg/myrg_rnext_same.c mysqld.cc net_serv.cc + nt_servc.cc nt_servc.h opt_range.cc opt_range.h opt_sum.cc + ../sql-common/pack.c parse_file.cc password.c procedure.cc + protocol.cc records.cc repl_failsafe.cc set_var.cc + slave.cc sp.cc sp_cache.cc sp_head.cc sp_pcontext.cc + sp_rcontext.cc spatial.cc sql_acl.cc sql_analyse.cc sql_base.cc + sql_cache.cc sql_class.cc sql_client.cc sql_crypt.cc sql_crypt.h + sql_cursor.cc sql_db.cc sql_delete.cc sql_derived.cc sql_do.cc + sql_error.cc sql_handler.cc sql_help.cc sql_insert.cc sql_lex.cc + sql_list.cc sql_load.cc sql_manager.cc sql_map.cc sql_parse.cc + sql_prepare.cc sql_rename.cc + sql_repl.cc sql_select.cc sql_show.cc sql_state.c sql_string.cc + sql_table.cc sql_test.cc sql_trigger.cc sql_udf.cc sql_union.cc + sql_update.cc sql_view.cc strfunc.cc table.cc thr_malloc.cc + time.cc tztime.cc uniques.cc unireg.cc + ../sql-common/my_user.c + sql_locale.cc + ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc + ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h + ${PROJECT_SOURCE_DIR}/include/mysqld_error.h + ${PROJECT_SOURCE_DIR}/include/mysqld_ername.h + ${PROJECT_SOURCE_DIR}/include/sql_state.h + ${PROJECT_SOURCE_DIR}/include/mysql_version.h + ${PROJECT_SOURCE_DIR}/sql/lex_hash.h) + +TARGET_LINK_LIBRARIES(mysqld heap myisam myisammrg mysys yassl zlib dbug yassl + taocrypt strings vio regex wsock32) + 
+IF(WITH_EXAMPLE_STORAGE_ENGINE) + TARGET_LINK_LIBRARIES(mysqld example) +ENDIF(WITH_EXAMPLE_STORAGE_ENGINE) + +IF(WITH_INNOBASE_STORAGE_ENGINE) + TARGET_LINK_LIBRARIES(mysqld innobase) +ENDIF(WITH_INNOBASE_STORAGE_ENGINE) + +IF(WITH_BERKELEY_STORAGE_ENGINE) + TARGET_LINK_LIBRARIES(mysqld bdb) +ENDIF(WITH_BERKELEY_STORAGE_ENGINE) + + +ADD_DEPENDENCIES(mysqld GenError) + +# Sql Parser custom command +ADD_CUSTOM_COMMAND( + SOURCE ${PROJECT_SOURCE_DIR}/sql/sql_yacc.yy + OUTPUT ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc + COMMAND bison.exe ARGS -y -p MYSQL --defines=sql_yacc.h + --output=sql_yacc.cc sql_yacc.yy + DEPENDS ${PROJECT_SOURCE_DIR}/sql/sql_yacc.yy) + +ADD_CUSTOM_COMMAND( + OUTPUT ${PROJECT_SOURCE_DIR}/sql/sql_yacc.h + COMMAND echo + DEPENDS ${PROJECT_SOURCE_DIR}/sql/sql_yacc.cc +) + +# Windows message file +ADD_CUSTOM_COMMAND( + SOURCE ${PROJECT_SOURCE_DIR}/sql/message.mc + OUTPUT message.rc message.h + COMMAND mc ARGS ${PROJECT_SOURCE_DIR}/sql/message.mc + DEPENDS ${PROJECT_SOURCE_DIR}/sql/message.mc) + +# Gen_lex_hash +ADD_EXECUTABLE(gen_lex_hash gen_lex_hash.cc) +TARGET_LINK_LIBRARIES(gen_lex_hash dbug mysqlclient wsock32) +GET_TARGET_PROPERTY(GEN_LEX_HASH_EXE gen_lex_hash LOCATION) +ADD_CUSTOM_COMMAND( + OUTPUT ${PROJECT_SOURCE_DIR}/sql/lex_hash.h + COMMAND ${GEN_LEX_HASH_EXE} ARGS > lex_hash.h + DEPENDS ${GEN_LEX_HASH_EXE} +) +ADD_DEPENDENCIES(mysqld gen_lex_hash) + +ADD_LIBRARY(udf_example MODULE udf_example.c udf_example.def) +ADD_DEPENDENCIES(udf_example strings) +TARGET_LINK_LIBRARIES(udf_example wsock32) diff --git a/sql/Makefile.am b/sql/Makefile.am index cf7bc0a1452..4f84023724f 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -1,9 +1,8 @@ -# Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +# Copyright (C) 2000-2006 MySQL AB # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the 
License, or -# (at your option) any later version. +# the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -19,18 +18,18 @@ MYSQLDATAdir = $(localstatedir) MYSQLSHAREdir = $(pkgdatadir) MYSQLBASEdir= $(prefix) -INCLUDES = @MT_INCLUDES@ @ZLIB_INCLUDES@ \ +INCLUDES = @ZLIB_INCLUDES@ \ @bdb_includes@ @innodb_includes@ @ndbcluster_includes@ \ -I$(top_builddir)/include -I$(top_srcdir)/include \ - -I$(top_srcdir)/regex -I$(srcdir) $(openssl_includes) + -I$(top_srcdir)/regex -I$(srcdir) \ + $(openssl_includes) WRAPLIBS= @WRAPLIBS@ SUBDIRS = share libexec_PROGRAMS = mysqld EXTRA_PROGRAMS = gen_lex_hash bin_PROGRAMS = mysql_tzinfo_to_sql gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@ -LDADD = @isam_libs@ \ - $(top_builddir)/myisam/libmyisam.a \ +LDADD = $(top_builddir)/myisam/libmyisam.a \ $(top_builddir)/myisammrg/libmyisammrg.a \ $(top_builddir)/heap/libheap.a \ $(top_builddir)/vio/libvio.a \ @@ -43,15 +42,17 @@ mysqld_LDADD = @MYSQLD_EXTRA_LDFLAGS@ \ @bdb_libs@ @innodb_libs@ @pstack_libs@ \ @innodb_system_libs@ \ @ndbcluster_libs@ @ndbcluster_system_libs@ \ - $(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ @openssl_libs@ + $(LDADD) $(CXXLDFLAGS) $(WRAPLIBS) @LIBDL@ \ + $(yassl_libs) $(openssl_libs) + noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ item_strfunc.h item_timefunc.h item_uniq.h \ item_create.h item_subselect.h item_row.h \ mysql_priv.h item_geofunc.h sql_bitmap.h \ procedure.h sql_class.h sql_lex.h sql_list.h \ sql_manager.h sql_map.h sql_string.h unireg.h \ - field.h handler.h mysqld_suffix.h \ - ha_isammrg.h ha_isam.h ha_myisammrg.h\ + sql_error.h field.h handler.h mysqld_suffix.h \ + ha_myisammrg.h\ ha_heap.h ha_myisam.h ha_berkeley.h ha_innodb.h \ ha_ndbcluster.h opt_range.h protocol.h \ sql_select.h structs.h table.h sql_udf.h hash_filo.h\ @@ -59,8 +60,13 @@ noinst_HEADERS = item.h item_func.h item_sum.h 
item_cmpfunc.h \ log_event.h sql_repl.h slave.h \ stacktrace.h sql_sort.h sql_cache.h set_var.h \ spatial.h gstream.h client_settings.h tzfile.h \ - tztime.h examples/ha_example.h examples/ha_archive.h \ - examples/ha_tina.h ha_blackhole.h + tztime.h my_decimal.h\ + sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \ + parse_file.h sql_view.h sql_trigger.h \ + sql_array.h sql_cursor.h \ + examples/ha_example.h ha_archive.h \ + examples/ha_tina.h ha_blackhole.h \ + ha_federated.h mysqld_SOURCES = sql_lex.cc sql_handler.cc \ item.cc item_sum.cc item_buff.cc item_func.cc \ item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \ @@ -82,17 +88,21 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \ records.cc filesort.cc handler.cc \ ha_heap.cc ha_myisam.cc ha_myisammrg.cc \ ha_berkeley.cc ha_innodb.cc \ - ha_isam.cc ha_isammrg.cc ha_ndbcluster.cc \ + ha_ndbcluster.cc \ sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \ sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \ sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \ slave.cc sql_repl.cc sql_union.cc sql_derived.cc \ client.c sql_client.cc mini_client_errors.c pack.c\ stacktrace.c repl_failsafe.h repl_failsafe.cc \ - gstream.cc spatial.cc sql_help.cc protocol_cursor.cc \ - tztime.cc my_time.c \ - examples/ha_example.cc examples/ha_archive.cc \ - examples/ha_tina.cc ha_blackhole.cc + sql_olap.cc sql_view.cc \ + gstream.cc spatial.cc sql_help.cc sql_cursor.cc \ + tztime.cc my_time.c my_user.c my_decimal.cc\ + sp_head.cc sp_pcontext.cc sp_rcontext.cc sp.cc \ + sp_cache.cc parse_file.cc sql_trigger.cc \ + examples/ha_example.cc ha_archive.cc \ + examples/ha_tina.cc ha_blackhole.cc \ + ha_federated.cc gen_lex_hash_SOURCES = gen_lex_hash.cc gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS) @@ -106,8 +116,12 @@ DEFS = -DMYSQL_SERVER \ @DEFS@ BUILT_SOURCES = sql_yacc.cc sql_yacc.h lex_hash.h -EXTRA_DIST = udf_example.cc $(BUILT_SOURCES) -AM_YFLAGS = -d +EXTRA_DIST = $(BUILT_SOURCES) nt_servc.cc nt_servc.h \ + message.mc 
examples/CMakeLists.txt CMakeLists.txt \ + udf_example.c udf_example.def +DISTCLEANFILES = lex_hash.h sql_yacc.output + +AM_YFLAGS = -d --debug --verbose mysql_tzinfo_to_sql.cc: rm -f mysql_tzinfo_to_sql.cc @@ -122,6 +136,8 @@ link_sources: mysql_tzinfo_to_sql.cc @LN_CP_F@ $(top_srcdir)/sql-common/client.c client.c rm -f my_time.c @LN_CP_F@ $(top_srcdir)/sql-common/my_time.c my_time.c + rm -f my_user.c + @LN_CP_F@ $(top_srcdir)/sql-common/my_user.c my_user.c mysql_tzinfo_to_sql.o: $(mysql_tzinfo_to_sql_SOURCES) $(CXXCOMPILE) -c $(INCLUDES) -DTZINFO2SQL $< @@ -132,10 +148,15 @@ mysql_tzinfo_to_sql.o: $(mysql_tzinfo_to_sql_SOURCES) sql_yacc.cc: sql_yacc.yy sql_yacc.h: sql_yacc.yy +# Be careful here, note that we use VPATH and might or might not have +# a pregenerated "sql_yacc.cc" in $(srcdir) or one we just built in +# $(builddir). And it has to work if $(srcdir) == $(builddir). sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS) + @SED@ -e 's/__attribute__ ((__unused__))//' $< > sql_yacc.cc-new + @MV@ sql_yacc.cc-new sql_yacc.cc @echo "Note: The following compile may take a long time." 
@echo "If it fails, re-run configure with --with-low-memory" - $(CXXCOMPILE) $(LM_CFLAGS) -c $< + $(CXXCOMPILE) $(LM_CFLAGS) -c sql_yacc.cc # This generates lex_hash.h # NOTE Built sources should depend on their sources not the tool @@ -145,13 +166,11 @@ lex_hash.h: gen_lex_hash.cc lex.h ./gen_lex_hash$(EXEEXT) > $@-t $(MV) $@-t $@ -# For testing of udf_example.so; Works on platforms with gcc -# (This is not part of our build process but only provided as an example) -udf_example.so: udf_example.cc - $(CXXCOMPILE) -shared -o $@ $< +# For testing of udf_example.so +noinst_LTLIBRARIES= udf_example.la +udf_example_la_SOURCES= udf_example.c +udf_example_la_LDFLAGS= -module -rpath $(pkglibdir) -distclean-local: - rm -f lex_hash.h # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/sql/client_settings.h b/sql/client_settings.h index a8cd36af102..f0742cd8046 100644 --- a/sql/client_settings.h +++ b/sql/client_settings.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/custom_conf.h b/sql/custom_conf.h index 19ced12bfbb..137b7e9eef2 100644 --- a/sql/custom_conf.h +++ b/sql/custom_conf.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
+ the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/derror.cc b/sql/derror.cc index 09f43d20044..0e74d411b1f 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -24,15 +23,43 @@ static bool read_texts(const char *file_name,const char ***point, uint error_messages); static void init_myfunc_errs(void); - /* Read messages from errorfile */ +/* + Read messages from errorfile. + + SYNOPSIS + init_errmessage() + + DESCRIPTION + This function can be called multiple times to reload the messages. + + RETURN + FALSE OK + TRUE Error +*/ bool init_errmessage(void) { + const char **errmsgs; DBUG_ENTER("init_errmessage"); - if (read_texts(ERRMSG_FILE,&my_errmsg[ERRMAPP],ER_ERROR_MESSAGES)) + /* + Get a pointer to the old error messages pointer array. + read_texts() tries to free it. + */ + errmsgs= my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST); + + /* Read messages from file. */ + if (read_texts(ERRMSG_FILE, &errmsgs, ER_ERROR_LAST - ER_ERROR_FIRST + 1)) DBUG_RETURN(TRUE); - errmesg=my_errmsg[ERRMAPP]; /* Init global variabel */ + + /* Register messages for use with my_error(). 
*/ + if (my_error_register(errmsgs, ER_ERROR_FIRST, ER_ERROR_LAST)) + { + x_free((gptr) errmsgs); + DBUG_RETURN(TRUE); + } + + errmesg= errmsgs; /* Init global variabel */ init_myfunc_errs(); /* Init myfunc messages */ DBUG_RETURN(FALSE); } @@ -50,7 +77,6 @@ static bool read_texts(const char *file_name,const char ***point, char name[FN_REFLEN]; const char *buff; uchar head[32],*pos; - CHARSET_INFO *cset; // For future DBUG_ENTER("read_texts"); *point=0; // If something goes wrong @@ -76,7 +102,7 @@ Please install the latest version of this file.",name); } /* TODO: Convert the character set to server system character set */ - if (!(cset= get_charset(head[30],MYF(MY_WME)))) + if (!get_charset(head[30],MYF(MY_WME))) { sql_print_error("Character set #%d is not supported for messagefile '%s'", (int)head[30],name); @@ -148,20 +174,20 @@ static void init_myfunc_errs() init_glob_errs(); /* Initiate english errors */ if (!(specialflag & SPECIAL_ENGLISH)) { - globerrs[EE_FILENOTFOUND % ERRMOD] = ER(ER_FILE_NOT_FOUND); - globerrs[EE_CANTCREATEFILE % ERRMOD]= ER(ER_CANT_CREATE_FILE); - globerrs[EE_READ % ERRMOD] = ER(ER_ERROR_ON_READ); - globerrs[EE_WRITE % ERRMOD] = ER(ER_ERROR_ON_WRITE); - globerrs[EE_BADCLOSE % ERRMOD] = ER(ER_ERROR_ON_CLOSE); - globerrs[EE_OUTOFMEMORY % ERRMOD] = ER(ER_OUTOFMEMORY); - globerrs[EE_DELETE % ERRMOD] = ER(ER_CANT_DELETE_FILE); - globerrs[EE_LINK % ERRMOD] = ER(ER_ERROR_ON_RENAME); - globerrs[EE_EOFERR % ERRMOD] = ER(ER_UNEXPECTED_EOF); - globerrs[EE_CANTLOCK % ERRMOD] = ER(ER_CANT_LOCK); - globerrs[EE_DIR % ERRMOD] = ER(ER_CANT_READ_DIR); - globerrs[EE_STAT % ERRMOD] = ER(ER_CANT_GET_STAT); - globerrs[EE_GETWD % ERRMOD] = ER(ER_CANT_GET_WD); - globerrs[EE_SETWD % ERRMOD] = ER(ER_CANT_SET_WD); - globerrs[EE_DISK_FULL % ERRMOD] = ER(ER_DISK_FULL); + EE(EE_FILENOTFOUND) = ER(ER_FILE_NOT_FOUND); + EE(EE_CANTCREATEFILE) = ER(ER_CANT_CREATE_FILE); + EE(EE_READ) = ER(ER_ERROR_ON_READ); + EE(EE_WRITE) = ER(ER_ERROR_ON_WRITE); + EE(EE_BADCLOSE) = 
ER(ER_ERROR_ON_CLOSE); + EE(EE_OUTOFMEMORY) = ER(ER_OUTOFMEMORY); + EE(EE_DELETE) = ER(ER_CANT_DELETE_FILE); + EE(EE_LINK) = ER(ER_ERROR_ON_RENAME); + EE(EE_EOFERR) = ER(ER_UNEXPECTED_EOF); + EE(EE_CANTLOCK) = ER(ER_CANT_LOCK); + EE(EE_DIR) = ER(ER_CANT_READ_DIR); + EE(EE_STAT) = ER(ER_CANT_GET_STAT); + EE(EE_GETWD) = ER(ER_CANT_GET_WD); + EE(EE_SETWD) = ER(ER_CANT_SET_WD); + EE(EE_DISK_FULL) = ER(ER_DISK_FULL); } } diff --git a/sql/des_key_file.cc b/sql/des_key_file.cc index 77cb0c8de0f..d99d712b45a 100644 --- a/sql/des_key_file.cc +++ b/sql/des_key_file.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2001-2003, 2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/discover.cc b/sql/discover.cc index 1251055c70e..5d24607cf6b 100644 --- a/sql/discover.cc +++ b/sql/discover.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2004 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/examples/CMakeLists.txt b/sql/examples/CMakeLists.txt new file mode 100755 index 00000000000..1a22e9a3efd --- /dev/null +++ b/sql/examples/CMakeLists.txt @@ -0,0 +1,26 @@ +# Copyright (C) 2006 MySQL AB +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + +SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") +SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} -DSAFEMALLOC -DSAFE_MUTEX") + +INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql + ${CMAKE_SOURCE_DIR}/extra/yassl/include + ${CMAKE_SOURCE_DIR}/regex) + +IF(WITH_EXAMPLE_STORAGE_ENGINE) +ADD_LIBRARY(example ha_example.cc) +ADD_DEPENDENCIES(example GenError) +ENDIF(WITH_EXAMPLE_STORAGE_ENGINE) diff --git a/sql/examples/ha_example.cc b/sql/examples/ha_example.cc index 924ced816aa..19c686ee495 100644 --- a/sql/examples/ha_example.cc +++ b/sql/examples/ha_example.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -72,6 +71,31 @@ #ifdef HAVE_EXAMPLE_DB #include "ha_example.h" + +handlerton example_hton= { + "EXAMPLE", + SHOW_OPTION_YES, + "Example storage engine", + DB_TYPE_EXAMPLE_DB, + NULL, /* We do need to write one! */ + 0, /* slot */ + 0, /* savepoint size. */ + NULL, /* close_connection */ + NULL, /* savepoint */ + NULL, /* rollback to savepoint */ + NULL, /* release savepoint */ + NULL, /* commit */ + NULL, /* rollback */ + NULL, /* prepare */ + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + HTON_CAN_RECREATE +}; + /* Variables for example share methods */ static HASH example_open_tables; // Hash used to track open tables pthread_mutex_t example_mutex; // This is the mutex we use to init the hash @@ -179,13 +203,23 @@ static int free_share(EXAMPLE_SHARE *share) } +ha_example::ha_example(TABLE *table_arg) + :handler(&example_hton, table_arg) +{} + /* If frm_error() is called then we will use this to to find out what file extentions exist for the storage engine. This is also used by the default rename_table and delete_table method in handler.cc. */ +static const char *ha_example_exts[] = { + NullS +}; + const char **ha_example::bas_ext() const -{ static const char *ext[]= { NullS }; return ext; } +{ + return ha_example_exts; +} /* @@ -412,7 +446,7 @@ int ha_example::rnd_next(byte *buf) position() is called after each call to rnd_next() if the data needs to be ordered. You can do something like the following to store the position: - ha_store_ptr(ref, ref_length, current_position); + my_store_ptr(ref, ref_length, current_position); The server uses ref to store data. ref_length in the above case is the size needed to store current_position. 
ref is just a byte array @@ -445,6 +479,8 @@ int ha_example::rnd_pos(byte * buf, byte *pos) /* ::info() is used to return information to the optimizer. + see my_base.h for the complete description + Currently this table handler doesn't implement most of the fields really needed. SHOW also makes use of this data Another note, you will probably want to have the following in your diff --git a/sql/examples/ha_example.h b/sql/examples/ha_example.h index cb8a8465737..d6ec93cf97f 100644 --- a/sql/examples/ha_example.h +++ b/sql/examples/ha_example.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -45,9 +44,7 @@ class ha_example: public handler EXAMPLE_SHARE *share; /* Shared lock info */ public: - ha_example(TABLE *table): handler(table) - { - } + ha_example(TABLE *table_arg); ~ha_example() { } diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc index a3b05d298c2..00f927aa7b7 100644 --- a/sql/examples/ha_tina.cc +++ b/sql/examples/ha_tina.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -54,6 +53,30 @@ pthread_mutex_t tina_mutex; static HASH tina_open_tables; static int tina_init= 0; +handlerton tina_hton= { + "CSV", + SHOW_OPTION_YES, + "CSV storage engine", + DB_TYPE_CSV_DB, + NULL, /* One needs to be written! */ + 0, /* slot */ + 0, /* savepoint size. */ + NULL, /* close_connection */ + NULL, /* savepoint */ + NULL, /* rollback to savepoint */ + NULL, /* release savepoint */ + NULL, /* commit */ + NULL, /* rollback */ + NULL, /* prepare */ + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + HTON_CAN_RECREATE +}; + /***************************************************************************** ** TINA tables *****************************************************************************/ @@ -77,13 +100,34 @@ static byte* tina_get_key(TINA_SHARE *share,uint *length, return (byte*) share->table_name; } + +int free_mmap(TINA_SHARE *share) +{ + DBUG_ENTER("ha_tina::free_mmap"); + if (share->mapped_file) + { + /* + Invalidate the mapped in pages. Some operating systems (eg OpenBSD) + would reuse already cached pages even if the file has been altered + using fd based I/O. This may be optimized by perhaps only invalidating + the last page but optimization of deprecated code is not important. + */ + msync(share->mapped_file, 0, MS_INVALIDATE); + if (munmap(share->mapped_file, share->file_stat.st_size)) + DBUG_RETURN(1); + } + share->mapped_file= NULL; + DBUG_RETURN(0); +} + /* Reloads the mmap file. 
*/ int get_mmap(TINA_SHARE *share, int write) { DBUG_ENTER("ha_tina::get_mmap"); - if (share->mapped_file && munmap(share->mapped_file, share->file_stat.st_size)) + + if (free_mmap(share)) DBUG_RETURN(1); if (my_fstat(share->data_file, &share->file_stat, MYF(MY_WME)) == -1) @@ -160,16 +204,18 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table) share->table_name_length=length; share->table_name=tmp_name; strmov(share->table_name,table_name); - fn_format(data_file_name, table_name, "", ".CSV",MY_REPLACE_EXT|MY_UNPACK_FILENAME); + fn_format(data_file_name, table_name, "", ".CSV", + MY_REPLACE_EXT | MY_UNPACK_FILENAME); + + if ((share->data_file= my_open(data_file_name, O_RDWR|O_APPEND, + MYF(0))) == -1) + goto error; + if (my_hash_insert(&tina_open_tables, (byte*) share)) goto error; thr_lock_init(&share->lock); pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); - if ((share->data_file= my_open(data_file_name, O_RDWR|O_APPEND, - MYF(0))) == -1) - goto error2; - /* We only use share->data_file for writing, so we scan to the end to append */ if (my_seek(share->data_file, 0, SEEK_END, MYF(0)) == MY_FILEPOS_ERROR) goto error2; @@ -188,6 +234,7 @@ error3: error2: thr_lock_delete(&share->lock); pthread_mutex_destroy(&share->mutex); + hash_delete(&tina_open_tables, (byte*) share); error: pthread_mutex_unlock(&tina_mutex); my_free((gptr) share, MYF(0)); @@ -206,8 +253,7 @@ static int free_share(TINA_SHARE *share) int result_code= 0; if (!--share->use_count){ /* Drop the mapped file */ - if (share->mapped_file) - munmap(share->mapped_file, share->file_stat.st_size); + free_mmap(share); result_code= my_close(share->data_file,MYF(0)); hash_delete(&tina_open_tables, (byte*) share); thr_lock_delete(&share->lock); @@ -219,7 +265,6 @@ static int free_share(TINA_SHARE *share) DBUG_RETURN(result_code); } - bool tina_end() { if (tina_init) @@ -245,6 +290,21 @@ byte * find_eoln(byte *data, off_t begin, off_t end) return 0; } + +ha_tina::ha_tina(TABLE *table_arg) + 
:handler(&tina_hton, table_arg), + /* + These definitions are found in hanler.h + These are not probably completely right. + */ + current_position(0), next_position(0), chain_alloced(0), + chain_size(DEFAULT_CHAIN_LENGTH), records_is_known(0) +{ + /* Set our original buffers from pre-allocated memory */ + buffer.set(byte_buffer, IO_SIZE, system_charset_info); + chain= chain_buffer; +} + /* Encode a buffer into the quoted format. */ @@ -391,7 +451,7 @@ int ha_tina::find_current_row(byte *buf) } next_position= (end_ptr - share->mapped_file)+1; /* Maybe use \N for null? */ - memset(buf, 0, table->null_bytes); /* We do not implement nulls! */ + memset(buf, 0, table->s->null_bytes); /* We do not implement nulls! */ DBUG_RETURN(0); } @@ -400,8 +460,15 @@ int ha_tina::find_current_row(byte *buf) If frm_error() is called in table.cc this is called to find out what file extensions exist for this handler. */ +static const char *ha_tina_exts[] = { + ".CSV", + NullS +}; + const char **ha_tina::bas_ext() const -{ static const char *ext[]= { ".CSV", NullS }; return ext; } +{ + return ha_tina_exts; +} /* @@ -442,13 +509,20 @@ int ha_tina::write_row(byte * buf) int size; DBUG_ENTER("ha_tina::write_row"); - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); size= encode_quote(buf); + /* + we are going to alter the file so we must invalidate the in memory pages + otherwise we risk a race between the in memory pages and the disk pages. 
+ */ + if (free_mmap(share)) + DBUG_RETURN(-1); + if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP))) DBUG_RETURN(-1); @@ -461,6 +535,7 @@ int ha_tina::write_row(byte * buf) */ if (get_mmap(share, 0) > 0) DBUG_RETURN(-1); + records++; DBUG_RETURN(0); } @@ -478,7 +553,8 @@ int ha_tina::update_row(const byte * old_data, byte * new_data) int size; DBUG_ENTER("ha_tina::update_row"); - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); @@ -488,8 +564,26 @@ int ha_tina::update_row(const byte * old_data, byte * new_data) if (chain_append()) DBUG_RETURN(-1); + /* + we are going to alter the file so we must invalidate the in memory pages + otherwise we risk a race between the in memory pages and the disk pages. + */ + if (free_mmap(share)) + DBUG_RETURN(-1); + if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP))) DBUG_RETURN(-1); + + /* + Ok, this is means that we will be doing potentially bad things + during a bulk update on some OS'es. Ideally, we should extend the length + of the file, redo the mmap and then write all the updated rows. Upon + finishing the bulk update, truncate the file length to the final length. + Since this code is all being deprecated, not point now to optimize. 
+ */ + if (get_mmap(share, 0) > 0) + DBUG_RETURN(-1); + DBUG_RETURN(0); } @@ -505,7 +599,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data) int ha_tina::delete_row(const byte * buf) { DBUG_ENTER("ha_tina::delete_row"); - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); if (chain_append()) DBUG_RETURN(-1); @@ -624,6 +718,7 @@ int ha_tina::rnd_init(bool scan) current_position= next_position= 0; records= 0; + records_is_known= 0; chain_ptr= chain; #ifdef HAVE_MADVISE if (scan) @@ -648,7 +743,8 @@ int ha_tina::rnd_next(byte *buf) { DBUG_ENTER("ha_tina::rnd_next"); - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); current_position= next_position; if (!share->mapped_file) @@ -664,7 +760,7 @@ int ha_tina::rnd_next(byte *buf) In the case of an order by rows will need to be sorted. ::position() is called after each call to ::rnd_next(), the data it stores is to a byte array. You can store this - data via ha_store_ptr(). ref_length is a variable defined to the + data via my_store_ptr(). ref_length is a variable defined to the class that is the sizeof() of position being stored. In our case its just a position. Look at the bdb code if you want to see a case where something other then a number is stored. @@ -672,21 +768,22 @@ int ha_tina::rnd_next(byte *buf) void ha_tina::position(const byte *record) { DBUG_ENTER("ha_tina::position"); - ha_store_ptr(ref, ref_length, current_position); + my_store_ptr(ref, ref_length, current_position); DBUG_VOID_RETURN; } /* Used to fetch a row from a posiion stored with ::position(). - ha_get_ptr() retrieves the data for you. + my_get_ptr() retrieves the data for you. 
*/ int ha_tina::rnd_pos(byte * buf, byte *pos) { DBUG_ENTER("ha_tina::rnd_pos"); - statistic_increment(ha_read_rnd_count,&LOCK_status); - current_position= ha_get_ptr(pos,ref_length); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); + current_position= my_get_ptr(pos,ref_length); DBUG_RETURN(find_current_row(buf)); } @@ -699,7 +796,7 @@ int ha_tina::info(uint flag) { DBUG_ENTER("ha_tina::info"); /* This is a lie, but you don't want the optimizer to see zero or 1 */ - if (records < 2) + if (!records_is_known && records < 2) records= 2; DBUG_RETURN(0); } @@ -734,6 +831,8 @@ int ha_tina::rnd_end() { DBUG_ENTER("ha_tina::rnd_end"); + records_is_known= 1; + /* First position will be truncate position, second will be increment */ if ((chain_ptr - chain) > 0) { @@ -761,15 +860,14 @@ int ha_tina::rnd_end() length= length - (size_t)(ptr->end - ptr->begin); } - /* Truncate the file to the new size */ - if (my_chsize(share->data_file, length, 0, MYF(MY_WME))) + /* Invalidate all cached mmap pages */ + if (free_mmap(share)) DBUG_RETURN(-1); - if (munmap(share->mapped_file, length)) + /* Truncate the file to the new size */ + if (my_chsize(share->data_file, length, 0, MYF(MY_WME))) DBUG_RETURN(-1); - /* We set it to null so that get_mmap() won't try to unmap it */ - share->mapped_file= NULL; if (get_mmap(share, 0) > 0) DBUG_RETURN(-1); } @@ -778,17 +876,25 @@ int ha_tina::rnd_end() } /* - Truncate table and others of its ilk call this. + DELETE without WHERE calls it */ int ha_tina::delete_all_rows() { DBUG_ENTER("ha_tina::delete_all_rows"); + if (!records_is_known) + return (my_errno=HA_ERR_WRONG_COMMAND); + + /* Invalidate all cached mmap pages */ + if (free_mmap(share)) + DBUG_RETURN(-1); + int rc= my_chsize(share->data_file, 0, 0, MYF(MY_WME)); if (get_mmap(share, 0) > 0) DBUG_RETURN(-1); + records=0; DBUG_RETURN(rc); } @@ -816,21 +922,6 @@ THR_LOCK_DATA **ha_tina::store_lock(THD *thd, } /* - Range optimizer calls this. 
- I need to update the information on this. -*/ -ha_rows ha_tina::records_in_range(int inx, - const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag) -{ - DBUG_ENTER("ha_tina::records_in_range "); - DBUG_RETURN(records); // Good guess -} - - -/* Create a table. You do not want to leave the table open after a call to this (the database will call ::open() if it needs to). */ diff --git a/sql/examples/ha_tina.h b/sql/examples/ha_tina.h index 266db1bc1fe..98cba8bf4cd 100644 --- a/sql/examples/ha_tina.h +++ b/sql/examples/ha_tina.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -48,19 +47,10 @@ class ha_tina: public handler tina_set *chain_ptr; byte chain_alloced; uint32 chain_size; + bool records_is_known; - public: - ha_tina(TABLE *table): handler(table), - /* - These definitions are found in hanler.h - Theses are not probably completely right. 
- */ - current_position(0), next_position(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH) - { - /* Set our original buffers from pre-allocated memory */ - buffer.set(byte_buffer, IO_SIZE, system_charset_info); - chain = chain_buffer; - } +public: + ha_tina(TABLE *table_arg); ~ha_tina() { if (chain_alloced) @@ -88,7 +78,6 @@ class ha_tina: public handler */ virtual double scan_time() { return (double) (records+deleted) / 20.0+10; } /* The next method will never be called */ - virtual double read_time(ha_rows rows) { DBUG_ASSERT(0); return((double) rows / 20.0+1); } virtual bool fast_key_read() { return 1;} /* TODO: return actual upper bound of number of records in the table. @@ -120,12 +109,6 @@ class ha_tina: public handler int reset(void); int external_lock(THD *thd, int lock_type); int delete_all_rows(void); - ha_rows records_in_range(int inx, const byte *start_key,uint start_key_len, - enum ha_rkey_function start_search_flag, - const byte *end_key,uint end_key_len, - enum ha_rkey_function end_search_flag); -// int delete_table(const char *from); -// int rename_table(const char * from, const char * to); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, @@ -138,3 +121,4 @@ class ha_tina: public handler }; bool tina_end(); + diff --git a/sql/field.cc b/sql/field.cc index acc837c1d37..152c1bdc364 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -38,7 +37,7 @@ Instansiate templates and static variables *****************************************************************************/ -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class List<create_field>; template class List_iterator<create_field>; #endif @@ -47,6 +46,8 @@ uchar Field_null::null[1]={1}; const char field_separator=','; #define DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE 320 +#define LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE 128 +#define DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE 128 #define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \ ((ulong) ((LL(1) << min(arg, 4) * 8) - LL(1))) @@ -57,9 +58,9 @@ const char field_separator=','; following #defines describe that gap and how to canculate number of fields and index of field in thia array. */ -#define FIELDTYPE_TEAR_FROM (MYSQL_TYPE_NEWDATE+1) -#define FIELDTYPE_TEAR_TO (MYSQL_TYPE_ENUM-1) -#define FIELDTYPE_NUM (FIELDTYPE_TEAR_FROM + (255-FIELDTYPE_TEAR_TO)) +#define FIELDTYPE_TEAR_FROM (MYSQL_TYPE_BIT + 1) +#define FIELDTYPE_TEAR_TO (MYSQL_TYPE_NEWDECIMAL - 1) +#define FIELDTYPE_NUM (FIELDTYPE_TEAR_FROM + (255 - FIELDTYPE_TEAR_TO)) inline int field_type2index (enum_field_types field_type) { return (field_type < FIELDTYPE_TEAR_FROM ? 
@@ -72,118 +73,126 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= /* MYSQL_TYPE_DECIMAL -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_DECIMAL, MYSQL_TYPE_DECIMAL, + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_NEWDECIMAL, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG - MYSQL_TYPE_DECIMAL, MYSQL_TYPE_DECIMAL, + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_NEWDECIMAL, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_DECIMAL, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 MYSQL_TYPE_DECIMAL, MYSQL_TYPE_DECIMAL, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_TINY -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_TINY, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG MYSQL_TYPE_SHORT, MYSQL_TYPE_LONG, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE MYSQL_TYPE_FLOAT, 
MYSQL_TYPE_DOUBLE, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_TINY, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_TINY, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_SHORT -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_DECIMAL, MYSQL_TYPE_SHORT, + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_SHORT, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG MYSQL_TYPE_SHORT, MYSQL_TYPE_LONG, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_SHORT, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_SHORT, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_SHORT, - //MYSQL_TYPE_NEWDATE <14> - 
MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_SHORT, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_LONG -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_DECIMAL, MYSQL_TYPE_LONG, + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_LONG, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG MYSQL_TYPE_LONG, MYSQL_TYPE_LONG, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_LONG, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_LONG, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONG, + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_LONG, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_LONG, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + 
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_FLOAT -> */ { @@ -194,25 +203,27 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_FLOAT, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_FLOAT, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_DOUBLE, MYSQL_TYPE_FLOAT, + MYSQL_TYPE_FLOAT, MYSQL_TYPE_INT24, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_FLOAT, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_FLOAT, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_DOUBLE -> */ { @@ -223,30 +234,32 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= 
//MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_DOUBLE, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_INT24, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_DOUBLE, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_NULL -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_DECIMAL, MYSQL_TYPE_TINY, + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_TINY, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG MYSQL_TYPE_SHORT, MYSQL_TYPE_LONG, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE @@ -259,192 +272,206 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= MYSQL_TYPE_NEWDATE, MYSQL_TYPE_TIME, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR MYSQL_TYPE_DATETIME, MYSQL_TYPE_YEAR, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_NEWDATE, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_ENUM, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_NEWDATE, 
MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_BIT, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_ENUM, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB MYSQL_TYPE_SET, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY }, /* MYSQL_TYPE_TIMESTAMP -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_TIMESTAMP, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_DATETIME, MYSQL_TYPE_DATETIME, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_DATETIME, MYSQL_TYPE_VAR_STRING, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_DATETIME, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_DATETIME, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, 
MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_LONGLONG -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_DECIMAL, MYSQL_TYPE_LONGLONG, + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_LONGLONG, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONGLONG, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_LONGLONG, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_LONGLONG, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 MYSQL_TYPE_LONGLONG, MYSQL_TYPE_LONG, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_LONGLONG, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_LONGLONG, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_INT24 -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_DECIMAL, MYSQL_TYPE_INT24, + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_INT24, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG MYSQL_TYPE_INT24, MYSQL_TYPE_LONG, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, 
//MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_INT24, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_INT24, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_INT24, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_INT24, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_DATE -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_NEWDATE, MYSQL_TYPE_DATETIME, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_NEWDATE, MYSQL_TYPE_DATETIME, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_DATETIME, 
MYSQL_TYPE_VAR_STRING, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_NEWDATE, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_DATETIME, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_TIME -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_TIME, MYSQL_TYPE_DATETIME, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIME, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_DATETIME, MYSQL_TYPE_VAR_STRING, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_DATETIME, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_DATETIME, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, 
//MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_DATETIME -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_DATETIME, MYSQL_TYPE_DATETIME, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_DATETIME, MYSQL_TYPE_DATETIME, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_DATETIME, MYSQL_TYPE_VAR_STRING, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_DATETIME, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_DATETIME, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - 
MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_YEAR -> */ { @@ -455,112 +482,213 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_YEAR, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_YEAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 MYSQL_TYPE_LONGLONG, MYSQL_TYPE_INT24, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_YEAR, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_YEAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_NEWDATE -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP MYSQL_TYPE_NEWDATE, 
MYSQL_TYPE_DATETIME, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME MYSQL_TYPE_NEWDATE, MYSQL_TYPE_DATETIME, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_DATETIME, MYSQL_TYPE_VAR_STRING, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_NEWDATE, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_DATETIME, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_NEWDATE, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR + }, + /* MYSQL_TYPE_VARCHAR -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, 
MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR + }, + /* MYSQL_TYPE_BIT -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_BIT, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_BIT, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR + }, + /* MYSQL_TYPE_NEWDECIMAL -> */ + { + //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_NEWDECIMAL, + //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_NEWDECIMAL, + //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE + MYSQL_TYPE_DOUBLE, MYSQL_TYPE_DOUBLE, + //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_NEWDECIMAL, + //MYSQL_TYPE_DATE MYSQL_TYPE_TIME + MYSQL_TYPE_VARCHAR, 
MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_NEWDECIMAL, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_ENUM -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_ENUM, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_ENUM, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + 
MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_SET -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_SET, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_SET, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - 
MYSQL_TYPE_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_TINY_BLOB -> */ { @@ -578,10 +706,12 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, - //MYSQL_TYPE_NEWDATE <14> + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_BIT <16>-<245> MYSQL_TYPE_TINY_BLOB, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_TINY_BLOB, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB MYSQL_TYPE_TINY_BLOB, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB @@ -607,10 +737,12 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, - //MYSQL_TYPE_NEWDATE <14> + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_BIT <16>-<245> MYSQL_TYPE_MEDIUM_BLOB, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_MEDIUM_BLOB, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_MEDIUM_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB @@ -636,10 +768,12 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, - //MYSQL_TYPE_NEWDATE <14> + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_BIT <16>-<245> MYSQL_TYPE_LONG_BLOB, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_LONG_BLOB, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, 
//MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB @@ -665,10 +799,12 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, - //MYSQL_TYPE_NEWDATE <14> + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, + //MYSQL_TYPE_BIT <16>-<245> MYSQL_TYPE_BLOB, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_BLOB, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB MYSQL_TYPE_BLOB, MYSQL_TYPE_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB @@ -681,31 +817,33 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= /* MYSQL_TYPE_VAR_STRING -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + 
//MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR }, /* MYSQL_TYPE_STRING -> */ { @@ -723,45 +861,49 @@ static enum_field_types field_types_merge_rules [FIELDTYPE_NUM][FIELDTYPE_NUM]= MYSQL_TYPE_STRING, MYSQL_TYPE_STRING, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR MYSQL_TYPE_STRING, MYSQL_TYPE_STRING, - //MYSQL_TYPE_NEWDATE <14> + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_STRING, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> MYSQL_TYPE_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_STRING, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_STRING, MYSQL_TYPE_STRING, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB MYSQL_TYPE_STRING, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY MYSQL_TYPE_STRING, MYSQL_TYPE_STRING }, /* MYSQL_TYPE_GEOMETRY -> */ { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_NULL MYSQL_TYPE_TIMESTAMP - MYSQL_TYPE_GEOMETRY, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_GEOMETRY, MYSQL_TYPE_VARCHAR, 
//MYSQL_TYPE_LONGLONG MYSQL_TYPE_INT24 - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATE MYSQL_TYPE_TIME - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_VAR_STRING, - //MYSQL_TYPE_NEWDATE <14> - MYSQL_TYPE_VAR_STRING, - //<246> MYSQL_TYPE_ENUM - MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_BIT <16>-<245> + MYSQL_TYPE_VARCHAR, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB - MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_TINY_BLOB, + MYSQL_TYPE_VARCHAR, MYSQL_TYPE_TINY_BLOB, //MYSQL_TYPE_MEDIUM_BLOB MYSQL_TYPE_LONG_BLOB MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, //MYSQL_TYPE_BLOB MYSQL_TYPE_VAR_STRING - MYSQL_TYPE_BLOB, MYSQL_TYPE_VAR_STRING, + MYSQL_TYPE_BLOB, MYSQL_TYPE_VARCHAR, //MYSQL_TYPE_STRING MYSQL_TYPE_GEOMETRY MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY } @@ -791,7 +933,7 @@ enum_field_types Field::field_type_merge(enum_field_types a, static Item_result field_types_result_type [FIELDTYPE_NUM]= { //MYSQL_TYPE_DECIMAL MYSQL_TYPE_TINY - REAL_RESULT, INT_RESULT, + DECIMAL_RESULT, INT_RESULT, //MYSQL_TYPE_SHORT MYSQL_TYPE_LONG INT_RESULT, INT_RESULT, //MYSQL_TYPE_FLOAT MYSQL_TYPE_DOUBLE @@ -804,10 +946,12 @@ static Item_result field_types_result_type [FIELDTYPE_NUM]= STRING_RESULT, STRING_RESULT, //MYSQL_TYPE_DATETIME MYSQL_TYPE_YEAR STRING_RESULT, INT_RESULT, - //MYSQL_TYPE_NEWDATE <14> + //MYSQL_TYPE_NEWDATE MYSQL_TYPE_VARCHAR + STRING_RESULT, STRING_RESULT, + //MYSQL_TYPE_BIT <16>-<245> STRING_RESULT, - //<246> MYSQL_TYPE_ENUM - STRING_RESULT, + //MYSQL_TYPE_NEWDECIMAL MYSQL_TYPE_ENUM + DECIMAL_RESULT, STRING_RESULT, //MYSQL_TYPE_SET MYSQL_TYPE_TINY_BLOB STRING_RESULT, STRING_RESULT, //MYSQL_TYPE_MEDIUM_BLOB 
MYSQL_TYPE_LONG_BLOB @@ -841,6 +985,59 @@ Item_result Field::result_merge_type(enum_field_types field_type) Static help functions *****************************************************************************/ + +/* + Check whether a field type can be partially indexed by a key + + This is a static method, rather than a virtual function, because we need + to check the type of a non-Field in mysql_alter_table(). + + SYNOPSIS + type_can_have_key_part() + type field type + + RETURN + TRUE Type can have a prefixed key + FALSE Type can not have a prefixed key +*/ + +bool Field::type_can_have_key_part(enum enum_field_types type) +{ + switch (type) { + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_TINY_BLOB: + case MYSQL_TYPE_MEDIUM_BLOB: + case MYSQL_TYPE_LONG_BLOB: + case MYSQL_TYPE_BLOB: + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_GEOMETRY: + return TRUE; + default: + return FALSE; + } +} + + +/* + Numeric fields base class constructor +*/ +Field_num::Field_num(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, utype unireg_check_arg, + const char *field_name_arg, + struct st_table *table_arg, + uint8 dec_arg, bool zero_arg, bool unsigned_arg) + :Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg), + dec(dec_arg),zerofill(zero_arg),unsigned_flag(unsigned_arg) +{ + if (zerofill) + flags|=ZEROFILL_FLAG; + if (unsigned_flag) + flags|=UNSIGNED_FLAG; +} + + void Field_num::prepend_zeros(String *value) { int diff; @@ -871,33 +1068,78 @@ void Field_num::prepend_zeros(String *value) Make this multi-byte-character safe RETURN - 0 ok - 1 error + 0 OK + 1 error. 
A warning is pushed if field_name != 0 */ -bool test_if_int(const char *str, int length, const char *int_end, - CHARSET_INFO *cs) +bool Field::check_int(const char *str, int length, const char *int_end, + CHARSET_INFO *cs) { + const char *end; if (str == int_end) - return 0; // Empty string - const char *end=str+length; + { + char buff[128]; + String tmp(buff,(uint32) sizeof(buff), system_charset_info); + tmp.copy(str, length, system_charset_info); + push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + "integer", tmp.c_ptr(), field_name, + (ulong) table->in_use->row_count); + return 1; // Empty string + } + end= str+length; if ((str= int_end) == end) - return 1; // All digits was used + return 0; // OK; All digits was used /* Allow end .0000 */ if (*str == '.') { - for (str++ ; str != end && *str == '0'; str++) ; + for (str++ ; str != end && *str == '0'; str++) + ; } /* Allow end space */ - for (str++ ; str != end ; str++) + for ( ; str != end ; str++) { if (!my_isspace(cs,*str)) - return 0; + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); + return 1; + } } - return 1; + return 0; } + +/* + Process decimal library return codes and issue warnings for overflow and + truncation. 
+ + SYNOPSIS + Field::warn_if_overflow() + op_result decimal library return code (E_DEC_* see include/decimal.h) + + RETURN + 1 there was overflow + 0 no error or some other errors except overflow +*/ + +int Field::warn_if_overflow(int op_result) +{ + if (op_result == E_DEC_OVERFLOW) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + return 1; + } + if (op_result == E_DEC_TRUNCATED) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); + /* We return 0 here as this is not a critical issue */ + } + return 0; +} + + #ifdef NOT_USED static bool test_if_real(const char *str,int length, CHARSET_INFO *cs) { @@ -953,22 +1195,42 @@ static bool test_if_real(const char *str,int length, CHARSET_INFO *cs) #endif -/**************************************************************************** -** Functions for the base classes -** This is an unpacked number. -****************************************************************************/ +/* + Interpret field value as an integer but return the result as a string. + + This is used for printing bit_fields as numbers while debugging +*/ + +String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_val) +{ + CHARSET_INFO *cs= &my_charset_bin; + uint length; + longlong value= val_int(); + if (val_buffer->alloc(MY_INT64_NUM_DECIMAL_DIGITS)) + return 0; + length= (uint) (*cs->cset->longlong10_to_str)(cs, (char*) val_buffer->ptr(), + MY_INT64_NUM_DECIMAL_DIGITS, + unsigned_val ? 10 : -10, + value); + val_buffer->length(length); + return val_buffer; +} + + +/* This is used as a table name when the table structure is not set up */ +const char *unknown_table_name= 0; Field::Field(char *ptr_arg,uint32 length_arg,uchar *null_ptr_arg, uchar null_bit_arg, utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg) - :ptr(ptr_arg),null_ptr(null_ptr_arg), + :ptr(ptr_arg), null_ptr(null_ptr_arg), table(table_arg),orig_table(table_arg), - table_name(table_arg ? 
table_arg->table_name : 0), + table_name(table_arg ? &table_arg->alias : &unknown_table_name), field_name(field_name_arg), query_id(0), key_start(0), part_of_key(0), part_of_sortkey(0), unireg_check(unireg_check_arg), - field_length(length_arg),null_bit(null_bit_arg) + field_length(length_arg), null_bit(null_bit_arg) { flags=null_ptr ? 0: NOT_NULL_FLAG; comment.str= (char*) ""; @@ -1001,55 +1263,187 @@ bool Field::send_binary(Protocol *protocol) } +my_decimal *Field::val_decimal(my_decimal *decimal) +{ + /* This never have to be called */ + DBUG_ASSERT(0); + return 0; +} + + void Field_num::add_zerofill_and_unsigned(String &res) const { if (unsigned_flag) - res.append(" unsigned"); + res.append(STRING_WITH_LEN(" unsigned")); if (zerofill) - res.append(" zerofill"); + res.append(STRING_WITH_LEN(" zerofill")); } -void Field_num::make_field(Send_field *field) + +void Field::make_field(Send_field *field) { - /* table_cache_key is not set for temp tables */ - if (orig_table->table_cache_key) + if (orig_table->s->table_cache_key && *(orig_table->s->table_cache_key)) { - field->db_name= orig_table->table_cache_key; - field->org_table_name= orig_table->real_name; + field->org_table_name= orig_table->s->table_name; + field->db_name= orig_table->s->table_cache_key; } else - { - field->db_name= field->org_table_name= ""; - } - field->table_name= orig_table->table_name; - field->col_name=field->org_col_name=field_name; + field->org_table_name= field->db_name= ""; + field->table_name= orig_table->alias; + field->col_name= field->org_col_name= field_name; field->charsetnr= charset()->number; field->length=field_length; field->type=type(); field->flags=table->maybe_null ? 
(flags & ~NOT_NULL_FLAG) : flags; - field->decimals=dec; + field->decimals= 0; } -void Field_str::make_field(Send_field *field) +/* + Conversion from decimal to longlong with checking overflow and + setting correct value (min/max) in case of overflow + + SYNOPSIS + Field::convert_decimal2longlong() + val value which have to be converted + unsigned_flag type of integer in which we convert val + err variable to pass error code + + RETURN + value converted from val +*/ +longlong Field::convert_decimal2longlong(const my_decimal *val, + bool unsigned_flag, int *err) { - /* table_cache_key is not set for temp tables */ - if (orig_table->table_cache_key) + longlong i; + if (unsigned_flag) { - field->db_name= orig_table->table_cache_key; - field->org_table_name= orig_table->real_name; + if (val->sign()) + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + i= 0; + *err= 1; + } + else if (warn_if_overflow(my_decimal2int(E_DEC_ERROR & + ~E_DEC_OVERFLOW & ~E_DEC_TRUNCATED, + val, TRUE, &i))) + { + i= ~(longlong) 0; + *err= 1; + } } - else + else if (warn_if_overflow(my_decimal2int(E_DEC_ERROR & + ~E_DEC_OVERFLOW & ~E_DEC_TRUNCATED, + val, FALSE, &i))) { - field->db_name= field->org_table_name= ""; + i= (val->sign() ? LONGLONG_MIN : LONGLONG_MAX); + *err= 1; } - field->table_name= orig_table->table_name; - field->col_name=field->org_col_name=field_name; - field->charsetnr= charset()->number; - field->length=field_length; - field->type=type(); - field->flags=table->maybe_null ? (flags & ~NOT_NULL_FLAG) : flags; - field->decimals=0; + return i; +} + + +/* + Storing decimal in integer fields. 
+ + SYNOPSIS + Field_num::store_decimal() + val value for storing + + NOTE + This method is used by all integer fields, real/decimal redefine it + + RETURN + 0 OK + != 0 error +*/ + +int Field_num::store_decimal(const my_decimal *val) +{ + int err= 0; + longlong i= convert_decimal2longlong(val, unsigned_flag, &err); + return test(err | store(i, unsigned_flag)); +} + + +/* + Return decimal value of integer field + + SYNOPSIS + Field_num::val_decimal() + decimal_value buffer for storing decimal value + + NOTE + This method is used by all integer fields, real/decimal redefine it + All longlong values fit in our decimal buffer which cal store 8*9=72 + digits of integer number + + RETURN + pointer to decimal buffer with value of field +*/ + +my_decimal* Field_num::val_decimal(my_decimal *decimal_value) +{ + DBUG_ASSERT(result_type() == INT_RESULT); + longlong nr= val_int(); + int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value); + return decimal_value; +} + + +Field_str::Field_str(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, utype unireg_check_arg, + const char *field_name_arg, + struct st_table *table_arg, CHARSET_INFO *charset_arg) + :Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg) +{ + field_charset= charset_arg; + if (charset_arg->state & MY_CS_BINSORT) + flags|=BINARY_FLAG; + field_derivation= DERIVATION_IMPLICIT; +} + + +void Field_num::make_field(Send_field *field) +{ + Field::make_field(field); + field->decimals= dec; +} + +/* + Decimal representation of Field_str + + SYNOPSIS + Field_str::store_decimal() + d value for storing + + NOTE + Field_str is the base class for fields like Field_enum, Field_date and some + similar. 
Some dates use fraction and also string value should be + converted to floating point value according our rules, so we use double + to store value of decimal in string + + RETURN + 0 OK + != 0 error +*/ + +int Field_str::store_decimal(const my_decimal *d) +{ + double val; + /* TODO: use decimal2string? */ + int err= warn_if_overflow(my_decimal2double(E_DEC_FATAL_ERROR & + ~E_DEC_OVERFLOW, d, &val)); + return err | store(val); +} + + +my_decimal *Field_str::val_decimal(my_decimal *decimal_value) +{ + longlong nr= val_int(); + int2my_decimal(E_DEC_FATAL_ERROR, nr, 0, decimal_value); + return decimal_value; } @@ -1063,11 +1457,12 @@ uint Field::fill_cache_field(CACHE_FIELD *copy) { copy->blob_field=(Field_blob*) this; copy->strip=0; - copy->length-=table->blob_ptr_size; + copy->length-= table->s->blob_ptr_size; return copy->length; } - else if (!zero_pack() && (type() == FIELD_TYPE_STRING && copy->length > 4 || - type() == FIELD_TYPE_VAR_STRING)) + else if (!zero_pack() && + (type() == MYSQL_TYPE_STRING && copy->length >= 4 && + copy->length < 256)) { copy->strip=1; /* Remove end space */ store_length= 2; @@ -1109,11 +1504,11 @@ bool Field::get_time(TIME *ltime) Needs to be changed if/when we want to support different time formats */ -void Field::store_time(TIME *ltime,timestamp_type type) +int Field::store_time(TIME *ltime, timestamp_type type_arg) { char buff[MAX_DATE_STRING_REP_LENGTH]; uint length= (uint) my_TIME_to_str(ltime, buff); - store(buff, length, &my_charset_bin); + return store(buff, length, &my_charset_bin); } @@ -1122,13 +1517,50 @@ bool Field::optimize_range(uint idx, uint part) return test(table->file->index_flags(idx, part, 1) & HA_READ_RANGE); } + +Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type __attribute__((unused))) +{ + Field *tmp; + if (!(tmp= (Field*) memdup_root(root,(char*) this,size_of()))) + return 0; + + if (tmp->table->maybe_null) + tmp->flags&= ~NOT_NULL_FLAG; + tmp->table= new_table; + 
tmp->key_start.init(0); + tmp->part_of_key.init(0); + tmp->part_of_sortkey.init(0); + tmp->unireg_check=Field::NONE; + tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG | + ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG); + tmp->reset_fields(); + return tmp; +} + + +Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field *tmp; + if ((tmp= new_field(root, new_table, table == new_table))) + { + tmp->ptr= new_ptr; + tmp->null_ptr= new_null_ptr; + tmp->null_bit= new_null_bit; + } + return tmp; +} + + /**************************************************************************** Field_null, a field that always return NULL ****************************************************************************/ void Field_null::sql_type(String &res) const { - res.set_ascii("null", 4); + res.set_ascii(STRING_WITH_LEN("null")); } @@ -1140,7 +1572,7 @@ void Field_null::sql_type(String &res) const int Field_decimal::reset(void) { - Field_decimal::store("0",1,&my_charset_bin); + Field_decimal::store(STRING_WITH_LEN("0"),&my_charset_bin); return 0; } @@ -1185,7 +1617,7 @@ void Field_decimal::overflow(bool negative) int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs) { - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; String tmp(buff,sizeof(buff), &my_charset_bin); /* Convert character set if the old one is multi byte */ @@ -1260,7 +1692,7 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs) from++; if (from == end) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); is_cuted_fields_incr=1; } else if (*from == '+' || *from == '-') // Found some sign ? @@ -1326,7 +1758,7 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs) /* We only have to generate warnings if count_cuted_fields is set. 
This is to avoid extra checks of the number when they are not needed. - Even if this flag is not set, it's ok to increment warnings, if + Even if this flag is not set, it's OK to increment warnings, if it makes the code easer to read. */ @@ -1336,7 +1768,7 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs) for (;from != end && my_isspace(&my_charset_bin, *from); from++) ; if (from != end) // If still something left, warn { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); is_cuted_fields_incr=1; } } @@ -1404,7 +1836,7 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs) int_digits_added_zeros=0; } } - tmp_uint= (tmp_dec+(int_digits_end-int_digits_from)+ + tmp_uint= (uint) (tmp_dec+(int_digits_end-int_digits_from)+ (uint)(frac_digits_from-int_digits_tail_from)+ int_digits_added_zeros); } @@ -1515,7 +1947,7 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs) { if (!is_cuted_fields_incr) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_TRUNCATED, 1); + WARN_DATA_TRUNCATED, 1); return 0; } continue; @@ -1532,7 +1964,13 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs) if (tmp_char != '0') // Losing a non zero digit ? 
{ if (!is_cuted_fields_incr) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + { + /* + This is a note, not a warning, as we don't want to abort + when we cut decimals in strict mode + */ + set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); + } return 0; } continue; @@ -1591,36 +2029,37 @@ int Field_decimal::store(double nr) } -int Field_decimal::store(longlong nr) +int Field_decimal::store(longlong nr, bool unsigned_val) { - if (unsigned_flag && nr < 0) + char buff[22]; + uint length, int_part; + char fyllchar, *to; + + if (nr < 0 && unsigned_flag && !unsigned_val) { overflow(1); return 1; } - char buff[22]; - uint length=(uint) (longlong10_to_str(nr,buff,-10)-buff); - uint int_part=field_length- (dec ? dec+1 : 0); + length= (uint) (longlong10_to_str(nr,buff,unsigned_val ? 10 : -10) - buff); + int_part= field_length- (dec ? dec+1 : 0); if (length > int_part) { - overflow(test(nr < 0L)); /* purecov: inspected */ + overflow(!unsigned_val && nr < 0L); /* purecov: inspected */ return 1; } - else + + fyllchar = zerofill ? (char) '0' : (char) ' '; + to= ptr; + for (uint i=int_part-length ; i-- > 0 ;) + *to++ = fyllchar; + memcpy(to,buff,length); + if (dec) { - char fyllchar = zerofill ? 
(char) '0' : (char) ' '; - char *to=ptr; - for (uint i=int_part-length ; i-- > 0 ;) - *to++ = fyllchar; - memcpy(to,buff,length); - if (dec) - { - to[length]='.'; - bfill(to+length+1,dec,'0'); - } - return 0; + to[length]='.'; + bfill(to+length+1,dec,'0'); } + return 0; } @@ -1737,38 +2176,357 @@ void Field_decimal::sql_type(String &res) const /**************************************************************************** +** Field_new_decimal +****************************************************************************/ + +Field_new_decimal::Field_new_decimal(char *ptr_arg, + uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, + enum utype unireg_check_arg, + const char *field_name_arg, + struct st_table *table_arg, + uint8 dec_arg,bool zero_arg, + bool unsigned_arg) + :Field_num(ptr_arg, len_arg, + null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg, + dec_arg, zero_arg, unsigned_arg) +{ + precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg); + DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION) && + (dec <= DECIMAL_MAX_SCALE)); + bin_size= my_decimal_get_binary_size(precision, dec); +} + + +Field_new_decimal::Field_new_decimal(uint32 len_arg, + bool maybe_null_arg, + const char *name, + struct st_table *t_arg, + uint8 dec_arg, + bool unsigned_arg) + :Field_num((char*) 0, len_arg, + maybe_null_arg ? (uchar*) "": 0, 0, + NONE, name, t_arg, + dec_arg, + 0, unsigned_arg) +{ + precision= my_decimal_length_to_precision(len_arg, dec_arg, unsigned_arg); + DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION) && + (dec <= DECIMAL_MAX_SCALE)); + bin_size= my_decimal_get_binary_size(precision, dec); +} + + +int Field_new_decimal::reset(void) +{ + store_value(&decimal_zero); + return 0; +} + + +/* + Generate max/min decimal value in case of overflow. 
+ + SYNOPSIS + Field_new_decimal::set_value_on_overflow(); + decimal_value buffer for value + sign sign of value which caused overflow +*/ + +void Field_new_decimal::set_value_on_overflow(my_decimal *decimal_value, + bool sign) +{ + DBUG_ENTER("Field_new_decimal::set_value_on_overflow"); + max_my_decimal(decimal_value, precision, decimals()); + if (sign) + { + if (unsigned_flag) + my_decimal_set_zero(decimal_value); + else + decimal_value->sign(TRUE); + } + DBUG_VOID_RETURN; +} + + +/* + Store decimal value in the binary buffer + + SYNOPSIS + store_value(const my_decimal *decimal_value) + decimal_value my_decimal + + DESCRIPTION + checks if decimal_value fits into field size. + if it does, stores the decimal in the buffer using binary format. + Otherwise sets maximal number that can be stored in the field. + + RETURN + 0 ok + 1 error +*/ + +bool Field_new_decimal::store_value(const my_decimal *decimal_value) +{ + int error= 0; + DBUG_ENTER("Field_new_decimal::store_value"); +#ifndef DBUG_OFF + { + char dbug_buff[DECIMAL_MAX_STR_LENGTH+1]; + DBUG_PRINT("enter", ("value: %s", dbug_decimal_as_string(dbug_buff, decimal_value))); + } +#endif + + /* check that we do not try to write negative value in unsigned field */ + if (unsigned_flag && decimal_value->sign()) + { + DBUG_PRINT("info", ("unsigned overflow")); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + decimal_value= &decimal_zero; + } +#ifndef DBUG_OFF + { + char dbug_buff[DECIMAL_MAX_STR_LENGTH+1]; + DBUG_PRINT("info", ("saving with precision %d, scale: %d, value %s", + (int)precision, (int)dec, + dbug_decimal_as_string(dbug_buff, decimal_value))); + } +#endif + + if (warn_if_overflow(my_decimal2binary(E_DEC_FATAL_ERROR & ~E_DEC_OVERFLOW, + decimal_value, ptr, precision, dec))) + { + my_decimal buff; + DBUG_PRINT("info", ("overflow")); + set_value_on_overflow(&buff, decimal_value->sign()); + my_decimal2binary(E_DEC_FATAL_ERROR, &buff, ptr, precision, dec); + error= 1; + } 
+ DBUG_EXECUTE("info", print_decimal_buff(decimal_value, (byte *) ptr, bin_size);); + DBUG_RETURN(error); +} + + +int Field_new_decimal::store(const char *from, uint length, + CHARSET_INFO *charset_arg) +{ + int err; + my_decimal decimal_value; + DBUG_ENTER("Field_new_decimal::store(char*)"); + + if ((err= str2my_decimal(E_DEC_FATAL_ERROR & + ~(E_DEC_OVERFLOW | E_DEC_BAD_NUM), + from, length, charset_arg, &decimal_value)) && + table->in_use->abort_on_warning) + { + /* Because "from" is not NUL-terminated and we use %s in the ER() */ + String from_as_str; + from_as_str.copy(from, length, &my_charset_bin); + + push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + "decimal", from_as_str.c_ptr(), field_name, + (ulong) table->in_use->row_count); + + DBUG_RETURN(err); + } + + switch (err) { + case E_DEC_TRUNCATED: + set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); + break; + case E_DEC_OVERFLOW: + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + set_value_on_overflow(&decimal_value, decimal_value.sign()); + break; + case E_DEC_BAD_NUM: + { + /* Because "from" is not NUL-terminated and we use %s in the ER() */ + String from_as_str; + from_as_str.copy(from, length, &my_charset_bin); + + push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + "decimal", from_as_str.c_ptr(), field_name, + (ulong) table->in_use->row_count); + my_decimal_set_zero(&decimal_value); + + break; + } + } + +#ifndef DBUG_OFF + char dbug_buff[DECIMAL_MAX_STR_LENGTH+1]; + DBUG_PRINT("enter", ("value: %s", + dbug_decimal_as_string(dbug_buff, &decimal_value))); +#endif + store_value(&decimal_value); + DBUG_RETURN(err); +} + + +int Field_new_decimal::store(double nr) +{ + my_decimal decimal_value; + int err; + DBUG_ENTER("Field_new_decimal::store(double)"); + + err= 
double2my_decimal(E_DEC_FATAL_ERROR & ~E_DEC_OVERFLOW, nr, + &decimal_value); + /* + TODO: fix following when double2my_decimal when double2decimal + will return E_DEC_TRUNCATED always correctly + */ + if (!err) + { + double nr2; + my_decimal2double(E_DEC_FATAL_ERROR, &decimal_value, &nr2); + if (nr2 != nr) + err= E_DEC_TRUNCATED; + } + if (err) + { + if (check_overflow(err)) + set_value_on_overflow(&decimal_value, decimal_value.sign()); + /* Only issue a warning if store_value doesn't issue an warning */ + table->in_use->got_warning= 0; + } + if (store_value(&decimal_value)) + err= 1; + else if (err && !table->in_use->got_warning) + err= warn_if_overflow(err); + DBUG_RETURN(err); +} + + +int Field_new_decimal::store(longlong nr, bool unsigned_val) +{ + my_decimal decimal_value; + int err; + + if ((err= int2my_decimal(E_DEC_FATAL_ERROR & ~E_DEC_OVERFLOW, + nr, unsigned_val, &decimal_value))) + { + if (check_overflow(err)) + set_value_on_overflow(&decimal_value, decimal_value.sign()); + /* Only issue a warning if store_value doesn't issue an warning */ + table->in_use->got_warning= 0; + } + if (store_value(&decimal_value)) + err= 1; + else if (err && !table->in_use->got_warning) + err= warn_if_overflow(err); + return err; +} + + +int Field_new_decimal::store_decimal(const my_decimal *decimal_value) +{ + return store_value(decimal_value); +} + + +int Field_new_decimal::store_time(TIME *ltime, timestamp_type t_type) +{ + my_decimal decimal_value; + return store_value(date2my_decimal(ltime, &decimal_value)); +} + + +double Field_new_decimal::val_real(void) +{ + double dbl; + my_decimal decimal_value; + my_decimal2double(E_DEC_FATAL_ERROR, val_decimal(&decimal_value), &dbl); + return dbl; +} + + +longlong Field_new_decimal::val_int(void) +{ + longlong i; + my_decimal decimal_value; + my_decimal2int(E_DEC_FATAL_ERROR, val_decimal(&decimal_value), + unsigned_flag, &i); + return i; +} + + +my_decimal* Field_new_decimal::val_decimal(my_decimal *decimal_value) +{ + 
DBUG_ENTER("Field_new_decimal::val_decimal"); + binary2my_decimal(E_DEC_FATAL_ERROR, ptr, decimal_value, + precision, dec); + DBUG_EXECUTE("info", print_decimal_buff(decimal_value, (byte *) ptr, + bin_size);); + DBUG_RETURN(decimal_value); +} + + +String *Field_new_decimal::val_str(String *val_buffer, + String *val_ptr __attribute__((unused))) +{ + my_decimal decimal_value; + uint fixed_precision= zerofill ? precision : 0; + my_decimal2string(E_DEC_FATAL_ERROR, val_decimal(&decimal_value), + fixed_precision, dec, '0', val_buffer); + return val_buffer; +} + + +int Field_new_decimal::cmp(const char *a,const char*b) +{ + return memcmp(a, b, bin_size); +} + + +void Field_new_decimal::sort_string(char *buff, + uint length __attribute__((unused))) +{ + memcpy(buff, ptr, bin_size); +} + + +void Field_new_decimal::sql_type(String &str) const +{ + CHARSET_INFO *cs= str.charset(); + str.length(cs->cset->snprintf(cs, (char*) str.ptr(), str.alloced_length(), + "decimal(%d,%d)", precision, (int)dec)); + add_zerofill_and_unsigned(str); +} + + +/**************************************************************************** ** tiny int ****************************************************************************/ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs) { - int not_used; // We can ignore result from str2int char *end; - long tmp= my_strntol(cs, from, len, 10, &end, ¬_used); - int error= 0; + int error; if (unsigned_flag) { - if (tmp < 0) - { - tmp=0; /* purecov: inspected */ - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; - } - else if (tmp > 255) + ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); + if (error == MY_ERRNO_ERANGE || tmp > 255) { - tmp= 255; + set_if_smaller(tmp, 255); set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, 
ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } + else + error= 0; + ptr[0]= (char) tmp; } else { + longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); if (tmp < -128) { tmp= -128; @@ -1781,13 +2539,12 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } + else + error= 0; + ptr[0]= (char) tmp; } - ptr[0]= (char) tmp; return error; } @@ -1828,23 +2585,25 @@ int Field_tiny::store(double nr) error= 1; } else - *ptr=(char) nr; + *ptr=(char) (int) nr; } return error; } -int Field_tiny::store(longlong nr) + +int Field_tiny::store(longlong nr, bool unsigned_val) { int error= 0; + if (unsigned_flag) { - if (nr < 0L) + if (nr < 0 && !unsigned_val) { - *ptr=0; + *ptr= 0; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr > 255L) + else if ((ulonglong) nr > (ulonglong) 255) { *ptr= (char) 255; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); @@ -1855,13 +2614,15 @@ int Field_tiny::store(longlong nr) } else { - if (nr < -128L) + if (nr < 0 && unsigned_val) + nr= 256; // Generate overflow + if (nr < -128) { *ptr= (char) -128; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr > 127L) + else if (nr > 127) { *ptr=127; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); @@ -1881,6 +2642,7 @@ double Field_tiny::val_real(void) return (double) tmp; } + longlong Field_tiny::val_int(void) { int tmp= unsigned_flag ? 
(int) ((uchar*) ptr)[0] : @@ -1888,6 +2650,7 @@ longlong Field_tiny::val_int(void) return (longlong) tmp; } + String *Field_tiny::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { @@ -1946,33 +2709,34 @@ void Field_tiny::sql_type(String &res) const int Field_short::store(const char *from,uint len,CHARSET_INFO *cs) { - int not_used; // We can ignore result from str2int char *end; - long tmp= my_strntol(cs, from, len, 10, &end, ¬_used); - int error= 0; + int error; if (unsigned_flag) { - if (tmp < 0) + ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); + if (error == MY_ERRNO_ERANGE || tmp > UINT_MAX16) { - tmp=0; + set_if_smaller(tmp, UINT_MAX16); set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (tmp > (uint16) ~0) - { - tmp=(uint16) ~0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) + else + error= 0; +#ifdef WORDS_BIGENDIAN + if (table->s->db_low_byte_first) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); - error= 1; + int2store(ptr,tmp); } + else +#endif + shortstore(ptr,(short) tmp); } else { + longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); if (tmp < INT_MIN16) { tmp= INT_MIN16; @@ -1985,20 +2749,19 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } - } + else + error= 0; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) - { - int2store(ptr,tmp); - } - else + if 
(table->s->db_low_byte_first) + { + int2store(ptr,tmp); + } + else #endif - shortstore(ptr,(short) tmp); + shortstore(ptr,(short) tmp); + } return error; } @@ -2016,9 +2779,9 @@ int Field_short::store(double nr) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr > (double) (uint16) ~0) + else if (nr > (double) UINT_MAX16) { - res=(int16) (uint16) ~0; + res=(int16) UINT_MAX16; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } @@ -2040,10 +2803,10 @@ int Field_short::store(double nr) error= 1; } else - res=(int16) nr; + res=(int16) (int) nr; } #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int2store(ptr,res); } @@ -2053,21 +2816,23 @@ int Field_short::store(double nr) return error; } -int Field_short::store(longlong nr) + +int Field_short::store(longlong nr, bool unsigned_val) { int error= 0; int16 res; + if (unsigned_flag) { - if (nr < 0L) + if (nr < 0L && !unsigned_val) { res=0; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr > (longlong) (uint16) ~0) + else if ((ulonglong) nr > (ulonglong) UINT_MAX16) { - res=(int16) (uint16) ~0; + res=(int16) UINT_MAX16; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } @@ -2076,13 +2841,16 @@ int Field_short::store(longlong nr) } else { + if (nr < 0 && unsigned_val) + nr= UINT_MAX16+1; // Generate overflow + if (nr < INT_MIN16) { res=INT_MIN16; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr > INT_MAX16) + else if (nr > (longlong) INT_MAX16) { res=INT_MAX16; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); @@ -2092,7 +2860,7 @@ int Field_short::store(longlong nr) res=(int16) nr; } #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int2store(ptr,res); } @@ -2107,7 +2875,7 @@ double 
Field_short::val_real(void) { short j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) j=sint2korr(ptr); else #endif @@ -2119,7 +2887,7 @@ longlong Field_short::val_int(void) { short j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) j=sint2korr(ptr); else #endif @@ -2138,7 +2906,7 @@ String *Field_short::val_str(String *val_buffer, char *to=(char*) val_buffer->ptr(); short j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) j=sint2korr(ptr); else #endif @@ -2166,7 +2934,7 @@ int Field_short::cmp(const char *a_ptr, const char *b_ptr) { short a,b; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { a=sint2korr(a_ptr); b=sint2korr(b_ptr); @@ -2187,7 +2955,7 @@ int Field_short::cmp(const char *a_ptr, const char *b_ptr) void Field_short::sort_string(char *to,uint length __attribute__((unused))) { #ifdef WORDS_BIGENDIAN - if (!table->db_low_byte_first) + if (!table->s->db_low_byte_first) { if (unsigned_flag) to[0] = ptr[0]; @@ -2221,33 +2989,27 @@ void Field_short::sql_type(String &res) const int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs) { - int not_used; // We can ignore result from str2int char *end; - long tmp= my_strntol(cs, from, len, 10, &end, ¬_used); - int error= 0; + int error; if (unsigned_flag) { - if (tmp < 0) + ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); + if (error == MY_ERRNO_ERANGE || tmp > UINT_MAX24) { - tmp=0; + set_if_smaller(tmp, UINT_MAX24); set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (tmp >= (long) (1L << 24)) - { - tmp=(long) (1L << 24)-1L; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { 
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); - error= 1; - } + else + error= 0; + int3store(ptr,tmp); } else { + longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); if (tmp < INT_MIN24) { tmp= INT_MIN24; @@ -2260,14 +3022,12 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } + else + error= 0; + int3store(ptr,tmp); } - - int3store(ptr,tmp); return error; } @@ -2316,20 +3076,22 @@ int Field_medium::store(double nr) return error; } -int Field_medium::store(longlong nr) + +int Field_medium::store(longlong nr, bool unsigned_val) { int error= 0; + if (unsigned_flag) { - if (nr < 0L) + if (nr < 0 && !unsigned_val) { int3store(ptr,0); set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr >= (longlong) (long) (1L << 24)) + else if ((ulonglong) nr >= (ulonglong) (long) (1L << 24)) { - long tmp=(long) (1L << 24)-1L;; + long tmp= (long) (1L << 24)-1L; int3store(ptr,tmp); set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; @@ -2339,9 +3101,12 @@ int Field_medium::store(longlong nr) } else { + if (nr < 0 && unsigned_val) + nr= (ulonglong) (long) (1L << 24); // Generate overflow + if (nr < (longlong) INT_MIN24) { - long tmp=(long) INT_MIN24; + long tmp= (long) INT_MIN24; int3store(ptr,tmp); set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; @@ -2437,92 +3202,57 @@ void Field_medium::sql_type(String &res) const ** long int ****************************************************************************/ -/* - A helper function to check whether the next character - in the string "s" is 
MINUS SIGN. -*/ -#ifdef HAVE_CHARSET_ucs2 -static bool test_if_minus(CHARSET_INFO *cs, - const char *s, const char *e) -{ - my_wc_t wc; - return cs->cset->mb_wc(cs, &wc, (uchar*) s, (uchar*) e) > 0 && wc == '-'; -} -#else -/* - If not UCS2 support is compiled then it is easier -*/ -#define test_if_minus(cs, s, e) (*s == '-') -#endif - - int Field_long::store(const char *from,uint len,CHARSET_INFO *cs) { - long tmp; - int error= 0; + long store_tmp; + int error; char *end; - - tmp= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES); - len-= tmp; - from+= tmp; - my_errno=0; if (unsigned_flag) { - if (!len || test_if_minus(cs, from, from + len)) + ulonglong tmp= cs->cset->strntoull10rnd(cs, from, len, 1, &end, &error); + if (error == MY_ERRNO_ERANGE || tmp > (ulonglong) UINT_MAX32) { - tmp=0; // Set negative to 0 - my_errno=ERANGE; + set_if_smaller(tmp, (ulonglong) UINT_MAX32); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) + error= 1; else - tmp=(long) my_strntoul(cs,from,len,10,&end,&error); + error= 0; + store_tmp= (long) tmp; } else - tmp=my_strntol(cs,from,len,10,&end,&error); - if (error || - (from+len != end && table->in_use->count_cuted_fields && - !test_if_int(from,len,end,cs))) - { - if (error != 1) - error= 2; - } -#if SIZEOF_LONG > 4 - if (unsigned_flag) { - if ((ulong) tmp > UINT_MAX32) + longlong tmp= cs->cset->strntoull10rnd(cs, from, len, 0, &end, &error); + if (tmp < INT_MIN32) { - tmp= UINT_MAX32; + tmp= INT_MIN32; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; - my_errno=ERANGE; } - } - else - { - if (tmp > INT_MAX32) + else if (tmp > INT_MAX32) { - tmp= INT_MAX32; + tmp=INT_MAX32; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; - my_errno=ERANGE; } - else if (tmp < INT_MIN32) - { - tmp= INT_MIN32; + else if (table->in_use->count_cuted_fields && 
check_int(from,len,end,cs)) error= 1; - my_errno=ERANGE; - } + else + error= 0; + store_tmp= (long) tmp; } -#endif - if (error) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { - int4store(ptr,tmp); + int4store(ptr, store_tmp); } else #endif - longstore(ptr,tmp); + longstore(ptr, store_tmp); return error; } @@ -2537,7 +3267,6 @@ int Field_long::store(double nr) if (nr < 0) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (double) UINT_MAX32) @@ -2554,20 +3283,21 @@ int Field_long::store(double nr) if (nr < (double) INT_MIN32) { res=(int32) INT_MIN32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (double) INT_MAX32) { res=(int32) INT_MAX32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else - res=(int32) nr; + res=(int32) (longlong) nr; } + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int4store(ptr,res); } @@ -2578,30 +3308,22 @@ int Field_long::store(double nr) } -int Field_long::store(longlong nr) +int Field_long::store(longlong nr, bool unsigned_val) { int error= 0; int32 res; - - /* - This assert has nothing to do with this method per se, it was put here - only because it is one of the best places for catching places there its - condition is broken. 
- */ - DBUG_ASSERT(table->in_use == current_thd); + DBUG_ASSERT(table->in_use == current_thd); // General safety if (unsigned_flag) { - if (nr < 0) + if (nr < 0 && !unsigned_val) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr >= (LL(1) << 32)) + else if ((ulonglong) nr >= (LL(1) << 32)) { res=(int32) (uint32) ~0L; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -2609,23 +3331,26 @@ int Field_long::store(longlong nr) } else { - if (nr < (longlong) INT_MIN32) + if (nr < 0 && unsigned_val) + nr= ((longlong) INT_MAX32) + 1; // Generate overflow + if (nr < (longlong) INT_MIN32) { res=(int32) INT_MIN32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (longlong) INT_MAX32) { res=(int32) INT_MAX32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else res=(int32) nr; } + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int4store(ptr,res); } @@ -2640,7 +3365,7 @@ double Field_long::val_real(void) { int32 j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) j=sint4korr(ptr); else #endif @@ -2654,7 +3379,7 @@ longlong Field_long::val_int(void) /* See the comment in Field_long::store(long long) */ DBUG_ASSERT(table->in_use == current_thd); #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) j=sint4korr(ptr); else #endif @@ -2672,7 +3397,7 @@ String *Field_long::val_str(String *val_buffer, char *to=(char*) val_buffer->ptr(); int32 j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) j=sint4korr(ptr); else #endif @@ -2698,7 +3423,7 @@ int Field_long::cmp(const char *a_ptr, const char *b_ptr) { int32 a,b; #ifdef WORDS_BIGENDIAN - if 
(table->db_low_byte_first) + if (table->s->db_low_byte_first) { a=sint4korr(a_ptr); b=sint4korr(b_ptr); @@ -2717,7 +3442,7 @@ int Field_long::cmp(const char *a_ptr, const char *b_ptr) void Field_long::sort_string(char *to,uint length __attribute__((unused))) { #ifdef WORDS_BIGENDIAN - if (!table->db_low_byte_first) + if (!table->s->db_low_byte_first) { if (unsigned_flag) to[0] = ptr[0]; @@ -2755,39 +3480,22 @@ void Field_long::sql_type(String &res) const int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs) { - longlong tmp; - int error= 0; + int error; char *end; - - tmp= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES); - len-= (uint)tmp; - from+= tmp; - my_errno=0; - if (unsigned_flag) + ulonglong tmp; + + tmp= cs->cset->strntoull10rnd(cs,from,len,unsigned_flag,&end,&error); + if (error == MY_ERRNO_ERANGE) { - if (!len || test_if_minus(cs, from, from + len)) - { - tmp=0; // Set negative to 0 - my_errno= ERANGE; - error= 1; - } - else - tmp=(longlong) my_strntoull(cs,from,len,10,&end,&error); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; } + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) + error= 1; else - tmp=my_strntoll(cs,from,len,10,&end,&error); - if (error || - (from+len != end && table->in_use->count_cuted_fields && - !test_if_int(from,len,end,cs))) - { - if (error != 1) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); - error= 2; - } - } + error= 0; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int8store(ptr,tmp); } @@ -2802,19 +3510,18 @@ int Field_longlong::store(double nr) { int error= 0; longlong res; - nr=rint(nr); + + nr= rint(nr); if (unsigned_flag) { if (nr < 0) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr >= (double) ~ (ulonglong) 0) + else if (nr >= (double) ULONGLONG_MAX) { res= ~(longlong) 0; - 
set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -2824,21 +3531,22 @@ int Field_longlong::store(double nr) { if (nr <= (double) LONGLONG_MIN) { - res=(longlong) LONGLONG_MIN; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; + res= LONGLONG_MIN; + error= (nr < (double) LONGLONG_MIN); } else if (nr >= (double) (ulonglong) LONGLONG_MAX) { - res=(longlong) LONGLONG_MAX; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; + res= LONGLONG_MAX; + error= (nr > (double) LONGLONG_MAX); } else res=(longlong) nr; } + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int8store(ptr,res); } @@ -2849,17 +3557,33 @@ int Field_longlong::store(double nr) } -int Field_longlong::store(longlong nr) +int Field_longlong::store(longlong nr, bool unsigned_val) { + int error= 0; + + if (nr < 0) // Only possible error + { + /* + if field is unsigned and value is signed (< 0) or + if field is signed and value is unsigned we have an overflow + */ + if (unsigned_flag != unsigned_val) + { + nr= unsigned_flag ? 
(ulonglong) 0 : (ulonglong) LONGLONG_MAX; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + error= 1; + } + } + #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int8store(ptr,nr); } else #endif longlongstore(ptr,nr); - return 0; + return error; } @@ -2867,7 +3591,7 @@ double Field_longlong::val_real(void) { longlong j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { j=sint8korr(ptr); } @@ -2888,7 +3612,7 @@ longlong Field_longlong::val_int(void) { longlong j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) j=sint8korr(ptr); else #endif @@ -2907,7 +3631,7 @@ String *Field_longlong::val_str(String *val_buffer, char *to=(char*) val_buffer->ptr(); longlong j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) j=sint8korr(ptr); else #endif @@ -2932,7 +3656,7 @@ int Field_longlong::cmp(const char *a_ptr, const char *b_ptr) { longlong a,b; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { a=sint8korr(a_ptr); b=sint8korr(b_ptr); @@ -2952,7 +3676,7 @@ int Field_longlong::cmp(const char *a_ptr, const char *b_ptr) void Field_longlong::sort_string(char *to,uint length __attribute__((unused))) { #ifdef WORDS_BIGENDIAN - if (!table->db_low_byte_first) + if (!table->s->db_low_byte_first) { if (unsigned_flag) to[0] = ptr[0]; @@ -3002,10 +3726,12 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs) int error; char *end; double nr= my_strntod(cs,(char*) from,len,&end,&error); - if (error || ((uint) (end-from) != len && table->in_use->count_cuted_fields)) + if (error || (!len || (uint) (end-from) != len && + table->in_use->count_cuted_fields)) { - error= 2; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + (error ? 
ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED), 1); + error= error ? 1 : 2; } Field_float::store(nr); return error; @@ -3065,7 +3791,7 @@ int Field_float::store(double nr) } #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float4store(ptr,j); } @@ -3076,9 +3802,9 @@ int Field_float::store(double nr) } -int Field_float::store(longlong nr) +int Field_float::store(longlong nr, bool unsigned_val) { - return store((double)nr); + return store(unsigned_val ? ulonglong2double((ulonglong) nr) : (double) nr); } @@ -3086,7 +3812,7 @@ double Field_float::val_real(void) { float j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float4get(j,ptr); } @@ -3100,14 +3826,14 @@ longlong Field_float::val_int(void) { float j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float4get(j,ptr); } else #endif memcpy_fixed((byte*) &j,ptr,sizeof(j)); - return ((longlong) j); + return (longlong) rint(j); } @@ -3116,7 +3842,7 @@ String *Field_float::val_str(String *val_buffer, { float nr; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float4get(nr,ptr); } @@ -3198,7 +3924,7 @@ int Field_float::cmp(const char *a_ptr, const char *b_ptr) { float a,b; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float4get(a,a_ptr); float4get(b,b_ptr); @@ -3218,7 +3944,7 @@ void Field_float::sort_string(char *to,uint length __attribute__((unused))) { float nr; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float4get(nr,ptr); } @@ -3267,7 +3993,7 @@ void Field_float::sql_type(String &res) const { if (dec == NOT_FIXED_DEC) { - res.set_ascii("float", 5); + res.set_ascii(STRING_WITH_LEN("float")); } else { @@ -3288,10 +4014,12 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs) int error; char *end; double nr= my_strntod(cs,(char*) from, 
len, &end, &error); - if (error || ((uint) (end-from) != len && table->in_use->count_cuted_fields)) + if (error || (!len || (uint) (end-from) != len && + table->in_use->count_cuted_fields)) { - error= 2; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + (error ? ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED), 1); + error= error ? 1 : 2; } Field_double::store(nr); return error; @@ -3344,7 +4072,7 @@ int Field_double::store(double nr) } #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float8store(ptr,nr); } @@ -3355,17 +4083,24 @@ int Field_double::store(double nr) } -int Field_double::store(longlong nr) +int Field_double::store(longlong nr, bool unsigned_val) { - return store((double)nr); + return store(unsigned_val ? ulonglong2double((ulonglong) nr) : (double) nr); } +int Field_real::store_decimal(const my_decimal *dm) +{ + double dbl; + my_decimal2double(E_DEC_FATAL_ERROR, dm, &dbl); + return store(dbl); +} + double Field_double::val_real(void) { double j; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float8get(j,ptr); } @@ -3378,8 +4113,9 @@ double Field_double::val_real(void) longlong Field_double::val_int(void) { double j; + longlong res; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float8get(j,ptr); } @@ -3388,10 +4124,35 @@ longlong Field_double::val_int(void) doubleget(j,ptr); /* Check whether we fit into longlong range */ if (j <= (double) LONGLONG_MIN) - return (longlong) LONGLONG_MIN; + { + res= (longlong) LONGLONG_MIN; + goto warn; + } if (j >= (double) (ulonglong) LONGLONG_MAX) - return (longlong) LONGLONG_MAX; - return ((longlong) j); + { + res= (longlong) LONGLONG_MAX; + goto warn; + } + return (longlong) rint(j); + +warn: + { + char buf[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE]; + String tmp(buf, sizeof(buf), &my_charset_latin1), *str; + str= 
val_str(&tmp, 0); + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER", + str->c_ptr()); + } + return res; +} + + +my_decimal *Field_real::val_decimal(my_decimal *decimal_value) +{ + double2my_decimal(E_DEC_FATAL_ERROR, val_real(), decimal_value); + return decimal_value; } @@ -3400,7 +4161,7 @@ String *Field_double::val_str(String *val_buffer, { double nr; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float8get(nr,ptr); } @@ -3488,7 +4249,7 @@ int Field_double::cmp(const char *a_ptr, const char *b_ptr) { double a,b; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float8get(a,a_ptr); float8get(b,b_ptr); @@ -3511,7 +4272,7 @@ void Field_double::sort_string(char *to,uint length __attribute__((unused))) { double nr; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { float8get(nr,ptr); } @@ -3527,7 +4288,7 @@ void Field_double::sql_type(String &res) const CHARSET_INFO *cs=res.charset(); if (dec == NOT_FIXED_DEC) { - res.set_ascii("double",6); + res.set_ascii(STRING_WITH_LEN("double")); } else { @@ -3562,7 +4323,7 @@ void Field_double::sql_type(String &res) const TIMESTAMP_OLD_FIELD - old timestamp, if there was not any fields with auto-set-on-update (or now() as default) in this table before, then this field has NOW() as default and is updated when row changes, else it is - field which has 0 as default value and is not automaitcally updated. + field which has 0 as default value and is not automatically updated. 
TIMESTAMP_DN_FIELD - field with NOW() as default but not set on update automatically (TIMESTAMP DEFAULT NOW()) TIMESTAMP_UN_FIELD - field which is set on update automatically but has not @@ -3604,6 +4365,24 @@ Field_timestamp::Field_timestamp(char *ptr_arg, uint32 len_arg, } +Field_timestamp::Field_timestamp(bool maybe_null_arg, + const char *field_name_arg, + struct st_table *table_arg, CHARSET_INFO *cs) + :Field_str((char*) 0, 19, maybe_null_arg ? (uchar*) "": 0, 0, + NONE, field_name_arg, table_arg, cs) +{ + /* For 4.0 MYD and 4.0 InnoDB compatibility */ + flags|= ZEROFILL_FLAG | UNSIGNED_FLAG; + if (table && !table->timestamp_field && + unireg_check != NONE) + { + /* This timestamp has auto-update */ + table->timestamp_field= this; + flags|=TIMESTAMP_FLAG; + } +} + + /* Get auto-set type for TIMESTAMP field. @@ -3624,7 +4403,7 @@ timestamp_auto_set_type Field_timestamp::get_auto_set_type() const return TIMESTAMP_AUTO_SET_ON_UPDATE; case TIMESTAMP_OLD_FIELD: /* - Altough we can have several such columns in legacy tables this + Although we can have several such columns in legacy tables this function should be called only for first of them (i.e. the one having auto-set property). */ @@ -3649,39 +4428,44 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs) my_time_t tmp= 0; int error; bool have_smth_to_conv; - bool in_dst_time_gap; - THD *thd= table->in_use; + my_bool in_dst_time_gap; + THD *thd= table ? 
table->in_use : current_thd; - have_smth_to_conv= (str_to_datetime(from, len, &l_time, 0, &error) > + /* We don't want to store invalid or fuzzy datetime values in TIMESTAMP */ + have_smth_to_conv= (str_to_datetime(from, len, &l_time, + (thd->variables.sql_mode & + MODE_NO_ZERO_DATE) | + MODE_NO_ZERO_IN_DATE, &error) > MYSQL_TIMESTAMP_ERROR); - - if (error) - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, + + if (error || !have_smth_to_conv) + { + error= 1; + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, from, len, MYSQL_TIMESTAMP_DATETIME, 1); + } - if (have_smth_to_conv) + /* Only convert a correct date (not a zero date) */ + if (have_smth_to_conv && l_time.month) { if (!(tmp= TIME_to_timestamp(thd, &l_time, &in_dst_time_gap))) { - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, from, len, MYSQL_TIMESTAMP_DATETIME, !error); - error= 1; } else if (in_dst_time_gap) { set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_INVALID_TIMESTAMP, + ER_WARN_INVALID_TIMESTAMP, from, len, MYSQL_TIMESTAMP_DATETIME, !error); error= 1; } } - if (error > 1) - error= 2; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { int4store(ptr,tmp); } @@ -3691,62 +4475,70 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs) return error; } + int Field_timestamp::store(double nr) { int error= 0; if (nr < 0 || nr > 99991231235959.0) { - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_OUT_OF_RANGE, + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, nr, MYSQL_TIMESTAMP_DATETIME); nr= 0; // Avoid overflow on buff error= 1; } - error|= Field_timestamp::store((longlong) rint(nr)); + error|= Field_timestamp::store((longlong) rint(nr), FALSE); return error; } -int Field_timestamp::store(longlong nr) +int 
Field_timestamp::store(longlong nr, bool unsigned_val) { TIME l_time; my_time_t timestamp= 0; int error; - bool in_dst_time_gap; - THD *thd= table->in_use; + my_bool in_dst_time_gap; + THD *thd= table ? table->in_use : current_thd; + + /* We don't want to store invalid or fuzzy datetime values in TIMESTAMP */ + longlong tmp= number_to_datetime(nr, &l_time, (thd->variables.sql_mode & + MODE_NO_ZERO_DATE) | + MODE_NO_ZERO_IN_DATE, &error); + if (tmp == LL(-1)) + { + error= 2; + } - if (number_to_TIME(nr, &l_time, 0, &error)) + if (!error && tmp) { if (!(timestamp= TIME_to_timestamp(thd, &l_time, &in_dst_time_gap))) { - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_OUT_OF_RANGE, - nr, MYSQL_TIMESTAMP_DATETIME, 1); + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, + nr, MYSQL_TIMESTAMP_DATETIME, 1); error= 1; } - if (in_dst_time_gap) { set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_INVALID_TIMESTAMP, - nr, MYSQL_TIMESTAMP_DATETIME, !error); + ER_WARN_INVALID_TIMESTAMP, + nr, MYSQL_TIMESTAMP_DATETIME, 1); error= 1; } - } - else if (error) - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_TRUNCATED, + } else if (error) + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + WARN_DATA_TRUNCATED, nr, MYSQL_TIMESTAMP_DATETIME, 1); #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { int4store(ptr,timestamp); } else #endif longstore(ptr,(uint32) timestamp); - + return error; } @@ -3760,10 +4552,10 @@ longlong Field_timestamp::val_int(void) { uint32 temp; TIME time_tmp; - THD *thd= table->in_use; + THD *thd= table ? 
table->in_use : current_thd; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) temp=uint4korr(ptr); else #endif @@ -3785,7 +4577,7 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr) { uint32 temp, temp2; TIME time_tmp; - THD *thd= table->in_use; + THD *thd= table ? table->in_use : current_thd; char *to; val_buffer->alloc(field_length+1); @@ -3793,7 +4585,7 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr) val_buffer->length(field_length); #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) temp=uint4korr(ptr); else #endif @@ -3801,7 +4593,7 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr) if (temp == 0L) { /* Zero time is "000000" */ - val_ptr->set("0000-00-00 00:00:00", 19, &my_charset_bin); + val_ptr->set(STRING_WITH_LEN("0000-00-00 00:00:00"), &my_charset_bin); return val_ptr; } val_buffer->set_charset(&my_charset_bin); // Safety @@ -3856,16 +4648,16 @@ String *Field_timestamp::val_str(String *val_buffer, String *val_ptr) bool Field_timestamp::get_date(TIME *ltime, uint fuzzydate) { long temp; - THD *thd= table->in_use; + THD *thd= table ? 
table->in_use : current_thd; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) temp=uint4korr(ptr); else #endif longget(temp,ptr); if (temp == 0L) { /* Zero time is "000000" */ - if (!fuzzydate) + if (fuzzydate & TIME_NO_ZERO_DATE) return 1; bzero((char*) ltime,sizeof(*ltime)); } @@ -3886,7 +4678,7 @@ bool Field_timestamp::get_time(TIME *ltime) bool Field_timestamp::send_binary(Protocol *protocol) { TIME tm; - Field_timestamp::get_date(&tm, TIME_FUZZY_DATE); + Field_timestamp::get_date(&tm, 0); return protocol->store(&tm); } @@ -3895,7 +4687,7 @@ int Field_timestamp::cmp(const char *a_ptr, const char *b_ptr) { int32 a,b; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { a=sint4korr(a_ptr); b=sint4korr(b_ptr); @@ -3913,7 +4705,7 @@ int Field_timestamp::cmp(const char *a_ptr, const char *b_ptr) void Field_timestamp::sort_string(char *to,uint length __attribute__((unused))) { #ifdef WORDS_BIGENDIAN - if (!table || !table->db_low_byte_first) + if (!table || !table->s->db_low_byte_first) { to[0] = ptr[0]; to[1] = ptr[1]; @@ -3933,16 +4725,17 @@ void Field_timestamp::sort_string(char *to,uint length __attribute__((unused))) void Field_timestamp::sql_type(String &res) const { - res.set_ascii("timestamp", 9); + res.set_ascii(STRING_WITH_LEN("timestamp")); } void Field_timestamp::set_time() { - long tmp= (long) table->in_use->query_start(); + THD *thd= table ? 
table->in_use : current_thd; + long tmp= (long) thd->query_start(); set_notnull(); #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { int4store(ptr,tmp); } @@ -3969,14 +4762,14 @@ int Field_time::store(const char *from,uint len,CHARSET_INFO *cs) { tmp=0L; error= 2; - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, from, len, MYSQL_TIMESTAMP_TIME, 1); } else { if (warning & MYSQL_TIME_WARN_TRUNCATED) set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_TRUNCATED, + WARN_DATA_TRUNCATED, from, len, MYSQL_TIMESTAMP_TIME, 1); if (warning & MYSQL_TIME_WARN_OUT_OF_RANGE) { @@ -3999,6 +4792,16 @@ int Field_time::store(const char *from,uint len,CHARSET_INFO *cs) } +int Field_time::store_time(TIME *ltime, timestamp_type time_type) +{ + long tmp= ((ltime->month ? 0 : ltime->day * 24L) + ltime->hour) * 10000L + + (ltime->minute * 100 + ltime->second); + if (ltime->neg) + tmp= -tmp; + return Field_time::store((longlong) tmp, FALSE); +} + + int Field_time::store(double nr) { long tmp; @@ -4036,21 +4839,21 @@ int Field_time::store(double nr) } -int Field_time::store(longlong nr) +int Field_time::store(longlong nr, bool unsigned_val) { long tmp; int error= 0; - if (nr > (longlong) TIME_MAX_VALUE) + if (nr < (longlong) -TIME_MAX_VALUE && !unsigned_val) { - tmp= TIME_MAX_VALUE; + tmp= -TIME_MAX_VALUE; set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, nr, MYSQL_TIMESTAMP_TIME, 1); error= 1; } - else if (nr < (longlong) -TIME_MAX_VALUE) + else if (nr > (longlong) TIME_MAX_VALUE || nr < 0 && unsigned_val) { - tmp= -TIME_MAX_VALUE; + tmp= TIME_MAX_VALUE; set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, nr, MYSQL_TIMESTAMP_TIME, 1); @@ -4112,7 +4915,7 @@ String *Field_time::val_str(String *val_buffer, /* - Normally we would not consider 'time' as a vaild date, 
but we allow + Normally we would not consider 'time' as a valid date, but we allow get_date() here to be able to do things like DATE_FORMAT(time, "%l.%i %p") */ @@ -4120,12 +4923,13 @@ String *Field_time::val_str(String *val_buffer, bool Field_time::get_date(TIME *ltime, uint fuzzydate) { long tmp; - if (!fuzzydate) + THD *thd= table ? table->in_use : current_thd; + if (!(fuzzydate & TIME_FUZZY_DATE)) { - push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, ER(ER_WARN_DATA_OUT_OF_RANGE), field_name, - table->in_use->row_count); + thd->row_count); return 1; } tmp=(long) sint3korr(ptr); @@ -4191,7 +4995,7 @@ void Field_time::sort_string(char *to,uint length __attribute__((unused))) void Field_time::sql_type(String &res) const { - res.set_ascii("time", 4); + res.set_ascii(STRING_WITH_LEN("time")); } /**************************************************************************** @@ -4202,26 +5006,19 @@ void Field_time::sql_type(String &res) const int Field_year::store(const char *from, uint len,CHARSET_INFO *cs) { - int err; char *end; - long nr= my_strntol(cs, from, len, 10, &end, &err); - - if (err) - { - if (table->in_use->count_cuted_fields) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); - *ptr= 0; - return 0; - } + int error; + long nr= my_strntol(cs, from, len, 10, &end, &error); - if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155) + if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155 || error) { *ptr=0; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } - if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) + error= 1; + if (nr != 0 || len != 4) { if (nr < YY_PART_YEAR) @@ -4230,7 +5027,7 @@ int Field_year::store(const char *from, uint len,CHARSET_INFO 
*cs) nr-= 1900; } *ptr= (char) (unsigned char) nr; - return 0; + return error; } @@ -4238,18 +5035,18 @@ int Field_year::store(double nr) { if (nr < 0.0 || nr >= 2155.0) { - (void) Field_year::store((longlong) -1); + (void) Field_year::store((longlong) -1, FALSE); return 1; } - else - return Field_year::store((longlong) nr); + return Field_year::store((longlong) nr, FALSE); } -int Field_year::store(longlong nr) + +int Field_year::store(longlong nr, bool unsigned_val) { if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155) { - *ptr=0; + *ptr= 0; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } @@ -4264,17 +5061,20 @@ int Field_year::store(longlong nr) return 0; } + bool Field_year::send_binary(Protocol *protocol) { ulonglong tmp= Field_year::val_int(); return protocol->store_short(tmp); } + double Field_year::val_real(void) { return (double) Field_year::val_int(); } + longlong Field_year::val_int(void) { int tmp= (int) ((uchar*) ptr)[0]; @@ -4285,6 +5085,7 @@ longlong Field_year::val_int(void) return (longlong) tmp; } + String *Field_year::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { @@ -4295,6 +5096,7 @@ String *Field_year::val_str(String *val_buffer, return val_buffer; } + void Field_year::sql_type(String &res) const { CHARSET_INFO *cs=res.charset(); @@ -4315,21 +5117,26 @@ int Field_date::store(const char *from, uint len,CHARSET_INFO *cs) TIME l_time; uint32 tmp; int error; - - if (str_to_datetime(from, len, &l_time, 1, &error) <= MYSQL_TIMESTAMP_ERROR) + THD *thd= table ? 
table->in_use : current_thd; + + if (str_to_datetime(from, len, &l_time, TIME_FUZZY_DATE | + (thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES)), + &error) <= MYSQL_TIMESTAMP_ERROR) { - tmp=0; + tmp= 0; error= 2; } else tmp=(uint32) l_time.year*10000L + (uint32) (l_time.month*100+l_time.day); if (error) - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, from, len, MYSQL_TIMESTAMP_DATE, 1); #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { int4store(ptr,tmp); } @@ -4342,56 +5149,61 @@ int Field_date::store(const char *from, uint len,CHARSET_INFO *cs) int Field_date::store(double nr) { - long tmp; + longlong tmp; int error= 0; if (nr >= 19000000000000.0 && nr <= 99991231235959.0) nr=floor(nr/1000000.0); // Timestamp to date if (nr < 0.0 || nr > 99991231.0) { - tmp=0L; - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_OUT_OF_RANGE, + tmp= LL(0); + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_OUT_OF_RANGE, nr, MYSQL_TIMESTAMP_DATE); error= 1; } else - tmp=(long) rint(nr); -#ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) - { - int4store(ptr,tmp); - } - else -#endif - longstore(ptr,tmp); - return error; + tmp= (longlong) rint(nr); + + return Field_date::store(tmp, TRUE); } -int Field_date::store(longlong nr) +int Field_date::store(longlong nr, bool unsigned_val) { - long tmp; - int error= 0; - if (nr >= LL(19000000000000) && nr < LL(99991231235959)) - nr=nr/LL(1000000); // Timestamp to date - if (nr < 0 || nr > LL(99991231)) + TIME not_used; + int error; + longlong initial_nr= nr; + THD *thd= table ? 
table->in_use : current_thd; + + nr= number_to_datetime(nr, ¬_used, (TIME_FUZZY_DATE | + (thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | + MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), &error); + + if (nr == LL(-1)) { - tmp=0L; - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_OUT_OF_RANGE, - nr, MYSQL_TIMESTAMP_DATE, 0); - error= 1; + nr= 0; + error= 2; } - else - tmp=(long) nr; + + if (nr >= 19000000000000.0 && nr <= 99991231235959.0) + nr= (longlong) floor(nr/1000000.0); // Timestamp to date + + if (error) + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + error == 2 ? ER_WARN_DATA_OUT_OF_RANGE : + WARN_DATA_TRUNCATED, initial_nr, + MYSQL_TIMESTAMP_DATETIME, 1); + #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { - int4store(ptr,tmp); + int4store(ptr, nr); } else #endif - longstore(ptr,tmp); + longstore(ptr, nr); return error; } @@ -4411,7 +5223,7 @@ double Field_date::val_real(void) { int32 j; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) j=sint4korr(ptr); else #endif @@ -4419,11 +5231,12 @@ double Field_date::val_real(void) return (double) (uint32) j; } + longlong Field_date::val_int(void) { int32 j; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) j=sint4korr(ptr); else #endif @@ -4431,6 +5244,7 @@ longlong Field_date::val_int(void) return (longlong) (uint32) j; } + String *Field_date::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { @@ -4438,7 +5252,7 @@ String *Field_date::val_str(String *val_buffer, val_buffer->alloc(field_length); int32 tmp; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) tmp=sint4korr(ptr); else #endif @@ -4456,7 +5270,7 @@ int Field_date::cmp(const char *a_ptr, const char *b_ptr) { int32 a,b; #ifdef WORDS_BIGENDIAN - if (table && 
table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { a=sint4korr(a_ptr); b=sint4korr(b_ptr); @@ -4474,7 +5288,7 @@ int Field_date::cmp(const char *a_ptr, const char *b_ptr) void Field_date::sort_string(char *to,uint length __attribute__((unused))) { #ifdef WORDS_BIGENDIAN - if (!table || !table->db_low_byte_first) + if (!table || !table->s->db_low_byte_first) { to[0] = ptr[0]; to[1] = ptr[1]; @@ -4493,9 +5307,10 @@ void Field_date::sort_string(char *to,uint length __attribute__((unused))) void Field_date::sql_type(String &res) const { - res.set_ascii("date", 4); + res.set_ascii(STRING_WITH_LEN("date")); } + /**************************************************************************** ** The new date type ** This is identical to the old date type, but stored on 3 bytes instead of 4 @@ -4505,92 +5320,108 @@ void Field_date::sql_type(String &res) const int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs) { TIME l_time; - long tmp; int error; - if (str_to_datetime(from, len, &l_time, 1, &error) <= MYSQL_TIMESTAMP_ERROR) - { - tmp=0L; + THD *thd= table ? 
table->in_use : current_thd; + enum enum_mysql_timestamp_type ret; + if ((ret= str_to_datetime(from, len, &l_time, + (TIME_FUZZY_DATE | + (thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), + &error)) <= MYSQL_TIMESTAMP_ERROR) + { + int3store(ptr,0L); error= 2; } else - tmp= l_time.day + l_time.month*32 + l_time.year*16*32; + { + int3store(ptr, l_time.day + l_time.month*32 + l_time.year*16*32); + if(!error && (ret != MYSQL_TIMESTAMP_DATE)) + return 2; + } if (error) - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, from, len, MYSQL_TIMESTAMP_DATE, 1); - - int3store(ptr,tmp); + return error; } + int Field_newdate::store(double nr) { if (nr < 0.0 || nr > 99991231235959.0) { - (void) Field_newdate::store((longlong) -1); - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_TRUNCATED, nr, MYSQL_TIMESTAMP_DATE); + int3store(ptr,(int32) 0); + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + WARN_DATA_TRUNCATED, nr, MYSQL_TIMESTAMP_DATE); return 1; } - else - return Field_newdate::store((longlong) rint(nr)); + return Field_newdate::store((longlong) rint(nr), FALSE); } -int Field_newdate::store(longlong nr) +int Field_newdate::store(longlong nr, bool unsigned_val) { - int32 tmp; - int error= 0; - if (nr >= LL(100000000) && nr <= LL(99991231235959)) - nr=nr/LL(1000000); // Timestamp to date - if (nr < 0L || nr > 99991231L) - { - tmp=0; - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_OUT_OF_RANGE, nr, - MYSQL_TIMESTAMP_DATE, 1); - error= 1; + TIME l_time; + longlong tmp; + int error; + THD *thd= table ? 
table->in_use : current_thd; + if (number_to_datetime(nr, &l_time, + (TIME_FUZZY_DATE | + (thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), + &error) == LL(-1)) + { + tmp= 0L; + error= 2; } else - { - tmp=(int32) nr; - if (tmp) - { - if (tmp < YY_PART_YEAR*10000L) // Fix short dates - tmp+= (uint32) 20000000L; - else if (tmp < 999999L) - tmp+= (uint32) 19000000L; - } - uint month= (uint) ((tmp/100) % 100); - uint day= (uint) (tmp%100); - if (month > 12 || day > 31) - { - tmp=0L; // Don't allow date to change - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_OUT_OF_RANGE, nr, - MYSQL_TIMESTAMP_DATE, 1); - error= 1; - } - else - tmp= day + month*32 + (tmp/10000)*16*32; - } - int3store(ptr,(int32) tmp); + tmp= l_time.day + l_time.month*32 + l_time.year*16*32; + + if (error) + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + error == 2 ? ER_WARN_DATA_OUT_OF_RANGE : + WARN_DATA_TRUNCATED,nr,MYSQL_TIMESTAMP_DATE, 1); + + int3store(ptr,tmp); return error; } -void Field_newdate::store_time(TIME *ltime,timestamp_type type) + +int Field_newdate::store_time(TIME *ltime, timestamp_type time_type) { long tmp; - if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME) + int error= 0; + if (time_type == MYSQL_TIMESTAMP_DATE || + time_type == MYSQL_TIMESTAMP_DATETIME) + { tmp=ltime->year*16*32+ltime->month*32+ltime->day; + if (check_date(ltime, tmp != 0, + (TIME_FUZZY_DATE | + (current_thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), &error)) + { + char buff[12]; + String str(buff, sizeof(buff), &my_charset_latin1); + make_date((DATE_TIME_FORMAT *) 0, ltime, &str); + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, + str.ptr(), str.length(), MYSQL_TIMESTAMP_DATE, 1); + } + } else { tmp=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + error= 1; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, 
WARN_DATA_TRUNCATED, 1); } int3store(ptr,tmp); + return error; } + bool Field_newdate::send_binary(Protocol *protocol) { TIME tm; @@ -4598,11 +5429,13 @@ bool Field_newdate::send_binary(Protocol *protocol) return protocol->store_date(&tm); } + double Field_newdate::val_real(void) { return (double) Field_newdate::val_int(); } + longlong Field_newdate::val_int(void) { ulong j= uint3korr(ptr); @@ -4610,6 +5443,7 @@ longlong Field_newdate::val_int(void) return (longlong) j; } + String *Field_newdate::val_str(String *val_buffer, String *val_ptr __attribute__((unused))) { @@ -4637,24 +5471,26 @@ String *Field_newdate::val_str(String *val_buffer, return val_buffer; } + bool Field_newdate::get_date(TIME *ltime,uint fuzzydate) { - if (is_null()) - return 1; uint32 tmp=(uint32) uint3korr(ptr); ltime->day= tmp & 31; ltime->month= (tmp >> 5) & 15; ltime->year= (tmp >> 9); ltime->time_type= MYSQL_TIMESTAMP_DATE; ltime->hour= ltime->minute= ltime->second= ltime->second_part= ltime->neg= 0; - return (!fuzzydate && (!ltime->month || !ltime->day)) ? 1 : 0; + return ((!(fuzzydate & TIME_FUZZY_DATE) && (!ltime->month || !ltime->day)) ? + 1 : 0); } + bool Field_newdate::get_time(TIME *ltime) { return Field_newdate::get_date(ltime,0); } + int Field_newdate::cmp(const char *a_ptr, const char *b_ptr) { uint32 a,b; @@ -4663,6 +5499,7 @@ int Field_newdate::cmp(const char *a_ptr, const char *b_ptr) return (a < b) ? -1 : (a > b) ? 
1 : 0; } + void Field_newdate::sort_string(char *to,uint length __attribute__((unused))) { to[0] = ptr[2]; @@ -4670,9 +5507,10 @@ void Field_newdate::sort_string(char *to,uint length __attribute__((unused))) to[2] = ptr[0]; } + void Field_newdate::sql_type(String &res) const { - res.set_ascii("date", 4); + res.set_ascii(STRING_WITH_LEN("date")); } @@ -4688,17 +5526,27 @@ int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs) TIME time_tmp; int error; ulonglong tmp= 0; - - if (str_to_datetime(from, len, &time_tmp, 1, &error) > MYSQL_TIMESTAMP_ERROR) + enum enum_mysql_timestamp_type func_res; + THD *thd= table ? table->in_use : current_thd; + + func_res= str_to_datetime(from, len, &time_tmp, + (TIME_FUZZY_DATE | + (thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), + &error); + if ((int) func_res > (int) MYSQL_TIMESTAMP_ERROR) tmp= TIME_to_ulonglong_datetime(&time_tmp); - + else + error= 1; // Fix if invalid zero date + if (error) - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, from, len, MYSQL_TIMESTAMP_DATETIME, 1); #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { int8store(ptr,tmp); } @@ -4717,29 +5565,41 @@ int Field_datetime::store(double nr) set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, nr, MYSQL_TIMESTAMP_DATETIME); - nr=0.0; + nr= 0.0; error= 1; } - error |= Field_datetime::store((longlong) rint(nr)); + error|= Field_datetime::store((longlong) rint(nr), FALSE); return error; } -int Field_datetime::store(longlong nr) +int Field_datetime::store(longlong nr, bool unsigned_val) { TIME not_used; int error; longlong initial_nr= nr; - - nr= number_to_TIME(nr, ¬_used, 1, &error); + THD *thd= table ? 
table->in_use : current_thd; + + nr= number_to_datetime(nr, ¬_used, (TIME_FUZZY_DATE | + (thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | + MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), &error); + + if (nr == LL(-1)) + { + nr= 0; + error= 2; + } if (error) - set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_TRUNCATED, initial_nr, + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + error == 2 ? ER_WARN_DATA_OUT_OF_RANGE : + WARN_DATA_TRUNCATED, initial_nr, MYSQL_TIMESTAMP_DATETIME, 1); #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { int8store(ptr,nr); } @@ -4750,29 +5610,47 @@ int Field_datetime::store(longlong nr) } -void Field_datetime::store_time(TIME *ltime,timestamp_type type) +int Field_datetime::store_time(TIME *ltime,timestamp_type time_type) { longlong tmp; + int error= 0; /* We don't perform range checking here since values stored in TIME structure always fit into DATETIME range. */ - if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME) + if (time_type == MYSQL_TIMESTAMP_DATE || + time_type == MYSQL_TIMESTAMP_DATETIME) + { tmp=((ltime->year*10000L+ltime->month*100+ltime->day)*LL(1000000)+ (ltime->hour*10000L+ltime->minute*100+ltime->second)); + if (check_date(ltime, tmp != 0, + (TIME_FUZZY_DATE | + (current_thd->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), &error)) + { + char buff[19]; + String str(buff, sizeof(buff), &my_charset_latin1); + make_datetime((DATE_TIME_FORMAT *) 0, ltime, &str); + set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, + str.ptr(), str.length(), MYSQL_TIMESTAMP_DATETIME,1); + } + } else { tmp=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + error= 1; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { 
int8store(ptr,tmp); } else #endif longlongstore(ptr,tmp); + return error; } bool Field_datetime::send_binary(Protocol *protocol) @@ -4792,7 +5670,7 @@ longlong Field_datetime::val_int(void) { longlong j; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) j=sint8korr(ptr); else #endif @@ -4812,14 +5690,14 @@ String *Field_datetime::val_str(String *val_buffer, int part3; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) tmp=sint8korr(ptr); else #endif longlongget(tmp,ptr); /* - Avoid problem with slow longlong aritmetic and sprintf + Avoid problem with slow longlong arithmetic and sprintf */ part1=(long) (tmp/LL(1000000)); @@ -4865,7 +5743,7 @@ bool Field_datetime::get_date(TIME *ltime, uint fuzzydate) ltime->day= (int) (part1%100); ltime->month= (int) (part1/100%100); ltime->year= (int) (part1/10000); - return (!fuzzydate && (!ltime->month || !ltime->day)) ? 1 : 0; + return (!(fuzzydate & TIME_FUZZY_DATE) && (!ltime->month || !ltime->day)) ? 
1 : 0; } bool Field_datetime::get_time(TIME *ltime) @@ -4877,7 +5755,7 @@ int Field_datetime::cmp(const char *a_ptr, const char *b_ptr) { longlong a,b; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) { a=sint8korr(a_ptr); b=sint8korr(b_ptr); @@ -4895,7 +5773,7 @@ int Field_datetime::cmp(const char *a_ptr, const char *b_ptr) void Field_datetime::sort_string(char *to,uint length __attribute__((unused))) { #ifdef WORDS_BIGENDIAN - if (!table || !table->db_low_byte_first) + if (!table || !table->s->db_low_byte_first) { to[0] = ptr[0]; to[1] = ptr[1]; @@ -4923,7 +5801,7 @@ void Field_datetime::sort_string(char *to,uint length __attribute__((unused))) void Field_datetime::sql_type(String &res) const { - res.set_ascii("datetime", 8); + res.set_ascii(STRING_WITH_LEN("datetime")); } /**************************************************************************** @@ -4931,56 +5809,172 @@ void Field_datetime::sql_type(String &res) const ** A string may be varchar or binary ****************************************************************************/ +/* + Report "not well formed" or "cannot convert" error + after storing a character string info a field. 
+ + SYNOPSIS + check_string_copy_error() + field - Field + well_formed_error_pos - where not well formed data was first met + cannot_convert_error_pos - where a not-convertable character was first met + end - end of the string + + NOTES + As of version 5.0 both cases return the same error: + + "Invalid string value: 'xxx' for column 't' at row 1" + + Future versions will possibly introduce a new error message: + + "Cannot convert character string: 'xxx' for column 't' at row 1" + + RETURN + FALSE - If errors didn't happen + TRUE - If an error happened +*/ + +static bool +check_string_copy_error(Field_str *field, + const char *well_formed_error_pos, + const char *cannot_convert_error_pos, + const char *end) +{ + const char *pos, *end_orig; + char tmp[64], *t; + + if (!(pos= well_formed_error_pos) && + !(pos= cannot_convert_error_pos)) + return FALSE; + + end_orig= end; + set_if_smaller(end, pos + 6); + + for (t= tmp; pos < end; pos++) + { + if (((unsigned char) *pos) >= 0x20 && + ((unsigned char) *pos) <= 0x7F) + { + *t++= *pos; + } + else + { + *t++= '\\'; + *t++= 'x'; + *t++= _dig_vec_upper[((unsigned char) *pos) >> 4]; + *t++= _dig_vec_upper[((unsigned char) *pos) & 15]; + } + } + if (end_orig > end) + { + *t++= '.'; + *t++= '.'; + *t++= '.'; + } + *t= '\0'; + push_warning_printf(field->table->in_use, + field->table->in_use->abort_on_warning ? + MYSQL_ERROR::WARN_LEVEL_ERROR : + MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + "string", tmp, field->field_name, + (ulong) field->table->in_use->row_count); + return TRUE; +} + + + +/* + Send a truncation warning or a truncation error + after storing a too long character string info a field. 
+ + SYNOPSIS + report_data_too_long() + field - Field + + RETURN + N/A +*/ + +inline void +report_data_too_long(Field_str *field) +{ + if (field->table->in_use->abort_on_warning) + field->set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1); + else + field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); +} + + +/* + Test if the given string contains important data: + not spaces for character string, + or any data for binary string. + + SYNOPSIS + test_if_important_data() + cs Character set + str String to test + strend String end + + RETURN + FALSE - If string does not have important data + TRUE - If string has some important data +*/ + +static bool +test_if_important_data(CHARSET_INFO *cs, const char *str, const char *strend) +{ + if (cs != &my_charset_bin) + str+= cs->cset->scan(cs, str, strend, MY_SEQ_SPACES); + return (str < strend); +} + + /* Copy a string and fill with space */ int Field_string::store(const char *from,uint length,CHARSET_INFO *cs) { - int error= 0, well_formed_error; - uint32 not_used; - char buff[80]; - String tmpstr(buff,sizeof(buff), &my_charset_bin); uint copy_length; - + const char *well_formed_error_pos; + const char *cannot_convert_error_pos; + const char *from_end_pos; + /* See the comment for Field_long::store(long long) */ DBUG_ASSERT(table->in_use == current_thd); - - /* Convert character set if nesessary */ - if (String::needs_conversion(length, cs, field_charset, ¬_used)) - { - uint conv_errors; - tmpstr.copy(from, length, cs, field_charset, &conv_errors); - from= tmpstr.ptr(); - length= tmpstr.length(); - if (conv_errors) - error= 2; - } - /* - Make sure we don't break a multibyte sequence - as well as don't copy a malformed data. 
- */ - copy_length= field_charset->cset->well_formed_len(field_charset, - from,from+length, - field_length/ - field_charset->mbmaxlen, - &well_formed_error); - memcpy(ptr,from,copy_length); - if (copy_length < field_length) // Append spaces if shorter + copy_length= well_formed_copy_nchars(field_charset, + ptr, field_length, + cs, from, length, + field_length / field_charset->mbmaxlen, + &well_formed_error_pos, + &cannot_convert_error_pos, + &from_end_pos); + + /* Append spaces if the string was shorter than the field. */ + if (copy_length < field_length) field_charset->cset->fill(field_charset,ptr+copy_length, - field_length-copy_length,' '); - - if ((copy_length < length) && table->in_use->count_cuted_fields) - { // Check if we loosed some info - const char *end=from+length; - from+= copy_length; - from+= field_charset->cset->scan(field_charset, from, end, - MY_SEQ_SPACES); - if (from != end) - error= 2; + field_length-copy_length, + field_charset->pad_char); + + if (check_string_copy_error(this, well_formed_error_pos, + cannot_convert_error_pos, from + length)) + return 2; + + /* + Check if we lost any important data (anything in a binary string, + or any non-space in others). + */ + if ((from_end_pos < from + length) && table->in_use->count_cuted_fields) + { + if (test_if_important_data(field_charset, from_end_pos, from + length)) + { + report_data_too_long(this); + return 2; + } } - if (error) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); - return error; + return 0; } @@ -5000,60 +5994,101 @@ int Field_str::store(double nr) char buff[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE]; uint length; bool use_scientific_notation= TRUE; - uint char_length= field_length / charset()->mbmaxlen; + uint local_char_length= field_length / charset()->mbmaxlen; /* Check fabs(nr) against longest value that can be stored in field, which depends on whether the value is < 1 or not, and negative or not */ double anr= fabs(nr); int neg= (nr < 0.0) ? 
1 : 0; - if (char_length > 4 && char_length < 32 && - (anr < 1.0 ? anr > 1/(log_10[max(0,(int) char_length-neg-2)]) /* -2 for "0." */ - : anr < log_10[char_length-neg]-1)) + if (local_char_length > 4 && local_char_length < 32 && + (anr < 1.0 ? anr > 1/(log_10[max(0,(int) local_char_length-neg-2)]) /* -2 for "0." */ + : anr < log_10[local_char_length-neg]-1)) use_scientific_notation= FALSE; length= (uint) my_sprintf(buff, (buff, "%-.*g", (use_scientific_notation ? - max(0, (int)char_length-neg-5) : - char_length), + max(0, (int)local_char_length-neg-5) : + local_char_length), nr)); /* +1 below is because "precision" in %g above means the max. number of significant digits, not the output width. Thus the width can be larger than number of significant digits by 1 (for decimal point) - the test for char_length < 5 is for extreme cases, + the test for local_char_length < 5 is for extreme cases, like inserting 500.0 in char(1) */ - DBUG_ASSERT(char_length < 5 || length <= char_length+1); + DBUG_ASSERT(local_char_length < 5 || length <= local_char_length+1); return store((const char *) buff, length, charset()); } -int Field_string::store(longlong nr) +int Field_string::store(longlong nr, bool unsigned_val) { char buff[64]; int l; CHARSET_INFO *cs=charset(); - l= (cs->cset->longlong10_to_str)(cs,buff,sizeof(buff),-10,nr); + l= (cs->cset->longlong10_to_str)(cs,buff,sizeof(buff), + unsigned_val ? 
10 : -10, nr); return Field_string::store(buff,(uint)l,cs); } +int Field_longstr::store_decimal(const my_decimal *d) +{ + char buff[DECIMAL_MAX_STR_LENGTH+1]; + String str(buff, sizeof(buff), &my_charset_bin); + my_decimal2string(E_DEC_FATAL_ERROR, d, 0, 0, 0, &str); + return store(str.ptr(), str.length(), str.charset()); +} + + double Field_string::val_real(void) { - int not_used; - char *end_not_used; - CHARSET_INFO *cs=charset(); - return my_strntod(cs, ptr, field_length, &end_not_used, ¬_used); + int error; + char *end; + CHARSET_INFO *cs= charset(); + double result; + + result= my_strntod(cs,ptr,field_length,&end,&error); + if (!table->in_use->no_errors && + (error || (field_length != (uint32)(end - ptr) && + !check_if_only_end_space(cs, end, ptr + field_length)))) + { + char buf[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE]; + String tmp(buf, sizeof(buf), cs); + tmp.copy(ptr, field_length, cs); + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), + "DOUBLE", tmp.c_ptr()); + } + return result; } longlong Field_string::val_int(void) { - int not_used; - CHARSET_INFO *cs=charset(); - return my_strntoll(cs,ptr,field_length,10,NULL,¬_used); + int error; + char *end; + CHARSET_INFO *cs= charset(); + longlong result; + + result= my_strntoll(cs,ptr,field_length,10,&end,&error); + if (!table->in_use->no_errors && + (error || (field_length != (uint32)(end - ptr) && + !check_if_only_end_space(cs, end, ptr + field_length)))) + { + char buf[LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE]; + String tmp(buf, sizeof(buf), cs); + tmp.copy(ptr, field_length, cs); + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), + "INTEGER", tmp.c_ptr()); + } + return result; } @@ -5068,6 +6103,26 @@ String *Field_string::val_str(String *val_buffer __attribute__((unused)), } +my_decimal *Field_string::val_decimal(my_decimal *decimal_value) +{ + int err= 
str2my_decimal(E_DEC_FATAL_ERROR, ptr, field_length, charset(), + decimal_value); + if (!table->in_use->no_errors && err) + { + char buf[DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE]; + CHARSET_INFO *cs= charset(); + String tmp(buf, sizeof(buf), cs); + tmp.copy(ptr, field_length, cs); + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), + "DECIMAL", tmp.c_ptr()); + } + + return decimal_value; +} + + int Field_string::cmp(const char *a_ptr, const char *b_ptr) { uint a_len, b_len; @@ -5086,15 +6141,16 @@ int Field_string::cmp(const char *a_ptr, const char *b_ptr) */ return field_charset->coll->strnncollsp(field_charset, (const uchar*) a_ptr, a_len, - (const uchar*) b_ptr, b_len); + (const uchar*) b_ptr, b_len, + 0); } void Field_string::sort_string(char *to,uint length) { - uint tmp=my_strnxfrm(field_charset, - (unsigned char *) to, length, - (unsigned char *) ptr, field_length); + IF_DBUG(uint tmp=) my_strnxfrm(field_charset, + (unsigned char *) to, length, + (unsigned char *) ptr, field_length); DBUG_ASSERT(tmp == length); } @@ -5103,27 +6159,30 @@ void Field_string::sql_type(String &res) const { THD *thd= table->in_use; CHARSET_INFO *cs=res.charset(); - ulong length= cs->cset->snprintf(cs,(char*) res.ptr(), - res.alloced_length(), "%s(%d)", - (field_length > 3 && - (table->db_options_in_use & - HA_OPTION_PACK_RECORD) ? - (has_charset() ? "varchar" : "varbinary") : + ulong length; + + length= cs->cset->snprintf(cs,(char*) res.ptr(), + res.alloced_length(), "%s(%d)", + ((type() == MYSQL_TYPE_VAR_STRING && + !thd->variables.new_mode) ? + (has_charset() ? "varchar" : "varbinary") : (has_charset() ? 
"char" : "binary")), - (int) field_length / charset()->mbmaxlen); + (int) field_length / charset()->mbmaxlen); res.length(length); if ((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) && has_charset() && (charset()->state & MY_CS_BINSORT)) - res.append(" binary"); + res.append(STRING_WITH_LEN(" binary")); } + char *Field_string::pack(char *to, const char *from, uint max_length) { uint length= min(field_length,max_length); - uint char_length= max_length/field_charset->mbmaxlen; - if (length > char_length) - char_length= my_charpos(field_charset, from, from+length, char_length); - set_if_smaller(length, char_length); + uint local_char_length= max_length/field_charset->mbmaxlen; + if (length > local_char_length) + local_char_length= my_charpos(field_charset, from, from+length, + local_char_length); + set_if_smaller(length, local_char_length); while (length && from[length-1] == ' ') length--; *to++= (char) (uchar) length; @@ -5150,10 +6209,27 @@ const char *Field_string::unpack(char *to, const char *from) } -int Field_string::pack_cmp(const char *a, const char *b, uint length) +/* + Compare two packed keys + + SYNOPSIS + pack_cmp() + a New key + b Original key + length Key length + insert_or_update 1 if this is an insert or update + + RETURN + < 0 a < b + 0 a = b + > 0 a > b +*/ + +int Field_string::pack_cmp(const char *a, const char *b, uint length, + my_bool insert_or_update) { uint a_length, b_length; - if (field_length > 255) + if (length > 255) { a_length= uint2korr(a); b_length= uint2korr(b); @@ -5165,29 +6241,51 @@ int Field_string::pack_cmp(const char *a, const char *b, uint length) a_length= (uint) (uchar) *a++; b_length= (uint) (uchar) *b++; } - return my_strnncoll(field_charset, - (const uchar*)a,a_length, - (const uchar*)b,b_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length, + insert_or_update); } -int Field_string::pack_cmp(const char *b, uint length) +/* + Compare a 
packed key against row + + SYNOPSIS + pack_cmp() + key Original key + length Key length. (May be less than field length) + insert_or_update 1 if this is an insert or update + + RETURN + < 0 row < key + 0 row = key + > 0 row > key +*/ + +int Field_string::pack_cmp(const char *key, uint length, + my_bool insert_or_update) { - uint b_length; - if (field_length > 255) + uint row_length, local_key_length; + char *end; + if (length > 255) { - b_length= uint2korr(b); - b+= 2; + local_key_length= uint2korr(key); + key+= 2; } else - b_length= (uint) (uchar) *b++; - char *end= ptr + field_length; + local_key_length= (uint) (uchar) *key++; + + /* Only use 'length' of key, not field_length */ + end= ptr + length; while (end > ptr && end[-1] == ' ') end--; - uint a_length = (uint) (end - ptr); - return my_strnncoll(field_charset, - (const uchar*)ptr,a_length, - (const uchar*)b, b_length); + row_length= (uint) (end - ptr); + + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) ptr, row_length, + (const uchar*) key, local_key_length, + insert_or_update); } @@ -5195,68 +6293,118 @@ uint Field_string::packed_col_length(const char *data_ptr, uint length) { if (length > 255) return uint2korr(data_ptr)+2; - else - return (uint) ((uchar) *data_ptr)+1; + return (uint) ((uchar) *data_ptr)+1; } + uint Field_string::max_packed_col_length(uint max_length) { return (max_length > 255 ? 2 : 1)+max_length; } +Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type) +{ + Field *field; + + if (type() != MYSQL_TYPE_VAR_STRING || keep_type) + field= Field::new_field(root, new_table, keep_type); + else + { + + /* + Old VARCHAR field which should be modified to a VARCHAR on copy + This is done to ensure that ALTER TABLE will convert old VARCHAR fields + to now VARCHAR fields. 
+ */ + field= new Field_varstring(field_length, maybe_null(), + field_name, new_table, charset()); + /* + Normally orig_table is different from table only if field was created + via ::new_field. Here we alter the type of field, so ::new_field is + not applicable. But we still need to preserve the original field + metadata for the client-server protocol. + */ + field->orig_table= orig_table; + } + return field; +} + /**************************************************************************** -** VARCHAR type (Not available for the end user yet) + VARCHAR type + Data in field->ptr is stored as: + 1 or 2 bytes length-prefix-header (from Field_varstring::length_bytes) + data + + NOTE: + When VARCHAR is stored in a key (for handler::index_read() etc) it's always + stored with a 2 byte prefix. (Just like blob keys). + + Normally length_bytes is calculated as (field_length < 256 : 1 ? 2) + The exception is if there is a prefix key field that is part of a long + VARCHAR, in which case field_length for this may be 1 but the length_bytes + is 2. 
****************************************************************************/ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs) { - int error= 0; - uint32 not_used; - char buff[80]; - String tmpstr(buff,sizeof(buff), &my_charset_bin); + uint copy_length; + const char *well_formed_error_pos; + const char *cannot_convert_error_pos; + const char *from_end_pos; - /* Convert character set if nesessary */ - if (String::needs_conversion(length, cs, field_charset, ¬_used)) - { - uint conv_errors; - tmpstr.copy(from, length, cs, field_charset, &conv_errors); - from= tmpstr.ptr(); - length= tmpstr.length(); - if (conv_errors) - error= 2; - } - if (length > field_length) + copy_length= well_formed_copy_nchars(field_charset, + ptr + length_bytes, field_length, + cs, from, length, + field_length / field_charset->mbmaxlen, + &well_formed_error_pos, + &cannot_convert_error_pos, + &from_end_pos); + + if (length_bytes == 1) + *ptr= (uchar) copy_length; + else + int2store(ptr, copy_length); + + if (check_string_copy_error(this, well_formed_error_pos, + cannot_convert_error_pos, from + length)) + return 2; + + // Check if we lost something other than just trailing spaces + if ((from_end_pos < from + length) && table->in_use->count_cuted_fields) { - length=field_length; - error= 2; + if (test_if_important_data(field_charset, from_end_pos, from + length)) + report_data_too_long(this); + else /* If we lost only spaces then produce a NOTE, not a WARNING */ + set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1); + return 2; } - if (error) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); - memcpy(ptr+HA_KEY_BLOB_LENGTH,from,length); - int2store(ptr, length); - return error; + return 0; } -int Field_varstring::store(longlong nr) +int Field_varstring::store(longlong nr, bool unsigned_val) { char buff[64]; - int l; - CHARSET_INFO *cs=charset(); - l= (cs->cset->longlong10_to_str)(cs,buff,sizeof(buff),-10,nr); - return 
Field_varstring::store(buff,(uint)l,cs); + uint length; + length= (uint) (field_charset->cset->longlong10_to_str)(field_charset, + buff, + sizeof(buff), + (unsigned_val ? 10: + -10), + nr); + return Field_varstring::store(buff, length, field_charset); } double Field_varstring::val_real(void) { int not_used; - uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH; - CHARSET_INFO *cs=charset(); char *end_not_used; - return my_strntod(cs, ptr+HA_KEY_BLOB_LENGTH, length, &end_not_used, + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + return my_strntod(field_charset, ptr+length_bytes, length, &end_not_used, ¬_used); } @@ -5264,111 +6412,286 @@ double Field_varstring::val_real(void) longlong Field_varstring::val_int(void) { int not_used; - uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH; - CHARSET_INFO *cs=charset(); - return my_strntoll(cs,ptr+HA_KEY_BLOB_LENGTH,length,10,NULL, ¬_used); + char *end_not_used; + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + return my_strntoll(field_charset, ptr+length_bytes, length, 10, + &end_not_used, ¬_used); } - String *Field_varstring::val_str(String *val_buffer __attribute__((unused)), String *val_ptr) { - uint length=uint2korr(ptr); - val_ptr->set((const char*) ptr+HA_KEY_BLOB_LENGTH,length,field_charset); + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + val_ptr->set((const char*) ptr+length_bytes, length, field_charset); return val_ptr; } +my_decimal *Field_varstring::val_decimal(my_decimal *decimal_value) +{ + uint length= length_bytes == 1 ? 
(uint) (uchar) *ptr : uint2korr(ptr); + str2my_decimal(E_DEC_FATAL_ERROR, ptr+length_bytes, length, charset(), + decimal_value); + return decimal_value; +} + + int Field_varstring::cmp(const char *a_ptr, const char *b_ptr) { - uint a_length=uint2korr(a_ptr); - uint b_length=uint2korr(b_ptr); + uint a_length, b_length; int diff; - diff= my_strnncoll(field_charset, - (const uchar*) a_ptr+HA_KEY_BLOB_LENGTH, - min(a_length,b_length), - (const uchar*) b_ptr+HA_KEY_BLOB_LENGTH, - min(a_length,b_length)); - return diff ? diff : (int) (a_length - b_length); + + if (length_bytes == 1) + { + a_length= (uint) (uchar) *a_ptr; + b_length= (uint) (uchar) *b_ptr; + } + else + { + a_length= uint2korr(a_ptr); + b_length= uint2korr(b_ptr); + } + diff= field_charset->coll->strnncollsp(field_charset, + (const uchar*) a_ptr+ + length_bytes, + a_length, + (const uchar*) b_ptr+ + length_bytes, + b_length,0); + return diff; +} + + +/* + NOTE: varstring and blob keys are ALWAYS stored with a 2 byte length prefix +*/ + +int Field_varstring::key_cmp(const byte *key_ptr, uint max_key_length) +{ + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + uint local_char_length= max_key_length / field_charset->mbmaxlen; + + local_char_length= my_charpos(field_charset, ptr + length_bytes, + ptr + length_bytes + length, local_char_length); + set_if_smaller(length, local_char_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) ptr + length_bytes, + length, + (const uchar*) key_ptr+ + HA_KEY_BLOB_LENGTH, + uint2korr(key_ptr), 0); } + +/* + Compare to key segments (always 2 byte length prefix) + + NOTE + This is used only to compare key segments created for index_read(). 
+ (keys are created and compared in key.cc) +*/ + +int Field_varstring::key_cmp(const byte *a,const byte *b) +{ + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a + + HA_KEY_BLOB_LENGTH, + uint2korr(a), + (const uchar*) b + + HA_KEY_BLOB_LENGTH, + uint2korr(b), + 0); +} + + void Field_varstring::sort_string(char *to,uint length) { - uint tot_length=uint2korr(ptr); + uint tot_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + + if (field_charset == &my_charset_bin) + { + /* Store length last in high-byte order to sort longer strings first */ + if (length_bytes == 1) + to[length-1]= tot_length; + else + mi_int2store(to+length-2, tot_length); + length-= length_bytes; + } + tot_length= my_strnxfrm(field_charset, (uchar*) to, length, - (uchar*) ptr+HA_KEY_BLOB_LENGTH, + (uchar*) ptr + length_bytes, tot_length); DBUG_ASSERT(tot_length == length); } +enum ha_base_keytype Field_varstring::key_type() const +{ + enum ha_base_keytype res; + + if (binary()) + res= length_bytes == 1 ? HA_KEYTYPE_VARBINARY1 : HA_KEYTYPE_VARBINARY2; + else + res= length_bytes == 1 ? HA_KEYTYPE_VARTEXT1 : HA_KEYTYPE_VARTEXT2; + return res; +} + + void Field_varstring::sql_type(String &res) const { + THD *thd= table->in_use; CHARSET_INFO *cs=res.charset(); - ulong length= cs->cset->snprintf(cs,(char*) res.ptr(), - res.alloced_length(),"varchar(%u)", - field_length / charset()->mbmaxlen); + ulong length; + + length= cs->cset->snprintf(cs,(char*) res.ptr(), + res.alloced_length(), "%s(%d)", + (has_charset() ? "varchar" : "varbinary"), + (int) field_length / charset()->mbmaxlen); res.length(length); + if ((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) && + has_charset() && (charset()->state & MY_CS_BINSORT)) + res.append(STRING_WITH_LEN(" binary")); +} + + +uint32 Field_varstring::data_length(const char *from) +{ + return length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); } +/* + Functions to create a packed row. 
+ Here the number of length bytes are depending on the given max_length +*/ + char *Field_varstring::pack(char *to, const char *from, uint max_length) { - uint length=uint2korr(from); + uint length= length_bytes == 1 ? (uint) (uchar) *from : uint2korr(from); + set_if_smaller(max_length, field_length); if (length > max_length) length=max_length; *to++= (char) (length & 255); if (max_length > 255) *to++= (char) (length >> 8); if (length) - memcpy(to, from+HA_KEY_BLOB_LENGTH, length); + memcpy(to, from+length_bytes, length); return to+length; } -char *Field_varstring::pack_key(char *to, const char *from, uint max_length) +char *Field_varstring::pack_key(char *to, const char *key, uint max_length) { - uint length=uint2korr(from); - uint char_length= (field_charset->mbmaxlen > 1) ? - max_length/field_charset->mbmaxlen : max_length; - from+=HA_KEY_BLOB_LENGTH; - if (length > char_length) - char_length= my_charpos(field_charset, from, from+length, char_length); - set_if_smaller(length, char_length); + uint length= length_bytes == 1 ? (uint) (uchar) *key : uint2korr(key); + uint local_char_length= ((field_charset->mbmaxlen > 1) ? + max_length/field_charset->mbmaxlen : max_length); + key+= length_bytes; + if (length > local_char_length) + { + local_char_length= my_charpos(field_charset, key, key+length, + local_char_length); + set_if_smaller(length, local_char_length); + } + *to++= (char) (length & 255); + if (max_length > 255) + *to++= (char) (length >> 8); + if (length) + memcpy(to, key, length); + return to+length; +} + + +/* + Unpack a key into a record buffer. + + SYNOPSIS + unpack_key() + to Pointer into the record buffer. + key Pointer to the packed key. + max_length Key length limit from key description. + + DESCRIPTION + A VARCHAR key has a maximum size of 64K-1. + In its packed form, the length field is one or two bytes long, + depending on 'max_length'. 
+ + RETURN + Pointer to end of 'key' (To the next key part if multi-segment key) +*/ + +const char *Field_varstring::unpack_key(char *to, const char *key, + uint max_length) +{ + /* get length of the blob key */ + uint32 length= *((uchar*) key++); + if (max_length > 255) + length+= (*((uchar*) key++)) << 8; + + /* put the length into the record buffer */ + if (length_bytes == 1) + *ptr= (uchar) length; + else + int2store(ptr, length); + memcpy(ptr + length_bytes, key, length); + return key + length; +} + +/* + Create a packed key that will be used for storage in the index tree + + SYNOPSIS + pack_key_from_key_image() + to Store packed key segment here + from Key segment (as given to index_read()) + max_length Max length of key + + RETURN + end of key storage +*/ + +char *Field_varstring::pack_key_from_key_image(char *to, const char *from, + uint max_length) +{ + /* Key length is always stored as 2 bytes */ + uint length= uint2korr(from); + if (length > max_length) + length= max_length; *to++= (char) (length & 255); if (max_length > 255) *to++= (char) (length >> 8); if (length) - memcpy(to, from, length); + memcpy(to, from+HA_KEY_BLOB_LENGTH, length); return to+length; } +/* + unpack field packed with Field_varstring::pack() +*/ + const char *Field_varstring::unpack(char *to, const char *from) { uint length; - if (field_length > 255) - { + if (length_bytes == 1) length= (uint) (uchar) (*to= *from++); - to[1]=0; - } else { - length=uint2korr(from); - to[0] = *from++; - to[1] = *from++; + length= uint2korr(from); + to[0]= *from++; + to[1]= *from++; } if (length) - memcpy(to+HA_KEY_BLOB_LENGTH, from, length); + memcpy(to+ length_bytes, from, length); return from+length; } -int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length) +int Field_varstring::pack_cmp(const char *a, const char *b, + uint key_length_arg, + my_bool insert_or_update) { - uint a_length; - uint b_length; - if (key_length > 255) + uint a_length, b_length; + if (key_length_arg > 
255) { a_length=uint2korr(a); a+= 2; b_length=uint2korr(b); b+= 2; @@ -5378,63 +6701,142 @@ int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length) a_length= (uint) (uchar) *a++; b_length= (uint) (uchar) *b++; } - return my_strnncoll(field_charset, - (const uchar*) a, a_length, - (const uchar*) b, b_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length, + insert_or_update); } -int Field_varstring::pack_cmp(const char *b, uint key_length) + +int Field_varstring::pack_cmp(const char *b, uint key_length_arg, + my_bool insert_or_update) { - char *a= ptr+HA_KEY_BLOB_LENGTH; - uint a_length= uint2korr(ptr); + char *a= ptr+ length_bytes; + uint a_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); uint b_length; - if (key_length > 255) + uint local_char_length= ((field_charset->mbmaxlen > 1) ? + key_length_arg / field_charset->mbmaxlen : + key_length_arg); + + if (key_length_arg > 255) { - b_length=uint2korr(b); b+= 2; + b_length=uint2korr(b); b+= HA_KEY_BLOB_LENGTH; } else - { b_length= (uint) (uchar) *b++; + + if (a_length > local_char_length) + { + local_char_length= my_charpos(field_charset, a, a+a_length, + local_char_length); + set_if_smaller(a_length, local_char_length); } - return my_strnncoll(field_charset, - (const uchar*) a, a_length, - (const uchar*) b, b_length); + + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a, + a_length, + (const uchar*) b, b_length, + insert_or_update); } + uint Field_varstring::packed_col_length(const char *data_ptr, uint length) { if (length > 255) - return uint2korr(data_ptr)+HA_KEY_BLOB_LENGTH; - else - return (uint) ((uchar) *data_ptr)+1; + return uint2korr(data_ptr)+2; + return (uint) ((uchar) *data_ptr)+1; } + uint Field_varstring::max_packed_col_length(uint max_length) { return (max_length > 255 ? 
2 : 1)+max_length; } -void Field_varstring::get_key_image(char *buff, uint length, CHARSET_INFO *cs, - imagetype type) + +void Field_varstring::get_key_image(char *buff, uint length, + imagetype type_arg) { - uint f_length=uint2korr(ptr); - if (f_length > length) - f_length= length; - int2store(buff,length); - memcpy(buff+HA_KEY_BLOB_LENGTH, ptr+HA_KEY_BLOB_LENGTH, length); -#ifdef HAVE_purify + uint f_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + uint local_char_length= length / field_charset->mbmaxlen; + char *pos= ptr+length_bytes; + local_char_length= my_charpos(field_charset, pos, pos + f_length, + local_char_length); + set_if_smaller(f_length, local_char_length); + /* Key is always stored with 2 bytes */ + int2store(buff,f_length); + memcpy(buff+HA_KEY_BLOB_LENGTH, pos, f_length); if (f_length < length) + { + /* + Must clear this as we do a memcmp in opt_range.cc to detect + identical keys + */ bzero(buff+HA_KEY_BLOB_LENGTH+f_length, (length-f_length)); -#endif + } } -void Field_varstring::set_key_image(char *buff,uint length, CHARSET_INFO *cs) + +void Field_varstring::set_key_image(char *buff,uint length) +{ + length= uint2korr(buff); // Real length is here + (void) Field_varstring::store(buff+HA_KEY_BLOB_LENGTH, length, + field_charset); +} + + +int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr, + uint32 max_length) { - length=uint2korr(buff); // Real length is here - (void) Field_varstring::store(buff+HA_KEY_BLOB_LENGTH, length, cs); + uint32 a_length,b_length; + + if (length_bytes == 1) + { + a_length= (uint) (uchar) *a_ptr; + b_length= (uint) (uchar) *b_ptr; + } + else + { + a_length= uint2korr(a_ptr); + b_length= uint2korr(b_ptr); + } + set_if_smaller(a_length, max_length); + set_if_smaller(b_length, max_length); + if (a_length != b_length) + return 1; + return memcmp(a_ptr+length_bytes, b_ptr+length_bytes, a_length); } +Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table, + bool 
keep_type) +{ + Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table, + keep_type); + if (res) + res->length_bytes= length_bytes; + return res; +} + + +Field *Field_varstring::new_key_field(MEM_ROOT *root, + struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field_varstring *res; + if ((res= (Field_varstring*) Field::new_key_field(root, + new_table, + new_ptr, + new_null_ptr, + new_null_bit))) + { + /* Keys length prefixes are always packed with 2 bytes */ + res->length_bytes= 2; + } + return res; +} + /**************************************************************************** ** blob type @@ -5446,14 +6848,17 @@ Field_blob::Field_blob(char *ptr_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg,uint blob_pack_length, CHARSET_INFO *cs) - :Field_str(ptr_arg, BLOB_PACK_LENGTH_TO_MAX_LENGH(blob_pack_length), + :Field_longstr(ptr_arg, BLOB_PACK_LENGTH_TO_MAX_LENGH(blob_pack_length), null_ptr_arg, null_bit_arg, unireg_check_arg, field_name_arg, table_arg, cs), packlength(blob_pack_length) { flags|= BLOB_FLAG; if (table) - table->blob_fields++; + { + table->s->blob_fields++; + /* TODO: why do not fill table->s->blob_field array here? 
*/ + } } @@ -5465,7 +6870,7 @@ void Field_blob::store_length(uint32 number) break; case 2: #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int2store(ptr,(unsigned short) number); } @@ -5478,7 +6883,7 @@ void Field_blob::store_length(uint32 number) break; case 4: #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int4store(ptr,number); } @@ -5498,7 +6903,7 @@ uint32 Field_blob::get_length(const char *pos) { uint16 tmp; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) tmp=sint2korr(pos); else #endif @@ -5511,7 +6916,7 @@ uint32 Field_blob::get_length(const char *pos) { uint32 tmp; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) tmp=uint4korr(pos); else #endif @@ -5561,58 +6966,70 @@ void Field_blob::put_length(char *pos, uint32 length) int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs) { - int error= 0, well_formed_error; + uint copy_length, new_length; + const char *well_formed_error_pos; + const char *cannot_convert_error_pos; + const char *from_end_pos, *tmp; + char buff[STRING_BUFFER_USUAL_SIZE]; + String tmpstr(buff,sizeof(buff), &my_charset_bin); + if (!length) { bzero(ptr,Field_blob::pack_length()); + return 0; } - else - { - bool was_conversion; - char buff[80]; - String tmpstr(buff,sizeof(buff), &my_charset_bin); - uint copy_length; - uint32 not_used; - /* Convert character set if nesessary */ - if ((was_conversion= String::needs_conversion(length, cs, field_charset, - ¬_used))) - { - uint conv_errors; - tmpstr.copy(from, length, cs, field_charset, &conv_errors); - from= tmpstr.ptr(); - length= tmpstr.length(); - if (conv_errors) - error= 2; - } - - copy_length= max_data_length(); - /* - copy_length is ok as last argument to well_formed_len as this is never - used to limit the length of the data. The cut of long data is done with - the 'min()' call below. 
- */ - copy_length= field_charset->cset->well_formed_len(field_charset, - from,from + - min(length, copy_length), - copy_length, - &well_formed_error); - if (copy_length < length) - error= 2; - Field_blob::store_length(copy_length); - if (was_conversion || table->copy_blobs || copy_length <= MAX_FIELD_WIDTH) - { // Must make a copy - if (from != value.ptr()) // For valgrind - { - value.copy(from,copy_length,charset()); - from=value.ptr(); - } + if (from == value.ptr()) + { + uint32 dummy_offset; + if (!String::needs_conversion(length, cs, field_charset, &dummy_offset)) + { + Field_blob::store_length(length); + bmove(ptr+packlength,(char*) &from,sizeof(char*)); + return 0; } - bmove(ptr+packlength,(char*) &from,sizeof(char*)); + if (tmpstr.copy(from, length, cs)) + goto oom_error; + from= tmpstr.ptr(); } - if (error) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + + new_length= min(max_data_length(), field_charset->mbmaxlen * length); + if (value.alloc(new_length)) + goto oom_error; + + /* + "length" is OK as "nchars" argument to well_formed_copy_nchars as this + is never used to limit the length of the data. The cut of long data + is done with the new_length value. 
+ */ + copy_length= well_formed_copy_nchars(field_charset, + (char*) value.ptr(), new_length, + cs, from, length, + length, + &well_formed_error_pos, + &cannot_convert_error_pos, + &from_end_pos); + + Field_blob::store_length(copy_length); + tmp= value.ptr(); + bmove(ptr+packlength,(char*) &tmp,sizeof(char*)); + + if (check_string_copy_error(this, well_formed_error_pos, + cannot_convert_error_pos, from + length)) + return 2; + + if (from_end_pos < from + length) + { + report_data_too_long(this); + return 2; + } + return 0; + +oom_error: + /* Fatal OOM error */ + bzero(ptr,Field_blob::pack_length()); + return -1; } @@ -5624,10 +7041,13 @@ int Field_blob::store(double nr) } -int Field_blob::store(longlong nr) +int Field_blob::store(longlong nr, bool unsigned_val) { CHARSET_INFO *cs=charset(); - value.set(nr, cs); + if (unsigned_val) + value.set((ulonglong) nr, cs); + else + value.set(nr, cs); return Field_blob::store(value.ptr(), (uint) value.length(), cs); } @@ -5635,14 +7055,16 @@ int Field_blob::store(longlong nr) double Field_blob::val_real(void) { int not_used; - char *blob; - char *end_not_used; + char *end_not_used, *blob; + uint32 length; + CHARSET_INFO *cs; + memcpy_fixed(&blob,ptr+packlength,sizeof(char*)); if (!blob) return 0.0; - uint32 length=get_length(ptr); - CHARSET_INFO *cs=charset(); - return my_strntod(cs,blob,length, &end_not_used, ¬_used); + length= get_length(ptr); + cs= charset(); + return my_strntod(cs, blob, length, &end_not_used, ¬_used); } @@ -5657,7 +7079,6 @@ longlong Field_blob::val_int(void) return my_strntoll(charset(),blob,length,10,NULL,¬_used); } - String *Field_blob::val_str(String *val_buffer __attribute__((unused)), String *val_ptr) { @@ -5671,13 +7092,25 @@ String *Field_blob::val_str(String *val_buffer __attribute__((unused)), } +my_decimal *Field_blob::val_decimal(my_decimal *decimal_value) +{ + const char *blob; + memcpy_fixed(&blob, ptr+packlength, sizeof(const char*)); + if (!blob) + blob= ""; + 
str2my_decimal(E_DEC_FATAL_ERROR, blob, get_length(ptr), charset(), + decimal_value); + return decimal_value; +} + + int Field_blob::cmp(const char *a,uint32 a_length, const char *b, uint32 b_length) { - return field_charset->coll->strnncoll(field_charset, - (const uchar*)a, a_length, - (const uchar*)b, b_length, - 0); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*)a, a_length, + (const uchar*)b, b_length, + 0); } @@ -5691,18 +7124,6 @@ int Field_blob::cmp(const char *a_ptr, const char *b_ptr) } -int Field_blob::cmp_offset(uint row_offset) -{ - return Field_blob::cmp(ptr,ptr+row_offset); -} - - -int Field_blob::cmp_binary_offset(uint row_offset) -{ - return cmp_binary(ptr, ptr+row_offset); -} - - int Field_blob::cmp_binary(const char *a_ptr, const char *b_ptr, uint32 max_length) { @@ -5724,14 +7145,13 @@ int Field_blob::cmp_binary(const char *a_ptr, const char *b_ptr, /* The following is used only when comparing a key */ -void Field_blob::get_key_image(char *buff,uint length, - CHARSET_INFO *cs, imagetype type) +void Field_blob::get_key_image(char *buff, uint length, imagetype type_arg) { uint32 blob_length= get_length(ptr); char *blob; #ifdef HAVE_SPATIAL - if (type == itMBR) + if (type_arg == itMBR) { const char *dummy; MBR mbr; @@ -5745,7 +7165,7 @@ void Field_blob::get_key_image(char *buff,uint length, } get_ptr(&blob); gobj= Geometry::construct(&buffer, blob, blob_length); - if (gobj->get_mbr(&mbr, &dummy)) + if (!gobj || gobj->get_mbr(&mbr, &dummy)) bzero(buff, SIZEOF_STORED_DOUBLE*4); else { @@ -5759,9 +7179,10 @@ void Field_blob::get_key_image(char *buff,uint length, #endif /*HAVE_SPATIAL*/ get_ptr(&blob); - uint char_length= length / cs->mbmaxlen; - char_length= my_charpos(cs, blob, blob + blob_length, char_length); - set_if_smaller(blob_length, char_length); + uint local_char_length= length / field_charset->mbmaxlen; + local_char_length= my_charpos(field_charset, blob, blob + blob_length, + local_char_length); + 
set_if_smaller(blob_length, local_char_length); if ((uint32) length > blob_length) { @@ -5776,10 +7197,11 @@ void Field_blob::get_key_image(char *buff,uint length, memcpy(buff+HA_KEY_BLOB_LENGTH, blob, length); } -void Field_blob::set_key_image(char *buff,uint length, CHARSET_INFO *cs) + +void Field_blob::set_key_image(char *buff,uint length) { length= uint2korr(buff); - (void) Field_blob::store(buff+HA_KEY_BLOB_LENGTH, length, cs); + (void) Field_blob::store(buff+HA_KEY_BLOB_LENGTH, length, field_charset); } @@ -5789,10 +7211,11 @@ int Field_blob::key_cmp(const byte *key_ptr, uint max_key_length) uint blob_length=get_length(ptr); memcpy_fixed(&blob1,ptr+packlength,sizeof(char*)); CHARSET_INFO *cs= charset(); - uint char_length= max_key_length / cs->mbmaxlen; - char_length= my_charpos(cs, blob1, blob1+blob_length, char_length); - set_if_smaller(blob_length, char_length); - return Field_blob::cmp(blob1,min(blob_length, max_key_length), + uint local_char_length= max_key_length / cs->mbmaxlen; + local_char_length= my_charpos(cs, blob1, blob1+blob_length, + local_char_length); + set_if_smaller(blob_length, local_char_length); + return Field_blob::cmp(blob1, blob_length, (char*) key_ptr+HA_KEY_BLOB_LENGTH, uint2korr(key_ptr)); } @@ -5804,6 +7227,13 @@ int Field_blob::key_cmp(const byte *a,const byte *b) } +uint32 Field_blob::sort_length() const +{ + return (uint32) (current_thd->variables.max_sort_length + + (field_charset == &my_charset_bin ? 
0 : packlength)); +} + + void Field_blob::sort_string(char *to,uint length) { char *blob; @@ -5813,6 +7243,31 @@ void Field_blob::sort_string(char *to,uint length) bzero(to,length); else { + if (field_charset == &my_charset_bin) + { + char *pos; + + /* + Store length of blob last in blob to shorter blobs before longer blobs + */ + length-= packlength; + pos= to+length; + + switch (packlength) { + case 1: + *pos= (char) blob_length; + break; + case 2: + mi_int2store(pos, blob_length); + break; + case 3: + mi_int3store(pos, blob_length); + break; + case 4: + mi_int4store(pos, blob_length); + break; + } + } memcpy_fixed(&blob,ptr+packlength,sizeof(char*)); blob_length=my_strnxfrm(field_charset, @@ -5835,10 +7290,10 @@ void Field_blob::sql_type(String &res) const } res.set_ascii(str,length); if (charset() == &my_charset_bin) - res.append("blob"); + res.append(STRING_WITH_LEN("blob")); else { - res.append("text"); + res.append(STRING_WITH_LEN("text")); } } @@ -5881,11 +7336,11 @@ const char *Field_blob::unpack(char *to, const char *from) /* Keys for blobs are like keys on varchars */ -int Field_blob::pack_cmp(const char *a, const char *b, uint key_length) +int Field_blob::pack_cmp(const char *a, const char *b, uint key_length_arg, + my_bool insert_or_update) { - uint a_length; - uint b_length; - if (key_length > 255) + uint a_length, b_length; + if (key_length_arg > 255) { a_length=uint2korr(a); a+=2; b_length=uint2korr(b); b+=2; @@ -5895,32 +7350,33 @@ int Field_blob::pack_cmp(const char *a, const char *b, uint key_length) a_length= (uint) (uchar) *a++; b_length= (uint) (uchar) *b++; } - return my_strnncoll(field_charset, - (const uchar*) a, a_length, - (const uchar*) b, b_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length, + insert_or_update); } -int Field_blob::pack_cmp(const char *b, uint key_length) +int Field_blob::pack_cmp(const char *b, uint key_length_arg, + my_bool insert_or_update) { 
char *a; + uint a_length, b_length; memcpy_fixed(&a,ptr+packlength,sizeof(char*)); if (!a) - return key_length > 0 ? -1 : 0; - uint a_length=get_length(ptr); - uint b_length; + return key_length_arg > 0 ? -1 : 0; - if (key_length > 255) + a_length= get_length(ptr); + if (key_length_arg > 255) { - b_length=uint2korr(b); b+=2; + b_length= uint2korr(b); b+=2; } else - { b_length= (uint) (uchar) *b++; - } - return my_strnncoll(field_charset, - (const uchar*) a, a_length, - (const uchar*) b, b_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length, + insert_or_update); } /* Create a packed key that will be used for storage from a MySQL row */ @@ -5930,13 +7386,14 @@ char *Field_blob::pack_key(char *to, const char *from, uint max_length) char *save=ptr; ptr=(char*) from; uint32 length=get_length(); // Length of from string - uint char_length= (field_charset->mbmaxlen > 1) ? - max_length/field_charset->mbmaxlen : max_length; + uint local_char_length= ((field_charset->mbmaxlen > 1) ? 
+ max_length/field_charset->mbmaxlen : max_length); if (length) get_ptr((char**) &from); - if (length > char_length) - char_length= my_charpos(field_charset, from, from+length, char_length); - set_if_smaller(length, char_length); + if (length > local_char_length) + local_char_length= my_charpos(field_charset, from, from+length, + local_char_length); + set_if_smaller(length, local_char_length); *to++= (uchar) length; if (max_length > 255) // 2 byte length *to++= (uchar) (length >> 8); @@ -5989,6 +7446,7 @@ const char *Field_blob::unpack_key(char *to, const char *from, uint max_length) return from + length; } + /* Create a packed key that will be used for storage from a MySQL key */ char *Field_blob::pack_key_from_key_image(char *to, const char *from, @@ -6005,14 +7463,15 @@ char *Field_blob::pack_key_from_key_image(char *to, const char *from, return to+length; } + uint Field_blob::packed_col_length(const char *data_ptr, uint length) { if (length > 255) return uint2korr(data_ptr)+2; - else - return (uint) ((uchar) *data_ptr)+1; + return (uint) ((uchar) *data_ptr)+1; } + uint Field_blob::max_packed_col_length(uint max_length) { return (max_length > 255 ? 
2 : 1)+max_length; @@ -6021,8 +7480,7 @@ uint Field_blob::max_packed_col_length(uint max_length) #ifdef HAVE_SPATIAL -void Field_geom::get_key_image(char *buff, uint length, CHARSET_INFO *cs, - imagetype type) +void Field_geom::get_key_image(char *buff, uint length, imagetype type_arg) { char *blob; const char *dummy; @@ -6038,7 +7496,7 @@ void Field_geom::get_key_image(char *buff, uint length, CHARSET_INFO *cs, } get_ptr(&blob); gobj= Geometry::construct(&buffer, blob, blob_length); - if (gobj->get_mbr(&mbr, &dummy)) + if (!gobj || gobj->get_mbr(&mbr, &dummy)) bzero(buff, SIZEOF_STORED_DOUBLE*4); else { @@ -6050,57 +7508,78 @@ void Field_geom::get_key_image(char *buff, uint length, CHARSET_INFO *cs, } -void Field_geom::set_key_image(char *buff, uint length, CHARSET_INFO *cs) -{ - Field_blob::set_key_image(buff, length, cs); -} - void Field_geom::sql_type(String &res) const { CHARSET_INFO *cs= &my_charset_latin1; switch (geom_type) { case GEOM_POINT: - res.set("point", 5, cs); + res.set(STRING_WITH_LEN("point"), cs); break; case GEOM_LINESTRING: - res.set("linestring", 10, cs); + res.set(STRING_WITH_LEN("linestring"), cs); break; case GEOM_POLYGON: - res.set("polygon", 7, cs); + res.set(STRING_WITH_LEN("polygon"), cs); break; case GEOM_MULTIPOINT: - res.set("multipoint", 10, cs); + res.set(STRING_WITH_LEN("multipoint"), cs); break; case GEOM_MULTILINESTRING: - res.set("multilinestring", 15, cs); + res.set(STRING_WITH_LEN("multilinestring"), cs); break; case GEOM_MULTIPOLYGON: - res.set("multipolygon", 12, cs); + res.set(STRING_WITH_LEN("multipolygon"), cs); break; case GEOM_GEOMETRYCOLLECTION: - res.set("geometrycollection", 18, cs); + res.set(STRING_WITH_LEN("geometrycollection"), cs); break; default: - res.set("geometry", 8, cs); + res.set(STRING_WITH_LEN("geometry"), cs); } } +int Field_geom::store(double nr) +{ + my_message(ER_CANT_CREATE_GEOMETRY_OBJECT, + ER(ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0)); + return -1; +} + + +int Field_geom::store(longlong nr, bool 
unsigned_val) +{ + my_message(ER_CANT_CREATE_GEOMETRY_OBJECT, + ER(ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0)); + return -1; +} + + +int Field_geom::store_decimal(const my_decimal *) +{ + my_message(ER_CANT_CREATE_GEOMETRY_OBJECT, + ER(ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0)); + return -1; +} + + int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs) { if (!length) bzero(ptr, Field_blob::pack_length()); else { + if (from == Geometry::bad_geometry_data.ptr()) + goto err; // Check given WKB uint32 wkb_type; if (length < SRID_SIZE + WKB_HEADER_SIZE + SIZEOF_STORED_DOUBLE*2) goto err; wkb_type= uint4korr(from + SRID_SIZE + 1); if (wkb_type < (uint32) Geometry::wkb_point || - wkb_type > (uint32) Geometry::wkb_end) - return -1; + wkb_type > (uint32) Geometry::wkb_last) + goto err; Field_blob::store_length(length); if (table->copy_blobs || length <= MAX_FIELD_WIDTH) { // Must make a copy @@ -6113,6 +7592,8 @@ int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs) err: bzero(ptr, Field_blob::pack_length()); + my_message(ER_CANT_CREATE_GEOMETRY_OBJECT, + ER(ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0)); return -1; } @@ -6141,7 +7622,7 @@ void Field_enum::store_type(ulonglong value) case 1: ptr[0]= (uchar) value; break; case 2: #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int2store(ptr,(unsigned short) value); } @@ -6152,7 +7633,7 @@ void Field_enum::store_type(ulonglong value) case 3: int3store(ptr,(long) value); break; case 4: #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int4store(ptr,value); } @@ -6162,7 +7643,7 @@ void Field_enum::store_type(ulonglong value) break; case 8: #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) { int8store(ptr,value); } @@ -6182,10 +7663,10 @@ int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs) { int err= 0; uint32 not_used; - char buff[80]; + char 
buff[STRING_BUFFER_USUAL_SIZE]; String tmpstr(buff,sizeof(buff), &my_charset_bin); - /* Convert character set if nesessary */ + /* Convert character set if necessary */ if (String::needs_conversion(length, cs, field_charset, ¬_used)) { uint dummy_errors; @@ -6207,11 +7688,11 @@ int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs) if (err || end != from+length || tmp > typelib->count) { tmp=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } } else - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } store_type((ulonglong) tmp); return err; @@ -6220,16 +7701,16 @@ int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs) int Field_enum::store(double nr) { - return Field_enum::store((longlong) nr); + return Field_enum::store((longlong) nr, FALSE); } -int Field_enum::store(longlong nr) +int Field_enum::store(longlong nr, bool unsigned_val) { int error= 0; - if ((uint) nr > typelib->count || nr == 0) + if ((ulonglong) nr > typelib->count || nr == 0) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); nr=0; error=1; } @@ -6253,7 +7734,7 @@ longlong Field_enum::val_int(void) { uint16 tmp; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) tmp=sint2korr(ptr); else #endif @@ -6266,7 +7747,7 @@ longlong Field_enum::val_int(void) { uint32 tmp; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) tmp=uint4korr(ptr); else #endif @@ -6277,7 +7758,7 @@ longlong Field_enum::val_int(void) { longlong tmp; #ifdef WORDS_BIGENDIAN - if (table->db_low_byte_first) + if (table->s->db_low_byte_first) tmp=sint8korr(ptr); else #endif @@ -6331,7 +7812,7 @@ void Field_enum::sql_type(String &res) const String 
enum_item(buffer, sizeof(buffer), res.charset()); res.length(0); - res.append("enum("); + res.append(STRING_WITH_LEN("enum(")); bool flag=0; uint *len= typelib->type_lengths; @@ -6349,6 +7830,16 @@ void Field_enum::sql_type(String &res) const } +Field *Field_enum::new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type) +{ + Field_enum *res= (Field_enum*) Field::new_field(root, new_table, keep_type); + if (res) + res->typelib= copy_typelib(root, typelib); + return res; +} + + /* set type. This is a string which can have a collection of different values. @@ -6366,10 +7857,10 @@ int Field_set::store(const char *from,uint length,CHARSET_INFO *cs) char *not_used; uint not_used2; uint32 not_used_offset; - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; String tmpstr(buff,sizeof(buff), &my_charset_bin); - /* Convert character set if nesessary */ + /* Convert character set if necessary */ if (String::needs_conversion(length, cs, field_charset, ¬_used_offset)) { uint dummy_errors; @@ -6388,24 +7879,24 @@ int Field_set::store(const char *from,uint length,CHARSET_INFO *cs) tmp > (ulonglong) (((longlong) 1 << typelib->count) - (longlong) 1)) { tmp=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); } } else if (got_warning) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); store_type(tmp); return err; } -int Field_set::store(longlong nr) +int Field_set::store(longlong nr, bool unsigned_val) { int error= 0; if ((ulonglong) nr > (ulonglong) (((longlong) 1 << typelib->count) - (longlong) 1)) { nr&= (longlong) (((longlong) 1 << typelib->count) - (longlong) 1); - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); error=1; } store_type((ulonglong) nr); @@ -6445,7 +7936,7 @@ void 
Field_set::sql_type(String &res) const String set_item(buffer, sizeof(buffer), res.charset()); res.length(0); - res.append("set("); + res.append(STRING_WITH_LEN("set(")); bool flag=0; uint *len= typelib->type_lengths; @@ -6483,9 +7974,9 @@ bool Field_enum::eq_def(Field *field) for (uint i=0 ; i < from_lib->count ; i++) if (my_strnncoll(field_charset, (const uchar*)typelib->type_names[i], - strlen(typelib->type_names[i]), + (uint) strlen(typelib->type_names[i]), (const uchar*)from_lib->type_names[i], - strlen(from_lib->type_names[i]))) + (uint) strlen(from_lib->type_names[i]))) return 0; return 1; } @@ -6504,8 +7995,327 @@ bool Field_num::eq_def(Field *field) } +/* + Bit field. + + We store the first 0 - 6 uneven bits among the null bits + at the start of the record. The rest bytes are stored in + the record itself. + + For example: + + CREATE TABLE t1 (a int, b bit(17), c bit(21) not null, d bit(8)); + We would store data as follows in the record: + + Byte Bit + 1 7 - reserve for delete + 6 - null bit for 'a' + 5 - null bit for 'b' + 4 - first (high) bit of 'b' + 3 - first (high) bit of 'c' + 2 - second bit of 'c' + 1 - third bit of 'c' + 0 - forth bit of 'c' + 2 7 - firth bit of 'c' + 6 - null bit for 'd' + 3 - 6 four bytes for 'a' + 7 - 8 two bytes for 'b' + 9 - 10 two bytes for 'c' + 11 one byte for 'd' +*/ + +Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg, + enum utype unireg_check_arg, const char *field_name_arg, + struct st_table *table_arg) + : Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg), + bit_ptr(bit_ptr_arg), bit_ofs(bit_ofs_arg), bit_len(len_arg & 7), + bytes_in_rec(len_arg / 8) +{ + flags|= UNSIGNED_FLAG; + /* + Ensure that Field::eq() can distinguish between two different bit fields. 
+ (two bit fields that are not null, may have same ptr and null_ptr) + */ + if (!null_ptr_arg) + null_bit= bit_ofs_arg; +} + + +Field *Field_bit::new_key_field(MEM_ROOT *root, + struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field_bit *res; + if ((res= (Field_bit*) Field::new_key_field(root, new_table, + new_ptr, new_null_ptr, + new_null_bit))) + { + /* Move bits normally stored in null_pointer to new_ptr */ + res->bit_ptr= (uchar*) new_ptr; + res->bit_ofs= 0; + if (bit_len) + res->ptr++; // Store rest of data here + } + return res; +} + + +int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs) +{ + int delta; + + for (; length && !*from; from++, length--); // skip left 0's + delta= bytes_in_rec - length; + + if (delta < -1 || + (delta == -1 && (uchar) *from > ((1 << bit_len) - 1)) || + (!bit_len && delta < 0)) + { + set_rec_bits((1 << bit_len) - 1, bit_ptr, bit_ofs, bit_len); + memset(ptr, 0xff, bytes_in_rec); + if (table->in_use->really_abort_on_warning()) + set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1); + else + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + return 1; + } + /* delta is >= -1 here */ + if (delta > 0) + { + if (bit_len) + clr_rec_bits(bit_ptr, bit_ofs, bit_len); + bzero(ptr, delta); + memcpy(ptr + delta, from, length); + } + else if (delta == 0) + { + if (bit_len) + clr_rec_bits(bit_ptr, bit_ofs, bit_len); + memcpy(ptr, from, length); + } + else + { + if (bit_len) + { + set_rec_bits((uchar) *from, bit_ptr, bit_ofs, bit_len); + from++; + } + memcpy(ptr, from, bytes_in_rec); + } + return 0; +} + + +int Field_bit::store(double nr) +{ + return store((longlong) nr, FALSE); +} + + +int Field_bit::store(longlong nr, bool unsigned_val) +{ + char buf[8]; + + mi_int8store(buf, nr); + return store(buf, 8, NULL); +} + + +int Field_bit::store_decimal(const my_decimal *val) +{ + int err= 0; + longlong i= convert_decimal2longlong(val, 1, &err); + return 
test(err | store(i, TRUE)); +} + + +double Field_bit::val_real(void) +{ + return (double) Field_bit::val_int(); +} + + +longlong Field_bit::val_int(void) +{ + ulonglong bits= 0; + if (bit_len) + { + bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + bits<<= (bytes_in_rec * 8); + } + + switch (bytes_in_rec) { + case 0: return bits; + case 1: return bits | (ulonglong) (uchar) ptr[0]; + case 2: return bits | mi_uint2korr(ptr); + case 3: return bits | mi_uint3korr(ptr); + case 4: return bits | mi_uint4korr(ptr); + case 5: return bits | mi_uint5korr(ptr); + case 6: return bits | mi_uint6korr(ptr); + case 7: return bits | mi_uint7korr(ptr); + default: return mi_uint8korr(ptr + bytes_in_rec - sizeof(longlong)); + } +} + + +String *Field_bit::val_str(String *val_buffer, + String *val_ptr __attribute__((unused))) +{ + char buff[sizeof(longlong)]; + uint length= min(pack_length(), sizeof(longlong)); + ulonglong bits= val_int(); + mi_int8store(buff,bits); + + val_buffer->alloc(length); + memcpy_fixed((char*) val_buffer->ptr(), buff+8-length, length); + val_buffer->length(length); + val_buffer->set_charset(&my_charset_bin); + return val_buffer; +} + + +my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value) +{ + int2my_decimal(E_DEC_FATAL_ERROR, val_int(), 1, deciaml_value); + return deciaml_value; +} + + +int Field_bit::key_cmp(const byte *str, uint length) +{ + if (bit_len) + { + int flag; + uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + if ((flag= (int) (bits - *(uchar*) str))) + return flag; + str++; + length--; + } + return memcmp(ptr, str, length); +} + + +int Field_bit::cmp_offset(uint row_offset) +{ + if (bit_len) + { + int flag; + uchar bits_a= get_rec_bits(bit_ptr, bit_ofs, bit_len); + uchar bits_b= get_rec_bits(bit_ptr + row_offset, bit_ofs, bit_len); + if ((flag= (int) (bits_a - bits_b))) + return flag; + } + return memcmp(ptr, ptr + row_offset, bytes_in_rec); +} + + +void Field_bit::get_key_image(char *buff, uint length, imagetype type_arg) +{ + if 
(bit_len) + { + uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + *buff++= bits; + length--; + } + memcpy(buff, ptr, min(length, bytes_in_rec)); +} + + +void Field_bit::sql_type(String &res) const +{ + CHARSET_INFO *cs= res.charset(); + ulong length= cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(), + "bit(%d)", (int) field_length); + res.length((uint) length); +} + + +char *Field_bit::pack(char *to, const char *from, uint max_length) +{ + DBUG_ASSERT(max_length); + uint length; + if (bit_len) + { + uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + *to++= bits; + } + length= min(bytes_in_rec, max_length - (bit_len > 0)); + memcpy(to, from, length); + return to + length; +} + + +const char *Field_bit::unpack(char *to, const char *from) +{ + if (bit_len) + { + set_rec_bits(*from, bit_ptr, bit_ofs, bit_len); + from++; + } + memcpy(to, from, bytes_in_rec); + return from + bytes_in_rec; +} + + +/* + Bit field support for non-MyISAM tables. +*/ + +Field_bit_as_char::Field_bit_as_char(char *ptr_arg, uint32 len_arg, + uchar *null_ptr_arg, uchar null_bit_arg, + enum utype unireg_check_arg, + const char *field_name_arg, + struct st_table *table_arg) + : Field_bit(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, 0, + 0, unireg_check_arg, field_name_arg, table_arg) +{ + flags|= UNSIGNED_FLAG; + bit_len= 0; + bytes_in_rec= (len_arg + 7) / 8; +} + + +int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs) +{ + int delta; + uchar bits= (uchar) (field_length & 7); + + for (; length && !*from; from++, length--); // skip left 0's + delta= bytes_in_rec - length; + + if (delta < 0 || + (delta == 0 && bits && (uint) (uchar) *from >= (uint) (1 << bits))) + { + memset(ptr, 0xff, bytes_in_rec); + if (bits) + *ptr&= ((1 << bits) - 1); /* set first byte */ + if (table->in_use->really_abort_on_warning()) + set_warning(MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DATA_TOO_LONG, 1); + else + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 
1); + return 1; + } + bzero(ptr, delta); + memcpy(ptr + delta, from, length); + return 0; +} + + +void Field_bit_as_char::sql_type(String &res) const +{ + CHARSET_INFO *cs= res.charset(); + ulong length= cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(), + "bit(%d)", (int) field_length); + res.length((uint) length); +} + + /***************************************************************************** -** Handling of field and create_field + Handling of field and create_field *****************************************************************************/ /* @@ -6527,20 +8337,440 @@ void create_field::create_length_to_internal_length(void) case MYSQL_TYPE_BLOB: case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VARCHAR: length*= charset->mbmaxlen; - pack_length= calc_pack_length(sql_type == FIELD_TYPE_VAR_STRING ? - FIELD_TYPE_STRING : sql_type, length); + key_length= length; + pack_length= calc_pack_length(sql_type, length); break; case MYSQL_TYPE_ENUM: case MYSQL_TYPE_SET: + /* Pack_length already calculated in sql_parse.cc */ length*= charset->mbmaxlen; + key_length= pack_length; + break; + case MYSQL_TYPE_BIT: + if (f_bit_as_char(pack_flag)) + { + key_length= pack_length= ((length + 7) & ~7) / 8; + } + else + { + pack_length= length / 8; + /* We need one extra byte to store the bits we save among the null bits */ + key_length= pack_length + test(length & 7); + } + break; + case MYSQL_TYPE_NEWDECIMAL: + key_length= pack_length= + my_decimal_get_binary_size(my_decimal_length_to_precision(length, + decimals, + flags & + UNSIGNED_FLAG), + decimals); break; default: - /* do nothing */ + key_length= pack_length= calc_pack_length(sql_type, length); + break; + } +} + + +void create_field::init_for_tmp_table(enum_field_types sql_type_arg, + uint32 length_arg, uint32 decimals_arg, + bool maybe_null, bool is_unsigned) +{ + field_name= ""; + sql_type= sql_type_arg; + char_length= length= length_arg;; + unireg_check= Field::NONE; + interval= 0; 
+ charset= &my_charset_bin; + geom_type= Field::GEOM_GEOMETRY; + pack_flag= (FIELDFLAG_NUMBER | + ((decimals_arg & FIELDFLAG_MAX_DEC) << FIELDFLAG_DEC_SHIFT) | + (maybe_null ? FIELDFLAG_MAYBE_NULL : 0) | + (is_unsigned ? 0 : FIELDFLAG_DECIMAL)); +} + + +/* + Initialize field definition for create + + SYNOPSIS + thd Thread handle + fld_name Field name + fld_type Field type + fld_length Field length + fld_decimals Decimal (if any) + fld_type_modifier Additional type information + fld_default_value Field default value (if any) + fld_on_update_value The value of ON UPDATE clause + fld_comment Field comment + fld_change Field change + fld_interval_list Interval list (if any) + fld_charset Field charset + fld_geom_type Field geometry type (if any) + + RETURN + FALSE on success + TRUE on error +*/ + +bool create_field::init(THD *thd, char *fld_name, enum_field_types fld_type, + char *fld_length, char *fld_decimals, + uint fld_type_modifier, Item *fld_default_value, + Item *fld_on_update_value, LEX_STRING *fld_comment, + char *fld_change, List<String> *fld_interval_list, + CHARSET_INFO *fld_charset, uint fld_geom_type) +{ + uint sign_len, allowed_type_modifier= 0; + ulong max_field_charlength= MAX_FIELD_CHARLENGTH; + + DBUG_ENTER("create_field::init()"); + + field= 0; + field_name= fld_name; + def= fld_default_value; + flags= fld_type_modifier; + unireg_check= (fld_type_modifier & AUTO_INCREMENT_FLAG ? + Field::NEXT_NUMBER : Field::NONE); + decimals= fld_decimals ? 
(uint)atoi(fld_decimals) : 0; + if (decimals >= NOT_FIXED_DEC) + { + my_error(ER_TOO_BIG_SCALE, MYF(0), decimals, fld_name, + NOT_FIXED_DEC-1); + DBUG_RETURN(TRUE); + } + + sql_type= fld_type; + length= 0; + change= fld_change; + interval= 0; + pack_length= key_length= 0; + charset= fld_charset; + geom_type= (Field::geometry_type) fld_geom_type; + interval_list.empty(); + + comment= *fld_comment; + /* + Set NO_DEFAULT_VALUE_FLAG if this field doesn't have a default value and + it is NOT NULL, not an AUTO_INCREMENT field and not a TIMESTAMP. + */ + if (!fld_default_value && !(fld_type_modifier & AUTO_INCREMENT_FLAG) && + (fld_type_modifier & NOT_NULL_FLAG) && fld_type != FIELD_TYPE_TIMESTAMP) + flags|= NO_DEFAULT_VALUE_FLAG; + + if (fld_length && !(length= (uint) atoi(fld_length))) + fld_length= 0; /* purecov: inspected */ + sign_len= fld_type_modifier & UNSIGNED_FLAG ? 0 : 1; + + switch (fld_type) { + case FIELD_TYPE_TINY: + if (!fld_length) + length= MAX_TINYINT_WIDTH+sign_len; + allowed_type_modifier= AUTO_INCREMENT_FLAG; + break; + case FIELD_TYPE_SHORT: + if (!fld_length) + length= MAX_SMALLINT_WIDTH+sign_len; + allowed_type_modifier= AUTO_INCREMENT_FLAG; + break; + case FIELD_TYPE_INT24: + if (!fld_length) + length= MAX_MEDIUMINT_WIDTH+sign_len; + allowed_type_modifier= AUTO_INCREMENT_FLAG; + break; + case FIELD_TYPE_LONG: + if (!fld_length) + length= MAX_INT_WIDTH+sign_len; + allowed_type_modifier= AUTO_INCREMENT_FLAG; + break; + case FIELD_TYPE_LONGLONG: + if (!fld_length) + length= MAX_BIGINT_WIDTH; + allowed_type_modifier= AUTO_INCREMENT_FLAG; + break; + case FIELD_TYPE_NULL: + break; + case FIELD_TYPE_NEWDECIMAL: + if (!fld_length && !decimals) + length= 10; + if (length > DECIMAL_MAX_PRECISION) + { + my_error(ER_TOO_BIG_PRECISION, MYF(0), length, fld_name, + DECIMAL_MAX_PRECISION); + DBUG_RETURN(TRUE); + } + if (length < decimals) + { + my_error(ER_M_BIGGER_THAN_D, MYF(0), fld_name); + DBUG_RETURN(TRUE); + } + length= + 
my_decimal_precision_to_length(length, decimals, + fld_type_modifier & UNSIGNED_FLAG); + pack_length= + my_decimal_get_binary_size(length, decimals); + break; + case MYSQL_TYPE_VARCHAR: + /* + Long VARCHAR's are automaticly converted to blobs in mysql_prepare_table + if they don't have a default value + */ + max_field_charlength= MAX_FIELD_VARCHARLENGTH; + break; + case MYSQL_TYPE_STRING: + break; + case FIELD_TYPE_BLOB: + case FIELD_TYPE_TINY_BLOB: + case FIELD_TYPE_LONG_BLOB: + case FIELD_TYPE_MEDIUM_BLOB: + case FIELD_TYPE_GEOMETRY: + if (fld_default_value) + { + /* Allow empty as default value. */ + String str,*res; + res= fld_default_value->val_str(&str); + /* + A default other than '' is always an error, and any non-NULL + specified default is an error in strict mode. + */ + if (res->length() || (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))) + { + my_error(ER_BLOB_CANT_HAVE_DEFAULT, MYF(0), + fld_name); /* purecov: inspected */ + DBUG_RETURN(TRUE); + } + else + { + /* + Otherwise a default of '' is just a warning. 
+ */ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_BLOB_CANT_HAVE_DEFAULT, + ER(ER_BLOB_CANT_HAVE_DEFAULT), + fld_name); + } + def= 0; + } + flags|= BLOB_FLAG; + break; + case FIELD_TYPE_YEAR: + if (!fld_length || length != 2) + length= 4; /* Default length */ + flags|= ZEROFILL_FLAG | UNSIGNED_FLAG; + break; + case FIELD_TYPE_FLOAT: + /* change FLOAT(precision) to FLOAT or DOUBLE */ + allowed_type_modifier= AUTO_INCREMENT_FLAG; + if (fld_length && !fld_decimals) + { + uint tmp_length= length; + if (tmp_length > PRECISION_FOR_DOUBLE) + { + my_error(ER_WRONG_FIELD_SPEC, MYF(0), fld_name); + DBUG_RETURN(TRUE); + } + else if (tmp_length > PRECISION_FOR_FLOAT) + { + sql_type= FIELD_TYPE_DOUBLE; + length= DBL_DIG+7; /* -[digits].E+### */ + } + else + length= FLT_DIG+6; /* -[digits].E+## */ + decimals= NOT_FIXED_DEC; + break; + } + if (!fld_length && !fld_decimals) + { + length= FLT_DIG+6; + decimals= NOT_FIXED_DEC; + } + if (length < decimals && + decimals != NOT_FIXED_DEC) + { + my_error(ER_M_BIGGER_THAN_D, MYF(0), fld_name); + DBUG_RETURN(TRUE); + } break; + case FIELD_TYPE_DOUBLE: + allowed_type_modifier= AUTO_INCREMENT_FLAG; + if (!fld_length && !fld_decimals) + { + length= DBL_DIG+7; + decimals= NOT_FIXED_DEC; + } + if (length < decimals && + decimals != NOT_FIXED_DEC) + { + my_error(ER_M_BIGGER_THAN_D, MYF(0), fld_name); + DBUG_RETURN(TRUE); + } + break; + case FIELD_TYPE_TIMESTAMP: + if (!fld_length) + length= 14; /* Full date YYYYMMDDHHMMSS */ + else if (length != 19) + { + /* + We support only even TIMESTAMP lengths less or equal than 14 + and 19 as length of 4.1 compatible representation. 
+ */ + length= ((length+1)/2)*2; /* purecov: inspected */ + length= min(length,14); /* purecov: inspected */ + } + flags|= ZEROFILL_FLAG | UNSIGNED_FLAG; + if (fld_default_value) + { + /* Grammar allows only NOW() value for ON UPDATE clause */ + if (fld_default_value->type() == Item::FUNC_ITEM && + ((Item_func*)fld_default_value)->functype() == Item_func::NOW_FUNC) + { + unireg_check= (fld_on_update_value ? Field::TIMESTAMP_DNUN_FIELD: + Field::TIMESTAMP_DN_FIELD); + /* + We don't need default value any longer moreover it is dangerous. + Everything handled by unireg_check further. + */ + def= 0; + } + else + unireg_check= (fld_on_update_value ? Field::TIMESTAMP_UN_FIELD: + Field::NONE); + } + else + { + /* + If we have default TIMESTAMP NOT NULL column without explicit DEFAULT + or ON UPDATE values then for the sake of compatiblity we should treat + this column as having DEFAULT NOW() ON UPDATE NOW() (when we don't + have another TIMESTAMP column with auto-set option before this one) + or DEFAULT 0 (in other cases). + So here we are setting TIMESTAMP_OLD_FIELD only temporary, and will + replace this value by TIMESTAMP_DNUN_FIELD or NONE later when + information about all TIMESTAMP fields in table will be availiable. + + If we have TIMESTAMP NULL column without explicit DEFAULT value + we treat it as having DEFAULT NULL attribute. + */ + unireg_check= (fld_on_update_value ? Field::TIMESTAMP_UN_FIELD : + (flags & NOT_NULL_FLAG ? Field::TIMESTAMP_OLD_FIELD : + Field::NONE)); + } + break; + case FIELD_TYPE_DATE: + /* Old date type. 
*/ + if (protocol_version != PROTOCOL_VERSION-1) + sql_type= FIELD_TYPE_NEWDATE; + /* fall trough */ + case FIELD_TYPE_NEWDATE: + length= 10; + break; + case FIELD_TYPE_TIME: + length= 10; + break; + case FIELD_TYPE_DATETIME: + length= 19; + break; + case FIELD_TYPE_SET: + { + if (fld_interval_list->elements > sizeof(longlong)*8) + { + my_error(ER_TOO_BIG_SET, MYF(0), fld_name); /* purecov: inspected */ + DBUG_RETURN(TRUE); + } + pack_length= get_set_pack_length(fld_interval_list->elements); + + List_iterator<String> it(*fld_interval_list); + String *tmp; + while ((tmp= it++)) + interval_list.push_back(tmp); + /* + Set fake length to 1 to pass the below conditions. + Real length will be set in mysql_prepare_table() + when we know the character set of the column + */ + length= 1; + break; + } + case FIELD_TYPE_ENUM: + { + /* Should be safe. */ + pack_length= get_enum_pack_length(fld_interval_list->elements); + + List_iterator<String> it(*fld_interval_list); + String *tmp; + while ((tmp= it++)) + interval_list.push_back(tmp); + length= 1; /* See comment for FIELD_TYPE_SET above. */ + break; + } + case MYSQL_TYPE_VAR_STRING: + DBUG_ASSERT(0); /* Impossible. */ + break; + case MYSQL_TYPE_BIT: + { + if (!fld_length) + length= 1; + if (length > MAX_BIT_FIELD_LENGTH) + { + my_error(ER_TOO_BIG_DISPLAYWIDTH, MYF(0), fld_name, + MAX_BIT_FIELD_LENGTH); + DBUG_RETURN(TRUE); + } + pack_length= (length + 7) / 8; + break; + } + case FIELD_TYPE_DECIMAL: + DBUG_ASSERT(0); /* Was obsolete */ } + /* Remember the value of length */ + char_length= length; + + if (!(flags & BLOB_FLAG) && + ((length > max_field_charlength && fld_type != FIELD_TYPE_SET && + fld_type != FIELD_TYPE_ENUM && + (fld_type != MYSQL_TYPE_VARCHAR || fld_default_value)) || + (!length && + fld_type != MYSQL_TYPE_STRING && + fld_type != MYSQL_TYPE_VARCHAR && fld_type != FIELD_TYPE_GEOMETRY))) + { + my_error((fld_type == MYSQL_TYPE_VAR_STRING || + fld_type == MYSQL_TYPE_VARCHAR || + fld_type == MYSQL_TYPE_STRING) ? 
ER_TOO_BIG_FIELDLENGTH : + ER_TOO_BIG_DISPLAYWIDTH, + MYF(0), + fld_name, max_field_charlength); /* purecov: inspected */ + DBUG_RETURN(TRUE); + } + fld_type_modifier&= AUTO_INCREMENT_FLAG; + if ((~allowed_type_modifier) & fld_type_modifier) + { + my_error(ER_WRONG_FIELD_SPEC, MYF(0), fld_name); + DBUG_RETURN(TRUE); + } + + DBUG_RETURN(FALSE); /* success */ +} + + +enum_field_types get_blob_type_from_length(ulong length) +{ + enum_field_types type; + if (length < 256) + type= FIELD_TYPE_TINY_BLOB; + else if (length < 65536) + type= FIELD_TYPE_BLOB; + else if (length < 256L*256L*256L) + type= FIELD_TYPE_MEDIUM_BLOB; + else + type= FIELD_TYPE_LONG_BLOB; + return type; } + /* Make a field from the .frm file info */ @@ -6548,9 +8778,10 @@ void create_field::create_length_to_internal_length(void) uint32 calc_pack_length(enum_field_types type,uint32 length) { switch (type) { - case FIELD_TYPE_STRING: - case FIELD_TYPE_DECIMAL: return (length); - case FIELD_TYPE_VAR_STRING: return (length+HA_KEY_BLOB_LENGTH); + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case FIELD_TYPE_DECIMAL: return (length); + case MYSQL_TYPE_VARCHAR: return (length + (length < 256 ? 
1: 2)); case FIELD_TYPE_YEAR: case FIELD_TYPE_TINY : return 1; case FIELD_TYPE_SHORT : return 2; @@ -6571,10 +8802,13 @@ uint32 calc_pack_length(enum_field_types type,uint32 length) case FIELD_TYPE_LONG_BLOB: return 4+portable_sizeof_char_ptr; case FIELD_TYPE_GEOMETRY: return 4+portable_sizeof_char_ptr; case FIELD_TYPE_SET: - case FIELD_TYPE_ENUM: abort(); return 0; // This shouldn't happen - default: return 0; + case FIELD_TYPE_ENUM: + case FIELD_TYPE_NEWDECIMAL: + abort(); return 0; // This shouldn't happen + case FIELD_TYPE_BIT: return length / 8; + default: + return 0; } - return 0; // Keep compiler happy } @@ -6602,11 +8836,30 @@ Field *make_field(char *ptr, uint32 field_length, const char *field_name, struct st_table *table) { + uchar *bit_ptr; + uchar bit_offset; + LINT_INIT(bit_ptr); + LINT_INIT(bit_offset); + if (field_type == FIELD_TYPE_BIT && !f_bit_as_char(pack_flag)) + { + bit_ptr= null_pos; + bit_offset= null_bit; + if (f_maybe_null(pack_flag)) // if null field + { + bit_ptr+= (null_bit == 7); // shift bit_ptr and bit_offset + bit_offset= (bit_offset + 1) & 7; + } + } + if (!f_maybe_null(pack_flag)) { null_pos=0; null_bit=0; } + else + { + null_bit= ((uchar) 1) << null_bit; + } switch (field_type) { @@ -6623,12 +8876,18 @@ Field *make_field(char *ptr, uint32 field_length, { if (!f_is_packed(pack_flag)) { - if (field_type == FIELD_TYPE_STRING || + if (field_type == MYSQL_TYPE_STRING || field_type == FIELD_TYPE_DECIMAL || // 3.23 or 4.0 string - field_type == FIELD_TYPE_VAR_STRING) + field_type == MYSQL_TYPE_VAR_STRING) return new Field_string(ptr,field_length,null_pos,null_bit, unireg_check, field_name, table, field_charset); + if (field_type == MYSQL_TYPE_VARCHAR) + return new Field_varstring(ptr,field_length, + HA_VARCHAR_PACKLENGTH(field_length), + null_pos,null_bit, + unireg_check, field_name, table, + field_charset); return 0; // Error } @@ -6666,6 +8925,12 @@ Field *make_field(char *ptr, uint32 field_length, f_decimals(pack_flag), 
f_is_zerofill(pack_flag) != 0, f_is_dec(pack_flag) == 0); + case FIELD_TYPE_NEWDECIMAL: + return new Field_new_decimal(ptr,field_length,null_pos,null_bit, + unireg_check, field_name, table, + f_decimals(pack_flag), + f_is_zerofill(pack_flag) != 0, + f_is_dec(pack_flag) == 0); case FIELD_TYPE_FLOAT: return new Field_float(ptr,field_length,null_pos,null_bit, unireg_check, field_name, table, @@ -6724,6 +8989,12 @@ Field *make_field(char *ptr, uint32 field_length, unireg_check, field_name, table, field_charset); case FIELD_TYPE_NULL: return new Field_null(ptr,field_length,unireg_check,field_name,table, field_charset); + case FIELD_TYPE_BIT: + return f_bit_as_char(pack_flag) ? + new Field_bit_as_char(ptr, field_length, null_pos, null_bit, + unireg_check, field_name, table) : + new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr, + bit_offset, unireg_check, field_name, table); default: // Impossible (Wrong version) break; } @@ -6741,84 +9012,119 @@ create_field::create_field(Field *old_field,Field *orig_field) flags= old_field->flags; unireg_check=old_field->unireg_check; pack_length=old_field->pack_length(); + key_length= old_field->key_length(); sql_type= old_field->real_type(); charset= old_field->charset(); // May be NULL ptr comment= old_field->comment; + decimals= old_field->decimals(); /* Fix if the original table had 4 byte pointer blobs */ if (flags & BLOB_FLAG) - pack_length= (pack_length- old_field->table->blob_ptr_size + + pack_length= (pack_length- old_field->table->s->blob_ptr_size + portable_sizeof_char_ptr); - switch (sql_type) - { - case FIELD_TYPE_BLOB: - switch (pack_length - portable_sizeof_char_ptr) - { - case 1: sql_type= FIELD_TYPE_TINY_BLOB; break; - case 2: sql_type= FIELD_TYPE_BLOB; break; - case 3: sql_type= FIELD_TYPE_MEDIUM_BLOB; break; - default: sql_type= FIELD_TYPE_LONG_BLOB; break; - } - length=(length+charset->mbmaxlen-1)/charset->mbmaxlen; // QQ: Probably not needed - break; - case MYSQL_TYPE_ENUM: - case MYSQL_TYPE_SET: - case 
FIELD_TYPE_STRING: - case FIELD_TYPE_VAR_STRING: - length=(length+charset->mbmaxlen-1)/charset->mbmaxlen; - break; - default: - break; - } - - char_length= length; - decimals= old_field->decimals(); - if (sql_type == FIELD_TYPE_STRING) - { + switch (sql_type) { + case FIELD_TYPE_BLOB: + switch (pack_length - portable_sizeof_char_ptr) { + case 1: sql_type= FIELD_TYPE_TINY_BLOB; break; + case 2: sql_type= FIELD_TYPE_BLOB; break; + case 3: sql_type= FIELD_TYPE_MEDIUM_BLOB; break; + default: sql_type= FIELD_TYPE_LONG_BLOB; break; + } + length/= charset->mbmaxlen; + key_length/= charset->mbmaxlen; + break; + case MYSQL_TYPE_STRING: /* Change CHAR -> VARCHAR if dynamic record length */ - sql_type=old_field->type(); - decimals=0; + if (old_field->type() == MYSQL_TYPE_VAR_STRING) + sql_type= MYSQL_TYPE_VARCHAR; + /* fall through */ + + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_SET: + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_VAR_STRING: + /* This is corrected in create_length_to_internal_length */ + length= (length+charset->mbmaxlen-1) / charset->mbmaxlen; + break; +#ifdef HAVE_SPATIAL + case FIELD_TYPE_GEOMETRY: + geom_type= ((Field_geom*)old_field)->geom_type; + break; +#endif + default: + break; } + if (flags & (ENUM_FLAG | SET_FLAG)) interval= ((Field_enum*) old_field)->typelib; else interval=0; def=0; - if (!old_field->is_real_null() && ! 
(flags & BLOB_FLAG) && - old_field->ptr && orig_field) + char_length= length; + + if (!(flags & (NO_DEFAULT_VALUE_FLAG | BLOB_FLAG)) && + old_field->ptr && orig_field && + (sql_type != FIELD_TYPE_TIMESTAMP || /* set def only if */ + old_field->table->timestamp_field != old_field || /* timestamp field */ + unireg_check == Field::TIMESTAMP_UN_FIELD)) /* has default val */ { - char buff[MAX_FIELD_WIDTH],*pos; - String tmp(buff,sizeof(buff), charset); + my_ptrdiff_t diff; /* Get the value from default_values */ - my_ptrdiff_t diff= (my_ptrdiff_t) (orig_field->table->rec_buff_length*2); + diff= (my_ptrdiff_t) (orig_field->table->s->default_values- + orig_field->table->record[0]); orig_field->move_field(diff); // Points now at default_values - bool is_null=orig_field->is_real_null(); - orig_field->val_str(&tmp); - orig_field->move_field(-diff); // Back to record[0] - if (!is_null) + if (!orig_field->is_real_null()) { - pos= (char*) sql_memdup(tmp.ptr(),tmp.length()+1); - pos[tmp.length()]=0; - def= new Item_string(pos, tmp.length(), charset); + char buff[MAX_FIELD_WIDTH],*pos; + String tmp(buff,sizeof(buff), charset), *res; + res= orig_field->val_str(&tmp); + pos= (char*) sql_strmake(res->ptr(), res->length()); + def= new Item_string(pos, res->length(), charset); } + orig_field->move_field(-diff); // Back to record[0] } -#ifdef HAVE_SPATIAL - if (sql_type == FIELD_TYPE_GEOMETRY) +} + + +/* + maximum possible display length for blob + + SYNOPSIS + Field_blob::max_display_length() + + RETURN + length +*/ + +uint32 Field_blob::max_display_length() +{ + switch (packlength) { - geom_type= ((Field_geom*)old_field)->geom_type; + case 1: + return 255 * field_charset->mbmaxlen; + case 2: + return 65535 * field_charset->mbmaxlen; + case 3: + return 16777215 * field_charset->mbmaxlen; + case 4: + return (uint32) 4294967295U; + default: + DBUG_ASSERT(0); // we should never go here + return 0; } -#endif } -/* Warning handling */ 
+/***************************************************************************** + Warning handling +*****************************************************************************/ /* Produce warning or note about data saved into field - SYNOPSYS + SYNOPSIS set_warning() level - level of message (Note/Warning/Error) code - error code of message to be produced @@ -6829,11 +9135,13 @@ create_field::create_field(Field *old_field,Field *orig_field) if count_cuted_fields == FIELD_CHECK_IGNORE for current thread. RETURN VALUE - true - if count_cuted_fields == FIELD_CHECK_IGNORE - false - otherwise + 1 if count_cuted_fields == FIELD_CHECK_IGNORE + 0 otherwise */ + bool -Field::set_warning(const uint level, const uint code, int cuted_increment) +Field::set_warning(MYSQL_ERROR::enum_warning_level level, uint code, + int cuted_increment) { /* If this field was created only for type conversion purposes it @@ -6843,8 +9151,8 @@ Field::set_warning(const uint level, const uint code, int cuted_increment) if (thd->count_cuted_fields) { thd->cuted_fields+= cuted_increment; - push_warning_printf(thd, (MYSQL_ERROR::enum_warning_level) level, - code, ER(code), field_name, thd->row_count); + push_warning_printf(thd, level, code, ER(code), field_name, + thd->row_count); return 0; } return 1; @@ -6854,8 +9162,8 @@ Field::set_warning(const uint level, const uint code, int cuted_increment) /* Produce warning or note about datetime string data saved into field - SYNOPSYS - set_warning() + SYNOPSIS + set_datime_warning() level - level of message (Note/Warning/Error) code - error code of message to be produced str - string value which we tried to save @@ -6868,21 +9176,24 @@ Field::set_warning(const uint level, const uint code, int cuted_increment) fields counter if count_cuted_fields == FIELD_CHECK_IGNORE for current thread. 
*/ + void -Field::set_datetime_warning(const uint level, const uint code, +Field::set_datetime_warning(MYSQL_ERROR::enum_warning_level level, uint code, const char *str, uint str_length, timestamp_type ts_type, int cuted_increment) { - if (set_warning(level, code, cuted_increment)) - make_truncated_value_warning(table ? table->in_use : current_thd, - str, str_length, ts_type); + THD *thd= table ? table->in_use : current_thd; + if (thd->really_abort_on_warning() || + set_warning(level, code, cuted_increment)) + make_truncated_value_warning(thd, str, str_length, ts_type, + field_name); } /* Produce warning or note about integer datetime value saved into field - SYNOPSYS + SYNOPSIS set_warning() level - level of message (Note/Warning/Error) code - error code of message to be produced @@ -6895,17 +9206,20 @@ Field::set_datetime_warning(const uint level, const uint code, fields counter if count_cuted_fields == FIELD_CHECK_IGNORE for current thread. */ + void -Field::set_datetime_warning(const uint level, const uint code, +Field::set_datetime_warning(MYSQL_ERROR::enum_warning_level level, uint code, longlong nr, timestamp_type ts_type, int cuted_increment) { - if (set_warning(level, code, cuted_increment)) + THD *thd= table ? table->in_use : current_thd; + if (thd->really_abort_on_warning() || + set_warning(level, code, cuted_increment)) { char str_nr[22]; char *str_end= longlong10_to_str(nr, str_nr, -10); - make_truncated_value_warning(table ? 
table->in_use : current_thd, - str_nr, str_end - str_nr, ts_type); + make_truncated_value_warning(thd, str_nr, (uint) (str_end - str_nr), + ts_type, field_name); } } @@ -6913,7 +9227,7 @@ Field::set_datetime_warning(const uint level, const uint code, /* Produce warning or note about double datetime data saved into field - SYNOPSYS + SYNOPSIS set_warning() level - level of message (Note/Warning/Error) code - error code of message to be produced @@ -6925,43 +9239,20 @@ Field::set_datetime_warning(const uint level, const uint code, fields counter if count_cuted_fields == FIELD_CHECK_IGNORE for current thread. */ + void -Field::set_datetime_warning(const uint level, const uint code, +Field::set_datetime_warning(MYSQL_ERROR::enum_warning_level level, uint code, double nr, timestamp_type ts_type) { - if (set_warning(level, code, 1)) + THD *thd= table ? table->in_use : current_thd; + if (thd->really_abort_on_warning() || + set_warning(level, code, 1)) { /* DBL_DIG is enough to print '-[digits].E+###' */ char str_nr[DBL_DIG + 8]; uint str_len= my_sprintf(str_nr, (str_nr, "%g", nr)); - make_truncated_value_warning(table ? 
table->in_use : current_thd, - str_nr, str_len, ts_type); + make_truncated_value_warning(thd, str_nr, str_len, ts_type, + field_name); } } -/* - maximum possible display length for blob - - SYNOPSIS - Field_blob::max_length() - - RETURN - length -*/ -uint32 Field_blob::max_length() -{ - switch (packlength) - { - case 1: - return 255 * field_charset->mbmaxlen; - case 2: - return 65535 * field_charset->mbmaxlen; - case 3: - return 16777215 * field_charset->mbmaxlen; - case 4: - return (uint32) 4294967295U; - default: - DBUG_ASSERT(0); // we should never go here - return 0; - } -} diff --git a/sql/field.h b/sql/field.h index 58177747120..e82e90ce5ab 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -30,7 +29,7 @@ class Send_field; class Protocol; struct st_cache_field; -void field_conv(Field *to,Field *from); +int field_conv(Field *to,Field *from); inline uint get_enum_pack_length(int elements) { @@ -49,11 +48,7 @@ class Field void operator=(Field &); public: static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } - static void operator delete(void *ptr_arg, size_t size) { -#ifdef SAFEMALLOC - bfill(ptr_arg, size, 0x8F); -#endif - } + static void operator delete(void *ptr_arg, size_t size) { TRASH(ptr_arg, size); } char *ptr; // Position to field in record uchar *null_ptr; // Byte where null_bit is @@ -63,9 +58,9 @@ public: */ struct st_table *table; // Pointer for table struct st_table *orig_table; // Pointer to original table - const char *table_name,*field_name; + const char **table_name, *field_name; LEX_STRING comment; - ulong query_id; // For quick test of used fields + query_id_t query_id; // For quick test of used fields /* Field is part of the following keys */ key_map key_start,part_of_key,part_of_sortkey; /* @@ -90,6 +85,7 @@ public: utype unireg_check; uint32 field_length; // Length of field + uint field_index; // field number in fields array uint16 flags; uchar null_bit; // Bit used to test null bit @@ -100,10 +96,12 @@ public: /* Store functions returns 1 on overflow and -1 on fatal error */ virtual int store(const char *to,uint length,CHARSET_INFO *cs)=0; virtual int store(double nr)=0; - virtual int store(longlong nr)=0; - virtual void store_time(TIME *ltime,timestamp_type t_type); + virtual int store(longlong nr, bool unsigned_val)=0; + virtual int store_decimal(const my_decimal *d)=0; + virtual int store_time(TIME *ltime, timestamp_type t_type); virtual double val_real(void)=0; virtual longlong val_int(void)=0; + virtual my_decimal *val_decimal(my_decimal *); inline String *val_str(String 
*str) { return val_str(str, str); } /* val_str(buf1, buf2) gets two buffers and should use them as follows: @@ -118,24 +116,49 @@ public: This trickery is used to decrease a number of malloc calls. */ virtual String *val_str(String*,String *)=0; + String *val_int_as_str(String *val_buffer, my_bool unsigned_flag); virtual Item_result result_type () const=0; virtual Item_result cmp_type () const { return result_type(); } virtual Item_result cast_to_int_type () const { return result_type(); } + static bool type_can_have_key_part(enum_field_types); static enum_field_types field_type_merge(enum_field_types, enum_field_types); static Item_result result_merge_type(enum_field_types); - bool eq(Field *field) { return ptr == field->ptr && null_ptr == field->null_ptr; } + virtual bool eq(Field *field) + { + return (ptr == field->ptr && null_ptr == field->null_ptr && + null_bit == field->null_bit); + } virtual bool eq_def(Field *field); + + /* + pack_length() returns size (in bytes) used to store field data in memory + (i.e. it returns the maximum size of the field in a row of the table, + which is located in RAM). + */ virtual uint32 pack_length() const { return (uint32) field_length; } + + /* + pack_length_in_rec() returns size (in bytes) used to store field data on + storage (i.e. it returns the maximal size of the field in a row of the + table, which is located on disk). + */ + virtual uint32 pack_length_in_rec() const { return pack_length(); } + + /* + data_length() return the "real size" of the data in memory. 
+ */ + virtual uint32 data_length(const char *from) { return pack_length(); } + virtual uint32 sort_length() const { return pack_length(); } virtual int reset(void) { bzero(ptr,pack_length()); return 0; } virtual void reset_fields() {} virtual void set_default() { - my_ptrdiff_t offset = (my_ptrdiff_t) (table->default_values - + my_ptrdiff_t l_offset= (my_ptrdiff_t) (table->s->default_values - table->record[0]); - memcpy(ptr, ptr + offset, pack_length()); + memcpy(ptr, ptr + l_offset, pack_length()); if (null_ptr) *null_ptr= ((*null_ptr & (uchar) ~null_bit) | - null_ptr[offset] & null_bit); + null_ptr[l_offset] & null_bit); } virtual bool binary() const { return 1; } virtual bool zero_pack() const { return 1; } @@ -148,9 +171,9 @@ public: virtual int cmp_binary(const char *a,const char *b, uint32 max_length=~0L) { return memcmp(a,b,pack_length()); } virtual int cmp_offset(uint row_offset) - { return memcmp(ptr,ptr+row_offset,pack_length()); } + { return cmp(ptr,ptr+row_offset); } virtual int cmp_binary_offset(uint row_offset) - { return memcmp(ptr,ptr+row_offset,pack_length()); } + { return cmp_binary(ptr, ptr+row_offset); }; virtual int key_cmp(const byte *a,const byte *b) { return cmp((char*) a,(char*) b); } virtual int key_cmp(const byte *str, uint length) @@ -180,7 +203,7 @@ public: { if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; } inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; } inline bool real_maybe_null(void) { return null_ptr != 0; } - virtual void make_field(Send_field *)=0; + virtual void make_field(Send_field *); virtual void sort_string(char *buff,uint length)=0; virtual bool optimize_range(uint idx, uint part); /* @@ -192,33 +215,17 @@ public: */ virtual bool can_be_compared_as_longlong() const { return FALSE; } virtual void free() {} - Field *new_field(MEM_ROOT *root, struct st_table *new_table) - { - Field *tmp= (Field*) memdup_root(root,(char*) this,size_of()); - if (tmp) - { - if (tmp->table->maybe_null) - 
tmp->flags&= ~NOT_NULL_FLAG; - tmp->table= new_table; - tmp->key_start.init(0); - tmp->part_of_key.init(0); - tmp->part_of_sortkey.init(0); - tmp->unireg_check=Field::NONE; - tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG | - ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG); -#ifdef PROBABLY_WRONG - tmp->table_name= new_table->table_name; -#endif - tmp->reset_fields(); - } - return tmp; - } + virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type); + virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); inline void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg) { ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg; } inline void move_field(char *ptr_arg) { ptr=ptr_arg; } - inline void move_field(my_ptrdiff_t ptr_diff) + virtual inline void move_field(my_ptrdiff_t ptr_diff) { ptr=ADD_TO_PTR(ptr,ptr_diff,char*); if (null_ptr) @@ -228,11 +235,10 @@ public: { memcpy(buff,ptr,length); } inline void set_image(char *buff,uint length, CHARSET_INFO *cs) { memcpy(ptr,buff,length); } - virtual void get_key_image(char *buff,uint length, CHARSET_INFO *cs, - imagetype type) - { get_image(buff,length,cs); } - virtual void set_key_image(char *buff,uint length, CHARSET_INFO *cs) - { set_image(buff,length,cs); } + virtual void get_key_image(char *buff, uint length, imagetype type_arg) + { get_image(buff,length, &my_charset_bin); } + virtual void set_key_image(char *buff,uint length) + { set_image(buff,length, &my_charset_bin); } inline longlong val_int_offset(uint row_offset) { ptr+=row_offset; @@ -240,6 +246,15 @@ public: ptr-=row_offset; return tmp; } + + inline String *val_str(String *str, char *new_ptr) + { + char *old_ptr= ptr; + ptr= new_ptr; + val_str(str); + ptr= old_ptr; + return str; + } virtual bool send_binary(Protocol *protocol); virtual char *pack(char* to, const char *from, uint max_length=~(uint) 0) { @@ -271,9 +286,11 @@ 
public: virtual uint max_packed_col_length(uint max_length) { return max_length;} - virtual int pack_cmp(const char *a,const char *b, uint key_length_arg) + virtual int pack_cmp(const char *a,const char *b, uint key_length_arg, + my_bool insert_or_update) { return cmp(a,b); } - virtual int pack_cmp(const char *b, uint key_length_arg) + virtual int pack_cmp(const char *b, uint key_length_arg, + my_bool insert_or_update) { return cmp(ptr,b); } uint offset(); // Should be inline ... void copy_from_tmp(int offset); @@ -283,19 +300,38 @@ public: virtual CHARSET_INFO *charset(void) const { return &my_charset_bin; } virtual CHARSET_INFO *sort_charset(void) const { return charset(); } virtual bool has_charset(void) const { return FALSE; } - virtual void set_charset(CHARSET_INFO *charset) { } - bool set_warning(const unsigned int level, const unsigned int code, + virtual void set_charset(CHARSET_INFO *charset_arg) { } + virtual enum Derivation derivation(void) const + { return DERIVATION_IMPLICIT; } + virtual void set_derivation(enum Derivation derivation_arg) { } + bool set_warning(MYSQL_ERROR::enum_warning_level, unsigned int code, int cuted_increment); - void set_datetime_warning(const uint level, const uint code, + bool check_int(const char *str, int length, const char *int_end, + CHARSET_INFO *cs); + void set_datetime_warning(MYSQL_ERROR::enum_warning_level, uint code, const char *str, uint str_len, timestamp_type ts_type, int cuted_increment); - void set_datetime_warning(const uint level, const uint code, + void set_datetime_warning(MYSQL_ERROR::enum_warning_level, uint code, longlong nr, timestamp_type ts_type, int cuted_increment); - void set_datetime_warning(const uint level, const uint code, + void set_datetime_warning(MYSQL_ERROR::enum_warning_level, const uint code, double nr, timestamp_type ts_type); + inline bool check_overflow(int op_result) + { + return (op_result == E_DEC_OVERFLOW); + } + int warn_if_overflow(int op_result); /* maximum possible display 
length */ - virtual uint32 max_length()= 0; + virtual uint32 max_display_length()= 0; + /* convert decimal to longlong with overflow check */ + longlong convert_decimal2longlong(const my_decimal *val, bool unsigned_flag, + int *err); + /* The max. number of characters */ + inline uint32 char_length() const + { + return field_length / charset()->mbmaxlen; + } + friend bool reopen_table(THD *,struct st_table *,bool); friend int cre_myisam(my_string name, register TABLE *form, uint options, ulonglong auto_increment_value); @@ -322,16 +358,7 @@ public: uchar null_bit_arg, utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg, - uint8 dec_arg,bool zero_arg,bool unsigned_arg) - :Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg), - dec(dec_arg),zerofill(zero_arg),unsigned_flag(unsigned_arg) - { - if (zerofill) - flags|=ZEROFILL_FLAG; - if (unsigned_flag) - flags|=UNSIGNED_FLAG; - } + uint8 dec_arg, bool zero_arg, bool unsigned_arg); Item_result result_type () const { return REAL_RESULT; } void prepend_zeros(String *value); void add_zerofill_and_unsigned(String &res) const; @@ -340,49 +367,85 @@ public: uint decimals() const { return (uint) dec; } uint size_of() const { return sizeof(*this); } bool eq_def(Field *field); + int store_decimal(const my_decimal *); + my_decimal *val_decimal(my_decimal *); }; class Field_str :public Field { protected: CHARSET_INFO *field_charset; + enum Derivation field_derivation; public: Field_str(char *ptr_arg,uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, utype unireg_check_arg, const char *field_name_arg, - struct st_table *table_arg,CHARSET_INFO *charset) - :Field(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg) - { - field_charset=charset; - if (charset->state & MY_CS_BINSORT) - flags|=BINARY_FLAG; - } + struct st_table *table_arg, CHARSET_INFO *charset); Item_result result_type () const { return STRING_RESULT; } 
uint decimals() const { return NOT_FIXED_DEC; } int store(double nr); - int store(longlong nr)=0; + int store(longlong nr, bool unsigned_val)=0; + int store_decimal(const my_decimal *); int store(const char *to,uint length,CHARSET_INFO *cs)=0; - void make_field(Send_field *); uint size_of() const { return sizeof(*this); } CHARSET_INFO *charset(void) const { return field_charset; } - void set_charset(CHARSET_INFO *charset) { field_charset=charset; } + void set_charset(CHARSET_INFO *charset_arg) { field_charset= charset_arg; } + enum Derivation derivation(void) const { return field_derivation; } + virtual void set_derivation(enum Derivation derivation_arg) + { field_derivation= derivation_arg; } bool binary() const { return field_charset == &my_charset_bin; } - uint32 max_length() { return field_length; } + uint32 max_display_length() { return field_length; } friend class create_field; + my_decimal *val_decimal(my_decimal *); +}; + + +/* base class for Field_string, Field_varstring and Field_blob */ + +class Field_longstr :public Field_str +{ +public: + Field_longstr(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, utype unireg_check_arg, + const char *field_name_arg, + struct st_table *table_arg, CHARSET_INFO *charset_arg) + :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, + field_name_arg, table_arg, charset_arg) + {} + + int store_decimal(const my_decimal *d); }; +/* base class for float and double and decimal (old one) */ +class Field_real :public Field_num { +public: + + Field_real(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, utype unireg_check_arg, + const char *field_name_arg, + struct st_table *table_arg, + uint8 dec_arg, bool zero_arg, bool unsigned_arg) + :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, unireg_check_arg, + field_name_arg, table_arg, dec_arg, zero_arg, unsigned_arg) + {} -class Field_decimal :public Field_num { + + int store_decimal(const my_decimal *); + 
my_decimal *val_decimal(my_decimal *); + uint32 max_display_length() { return field_length; } +}; + + +class Field_decimal :public Field_real { public: Field_decimal(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg, uint8 dec_arg,bool zero_arg,bool unsigned_arg) - :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg, - dec_arg, zero_arg,unsigned_arg) + :Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg, + dec_arg, zero_arg, unsigned_arg) {} enum_field_types type() const { return FIELD_TYPE_DECIMAL;} enum ha_base_keytype key_type() const @@ -390,7 +453,7 @@ public: int reset(void); int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); double val_real(void); longlong val_int(void); String *val_str(String*,String *); @@ -399,7 +462,52 @@ public: void overflow(bool negative); bool zero_pack() const { return 0; } void sql_type(String &str) const; - uint32 max_length() { return field_length; } +}; + + +/* New decimal/numeric field which use fixed point arithmetic */ +class Field_new_decimal :public Field_num { +public: + /* The maximum number of decimal digits can be stored */ + uint precision; + uint bin_size; + /* + Constructors take max_length of the field as a parameter - not the + precision as the number of decimal digits allowed. 
+ So for example we need to count length from precision handling + CREATE TABLE ( DECIMAL(x,y)) + */ + Field_new_decimal(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, + enum utype unireg_check_arg, const char *field_name_arg, + struct st_table *table_arg, + uint8 dec_arg, bool zero_arg, bool unsigned_arg); + Field_new_decimal(uint32 len_arg, bool maybe_null_arg, + const char *field_name_arg, + struct st_table *table_arg, uint8 dec_arg, + bool unsigned_arg); + enum_field_types type() const { return FIELD_TYPE_NEWDECIMAL;} + enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; } + Item_result result_type () const { return DECIMAL_RESULT; } + int reset(void); + bool store_value(const my_decimal *decimal_value); + void set_value_on_overflow(my_decimal *decimal_value, bool sign); + int store(const char *to, uint length, CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr, bool unsigned_val); + int store_time(TIME *ltime, timestamp_type t_type); + int store_decimal(const my_decimal *); + double val_real(void); + longlong val_int(void); + my_decimal *val_decimal(my_decimal *); + String *val_str(String*, String *); + int cmp(const char *, const char*); + void sort_string(char *buff, uint length); + bool zero_pack() const { return 0; } + void sql_type(String &str) const; + uint32 max_display_length() { return field_length; } + uint size_of() const { return sizeof(*this); } + uint32 pack_length() const { return (uint32) bin_size; } }; @@ -420,7 +528,7 @@ public: { return unsigned_flag ? 
HA_KEYTYPE_BINARY : HA_KEYTYPE_INT8; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int reset(void) { ptr[0]=0; return 0; } double val_real(void); longlong val_int(void); @@ -430,7 +538,7 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return 1; } void sql_type(String &str) const; - uint32 max_length() { return 4; } + uint32 max_display_length() { return 4; } }; @@ -456,7 +564,7 @@ public: { return unsigned_flag ? HA_KEYTYPE_USHORT_INT : HA_KEYTYPE_SHORT_INT;} int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int reset(void) { ptr[0]=ptr[1]=0; return 0; } double val_real(void); longlong val_int(void); @@ -466,7 +574,7 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return 2; } void sql_type(String &str) const; - uint32 max_length() { return 6; } + uint32 max_display_length() { return 6; } }; @@ -485,9 +593,9 @@ public: enum_field_types type() const { return FIELD_TYPE_INT24;} enum ha_base_keytype key_type() const { return unsigned_flag ? HA_KEYTYPE_UINT24 : HA_KEYTYPE_INT24; } - int store(const char *to,uint length,CHARSET_INFO *charset); - int store(double nr); - int store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr, bool unsigned_val); int reset(void) { ptr[0]=ptr[1]=ptr[2]=0; return 0; } double val_real(void); longlong val_int(void); @@ -497,7 +605,7 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return 3; } void sql_type(String &str) const; - uint32 max_length() { return 8; } + uint32 max_display_length() { return 8; } }; @@ -523,7 +631,7 @@ public: { return unsigned_flag ? 
HA_KEYTYPE_ULONG_INT : HA_KEYTYPE_LONG_INT; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=0; return 0; } double val_real(void); longlong val_int(void); @@ -533,7 +641,7 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return 4; } void sql_type(String &str) const; - uint32 max_length() { return 11; } + uint32 max_display_length() { return 11; } }; @@ -561,7 +669,7 @@ public: { return unsigned_flag ? HA_KEYTYPE_ULONGLONG : HA_KEYTYPE_LONGLONG; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=ptr[4]=ptr[5]=ptr[6]=ptr[7]=0; @@ -576,31 +684,32 @@ public: uint32 pack_length() const { return 8; } void sql_type(String &str) const; bool can_be_compared_as_longlong() const { return TRUE; } - uint32 max_length() { return 20; } + uint32 max_display_length() { return 20; } }; #endif -class Field_float :public Field_num { + +class Field_float :public Field_real { public: Field_float(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg, - uint8 dec_arg,bool zero_arg,bool unsigned_arg) - :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg, - dec_arg, zero_arg,unsigned_arg) + uint8 dec_arg,bool zero_arg,bool unsigned_arg) + :Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg, + dec_arg, zero_arg, unsigned_arg) {} Field_float(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg, struct st_table *table_arg, uint8 dec_arg) - :Field_num((char*) 0, len_arg, maybe_null_arg ? 
(uchar*) "": 0, (uint) 0, - NONE, field_name_arg, table_arg,dec_arg,0,0) + :Field_real((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, (uint) 0, + NONE, field_name_arg, table_arg, dec_arg, 0, 0) {} enum_field_types type() const { return FIELD_TYPE_FLOAT;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_FLOAT; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int reset(void) { bzero(ptr,sizeof(float)); return 0; } double val_real(void); longlong val_int(void); @@ -610,11 +719,10 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return sizeof(float); } void sql_type(String &str) const; - uint32 max_length() { return 24; } }; -class Field_double :public Field_num { +class Field_double :public Field_real { public: my_bool not_fixed; Field_double(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, @@ -622,28 +730,28 @@ public: enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg, uint8 dec_arg,bool zero_arg,bool unsigned_arg) - :Field_num(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg, - dec_arg, zero_arg, unsigned_arg), + :Field_real(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg, + dec_arg, zero_arg, unsigned_arg), not_fixed(dec_arg >= NOT_FIXED_DEC) {} Field_double(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg, struct st_table *table_arg, uint8 dec_arg) - :Field_num((char *) 0, len_arg, maybe_null_arg ? (uchar *) "" : 0, (uint) 0, - NONE, field_name_arg, table_arg,dec_arg, 0, 0), + :Field_real((char*) 0, len_arg, maybe_null_arg ? 
(uchar*) "" : 0, (uint) 0, + NONE, field_name_arg, table_arg, dec_arg, 0, 0), not_fixed(dec_arg >= NOT_FIXED_DEC) {} Field_double(uint32 len_arg, bool maybe_null_arg, const char *field_name_arg, struct st_table *table_arg, uint8 dec_arg, my_bool not_fixed_srg) - :Field_num((char *) 0, len_arg, maybe_null_arg ? (uchar *) "" : 0, (uint) 0, - NONE, field_name_arg, table_arg, dec_arg, 0, 0), + :Field_real((char*) 0, len_arg, maybe_null_arg ? (uchar*) "" : 0, (uint) 0, + NONE, field_name_arg, table_arg, dec_arg, 0, 0), not_fixed(not_fixed_srg) {} enum_field_types type() const { return FIELD_TYPE_DOUBLE;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_DOUBLE; } - int store(const char *to,uint length,CHARSET_INFO *charset); - int store(double nr); - int store(longlong nr); + int store(const char *to,uint length,CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr, bool unsigned_val); int reset(void) { bzero(ptr,sizeof(double)); return 0; } double val_real(void); longlong val_int(void); @@ -653,12 +761,11 @@ public: void sort_string(char *buff,uint length); uint32 pack_length() const { return sizeof(double); } void sql_type(String &str) const; - uint32 max_length() { return 53; } uint size_of() const { return sizeof(*this); } }; -/* Everything saved in this will disapper. It will always return NULL */ +/* Everything saved in this will disappear. 
It will always return NULL */ class Field_null :public Field_str { static uchar null[1]; @@ -673,10 +780,12 @@ public: int store(const char *to, uint length, CHARSET_INFO *cs) { null[0]=1; return 0; } int store(double nr) { null[0]=1; return 0; } - int store(longlong nr) { null[0]=1; return 0; } + int store(longlong nr, bool unsigned_val) { null[0]=1; return 0; } + int store_decimal(const my_decimal *d) { null[0]=1; return 0; } int reset(void) { return 0; } double val_real(void) { return 0.0;} longlong val_int(void) { return 0;} + my_decimal *val_decimal(my_decimal *) { return 0; } String *val_str(String *value,String *value2) { value2->length(0); return value2;} int cmp(const char *a, const char *b) { return 0;} @@ -684,7 +793,7 @@ public: uint32 pack_length() const { return 0; } void sql_type(String &str) const; uint size_of() const { return sizeof(*this); } - uint32 max_length() { return 4; } + uint32 max_display_length() { return 4; } }; @@ -695,12 +804,14 @@ public: enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs); + Field_timestamp(bool maybe_null_arg, const char *field_name_arg, + struct st_table *table_arg, CHARSET_INFO *cs); enum_field_types type() const { return FIELD_TYPE_TIMESTAMP;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_ULONG_INT; } enum Item_result cmp_type () const { return INT_RESULT; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=0; return 0; } double val_real(void); longlong val_int(void); @@ -727,7 +838,7 @@ public: if ((*null_value= is_null())) return 0; #ifdef WORDS_BIGENDIAN - if (table && table->db_low_byte_first) + if (table && table->s->db_low_byte_first) return sint4korr(ptr); #endif long tmp; @@ -752,7 +863,7 @@ public: enum_field_types type() const { return FIELD_TYPE_YEAR;} int store(const char *to,uint 
length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); double val_real(void); longlong val_int(void); String *val_str(String*,String *); @@ -779,7 +890,7 @@ public: enum Item_result cmp_type () const { return INT_RESULT; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=0; return 0; } double val_real(void); longlong val_int(void); @@ -807,8 +918,8 @@ public: enum Item_result cmp_type () const { return INT_RESULT; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); - void store_time(TIME *ltime,timestamp_type type); + int store(longlong nr, bool unsigned_val); + int store_time(TIME *ltime, timestamp_type type); int reset(void) { ptr[0]=ptr[1]=ptr[2]=0; return 0; } double val_real(void); longlong val_int(void); @@ -840,9 +951,10 @@ public: enum_field_types type() const { return FIELD_TYPE_TIME;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_INT24; } enum Item_result cmp_type () const { return INT_RESULT; } + int store_time(TIME *ltime, timestamp_type type); int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int reset(void) { ptr[0]=ptr[1]=ptr[2]=0; return 0; } double val_real(void); longlong val_int(void); @@ -879,8 +991,8 @@ public: uint decimals() const { return DATETIME_DEC; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); - void store_time(TIME *ltime,timestamp_type type); + int store(longlong nr, bool unsigned_val); + int store_time(TIME *ltime, timestamp_type type); int reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=ptr[4]=ptr[5]=ptr[6]=ptr[7]=0; @@ -901,104 +1013,141 @@ public: }; -class Field_string :public 
Field_str { +class Field_string :public Field_longstr { public: + bool can_alter_field_type; Field_string(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs) - :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg,cs) {}; + :Field_longstr(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg, cs), + can_alter_field_type(1) {}; Field_string(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, - struct st_table *table_arg, CHARSET_INFO *cs) - :Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg, cs) {}; + struct st_table *table_arg, CHARSET_INFO *cs) + :Field_longstr((char*) 0, len_arg, maybe_null_arg ? (uchar*) "": 0, 0, + NONE, field_name_arg, table_arg, cs), + can_alter_field_type(1) {}; enum_field_types type() const { - return ((table && table->db_create_options & HA_OPTION_PACK_RECORD && - field_length >= 4) ? - FIELD_TYPE_VAR_STRING : FIELD_TYPE_STRING); + return ((can_alter_field_type && orig_table && + orig_table->s->db_create_options & HA_OPTION_PACK_RECORD && + field_length >= 4) && + orig_table->s->frm_version < FRM_VER_TRUE_VARCHAR ? + MYSQL_TYPE_VAR_STRING : MYSQL_TYPE_STRING); } enum ha_base_keytype key_type() const { return binary() ? HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT; } bool zero_pack() const { return 0; } int reset(void) { - charset()->cset->fill(charset(),ptr,field_length,' '); + charset()->cset->fill(charset(),ptr,field_length, + (has_charset() ? 
' ' : 0)); return 0; } int store(const char *to,uint length,CHARSET_INFO *charset); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int store(double nr) { return Field_str::store(nr); } /* QQ: To be deleted */ double val_real(void); longlong val_int(void); String *val_str(String*,String *); + my_decimal *val_decimal(my_decimal *); int cmp(const char *,const char*); void sort_string(char *buff,uint length); void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); const char *unpack(char* to, const char *from); - int pack_cmp(const char *a,const char *b,uint key_length); - int pack_cmp(const char *b,uint key_length); + int pack_cmp(const char *a,const char *b,uint key_length, + my_bool insert_or_update); + int pack_cmp(const char *b,uint key_length,my_bool insert_or_update); uint packed_col_length(const char *to, uint length); uint max_packed_col_length(uint max_length); uint size_of() const { return sizeof(*this); } enum_field_types real_type() const { return FIELD_TYPE_STRING; } bool has_charset(void) const { return charset() == &my_charset_bin ? 
FALSE : TRUE; } + Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type); }; -class Field_varstring :public Field_str { +class Field_varstring :public Field_longstr { public: - Field_varstring(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg, + /* Store number of bytes used to store length (1 or 2) */ + uint32 length_bytes; + Field_varstring(char *ptr_arg, + uint32 len_arg, uint length_bytes_arg, + uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs) - :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg, cs) - {} + :Field_longstr(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg, cs), + length_bytes(length_bytes_arg) + { + if (table) + table->s->varchar_fields++; + } Field_varstring(uint32 len_arg,bool maybe_null_arg, - const char *field_name_arg, - struct st_table *table_arg, CHARSET_INFO *cs) - :Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg, cs) - {} + const char *field_name_arg, + struct st_table *table_arg, CHARSET_INFO *cs) + :Field_longstr((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0, 0, + NONE, field_name_arg, table_arg, cs), + length_bytes(len_arg < 256 ? 1 :2) + { + if (table) + table->s->varchar_fields++; + } - enum_field_types type() const { return FIELD_TYPE_VAR_STRING; } - enum ha_base_keytype key_type() const - { return binary() ? 
HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + enum_field_types type() const { return MYSQL_TYPE_VARCHAR; } + enum ha_base_keytype key_type() const; bool zero_pack() const { return 0; } - int reset(void) { bzero(ptr,field_length+2); return 0; } - uint32 pack_length() const { return (uint32) field_length+2; } + int reset(void) { bzero(ptr,field_length+length_bytes); return 0; } + uint32 pack_length() const { return (uint32) field_length+length_bytes; } uint32 key_length() const { return (uint32) field_length; } + uint32 sort_length() const + { + return (uint32) field_length + (field_charset == &my_charset_bin ? + length_bytes : 0); + } int store(const char *to,uint length,CHARSET_INFO *charset); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int store(double nr) { return Field_str::store(nr); } /* QQ: To be deleted */ double val_real(void); longlong val_int(void); String *val_str(String*,String *); + my_decimal *val_decimal(my_decimal *); int cmp(const char *,const char*); void sort_string(char *buff,uint length); - void get_key_image(char *buff,uint length, CHARSET_INFO *cs, imagetype type); - void set_key_image(char *buff,uint length, CHARSET_INFO *cs); + void get_key_image(char *buff,uint length, imagetype type); + void set_key_image(char *buff,uint length); void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); char *pack_key(char *to, const char *from, uint max_length); + char *pack_key_from_key_image(char* to, const char *from, uint max_length); const char *unpack(char* to, const char *from); - int pack_cmp(const char *a, const char *b, uint key_length); - int pack_cmp(const char *b, uint key_length); + const char *unpack_key(char* to, const char *from, uint max_length); + int pack_cmp(const char *a, const char *b, uint key_length, + my_bool insert_or_update); + int pack_cmp(const char *b, uint key_length,my_bool insert_or_update); + int cmp_binary(const char *a,const char *b, uint32 
max_length=~0L); + int key_cmp(const byte *,const byte*); + int key_cmp(const byte *str, uint length); uint packed_col_length(const char *to, uint length); uint max_packed_col_length(uint max_length); + uint32 data_length(const char *from); uint size_of() const { return sizeof(*this); } - enum_field_types real_type() const { return FIELD_TYPE_VAR_STRING; } + enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; } bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } + Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); }; -class Field_blob :public Field_str { +class Field_blob :public Field_longstr { protected: uint packlength; String value; // For temporaries @@ -1009,32 +1158,47 @@ public: CHARSET_INFO *cs); Field_blob(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs) - :Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg, cs), + :Field_longstr((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0, 0, + NONE, field_name_arg, table_arg, cs), packlength(4) { flags|= BLOB_FLAG; } + Field_blob(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, + struct st_table *table_arg, CHARSET_INFO *cs, bool set_packlength) + :Field_longstr((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0, 0, + NONE, field_name_arg, table_arg, cs) + { + flags|= BLOB_FLAG; + packlength= 4; + if (set_packlength) + { + uint32 l_char_length= len_arg/cs->mbmaxlen; + packlength= l_char_length <= 255 ? 1 : + l_char_length <= 65535 ? 2 : + l_char_length <= 16777215 ? 3 : 4; + } + } enum_field_types type() const { return FIELD_TYPE_BLOB;} enum ha_base_keytype key_type() const - { return binary() ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + { return binary() ? 
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); double val_real(void); longlong val_int(void); String *val_str(String*,String *); + my_decimal *val_decimal(my_decimal *); int cmp(const char *,const char*); int cmp(const char *a, uint32 a_length, const char *b, uint32 b_length); - int cmp_offset(uint offset); int cmp_binary(const char *a,const char *b, uint32 max_length=~0L); - int cmp_binary_offset(uint row_offset); int key_cmp(const byte *,const byte*); int key_cmp(const byte *str, uint length); uint32 key_length() const { return 0; } void sort_string(char *buff,uint length); uint32 pack_length() const - { return (uint32) (packlength+table->blob_ptr_size); } + { return (uint32) (packlength+table->s->blob_ptr_size); } + uint32 sort_length() const; inline uint32 max_data_length() const { return (uint32) (((ulonglong) 1 << (packlength*8)) -1); @@ -1060,8 +1224,8 @@ public: store_length(length); memcpy_fixed(ptr+packlength,&data,sizeof(char*)); } - void get_key_image(char *buff,uint length, CHARSET_INFO *cs, imagetype type); - void set_key_image(char *buff,uint length, CHARSET_INFO *cs); + void get_key_image(char *buff,uint length, imagetype type); + void set_key_image(char *buff,uint length); void sql_type(String &str) const; inline bool copy() { char *tmp; @@ -1075,23 +1239,25 @@ public: return 0; } char *pack(char *to, const char *from, uint max_length= ~(uint) 0); - const char *unpack(char *to, const char *from); char *pack_key(char *to, const char *from, uint max_length); char *pack_key_from_key_image(char* to, const char *from, uint max_length); + const char *unpack(char *to, const char *from); const char *unpack_key(char* to, const char *from, uint max_length); - int pack_cmp(const char *a, const char *b, uint key_length); - int pack_cmp(const char *b, uint key_length); + int pack_cmp(const char *a, const char 
*b, uint key_length, + my_bool insert_or_update); + int pack_cmp(const char *b, uint key_length,my_bool insert_or_update); uint packed_col_length(const char *col_ptr, uint length); uint max_packed_col_length(uint max_length); void free() { value.free(); } inline void clear_temporary() { bzero((char*) &value,sizeof(value)); } - friend void field_conv(Field *to,Field *from); + friend int field_conv(Field *to,Field *from); uint size_of() const { return sizeof(*this); } bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } - uint32 max_length(); + uint32 max_display_length(); }; + #ifdef HAVE_SPATIAL class Field_geom :public Field_blob { public: @@ -1109,19 +1275,20 @@ public: :Field_blob(len_arg, maybe_null_arg, field_name_arg, table_arg, &my_charset_bin) { geom_type= geom_type_arg; } - enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY; } + enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY2; } enum_field_types type() const { return FIELD_TYPE_GEOMETRY; } void sql_type(String &str) const; int store(const char *to, uint length, CHARSET_INFO *charset); - int store(double nr) { return 1; } - int store(longlong nr) { return 1; } + int store(double nr); + int store(longlong nr, bool unsigned_val); + int store_decimal(const my_decimal *); + void get_key_image(char *buff,uint length,imagetype type); + uint size_of() const { return sizeof(*this); } int reset(void) { return !maybe_null() || Field_blob::reset(); } - - void get_key_image(char *buff,uint length, CHARSET_INFO *cs,imagetype type); - void set_key_image(char *buff,uint length, CHARSET_INFO *cs); }; #endif /*HAVE_SPATIAL*/ + class Field_enum :public Field_str { protected: uint packlength; @@ -1139,13 +1306,14 @@ public: { flags|=ENUM_FLAG; } + Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type); enum_field_types type() const { return FIELD_TYPE_STRING; } enum Item_result cmp_type () const { return INT_RESULT; } enum Item_result 
cast_to_int_type () const { return INT_RESULT; } enum ha_base_keytype key_type() const; int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); - int store(longlong nr); + int store(longlong nr, bool unsigned_val); int reset() { bzero(ptr,packlength); return 0; } double val_real(void); longlong val_int(void); @@ -1181,8 +1349,8 @@ public: flags=(flags & ~ENUM_FLAG) | SET_FLAG; } int store(const char *to,uint length,CHARSET_INFO *charset); - int store(double nr) { return Field_set::store((longlong) nr); } - int store(longlong nr); + int store(double nr) { return Field_set::store((longlong) nr, FALSE); } + int store(longlong nr, bool unsigned_val); virtual bool zero_pack() const { return 1; } String *val_str(String*,String *); void sql_type(String &str) const; @@ -1191,11 +1359,94 @@ public: }; +class Field_bit :public Field { +public: + uchar *bit_ptr; // position in record where 'uneven' bits store + uchar bit_ofs; // offset to 'uneven' high bits + uint bit_len; // number of 'uneven' high bits + uint bytes_in_rec; + Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg, + enum utype unireg_check_arg, const char *field_name_arg, + struct st_table *table_arg); + enum_field_types type() const { return FIELD_TYPE_BIT; } + enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; } + uint32 key_length() const { return (uint32) (field_length + 7) / 8; } + uint32 max_display_length() { return field_length; } + uint size_of() const { return sizeof(*this); } + Item_result result_type () const { return INT_RESULT; } + int reset(void) { bzero(ptr, bytes_in_rec); return 0; } + int store(const char *to, uint length, CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr, bool unsigned_val); + int store_decimal(const my_decimal *); + double val_real(void); + longlong val_int(void); + String *val_str(String*, String *); + my_decimal *val_decimal(my_decimal *); + 
int cmp(const char *a, const char *b) + { return cmp_binary(a, b); } + int key_cmp(const byte *a, const byte *b) + { return cmp_binary((char *) a, (char *) b); } + int key_cmp(const byte *str, uint length); + int cmp_offset(uint row_offset); + int cmp_binary_offset(uint row_offset) + { return cmp_offset(row_offset); } + void get_key_image(char *buff, uint length, imagetype type); + void set_key_image(char *buff, uint length) + { Field_bit::store(buff, length, &my_charset_bin); } + void sort_string(char *buff, uint length) + { get_key_image(buff, length, itRAW); } + uint32 pack_length() const { return (uint32) (field_length + 7) / 8; } + uint32 pack_length_in_rec() const { return bytes_in_rec; } + void sql_type(String &str) const; + char *pack(char *to, const char *from, uint max_length=~(uint) 0); + const char *unpack(char* to, const char *from); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); + inline void move_field(my_ptrdiff_t ptr_diff) + { + Field::move_field(ptr_diff); + bit_ptr= ADD_TO_PTR(bit_ptr, ptr_diff, uchar*); + } + void set_bit_ptr(uchar *bit_ptr_arg, uchar bit_ofs_arg) + { + bit_ptr= bit_ptr_arg; + bit_ofs= bit_ofs_arg; + } + bool eq(Field *field) + { + return (Field::eq(field) && + field->type() == type() && + bit_ptr == ((Field_bit *)field)->bit_ptr && + bit_ofs == ((Field_bit *)field)->bit_ofs); + } +}; + + +class Field_bit_as_char: public Field_bit { +public: + Field_bit_as_char(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, + enum utype unireg_check_arg, const char *field_name_arg, + struct st_table *table_arg); + enum ha_base_keytype key_type() const { return HA_KEYTYPE_BINARY; } + uint size_of() const { return sizeof(*this); } + int store(const char *to, uint length, CHARSET_INFO *charset); + int store(double nr) { return Field_bit::store(nr); } + int store(longlong nr, bool unsigned_val) + { return Field_bit::store(nr, unsigned_val); } + 
void sql_type(String &str) const; +}; + + /* Create field class for CREATE TABLE */ -class create_field :public Sql_alloc { +class create_field :public Sql_alloc +{ public: const char *field_name; const char *change; // If done with alter table @@ -1207,12 +1458,13 @@ public: At various stages in execution this can be length of field in bytes or max number of characters. */ - uint32 length; + ulong length; /* - The value of 'length' before a call to create_length_to_internal_length + The value of `length' as set by parser: is the number of characters + for most of the types, or of bytes for BLOBs or numeric types. */ uint32 char_length; - uint decimals,flags,pack_length; + uint decimals, flags, pack_length, key_length; Field::utype unireg_check; TYPELIB *interval; // Which interval to use TYPELIB *save_interval; // Temporary copy for the above @@ -1227,6 +1479,17 @@ public: create_field() :after(0) {} create_field(Field *field, Field *orig_field); void create_length_to_internal_length(void); + + /* Init for a tmp table field. To be extended if need be. 
*/ + void init_for_tmp_table(enum_field_types sql_type_arg, + uint32 max_length, uint32 decimals, + bool maybe_null, bool is_unsigned); + + bool init(THD *thd, char *field_name, enum_field_types type, char *length, + char *decimals, uint type_modifier, Item *default_value, + Item *on_update_value, LEX_STRING *comment, char *change, + List<String> *interval_list, CHARSET_INFO *cs, + uint uint_geom_type); }; @@ -1279,11 +1542,10 @@ Field *make_field(char *ptr, uint32 field_length, TYPELIB *interval, const char *field_name, struct st_table *table); uint pack_length_to_packflag(uint type); +enum_field_types get_blob_type_from_length(ulong length); uint32 calc_pack_length(enum_field_types type,uint32 length); int set_field_to_null(Field *field); int set_field_to_null_with_conversions(Field *field, bool no_conversions); -bool test_if_int(const char *str, int length, const char *int_end, - CHARSET_INFO *cs); /* The following are for the interface with the .frm file @@ -1299,9 +1561,12 @@ bool test_if_int(const char *str, int length, const char *int_end, #define FIELDFLAG_BLOB 1024 // mangled with decimals! #define FIELDFLAG_GEOM 2048 // mangled with decimals! 
+#define FIELDFLAG_TREAT_BIT_AS_CHAR 4096 /* use Field_bit_as_char */ + #define FIELDFLAG_LEFT_FULLSCREEN 8192 #define FIELDFLAG_RIGHT_FULLSCREEN 16384 #define FIELDFLAG_FORMAT_NUMBER 16384 // predit: ###,,## in output +#define FIELDFLAG_NO_DEFAULT 16384 /* sql */ #define FIELDFLAG_SUM ((uint) 32768)// predit: +#fieldflag #define FIELDFLAG_MAYBE_NULL ((uint) 32768)// sql #define FIELDFLAG_PACK_SHIFT 3 @@ -1310,8 +1575,6 @@ bool test_if_int(const char *str, int length, const char *int_end, #define FIELDFLAG_NUM_SCREEN_TYPE 0x7F01 #define FIELDFLAG_ALFA_SCREEN_TYPE 0x7800 -#define FIELD_SORT_REVERSE 16384 - #define MTYP_TYPENR(type) (type & 127) /* Remove bits from type */ #define f_is_dec(x) ((x) & FIELDFLAG_DECIMAL) @@ -1329,3 +1592,5 @@ bool test_if_int(const char *str, int length, const char *int_end, #define f_is_equ(x) ((x) & (1+2+FIELDFLAG_PACK+31*256)) #define f_settype(x) (((int) x) << FIELDFLAG_PACK_SHIFT) #define f_maybe_null(x) (x & FIELDFLAG_MAYBE_NULL) +#define f_no_default(x) (x & FIELDFLAG_NO_DEFAULT) +#define f_bit_as_char(x) ((x) & FIELDFLAG_TREAT_BIT_AS_CHAR) diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 59b550572c3..429d914db97 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -121,13 +120,11 @@ set_field_to_null(Field *field) field->reset(); if (current_thd->count_cuted_fields == CHECK_FIELD_WARN) { - field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_TRUNCATED, 1); + field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1); return 0; } if (!current_thd->no_errors) - my_printf_error(ER_BAD_NULL_ERROR,ER(ER_BAD_NULL_ERROR),MYF(0), - field->field_name); + my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name); return -1; } @@ -176,7 +173,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions) if (field == field->table->next_number_field) { field->table->auto_increment_field_not_null= FALSE; - return 0; // field is set in handler.cc + return 0; // field is set in fill_record() } if (current_thd->count_cuted_fields == CHECK_FIELD_WARN) { @@ -185,8 +182,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions) return 0; } if (!current_thd->no_errors) - my_printf_error(ER_BAD_NULL_ERROR,ER(ER_BAD_NULL_ERROR),MYF(0), - field->field_name); + my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name); return -1; } @@ -232,7 +228,7 @@ static void do_copy_not_null(Copy_field *copy) if (*copy->from_null_ptr & copy->from_bit) { copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_TRUNCATED, 1); + WARN_DATA_TRUNCATED, 1); copy->to_field->reset(); } else @@ -307,14 +303,31 @@ static void do_field_string(Copy_field *copy) char buff[MAX_FIELD_WIDTH]; copy->tmp.set_quick(buff,sizeof(buff),copy->tmp.charset()); copy->from_field->val_str(©->tmp); - copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(),copy->tmp.charset()); + copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(), + copy->tmp.charset()); +} + + +static void do_field_varbinary_pre50(Copy_field *copy) +{ + char buff[MAX_FIELD_WIDTH]; + 
copy->tmp.set_quick(buff,sizeof(buff),copy->tmp.charset()); + copy->from_field->val_str(©->tmp); + + /* Use the same function as in 4.1 to trim trailing spaces */ + uint length= my_lengthsp_8bit(&my_charset_bin, copy->tmp.c_ptr_quick(), + copy->from_field->field_length); + + copy->to_field->store(copy->tmp.c_ptr_quick(), length, + copy->tmp.charset()); } static void do_field_int(Copy_field *copy) { - longlong value=copy->from_field->val_int(); - copy->to_field->store(value); + longlong value= copy->from_field->val_int(); + copy->to_field->store(value, + test(copy->from_field->flags & UNSIGNED_FLAG)); } static void do_field_real(Copy_field *copy) @@ -324,6 +337,13 @@ static void do_field_real(Copy_field *copy) } +static void do_field_decimal(Copy_field *copy) +{ + my_decimal value; + copy->to_field->store_decimal(copy->from_field->val_decimal(&value)); +} + + /* string copy for single byte characters set when to string is shorter than from string @@ -341,7 +361,7 @@ static void do_cut_string(Copy_field *copy) MY_SEQ_SPACES) < copy->from_length - copy->to_length) { copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_TRUNCATED, 1); + WARN_DATA_TRUNCATED, 1); } } @@ -369,7 +389,7 @@ static void do_cut_string_complex(Copy_field *copy) MY_SEQ_SPACES) < (copy->from_length - copy_length)) { copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_TRUNCATED, 1); + WARN_DATA_TRUNCATED, 1); } if (copy_length < copy->to_length) @@ -380,6 +400,16 @@ static void do_cut_string_complex(Copy_field *copy) +static void do_expand_binary(Copy_field *copy) +{ + CHARSET_INFO *cs= copy->from_field->charset(); + memcpy(copy->to_ptr,copy->from_ptr,copy->from_length); + cs->cset->fill(cs, copy->to_ptr+copy->from_length, + copy->to_length-copy->from_length, '\0'); +} + + + static void do_expand_string(Copy_field *copy) { CHARSET_INFO *cs= copy->from_field->charset(); @@ -388,20 +418,52 @@ static void do_expand_string(Copy_field *copy) 
copy->to_length-copy->from_length, ' '); } -static void do_varstring(Copy_field *copy) + +static void do_varstring1(Copy_field *copy) +{ + uint length= (uint) *(uchar*) copy->from_ptr; + if (length > copy->to_length- 1) + { + length=copy->to_length - 1; + if (current_thd->count_cuted_fields) + copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + WARN_DATA_TRUNCATED, 1); + } + *(uchar*) copy->to_ptr= (uchar) length; + memcpy(copy->to_ptr+1, copy->from_ptr + 1, length); +} + + +static void do_varstring2(Copy_field *copy) { uint length=uint2korr(copy->from_ptr); - if (length > copy->to_length-2) + if (length > copy->to_length- HA_KEY_BLOB_LENGTH) { - length=copy->to_length-2; + length=copy->to_length-HA_KEY_BLOB_LENGTH; if (current_thd->count_cuted_fields) copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_DATA_TRUNCATED, 1); + WARN_DATA_TRUNCATED, 1); } int2store(copy->to_ptr,length); - memcpy(copy->to_ptr+2, copy->from_ptr,length); + memcpy(copy->to_ptr+HA_KEY_BLOB_LENGTH, copy->from_ptr + HA_KEY_BLOB_LENGTH, + length); } + +static void do_varstring2_mb(Copy_field *copy) +{ + int well_formed_error; + CHARSET_INFO *cs= copy->from_field->charset(); + uint char_length= (copy->to_length - HA_KEY_BLOB_LENGTH) / cs->mbmaxlen; + uint from_length= uint2korr(copy->from_ptr); + const char *from_beg= copy->from_ptr + HA_KEY_BLOB_LENGTH; + uint length= cs->cset->well_formed_len(cs, from_beg, from_beg + from_length, + char_length, &well_formed_error); + int2store(copy->to_ptr, length); + memcpy(copy->to_ptr+HA_KEY_BLOB_LENGTH, from_beg, length); +} + + /*************************************************************************** ** The different functions that fills in a Copy_field class ***************************************************************************/ @@ -507,26 +569,49 @@ void Copy_field::set(Field *to,Field *from,bool save) void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) { + bool compatible_db_low_byte_first= 
(to->table->s->db_low_byte_first == + from->table->s->db_low_byte_first); if (to->flags & BLOB_FLAG) { if (!(from->flags & BLOB_FLAG) || from->charset() != to->charset()) return do_conv_blob; - if (from_length != to_length || - to->table->db_low_byte_first != from->table->db_low_byte_first) + if (from_length != to_length || !compatible_db_low_byte_first) { // Correct pointer to point at char pointer - to_ptr+=to_length - to->table->blob_ptr_size; - from_ptr+=from_length- from->table->blob_ptr_size; + to_ptr+= to_length - to->table->s->blob_ptr_size; + from_ptr+= from_length- from->table->s->blob_ptr_size; return do_copy_blob; } } else { + if (to->real_type() == FIELD_TYPE_BIT || + from->real_type() == FIELD_TYPE_BIT) + return do_field_int; + if (to->result_type() == DECIMAL_RESULT) + return do_field_decimal; // Check if identical fields if (from->result_type() == STRING_RESULT) { + /* + Detect copy from pre 5.0 varbinary to varbinary as of 5.0 and + use special copy function that removes trailing spaces and thus + repairs data. + */ + if (from->type() == MYSQL_TYPE_VAR_STRING && !from->has_charset() && + to->type() == MYSQL_TYPE_VARCHAR && !to->has_charset()) + return do_field_varbinary_pre50; + + /* + If we are copying date or datetime's we have to check the dates + if we don't allow 'all' dates. 
+ */ if (to->real_type() != from->real_type() || - to->table->db_low_byte_first != from->table->db_low_byte_first) + !compatible_db_low_byte_first || + ((to->table->in_use->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | MODE_INVALID_DATES)) && + to->type() == FIELD_TYPE_DATE || + to->type() == FIELD_TYPE_DATETIME)) { if (from->real_type() == FIELD_TYPE_ENUM || from->real_type() == FIELD_TYPE_SET) @@ -542,18 +627,31 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) } else if (to->charset() != from->charset()) return do_field_string; - else if (to->real_type() == FIELD_TYPE_VAR_STRING && to_length != - from_length) - return do_varstring; + else if (to->real_type() == MYSQL_TYPE_VARCHAR) + { + if (((Field_varstring*) to)->length_bytes != + ((Field_varstring*) from)->length_bytes) + return do_field_string; + if (to_length != from_length) + return (((Field_varstring*) to)->length_bytes == 1 ? + do_varstring1 : (from->charset()->mbmaxlen == 1 ? + do_varstring2 : do_varstring2_mb)); + } else if (to_length < from_length) return (from->charset()->mbmaxlen == 1 ? 
do_cut_string : do_cut_string_complex); else if (to_length > from_length) - return do_expand_string; + { + if ((to->flags & BINARY_FLAG) != 0) + return do_expand_binary; + else + return do_expand_string; + } + } else if (to->real_type() != from->real_type() || to_length != from_length || - to->table->db_low_byte_first != from->table->db_low_byte_first) + !compatible_db_low_byte_first) { if (to->real_type() == FIELD_TYPE_DECIMAL || to->result_type() == STRING_RESULT) @@ -564,8 +662,7 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) } else { - if (!to->eq_def(from) || - to->table->db_low_byte_first != from->table->db_low_byte_first) + if (!to->eq_def(from) || !compatible_db_low_byte_first) { if (to->real_type() == FIELD_TYPE_DECIMAL) return do_field_string; @@ -591,37 +688,51 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) /* Simple quick field convert that is called on insert */ -void field_conv(Field *to,Field *from) +int field_conv(Field *to,Field *from) { - if (to->real_type() == from->real_type()) + if (to->real_type() == from->real_type() && + !(to->type() == FIELD_TYPE_BLOB && to->table->copy_blobs)) { if (to->pack_length() == from->pack_length() && !(to->flags & UNSIGNED_FLAG && !(from->flags & UNSIGNED_FLAG)) && to->real_type() != FIELD_TYPE_ENUM && to->real_type() != FIELD_TYPE_SET && - (to->real_type() != FIELD_TYPE_DECIMAL || + to->real_type() != FIELD_TYPE_BIT && + (to->real_type() != FIELD_TYPE_NEWDECIMAL || (to->field_length == from->field_length && - (((Field_num*) to)->dec == ((Field_num*) from)->dec))) && + (((Field_num*)to)->dec == ((Field_num*)from)->dec))) && from->charset() == to->charset() && - to->table->db_low_byte_first == from->table->db_low_byte_first) + to->table->s->db_low_byte_first == from->table->s->db_low_byte_first && + (!(to->table->in_use->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | MODE_INVALID_DATES)) || + to->type() != FIELD_TYPE_DATE && + to->type() != 
FIELD_TYPE_DATETIME) && + (from->real_type() != MYSQL_TYPE_VARCHAR || + ((Field_varstring*)from)->length_bytes == + ((Field_varstring*)to)->length_bytes)) { // Identical fields #ifdef HAVE_purify /* This may happen if one does 'UPDATE ... SET x=x' */ if (to->ptr != from->ptr) #endif - memcpy(to->ptr,from->ptr,to->pack_length()); - return; + memcpy(to->ptr,from->ptr,to->pack_length()); + return 0; } } if (to->type() == FIELD_TYPE_BLOB) { // Be sure the value is stored Field_blob *blob=(Field_blob*) to; from->val_str(&blob->value); - if (!blob->value.is_alloced() && - from->real_type() != FIELD_TYPE_STRING) + /* + Copy value if copy_blobs is set, or source is not a string and + we have a pointer to its internal string conversion buffer. + */ + if (to->table->copy_blobs || + (!blob->value.is_alloced() && + from->real_type() != MYSQL_TYPE_STRING && + from->real_type() != MYSQL_TYPE_VARCHAR)) blob->value.copy(); - blob->store(blob->value.ptr(),blob->value.length(),from->charset()); - return; + return blob->store(blob->value.ptr(),blob->value.length(),from->charset()); } if ((from->result_type() == STRING_RESULT && (to->result_type() == STRING_RESULT || @@ -632,10 +743,21 @@ void field_conv(Field *to,Field *from) char buff[MAX_FIELD_WIDTH]; String result(buff,sizeof(buff),from->charset()); from->val_str(&result); - to->store(result.c_ptr_quick(),result.length(),from->charset()); + /* + We use c_ptr_quick() here to make it easier if to is a float/double + as the conversion routines will do a copy of the result doesn't + end with \0. Can be replaced with .ptr() when we have our own + string->double conversion. 
+ */ + return to->store(result.c_ptr_quick(),result.length(),from->charset()); } else if (from->result_type() == REAL_RESULT) - to->store(from->val_real()); + return to->store(from->val_real()); + else if (from->result_type() == DECIMAL_RESULT) + { + my_decimal buff; + return to->store_decimal(from->val_decimal(&buff)); + } else - to->store(from->val_int()); + return to->store(from->val_int(), test(from->flags & UNSIGNED_FLAG)); } diff --git a/sql/filesort.cc b/sql/filesort.cc index 38a49e24263..23d652cb8cc 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -48,8 +47,10 @@ static int merge_index(SORTPARAM *param,uchar *sort_buffer, BUFFPEK *buffpek, uint maxbuffer,IO_CACHE *tempfile, IO_CACHE *outfile); -static bool save_index(SORTPARAM *param,uchar **sort_keys, uint count); -static uint sortlength(SORT_FIELD *sortorder, uint s_length, +static bool save_index(SORTPARAM *param,uchar **sort_keys, uint count, + FILESORT_INFO *table_sort); +static uint suffix_length(ulong string_length); +static uint sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length, bool *multi_byte_charset); static SORT_ADDON_FIELD *get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength); @@ -97,7 +98,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, uint maxbuffer; BUFFPEK *buffpek; ha_rows records= HA_POS_ERROR; - uchar **sort_keys; + uchar **sort_keys= 0; IO_CACHE tempfile, 
buffpek_pointers, *selected_records_file, *outfile; SORTPARAM param; bool multi_byte_charset; @@ -106,19 +107,29 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, #ifdef SKIP_DBUG_IN_FILESORT DBUG_PUSH(""); /* No DBUG here */ #endif - - outfile= table->sort.io_cache; + FILESORT_INFO table_sort; + TABLE_LIST *tab= table->pos_in_table_list; + Item_subselect *subselect= tab ? tab->containing_subselect() : 0; + /* + Don't use table->sort in filesort as it is also used by + QUICK_INDEX_MERGE_SELECT. Work with a copy and put it back at the end + when index_merge select has finished with it. + */ + memcpy(&table_sort, &table->sort, sizeof(FILESORT_INFO)); + table->sort.io_cache= NULL; + + outfile= table_sort.io_cache; my_b_clear(&tempfile); my_b_clear(&buffpek_pointers); buffpek=0; - sort_keys= (uchar **) NULL; error= 1; bzero((char*) ¶m,sizeof(param)); - param.sort_length= sortlength(sortorder, s_length, &multi_byte_charset); + param.sort_length= sortlength(thd, sortorder, s_length, &multi_byte_charset); param.ref_length= table->file->ref_length; param.addon_field= 0; param.addon_length= 0; - if (!(table->tmp_table || table->fulltext_searched)) + if (!(table->file->table_flags() & HA_FAST_KEY_READ) && + !table->fulltext_searched) { /* Get the descriptors of all fields whose values are appended @@ -128,14 +139,15 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, param.sort_length, ¶m.addon_length); } - table->sort.addon_buf= 0; - table->sort.addon_length= param.addon_length; - table->sort.addon_field= param.addon_field; - table->sort.unpack= unpack_addon_fields; + + table_sort.addon_buf= 0; + table_sort.addon_length= param.addon_length; + table_sort.addon_field= param.addon_field; + table_sort.unpack= unpack_addon_fields; if (param.addon_field) { param.res_length= param.addon_length; - if (!(table->sort.addon_buf= (byte *) my_malloc(param.addon_length, + if (!(table_sort.addon_buf= (byte *) 
my_malloc(param.addon_length, MYF(MY_WME)))) goto err; } @@ -153,11 +165,11 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, if (select && select->quick) { - statistic_increment(filesort_range_count, &LOCK_status); + statistic_increment(thd->status_var.filesort_range_count, &LOCK_status); } else { - statistic_increment(filesort_scan_count, &LOCK_status); + statistic_increment(thd->status_var.filesort_scan_count, &LOCK_status); } #ifdef CAN_TRUST_RANGE if (select && select->quick && select->quick->records > 0L) @@ -190,13 +202,15 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, ulong old_memavl; ulong keys= memavl/(param.rec_length+sizeof(char*)); param.keys=(uint) min(records+1, keys); - if ((sort_keys= (uchar **) make_char_array(param.keys, param.rec_length, - MYF(0)))) + if (table_sort.sort_keys || + (table_sort.sort_keys= (uchar **) make_char_array(param.keys, param.rec_length, + MYF(0)))) break; old_memavl=memavl; if ((memavl=memavl/4*3) < min_sort_memory && old_memavl > min_sort_memory) memavl= min_sort_memory; } + sort_keys= table_sort.sort_keys; if (memavl < min_sort_memory) { my_error(ER_OUTOFMEMORY,MYF(ME_ERROR+ME_WAITTANG), @@ -218,13 +232,17 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, if (maxbuffer == 0) // The whole set is in memory { - if (save_index(¶m,sort_keys,(uint) records)) + if (save_index(¶m,sort_keys,(uint) records, &table_sort)) goto err; } else { - if (!(buffpek=read_buffpek_from_file(&buffpek_pointers, maxbuffer))) + if (!table_sort.buffpek && table_sort.buffpek_len < maxbuffer && + !(table_sort.buffpek= + (byte *) read_buffpek_from_file(&buffpek_pointers, maxbuffer))) goto err; + buffpek= (BUFFPEK *) table_sort.buffpek; + table_sort.buffpek_len= maxbuffer; close_cached_file(&buffpek_pointers); /* Open cached file if it isn't open */ if (! 
my_b_inited(outfile) && @@ -257,8 +275,14 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, err: if (param.tmp_buffer) x_free(param.tmp_buffer); - x_free((gptr) sort_keys); - x_free((gptr) buffpek); + if (!subselect || !subselect->is_uncacheable()) + { + x_free((gptr) sort_keys); + table_sort.sort_keys= 0; + x_free((gptr) buffpek); + table_sort.buffpek= 0; + table_sort.buffpek_len= 0; + } close_cached_file(&tempfile); close_cached_file(&buffpek_pointers); if (my_b_inited(outfile)) @@ -274,25 +298,42 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, } } if (error) - my_error(ER_FILSORT_ABORT,MYF(ME_ERROR+ME_WAITTANG)); + my_message(ER_FILSORT_ABORT, ER(ER_FILSORT_ABORT), + MYF(ME_ERROR+ME_WAITTANG)); else - statistic_add(filesort_rows, (ulong) records, &LOCK_status); + statistic_add(thd->status_var.filesort_rows, + (ulong) records, &LOCK_status); *examined_rows= param.examined_rows; #ifdef SKIP_DBUG_IN_FILESORT DBUG_POP(); /* Ok to DBUG */ #endif - DBUG_PRINT("exit",("records: %ld",records)); + memcpy(&table->sort, &table_sort, sizeof(FILESORT_INFO)); + DBUG_PRINT("exit",("records: %ld", (long) records)); DBUG_RETURN(error ? 
HA_POS_ERROR : records); } /* filesort */ -void filesort_free_buffers(TABLE *table) +void filesort_free_buffers(TABLE *table, bool full) { if (table->sort.record_pointers) { my_free((gptr) table->sort.record_pointers,MYF(0)); table->sort.record_pointers=0; } + if (full) + { + if (table->sort.sort_keys ) + { + x_free((gptr) table->sort.sort_keys); + table->sort.sort_keys= 0; + } + if (table->sort.buffpek) + { + x_free((gptr) table->sort.buffpek); + table->sort.buffpek= 0; + table->sort.buffpek_len= 0; + } + } if (table->sort.addon_buf) { my_free((char *) table->sort.addon_buf, MYF(0)); @@ -388,7 +429,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, my_off_t record; TABLE *sort_form; THD *thd= current_thd; - volatile my_bool *killed= &thd->killed; + volatile THD::killed_state *killed= &thd->killed; handler *file; DBUG_ENTER("find_all_keys"); DBUG_PRINT("info",("using: %s",(select?select->quick?"ranges":"where":"every row"))); @@ -408,21 +449,30 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, next_pos=ref_pos; if (! indexfile && ! 
quick_select) { - file->reset(); // QQ; Shouldn't be needed - if (sort_form->key_read) // QQ Can be removed after the reset - file->extra(HA_EXTRA_KEYREAD); // QQ is removed next_pos=(byte*) 0; /* Find records in sequence */ file->ha_rnd_init(1); file->extra_opt(HA_EXTRA_CACHE, current_thd->variables.read_buff_size); } + READ_RECORD read_record_info; + if (quick_select) + { + if (select->quick->reset()) + DBUG_RETURN(HA_POS_ERROR); + init_read_record(&read_record_info, current_thd, select->quick->head, + select, 1, 1); + } + for (;;) { if (quick_select) { - if ((error=select->quick->get_next())) - break; + if ((error= read_record_info.read_record(&read_record_info))) + { + error= HA_ERR_END_OF_FILE; + break; + } file->position(sort_form->record[0]); } else /* Not quick-select */ @@ -441,8 +491,8 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, error=file->rnd_next(sort_form->record[0]); if (!flag) { - ha_store_ptr(ref_pos,ref_length,record); // Position to row - record+=sort_form->db_record_offset; + my_store_ptr(ref_pos,ref_length,record); // Position to row + record+= sort_form->s->db_record_offset; } else if (!error) file->position(sort_form->record[0]); @@ -450,6 +500,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, if (error && error != HA_ERR_RECORD_DELETED) break; } + if (*killed) { DBUG_PRINT("info",("Sort killed by user")); @@ -479,9 +530,21 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, if (thd->net.report_error) DBUG_RETURN(HA_POS_ERROR); } - (void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */ - if (!next_pos) - file->ha_rnd_end(); + if (quick_select) + { + /* + index_merge quick select uses table->sort when retrieving rows, so free + resoures it has allocated. 
+ */ + end_read_record(&read_record_info); + } + else + { + (void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */ + if (!next_pos) + file->ha_rnd_end(); + } + DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos)); if (error != HA_ERR_END_OF_FILE) { @@ -536,10 +599,10 @@ write_keys(SORTPARAM *param, register uchar **sort_keys, uint count, if (!my_b_inited(tempfile) && open_cached_file(tempfile, mysql_tmpdir, TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME))) - goto err; /* purecov: inspected */ + goto err; /* purecov: inspected */ buffpek.file_pos= my_b_tell(tempfile); if ((ha_rows) count > param->max_rows) - count=(uint) param->max_rows; /* purecov: inspected */ + count=(uint) param->max_rows; /* purecov: inspected */ buffpek.count=(ha_rows) count; for (end=sort_keys+count ; sort_keys != end ; sort_keys++) if (my_b_write(tempfile, (byte*) *sort_keys, (uint) rec_length)) @@ -553,6 +616,29 @@ err: } /* write_keys */ +/* + Store length as suffix in high-byte-first order +*/ + +static inline void store_length(uchar *to, uint length, uint pack_length) +{ + switch (pack_length) { + case 1: + *to= (uchar) length; + break; + case 2: + mi_int2store(to, length); + break; + case 3: + mi_int3store(to, length); + break; + default: + mi_int4store(to, length); + break; + } +} + + /* makes a sort-key from record */ static void make_sortkey(register SORTPARAM *param, @@ -588,20 +674,23 @@ static void make_sortkey(register SORTPARAM *param, else { // Item Item *item=sort_field->item; + maybe_null= item->maybe_null; switch (sort_field->result_type) { case STRING_RESULT: - { + { CHARSET_INFO *cs=item->collation.collation; char fill_char= ((cs->state & MY_CS_BINSORT) ? (char) 0 : ' '); + int diff; + uint sort_field_length; - if ((maybe_null=item->maybe_null)) + if (maybe_null) *to++=1; /* All item->str() to use some extra byte for end null.. 
*/ String tmp((char*) to,sort_field->length+4,cs); - String *res=item->val_str(&tmp); + String *res= item->str_result(&tmp); if (!res) { - if (item->maybe_null) + if (maybe_null) bzero((char*) to-1,sort_field->length+1); else { @@ -611,24 +700,32 @@ static void make_sortkey(register SORTPARAM *param, } break; } - length=res->length(); - int diff=(int) (sort_field->length-length); + length= res->length(); + sort_field_length= sort_field->length - sort_field->suffix_length; + diff=(int) (sort_field_length - length); if (diff < 0) { diff=0; /* purecov: inspected */ - length=sort_field->length; + length= sort_field_length; } + if (sort_field->suffix_length) + { + /* Store length last in result_string */ + store_length(to + sort_field_length, length, + sort_field->suffix_length); + } if (sort_field->need_strxnfrm) { char *from=(char*) res->ptr(); + uint tmp_length; if ((unsigned char *)from == to) { set_if_smaller(length,sort_field->length); memcpy(param->tmp_buffer,from,length); from=param->tmp_buffer; } - uint tmp_length=my_strnxfrm(cs,to,sort_field->length, - (unsigned char *) from, length); + tmp_length= my_strnxfrm(cs,to,sort_field->length, + (unsigned char *) from, length); DBUG_ASSERT(tmp_length == sort_field->length); } else @@ -637,24 +734,26 @@ static void make_sortkey(register SORTPARAM *param, cs->cset->fill(cs, (char *)to+length,diff,fill_char); } break; - } + } case INT_RESULT: { - longlong value=item->val_int(); - if ((maybe_null=item->maybe_null)) + longlong value= item->val_int_result(); + if (maybe_null) + { *to++=1; /* purecov: inspected */ - if (item->null_value) - { - if (item->maybe_null) - bzero((char*) to-1,sort_field->length+1); - else - { - DBUG_PRINT("warning", - ("Got null on something that shouldn't be null")); - bzero((char*) to,sort_field->length); - } - break; - } + if (item->null_value) + { + if (maybe_null) + bzero((char*) to-1,sort_field->length+1); + else + { + DBUG_PRINT("warning", + ("Got null on something that shouldn't be null")); 
+ bzero((char*) to,sort_field->length); + } + break; + } + } #if SIZEOF_LONG_LONG > 4 to[7]= (uchar) value; to[6]= (uchar) (value >> 8); @@ -678,17 +777,37 @@ static void make_sortkey(register SORTPARAM *param, #endif break; } + case DECIMAL_RESULT: + { + my_decimal dec_buf, *dec_val= item->val_decimal_result(&dec_buf); + if (maybe_null) + { + if (item->null_value) + { + bzero((char*)to, sort_field->length+1); + to++; + break; + } + *to++=1; + } + my_decimal2binary(E_DEC_FATAL_ERROR, dec_val, (char*)to, + item->max_length - (item->decimals ? 1:0), + item->decimals); + break; + } case REAL_RESULT: { - double value=item->val(); - if ((maybe_null=item->null_value)) - { - bzero((char*) to,sort_field->length+1); - to++; - break; - } - if ((maybe_null=item->maybe_null)) + double value= item->val_result(); + if (maybe_null) + { + if (item->null_value) + { + bzero((char*) to,sort_field->length+1); + to++; + break; + } *to++=1; + } change_double_for_sort(value,(byte*) to); break; } @@ -724,7 +843,7 @@ static void make_sortkey(register SORTPARAM *param, */ SORT_ADDON_FIELD *addonf= param->addon_field; uchar *nulls= to; - DBUG_ASSERT(addonf); + DBUG_ASSERT(addonf != 0); bzero((char *) nulls, addonf->offset); to+= addonf->offset; for ( ; (field= addonf->field) ; addonf++) @@ -738,12 +857,14 @@ static void make_sortkey(register SORTPARAM *param, } else { - uchar *end= (uchar*) field->pack((char *) to, field->ptr); #ifdef HAVE_purify + uchar *end= (uchar*) field->pack((char *) to, field->ptr); uint length= (uint) ((to + addonf->length) - end); DBUG_ASSERT((int) length >= 0); if (length) bzero(end, length); +#else + (void) field->pack((char *) to, field->ptr); #endif } to+= addonf->length; @@ -757,8 +878,8 @@ static void make_sortkey(register SORTPARAM *param, return; } - -static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count) +static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count, + FILESORT_INFO *table_sort) { uint offset,res_length; byte *to; 
@@ -769,7 +890,7 @@ static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count) offset= param->rec_length-res_length; if ((ha_rows) count > param->max_rows) count=(uint) param->max_rows; - if (!(to= param->sort_form->sort.record_pointers= + if (!(to= table_sort->record_pointers= (byte*) my_malloc(res_length*count, MYF(MY_WME)))) DBUG_RETURN(1); /* purecov: inspected */ for (uchar **end= sort_keys+count ; sort_keys != end ; sort_keys++) @@ -854,6 +975,39 @@ uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, } /* read_to_buffer */ +/* + Put all room used by freed buffer to use in adjacent buffer. Note, that + we can't simply distribute memory evenly between all buffers, because + new areas must not overlap with old ones. + SYNOPSIS + reuse_freed_buff() + queue IN list of non-empty buffers, without freed buffer + reuse IN empty buffer + key_length IN key length +*/ + +void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length) +{ + uchar *reuse_end= reuse->base + reuse->max_keys * key_length; + for (uint i= 0; i < queue->elements; ++i) + { + BUFFPEK *bp= (BUFFPEK *) queue_element(queue, i); + if (bp->base + bp->max_keys * key_length == reuse->base) + { + bp->max_keys+= reuse->max_keys; + return; + } + else if (bp->base == reuse_end) + { + bp->base= reuse->base; + bp->max_keys+= reuse->max_keys; + return; + } + } + DBUG_ASSERT(0); +} + + /* Merge buffers to one buffer SYNOPSIS @@ -883,18 +1037,19 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, ha_rows max_rows,org_max_rows; my_off_t to_start_filepos; uchar *strpos; - BUFFPEK *buffpek,**refpek; + BUFFPEK *buffpek; QUEUE queue; qsort2_cmp cmp; - volatile my_bool *killed= ¤t_thd->killed; - my_bool not_killable; + volatile THD::killed_state *killed= ¤t_thd->killed; + THD::killed_state not_killable; DBUG_ENTER("merge_buffers"); - statistic_increment(filesort_merge_passes, &LOCK_status); + statistic_increment(current_thd->status_var.filesort_merge_passes, + &LOCK_status); if 
(param->not_killable) { killed= ¬_killable; - not_killable= 0; + not_killable= THD::NOT_KILLED; } error=0; @@ -997,29 +1152,8 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, if (!(error= (int) read_to_buffer(from_file,buffpek, rec_length))) { - uchar *base= buffpek->base; - ulong max_keys= buffpek->max_keys; - VOID(queue_remove(&queue,0)); - - /* Put room used by buffer to use in other buffer */ - for (refpek= (BUFFPEK**) &queue_top(&queue); - refpek <= (BUFFPEK**) &queue_end(&queue); - refpek++) - { - buffpek= *refpek; - if (buffpek->base+buffpek->max_keys*rec_length == base) - { - buffpek->max_keys+= max_keys; - break; - } - else if (base+max_keys*rec_length == buffpek->base) - { - buffpek->base= base; - buffpek->max_keys+= max_keys; - break; - } - } + reuse_freed_buff(&queue, buffpek, rec_length); break; /* One buffer have been removed */ } else if (error == -1) @@ -1102,11 +1236,25 @@ static int merge_index(SORTPARAM *param, uchar *sort_buffer, } /* merge_index */ +static uint suffix_length(ulong string_length) +{ + if (string_length < 256) + return 1; + if (string_length < 256L*256L) + return 2; + if (string_length < 256L*256L*256L) + return 3; + return 4; // Can't sort longer than 4G +} + + + /* Calculate length of sort key SYNOPSIS sortlength() + thd Thread handler sortorder Order of items to sort uint s_length Number of items to sort multi_byte_charset (out) @@ -1122,10 +1270,10 @@ static int merge_index(SORTPARAM *param, uchar *sort_buffer, */ static uint -sortlength(SORT_FIELD *sortorder, uint s_length, bool *multi_byte_charset) +sortlength(THD *thd, SORT_FIELD *sortorder, uint s_length, + bool *multi_byte_charset) { reg2 uint length; - THD *thd= current_thd; CHARSET_INFO *cs; *multi_byte_charset= 0; @@ -1133,34 +1281,42 @@ sortlength(SORT_FIELD *sortorder, uint s_length, bool *multi_byte_charset) for (; s_length-- ; sortorder++) { sortorder->need_strxnfrm= 0; + sortorder->suffix_length= 0; if (sortorder->field) { - if 
(sortorder->field->type() == FIELD_TYPE_BLOB) - sortorder->length= thd->variables.max_sort_length; - else + cs= sortorder->field->sort_charset(); + sortorder->length= sortorder->field->sort_length(); + + if (use_strnxfrm((cs=sortorder->field->sort_charset()))) { - sortorder->length=sortorder->field->pack_length(); - if (use_strnxfrm((cs=sortorder->field->sort_charset()))) - { - sortorder->need_strxnfrm= 1; - *multi_byte_charset= 1; - sortorder->length= sortorder->length*cs->strxfrm_multiply; - } + sortorder->need_strxnfrm= 1; + *multi_byte_charset= 1; + sortorder->length= cs->coll->strnxfrmlen(cs, sortorder->length); } if (sortorder->field->maybe_null()) length++; // Place for NULL marker } else { - switch ((sortorder->result_type=sortorder->item->result_type())) { + sortorder->result_type= sortorder->item->result_type(); + if (sortorder->item->result_as_longlong()) + sortorder->result_type= INT_RESULT; + switch (sortorder->result_type) { case STRING_RESULT: sortorder->length=sortorder->item->max_length; + set_if_smaller(sortorder->length, thd->variables.max_sort_length); if (use_strnxfrm((cs=sortorder->item->collation.collation))) { - sortorder->length= sortorder->length*cs->strxfrm_multiply; + sortorder->length= cs->coll->strnxfrmlen(cs, sortorder->length); sortorder->need_strxnfrm= 1; *multi_byte_charset= 1; } + else if (cs == &my_charset_bin) + { + /* Store length last to be able to sort blob/varbinary */ + sortorder->suffix_length= suffix_length(sortorder->length); + sortorder->length+= sortorder->suffix_length; + } break; case INT_RESULT: #if SIZEOF_LONG_LONG > 4 @@ -1169,6 +1325,12 @@ sortlength(SORT_FIELD *sortorder, uint s_length, bool *multi_byte_charset) sortorder->length=4; #endif break; + case DECIMAL_RESULT: + sortorder->length= + my_decimal_get_binary_size(sortorder->item->max_length - + (sortorder->item->decimals ? 
1 : 0), + sortorder->item->decimals); + break; case REAL_RESULT: sortorder->length=sizeof(double); break; @@ -1228,27 +1390,29 @@ get_addon_fields(THD *thd, Field **ptabfield, uint sortlength, uint *plength) uint length= 0; uint fields= 0; uint null_fields= 0; - - /* - If there is a reference to a field in the query add it - to the the set of appended fields. - Note for future refinement: - This this a too strong condition. - Actually we need only the fields referred in the - result set. And for some of them it makes sense to use - the values directly from sorted fields. + query_id_t query_id= thd->query_id; + /* + If there is a reference to a field in the query add it + to the the set of appended fields. + Note for future refinement: + This this a too strong condition. + Actually we need only the fields referred in the + result set. And for some of them it makes sense to use + the values directly from sorted fields. */ *plength= 0; + /* - The following statement is added to avoid sorting in alter_table. - The fact is the filter 'field->query_id != thd->query_id' - doesn't work for alter table + The following statement is added to avoid sorting in alter_table. 
+ The fact is the filter 'field->query_id != thd->query_id' + doesn't work for alter table */ - if (thd->lex->sql_command != SQLCOM_SELECT) + if (thd->lex->sql_command != SQLCOM_SELECT && + thd->lex->sql_command != SQLCOM_INSERT_SELECT) return 0; for (pfield= ptabfield; (field= *pfield) ; pfield++) { - if (field->query_id != thd->query_id) + if (field->query_id != query_id) continue; if (field->flags & BLOB_FLAG) return 0; diff --git a/sql/frm_crypt.cc b/sql/frm_crypt.cc index 8dd70900648..590205e83ab 100644 --- a/sql/frm_crypt.cc +++ b/sql/frm_crypt.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc index 0bbdf84c8d6..2d78999017a 100644 --- a/sql/gen_lex_hash.cc +++ b/sql/gen_lex_hash.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -83,8 +82,17 @@ TODO: #include "mysql_version.h" #include "lex.h" +const char *default_dbug_option="d:t:o,/tmp/gen_lex_hash.trace"; + struct my_option my_long_options[] = { +#ifdef DBUG_OFF + {"debug", '#', "This is a non-debug version. Catch this and exit", + 0,0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0}, +#else + {"debug", '#', "Output debug log", (gptr*) &default_dbug_option, + (gptr*) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, +#endif {"help", '?', "Display help and exit", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit", @@ -108,7 +116,7 @@ hash_lex_struct *get_hash_struct_by_len(hash_lex_struct **root_by_len, { if (*max_len<len){ *root_by_len= (hash_lex_struct *)realloc((char*)*root_by_len, - sizeof(hash_lex_struct)*len); + sizeof(hash_lex_struct)*len); hash_lex_struct *cur, *end= *root_by_len + len; for (cur= *root_by_len + *max_len; cur<end; cur++) cur->first_char= 0; @@ -197,9 +205,10 @@ void insert_symbols() void insert_sql_functions() { - size_t i= 0; + int i= 0; SYMBOL *cur; - for (cur= sql_functions; i<array_elements(sql_functions); cur++, i++){ + for (cur= sql_functions; i < (int) array_elements(sql_functions); cur++, i++) + { hash_lex_struct *root= get_hash_struct_by_len(&root_by_len,cur->length,&max_len); insert_into_hash(root,cur->name,0,-i-1,1); @@ -291,7 +300,7 @@ void print_hash_map(const char *name) char *cur; int i; - printf("uchar %s[%d]= {\n",name,size_hash_map); + printf("static uchar %s[%d]= {\n",name,size_hash_map); for (i=0, cur= hash_map; i<size_hash_map; i++, cur++) { switch(i%4){ @@ -353,6 +362,9 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case '?': usage(0); exit(0); + case '#': + DBUG_PUSH(argument ? 
argument : default_dbug_option); + break; } return 0; } @@ -425,17 +437,36 @@ int check_duplicates() int main(int argc,char **argv) { MY_INIT(argv[0]); + DBUG_PROCESS(argv[0]); if (get_options(argc,(char **) argv)) exit(1); - printf("/* Copyright (C) 2001 MySQL AB\n\ - This software comes with ABSOLUTELY NO WARRANTY. This is free software,\n\ - and you are welcome to modify and redistribute it under the GPL license\n\ - \n*/\n\n"); + /* Broken up to indicate that it's not advice to you, gentle reader. */ + printf("/*\n\n Do " "not " "edit " "this " "file " "directly!\n\n*/\n"); - printf("/* This code is generated by gen_lex_hash.cc that seeks for\ - a perfect\nhash function */\n\n"); + printf("\ +/* Copyright (C) 2001-2004 MySQL AB\n\ +\n\ + This program is free software; you can redistribute it and/or modify\n\ + it under the terms of the GNU General Public License as published by\n\ + the Free Software Foundation; version 2 of the License.\n\ +\n\ + This program is distributed in the hope that it will be useful,\n\ + but WITHOUT ANY WARRANTY; without even the implied warranty of\n\ + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\ + GNU General Public License for more details.\n\ +\n\ + You should have received a copy of the GNU General Public License\n\ + along with this program; see the file COPYING. If not, write to the\n\ + Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston\n\ + MA 02110-1301 USA. */\n\ +\n\ +"); + + /* Broken up to indicate that it's not advice to you, gentle reader. */ + printf("/* Do " "not " "edit " "this " "file! 
This is generated by " + "gen_lex_hash.cc\nthat seeks for a perfect hash function */\n\n"); printf("#include \"lex.h\"\n\n"); calc_length(); @@ -446,16 +477,23 @@ int main(int argc,char **argv) generate_find_structs(); print_find_structs(); - printf("\nunsigned int sql_functions_max_len=%d;\n",max_len); - printf("\nunsigned int symbols_max_len=%d;\n\n",max_len2); + printf("\nstatic unsigned int sql_functions_max_len=%d;\n", max_len); + printf("\nstatic unsigned int symbols_max_len=%d;\n\n", max_len2); - printf -( -"inline SYMBOL *get_hash_symbol(const char *s,\n\ + printf("\ +static inline SYMBOL *get_hash_symbol(const char *s,\n\ unsigned int len,bool function)\n\ {\n\ register uchar *hash_map;\n\ register const char *cur_str= s;\n\ +\n\ + if (len == 0) {\n\ + DBUG_PRINT(\"warning\", (\"get_hash_symbol() received a request for a zero-length symbol, which is probably a mistake.\"));\ + return(NULL);\n\ + }\n" +); + + printf("\ if (function){\n\ if (len>sql_functions_max_len) return 0;\n\ hash_map= sql_functions_map;\n\ @@ -486,7 +524,10 @@ int main(int argc,char **argv) cur_struct= uint4korr(hash_map+\n\ (((uint16)cur_struct + cur_char - first_char)*4));\n\ cur_str++;\n\ - }\n\ + }\n" +); + + printf("\ }else{\n\ if (len>symbols_max_len) return 0;\n\ hash_map= symbols_map;\n\ @@ -516,5 +557,7 @@ int main(int argc,char **argv) }\n\ }\n" ); + my_end(0); + exit(0); } diff --git a/sql/gstream.cc b/sql/gstream.cc index f7d11d76b0c..46e12b6ef3b 100644 --- a/sql/gstream.cc +++ b/sql/gstream.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -83,7 +82,7 @@ bool Gis_read_stream::get_next_number(double *d) } *d = my_strntod(m_charset, (char *)m_cur, - m_limit-m_cur, &endptr, &err); + (uint) (m_limit-m_cur), &endptr, &err); if (err) return 1; if (endptr) @@ -115,6 +114,6 @@ bool Gis_read_stream::check_next_symbol(char symbol) void Gis_read_stream::set_error_msg(const char *msg) { size_t len= strlen(msg); // ok in this context - m_err_msg= (char *) my_realloc(m_err_msg, len + 1, MYF(MY_ALLOW_ZERO_PTR)); + m_err_msg= (char *) my_realloc(m_err_msg, (uint) len + 1, MYF(MY_ALLOW_ZERO_PTR)); memcpy(m_err_msg, msg, len + 1); } diff --git a/sql/gstream.h b/sql/gstream.h index bfbf28851ce..10274635413 100644 --- a/sql/gstream.h +++ b/sql/gstream.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2004 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/examples/ha_archive.cc b/sql/ha_archive.cc index 16ba7605415..2ee514f29c9 100644 --- a/sql/examples/ha_archive.cc +++ b/sql/ha_archive.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -18,10 +17,11 @@ #pragma implementation // gcc: Class implementation #endif -#include "../mysql_priv.h" +#include "mysql_priv.h" -#ifdef HAVE_ARCHIVE_DB +#if defined(HAVE_ARCHIVE_DB) #include "ha_archive.h" +#include <my_dir.h> /* First, if you want to understand storage engines you should look at @@ -115,10 +115,12 @@ */ /* If the archive storage engine has been inited */ -static bool archive_inited= 0; +static bool archive_inited= FALSE; /* Variables for archive share methods */ pthread_mutex_t archive_mutex; static HASH archive_open_tables; +static z_off_t max_zfile_size; +static int zoffset_size; /* The file extension */ #define ARZ ".ARZ" // The data file @@ -134,6 +136,39 @@ static HASH archive_open_tables; #define DATA_BUFFER_SIZE 2 // Size of the data used in the data file #define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption +/* + Number of rows that will force a bulk insert. +*/ +#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2 + + + +/* dummy handlerton - only to have something to return from archive_db_init */ +handlerton archive_hton = { + "ARCHIVE", + SHOW_OPTION_YES, + "Archive storage engine", + DB_TYPE_ARCHIVE_DB, + archive_db_init, + 0, /* slot */ + 0, /* savepoint size. */ + NULL, /* close_connection */ + NULL, /* savepoint */ + NULL, /* rollback to savepoint */ + NULL, /* releas savepoint */ + NULL, /* commit */ + NULL, /* rollback */ + NULL, /* prepare */ + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + HTON_NO_FLAGS +}; + + /* Used for hash table that tracks open tables. 
*/ @@ -159,13 +194,36 @@ static byte* archive_get_key(ARCHIVE_SHARE *share,uint *length, bool archive_db_init() { - archive_inited= 1; - VOID(pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST)); - return (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0, - (hash_get_key) archive_get_key, 0, 0)); + DBUG_ENTER("archive_db_init"); + if (pthread_mutex_init(&archive_mutex, MY_MUTEX_INIT_FAST)) + goto error; + if (hash_init(&archive_open_tables, system_charset_info, 32, 0, 0, + (hash_get_key) archive_get_key, 0, 0)) + { + VOID(pthread_mutex_destroy(&archive_mutex)); + } + else + { + zoffset_size= 2 << ((zlibCompileFlags() >> 6) & 3); + switch (sizeof(z_off_t)) { + case 2: + max_zfile_size= INT_MAX16; + break; + case 8: + max_zfile_size= (z_off_t) LONGLONG_MAX; + break; + case 4: + default: + max_zfile_size= INT_MAX32; + } + archive_inited= TRUE; + DBUG_RETURN(FALSE); + } +error: + have_archive_db= SHOW_OPTION_DISABLED; // If we couldn't use handler + DBUG_RETURN(TRUE); } - /* Release the archive handler. @@ -188,6 +246,16 @@ bool archive_db_end() return FALSE; } +ha_archive::ha_archive(TABLE *table_arg) + :handler(&archive_hton, table_arg), delayed_insert(0), bulk_insert(0) +{ + /* Set our original buffer from pre-allocated memory */ + buffer.set((char *)byte_buffer, IO_SIZE, system_charset_info); + + /* The size of the offset value we will use for position() */ + ref_length = zoffset_size; + DBUG_ASSERT(ref_length <= sizeof(z_off_t)); +} /* This method reads the header of a datafile and returns whether or not it was successful. @@ -239,7 +307,7 @@ error: This method reads the header of a meta file and returns whether or not it was successful. *rows will contain the current number of rows in the data file upon success. 
*/ -int ha_archive::read_meta_file(File meta_file, ulonglong *rows) +int ha_archive::read_meta_file(File meta_file, ha_rows *rows) { uchar meta_buffer[META_BUFFER_SIZE]; ulonglong check_point; @@ -253,13 +321,13 @@ int ha_archive::read_meta_file(File meta_file, ulonglong *rows) /* Parse out the meta data, we ignore version at the moment */ - *rows= uint8korr(meta_buffer + 2); + *rows= (ha_rows)uint8korr(meta_buffer + 2); check_point= uint8korr(meta_buffer + 10); DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0])); DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1])); - DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lld", *rows)); - DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lld", check_point)); + DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows)); + DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point)); DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)meta_buffer[18])); if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) || @@ -274,10 +342,9 @@ int ha_archive::read_meta_file(File meta_file, ulonglong *rows) /* This method writes out the header of a meta file and returns whether or not it was successful. By setting dirty you say whether or not the file represents the actual state of the data file. - Upon ::open() we set to dirty, and upon ::close() we set to clean. If we determine during - a read that the file was dirty we will force a rebuild of this file. + Upon ::open() we set to dirty, and upon ::close() we set to clean. 
*/ -int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty) +int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty) { uchar meta_buffer[META_BUFFER_SIZE]; ulonglong check_point= 0; //Reserved for the future @@ -286,13 +353,13 @@ int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty) meta_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER; meta_buffer[1]= (uchar)ARCHIVE_VERSION; - int8store(meta_buffer + 2, rows); + int8store(meta_buffer + 2, (ulonglong)rows); int8store(meta_buffer + 10, check_point); *(meta_buffer + 18)= (uchar)dirty; DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER)); DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION)); - DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", rows)); - DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point)); + DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong)rows)); + DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point)); DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty)); VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0))); @@ -307,14 +374,17 @@ int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty) /* We create the shared memory space that we will use for the open table. + No matter what we try to get or create a share. This is so that a repair + table operation can occur. + See ha_example.cc for a longer description. 
*/ -ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table) +ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc) { - ARCHIVE_SHARE *share; char meta_file_name[FN_REFLEN]; uint length; char *tmp_name; + DBUG_ENTER("ha_archive::get_share"); pthread_mutex_lock(&archive_mutex); length=(uint) strlen(table_name); @@ -329,68 +399,47 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table) NullS)) { pthread_mutex_unlock(&archive_mutex); - return NULL; + *rc= HA_ERR_OUT_OF_MEM; + DBUG_RETURN(NULL); } share->use_count= 0; share->table_name_length= length; share->table_name= tmp_name; - fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME); - fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME); + share->crashed= FALSE; + share->archive_write_open= FALSE; + fn_format(share->data_file_name,table_name,"",ARZ, + MY_REPLACE_EXT|MY_UNPACK_FILENAME); + fn_format(meta_file_name,table_name,"",ARM, + MY_REPLACE_EXT|MY_UNPACK_FILENAME); strmov(share->table_name,table_name); /* We will use this lock for rows. */ VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST)); if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1) - goto error; + share->crashed= TRUE; - if (read_meta_file(share->meta_file, &share->rows_recorded)) - { - /* - The problem here is that for some reason, probably a crash, the meta - file has been corrupted. So what do we do? Well we try to rebuild it - ourself. Once that happens, we reread it, but if that fails we just - call it quits and return an error. - */ - if (rebuild_meta_file(share->table_name, share->meta_file)) - goto error; - if (read_meta_file(share->meta_file, &share->rows_recorded)) - goto error; - } /* After we read, we set the file to dirty. When we close, we will do the - opposite. + opposite. If the meta file will not open we assume it is crashed and + leave it up to the user to fix. 
*/ - (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE); + if (read_meta_file(share->meta_file, &share->rows_recorded)) + share->crashed= TRUE; - /* - It is expensive to open and close the data files and since you can't have - a gzip file that can be both read and written we keep a writer open - that is shared amoung all open tables. - */ - if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL) - goto error2; - if (my_hash_insert(&archive_open_tables, (byte*) share)) - goto error3; + VOID(my_hash_insert(&archive_open_tables, (byte*) share)); thr_lock_init(&share->lock); } share->use_count++; + DBUG_PRINT("info", ("archive table %.*s has %d open handles now", + share->table_name_length, share->table_name, + share->use_count)); + if (share->crashed) + *rc= HA_ERR_CRASHED_ON_USAGE; pthread_mutex_unlock(&archive_mutex); - return share; - -error3: - /* We close, but ignore errors since we already have errors */ - (void)gzclose(share->archive_write); -error2: - my_close(share->meta_file,MYF(0)); -error: - pthread_mutex_unlock(&archive_mutex); - VOID(pthread_mutex_destroy(&share->mutex)); - my_free((gptr) share, MYF(0)); - - return NULL; + DBUG_RETURN(share); } @@ -398,33 +447,71 @@ error: Free the share. See ha_example.cc for a description. 
*/ -int ha_archive::free_share(ARCHIVE_SHARE *share) +int ha_archive::free_share() { int rc= 0; + DBUG_ENTER("ha_archive::free_share"); + DBUG_PRINT("info", ("archive table %.*s has %d open handles on entrance", + share->table_name_length, share->table_name, + share->use_count)); + pthread_mutex_lock(&archive_mutex); if (!--share->use_count) { hash_delete(&archive_open_tables, (byte*) share); thr_lock_delete(&share->lock); VOID(pthread_mutex_destroy(&share->mutex)); - (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE); - if (gzclose(share->archive_write) == Z_ERRNO) - rc= 1; + if (share->crashed) + (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE); + else + (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE); + if (share->archive_write_open) + if (gzclose(share->archive_write) == Z_ERRNO) + rc= 1; if (my_close(share->meta_file, MYF(0))) rc= 1; my_free((gptr) share, MYF(0)); } pthread_mutex_unlock(&archive_mutex); - return rc; + DBUG_RETURN(rc); +} + +int ha_archive::init_archive_writer() +{ + DBUG_ENTER("ha_archive::init_archive_writer"); + (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE); + + /* + It is expensive to open and close the data files and since you can't have + a gzip file that can be both read and written we keep a writer open + that is shared amoung all open tables. + */ + if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL) + { + share->crashed= TRUE; + DBUG_RETURN(1); + } + share->archive_write_open= TRUE; + info(HA_STATUS_TIME); + share->approx_file_size= (ulong) data_file_length; + DBUG_RETURN(0); } -/* +/* We just implement one additional file extension. */ +static const char *ha_archive_exts[] = { + ARZ, + ARM, + NullS +}; + const char **ha_archive::bas_ext() const -{ static const char *ext[]= { ARZ, ARN, ARM, NullS }; return ext; } +{ + return ha_archive_exts; +} /* @@ -433,21 +520,44 @@ const char **ha_archive::bas_ext() const Init out lock. 
We open the file we will read from. */ -int ha_archive::open(const char *name, int mode, uint test_if_locked) +int ha_archive::open(const char *name, int mode, uint open_options) { + int rc= 0; DBUG_ENTER("ha_archive::open"); - if (!(share= get_share(name, table))) - DBUG_RETURN(1); + DBUG_PRINT("info", ("archive table was opened for crash %s", + (open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no")); + share= get_share(name, &rc); + + if (rc == HA_ERR_CRASHED_ON_USAGE && !(open_options & HA_OPEN_FOR_REPAIR)) + { + /* purecov: begin inspected */ + free_share(); + DBUG_RETURN(rc); + /* purecov: end */ + } + else if (rc == HA_ERR_OUT_OF_MEM) + { + DBUG_RETURN(rc); + } + thr_lock_data_init(&share->lock,&lock,NULL); if ((archive= gzopen(share->data_file_name, "rb")) == NULL) { - (void)free_share(share); //We void since we already have an error - DBUG_RETURN(errno ? errno : -1); + if (errno == EROFS || errno == EACCES) + DBUG_RETURN(my_errno= errno); + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); } - DBUG_RETURN(0); + DBUG_PRINT("info", ("archive table was crashed %s", + rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no")); + if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR) + { + DBUG_RETURN(0); + } + else + DBUG_RETURN(rc); } @@ -477,7 +587,7 @@ int ha_archive::close(void) if (gzclose(archive) == Z_ERRNO) rc= 1; /* then also close share */ - rc|= free_share(share); + rc|= free_share(); DBUG_RETURN(rc); } @@ -520,7 +630,7 @@ int ha_archive::create(const char *name, TABLE *table_arg, error= my_errno; goto error; } - if ((archive= gzdopen(create_file, "wb")) == NULL) + if ((archive= gzdopen(dup(create_file), "wb")) == NULL) { error= errno; goto error2; @@ -552,6 +662,56 @@ error: DBUG_RETURN(error ? error : -1); } +/* + This is where the actual row is written out. 
+*/ +int ha_archive::real_write_row(byte *buf, gzFile writer) +{ + z_off_t written, total_row_length; + uint *ptr, *end; + DBUG_ENTER("ha_archive::real_write_row"); + total_row_length= table->s->reclength; + for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields; + ptr != end; ptr++) + total_row_length+= ((Field_blob*) table->field[*ptr])->get_length(); + if (share->approx_file_size > max_zfile_size - total_row_length) + { + info(HA_STATUS_TIME); + share->approx_file_size= (ulong) data_file_length; + if (share->approx_file_size > max_zfile_size - total_row_length) + DBUG_RETURN(HA_ERR_RECORD_FILE_FULL); + } + share->approx_file_size+= total_row_length; + written= gzwrite(writer, buf, table->s->reclength); + DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %lu", (int) written, + table->s->reclength)); + if (!delayed_insert || !bulk_insert) + share->dirty= TRUE; + + if (written != (z_off_t)table->s->reclength) + DBUG_RETURN(errno ? errno : -1); + /* + We should probably mark the table as damagaged if the record is written + but the blob fails. + */ + for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ; + ptr != end ; + ptr++) + { + char *data_ptr; + uint32 size= ((Field_blob*) table->field[*ptr])->get_length(); + + if (size) + { + ((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr); + written= gzwrite(writer, data_ptr, (unsigned)size); + if (written != (z_off_t)size) + DBUG_RETURN(errno ? errno : -1); + } + } + DBUG_RETURN(0); +} + /* Look at ha_archive::open() for an explanation of the row format. @@ -562,48 +722,53 @@ error: for implementing start_bulk_insert() is that we could skip setting dirty to true each time. 
*/ -int ha_archive::write_row(byte * buf) +int ha_archive::write_row(byte *buf) { - z_off_t written; - Field_blob **field; + int rc; DBUG_ENTER("ha_archive::write_row"); - statistic_increment(ha_write_count,&LOCK_status); + if (share->crashed) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + + statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); pthread_mutex_lock(&share->mutex); - written= gzwrite(share->archive_write, buf, table->reclength); - DBUG_PRINT("ha_archive::get_row", ("Wrote %d bytes expected %d", written, table->reclength)); - share->dirty= TRUE; - if (written != (z_off_t)table->reclength) - goto error; + if (!share->archive_write_open) + if (init_archive_writer()) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + /* - We should probably mark the table as damagaged if the record is written - but the blob fails. + Varchar structures are constant in size but are not cleaned up request + to request. The following sets all unused space to null to improve + compression. */ - for (field= table->blob_field ; *field ; field++) + for (Field **field=table->field ; *field ; field++) { - char *ptr; - uint32 size= (*field)->get_length(); - - if (size) + DBUG_PRINT("archive",("Pack is %d\n", (*field)->pack_length())); + DBUG_PRINT("archive",("MyPack is %d\n", (*field)->data_length((char*) buf + (*field)->offset()))); + if ((*field)->real_type() == MYSQL_TYPE_VARCHAR) { - (*field)->get_ptr(&ptr); - written= gzwrite(share->archive_write, ptr, (unsigned)size); - if (written != (z_off_t)size) - goto error; +#ifndef DBUG_OFF + uint actual_length= (*field)->data_length((char*) buf + (*field)->offset()); + uint offset= (*field)->offset() + actual_length + + (actual_length > 255 ? 
2 : 1); + DBUG_PRINT("archive",("Offset is %d -> %d\n", actual_length, offset)); +#endif + /* + if ((*field)->pack_length() + (*field)->offset() != offset) + bzero(buf + offset, (size_t)((*field)->pack_length() + (actual_length > 255 ? 2 : 1) - (*field)->data_length)); + */ } } + share->rows_recorded++; + rc= real_write_row(buf, share->archive_write); pthread_mutex_unlock(&share->mutex); - DBUG_RETURN(0); -error: - pthread_mutex_unlock(&share->mutex); - DBUG_RETURN(errno ? errno : -1); + DBUG_RETURN(rc); } - /* All calls that need to scan the table start with this method. If we are told that it is a table scan we rewind the file to the beginning, otherwise @@ -613,11 +778,15 @@ error: int ha_archive::rnd_init(bool scan) { DBUG_ENTER("ha_archive::rnd_init"); + + if (share->crashed) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); /* We rewind the file so that we can read from the beginning if scan */ if (scan) { scan_rows= share->rows_recorded; + DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows)); records= 0; /* @@ -629,6 +798,7 @@ int ha_archive::rnd_init(bool scan) pthread_mutex_lock(&share->mutex); if (share->dirty == TRUE) { + DBUG_PRINT("info", ("archive flushing out rows for scan")); gzflush(share->archive_write, Z_SYNC_FLUSH); share->dirty= FALSE; } @@ -650,13 +820,14 @@ int ha_archive::rnd_init(bool scan) int ha_archive::get_row(gzFile file_to_read, byte *buf) { int read; // Bytes read, gzread() returns int + uint *ptr, *end; char *last; size_t total_blob_length= 0; - Field_blob **field; DBUG_ENTER("ha_archive::get_row"); - read= gzread(file_to_read, buf, table->reclength); - DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %d", read, table->reclength)); + read= gzread(file_to_read, buf, table->s->reclength); + DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", (int) read, + table->s->reclength)); if (read == Z_STREAM_ERROR) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); @@ -665,28 +836,35 @@ int ha_archive::get_row(gzFile 
file_to_read, byte *buf) if (read == 0) DBUG_RETURN(HA_ERR_END_OF_FILE); - /* If the record is the wrong size, the file is probably damaged */ - if ((ulong) read != table->reclength) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + /* + If the record is the wrong size, the file is probably damaged, unless + we are dealing with a delayed insert or a bulk insert. + */ + if ((ulong) read != table->s->reclength) + DBUG_RETURN(HA_ERR_END_OF_FILE); /* Calculate blob length, we use this for our buffer */ - for (field=table->blob_field; *field ; field++) - total_blob_length += (*field)->get_length(); + for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; + ptr != end ; + ptr++) + total_blob_length += ((Field_blob*) table->field[*ptr])->get_length(); /* Adjust our row buffer if we need be */ buffer.alloc(total_blob_length); last= (char *)buffer.ptr(); /* Loop through our blobs and read them */ - for (field=table->blob_field; *field ; field++) + for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; + ptr != end ; + ptr++) { - size_t size= (*field)->get_length(); + size_t size= ((Field_blob*) table->field[*ptr])->get_length(); if (size) { read= gzread(file_to_read, last, size); if ((size_t) read != size) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - (*field)->set_ptr(size, last); + DBUG_RETURN(HA_ERR_END_OF_FILE); + ((Field_blob*) table->field[*ptr])->set_ptr(size, last); last += size; } } @@ -704,11 +882,15 @@ int ha_archive::rnd_next(byte *buf) int rc; DBUG_ENTER("ha_archive::rnd_next"); + if (share->crashed) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + if (!scan_rows) DBUG_RETURN(HA_ERR_END_OF_FILE); scan_rows--; - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); current_position= gztell(archive); rc= get_row(archive, buf); @@ -720,7 +902,7 @@ int ha_archive::rnd_next(byte *buf) } -/* +/* Thanks to the table flag HA_REC_NOT_IN_SEQ this will be called after each call 
to ha_archive::rnd_next() if an ordering of the rows is needed. @@ -729,7 +911,7 @@ int ha_archive::rnd_next(byte *buf) void ha_archive::position(const byte *record) { DBUG_ENTER("ha_archive::position"); - ha_store_ptr(ref, ref_length, current_position); + my_store_ptr(ref, ref_length, current_position); DBUG_VOID_RETURN; } @@ -744,68 +926,30 @@ void ha_archive::position(const byte *record) int ha_archive::rnd_pos(byte * buf, byte *pos) { DBUG_ENTER("ha_archive::rnd_pos"); - statistic_increment(ha_read_rnd_count,&LOCK_status); - current_position= ha_get_ptr(pos, ref_length); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); + current_position= (z_off_t)my_get_ptr(pos, ref_length); (void)gzseek(archive, current_position, SEEK_SET); DBUG_RETURN(get_row(archive, buf)); } /* - This method rebuilds the meta file. It does this by walking the datafile and - rewriting the meta file. + This method repairs the meta file. It does this by walking the datafile and + rewriting the meta file. Currently it does this by calling optimize with + the extended flag. */ -int ha_archive::rebuild_meta_file(char *table_name, File meta_file) +int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt) { - int rc; - byte *buf; - ulonglong rows_recorded= 0; - gzFile rebuild_file; /* Archive file we are working with */ - char data_file_name[FN_REFLEN]; - DBUG_ENTER("ha_archive::rebuild_meta_file"); - - /* - Open up the meta file to recreate it. - */ - fn_format(data_file_name, table_name, "", ARZ, - MY_REPLACE_EXT|MY_UNPACK_FILENAME); - if ((rebuild_file= gzopen(data_file_name, "rb")) == NULL) - DBUG_RETURN(errno ? errno : -1); - - if ((rc= read_data_header(rebuild_file))) - goto error; + DBUG_ENTER("ha_archive::repair"); + check_opt->flags= T_EXTEND; + int rc= optimize(thd, check_opt); - /* - We malloc up the buffer we will use for counting the rows. - I know, this malloc'ing memory but this should be a very - rare event. 
- */ - if (!(buf= (byte*) my_malloc(table->rec_buff_length > sizeof(ulonglong) +1 ? - table->rec_buff_length : sizeof(ulonglong) +1 , - MYF(MY_WME)))) - { - rc= HA_ERR_CRASHED_ON_USAGE; - goto error; - } + if (rc) + DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR); - while (!(rc= get_row(rebuild_file, buf))) - rows_recorded++; - - /* - Only if we reach the end of the file do we assume we can rewrite. - At this point we reset rc to a non-message state. - */ - if (rc == HA_ERR_END_OF_FILE) - { - (void)write_meta_file(meta_file, rows_recorded, FALSE); - rc= 0; - } - - my_free((gptr) buf, MYF(0)); -error: - gzclose(rebuild_file); - - DBUG_RETURN(rc); + share->crashed= FALSE; + DBUG_RETURN(0); } /* @@ -815,56 +959,117 @@ error: int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) { DBUG_ENTER("ha_archive::optimize"); - int read; // Bytes read, gzread() returns int - gzFile reader, writer; - char block[IO_SIZE]; + int rc; + gzFile writer; char writer_filename[FN_REFLEN]; + /* Open up the writer if we haven't yet */ + if (!share->archive_write_open) + init_archive_writer(); + + /* Flush any waiting data */ + gzflush(share->archive_write, Z_SYNC_FLUSH); + /* Lets create a file to contain the new data */ fn_format(writer_filename, share->table_name, "", ARN, MY_REPLACE_EXT|MY_UNPACK_FILENAME); - /* Closing will cause all data waiting to be flushed, to be flushed */ - gzclose(share->archive_write); + if ((writer= gzopen(writer_filename, "wb")) == NULL) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - if ((reader= gzopen(share->data_file_name, "rb")) == NULL) - DBUG_RETURN(-1); + /* + An extended rebuild is a lot more effort. We open up each row and re-record it. + Any dead rows are removed (aka rows that may have been partially recorded). 
+ */ - if ((writer= gzopen(writer_filename, "wb")) == NULL) + if (check_opt->flags == T_EXTEND) { - gzclose(reader); - DBUG_RETURN(-1); - } + byte *buf; - while ((read= gzread(reader, block, IO_SIZE))) - gzwrite(writer, block, read); + /* + First we create a buffer that we can use for reading rows, and can pass + to get_row(). + */ + if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME)))) + { + rc= HA_ERR_OUT_OF_MEM; + goto error; + } - gzclose(reader); - gzclose(writer); + /* + Now we will rewind the archive file so that we are positioned at the + start of the file. + */ + rc= read_data_header(archive); + + /* + Assuming now error from rewinding the archive file, we now write out the + new header for out data file. + */ + if (!rc) + rc= write_data_header(writer); + + /* + On success of writing out the new header, we now fetch each row and + insert it into the new archive file. + */ + if (!rc) + { + share->rows_recorded= 0; + while (!(rc= get_row(archive, buf))) + { + real_write_row(buf, writer); + share->rows_recorded++; + } + } + DBUG_PRINT("info", ("recovered %lu archive rows", + (ulong) share->rows_recorded)); + + my_free((char*)buf, MYF(0)); + if (rc && rc != HA_ERR_END_OF_FILE) + goto error; + } + else + { + /* + The quick method is to just read the data raw, and then compress it directly. + */ + int read; // Bytes read, gzread() returns int + char block[IO_SIZE]; + if (gzrewind(archive) == -1) + { + rc= HA_ERR_CRASHED_ON_USAGE; + goto error; + } + + while ((read= gzread(archive, block, IO_SIZE))) + gzwrite(writer, block, read); + } + + gzflush(writer, Z_SYNC_FLUSH); + share->dirty= FALSE; + gzclose(share->archive_write); + share->archive_write= writer; my_rename(writer_filename,share->data_file_name,MYF(0)); - /* - We reopen the file in case some IO is waiting to go through. - In theory the table is closed right after this operation, - but it is possible for IO to still happen. - I may be being a bit too paranoid right here. 
+ /* + Now we need to reopen our read descriptor since it has changed. */ - if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL) - DBUG_RETURN(errno ? errno : -1); - share->dirty= FALSE; + gzclose(archive); + if ((archive= gzopen(share->data_file_name, "rb")) == NULL) + { + rc= HA_ERR_CRASHED_ON_USAGE; + goto error; + } + DBUG_RETURN(0); -} +error: + gzclose(writer); -/* - No transactions yet, so this is pretty dull. -*/ -int ha_archive::external_lock(THD *thd, int lock_type) -{ - DBUG_ENTER("ha_archive::external_lock"); - DBUG_RETURN(0); + DBUG_RETURN(rc); } /* @@ -874,6 +1079,11 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { + if (lock_type == TL_WRITE_DELAYED) + delayed_insert= TRUE; + else + delayed_insert= FALSE; + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { /* @@ -908,108 +1118,144 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd, } -/****************************************************************************** - - Everything below here is default, please look at ha_example.cc for - descriptions. - - ******************************************************************************/ - -int ha_archive::update_row(const byte * old_data, byte * new_data) +/* + Hints for optimizer, see ha_tina for more information +*/ +int ha_archive::info(uint flag) { + DBUG_ENTER("ha_archive::info"); + /* + This should be an accurate number now, though bulk and delayed inserts can + cause the number to be inaccurate. 
+ */ + records= share->rows_recorded; + deleted= 0; + /* Costs quite a bit more to get all information */ + if (flag & HA_STATUS_TIME) + { + MY_STAT file_stat; // Stat information for the data file - DBUG_ENTER("ha_archive::update_row"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} + VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME))); -int ha_archive::delete_row(const byte * buf) -{ - DBUG_ENTER("ha_archive::delete_row"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} + mean_rec_length= table->s->reclength + buffer.alloced_length(); + data_file_length= file_stat.st_size; + create_time= file_stat.st_ctime; + update_time= file_stat.st_mtime; + max_data_file_length= share->rows_recorded * mean_rec_length; + } + delete_length= 0; + index_file_length=0; -int ha_archive::index_read(byte * buf, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) -{ - DBUG_ENTER("ha_archive::index_read"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); + DBUG_RETURN(0); } -int ha_archive::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) + +/* + This method tells us that a bulk insert operation is about to occur. We set + a flag which will keep write_row from saying that its data is dirty. This in + turn will keep selects from causing a sync to occur. + Basically, yet another optimizations to keep compression working well. +*/ +void ha_archive::start_bulk_insert(ha_rows rows) { - DBUG_ENTER("ha_archive::index_read_idx"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); + DBUG_ENTER("ha_archive::start_bulk_insert"); + if (!rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT) + bulk_insert= TRUE; + DBUG_VOID_RETURN; } -int ha_archive::index_next(byte * buf) +/* + Other side of start_bulk_insert, is end_bulk_insert. Here we turn off the bulk insert + flag, and set the share dirty so that the next select will call sync for us. 
+*/ +int ha_archive::end_bulk_insert() { - DBUG_ENTER("ha_archive::index_next"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); + DBUG_ENTER("ha_archive::end_bulk_insert"); + bulk_insert= FALSE; + share->dirty= TRUE; + DBUG_RETURN(0); } -int ha_archive::index_prev(byte * buf) +/* + We cancel a truncate command. The only way to delete an archive table is to drop it. + This is done for security reasons. In a later version we will enable this by + allowing the user to select a different row format. +*/ +int ha_archive::delete_all_rows() { - DBUG_ENTER("ha_archive::index_prev"); + DBUG_ENTER("ha_archive::delete_all_rows"); DBUG_RETURN(HA_ERR_WRONG_COMMAND); } -int ha_archive::index_first(byte * buf) +/* + We just return state if asked. +*/ +bool ha_archive::is_crashed() const { - DBUG_ENTER("ha_archive::index_first"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); + DBUG_ENTER("ha_archive::is_crashed"); + DBUG_RETURN(share->crashed); } -int ha_archive::index_last(byte * buf) +/* + Simple scan of the tables to make sure everything is ok. +*/ + +int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt) { - DBUG_ENTER("ha_archive::index_last"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} + int rc= 0; + byte *buf; + const char *old_proc_info=thd->proc_info; + ha_rows count= share->rows_recorded; + DBUG_ENTER("ha_archive::check"); + thd->proc_info= "Checking table"; + /* Flush any waiting data */ + gzflush(share->archive_write, Z_SYNC_FLUSH); -int ha_archive::info(uint flag) -{ - DBUG_ENTER("ha_archive::info"); + /* + First we create a buffer that we can use for reading rows, and can pass + to get_row(). + */ + if (!(buf= (byte*) my_malloc(table->s->reclength, MYF(MY_WME)))) + rc= HA_ERR_OUT_OF_MEM; - /* This is a lie, but you don't want the optimizer to see zero or 1 */ - records= share->rows_recorded; - deleted= 0; + /* + Now we will rewind the archive file so that we are positioned at the + start of the file. 
+ */ + if (!rc) + read_data_header(archive); - DBUG_RETURN(0); -} + if (!rc) + while (!(rc= get_row(archive, buf))) + count--; -int ha_archive::extra(enum ha_extra_function operation) -{ - DBUG_ENTER("ha_archive::extra"); - DBUG_RETURN(0); -} + my_free((char*)buf, MYF(0)); -int ha_archive::reset(void) -{ - DBUG_ENTER("ha_archive::reset"); - DBUG_RETURN(0); -} + thd->proc_info= old_proc_info; -ha_rows ha_archive::records_in_range(uint inx, key_range *min_key, - key_range *max_key) -{ - DBUG_ENTER("ha_archive::records_in_range "); - DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND + if ((rc && rc != HA_ERR_END_OF_FILE) || count) + { + share->crashed= FALSE; + DBUG_RETURN(HA_ADMIN_CORRUPT); + } + else + { + DBUG_RETURN(HA_ADMIN_OK); + } } /* - We cancel a truncate command. The only way to delete an archive table is to drop it. - This is done for security reasons. In a later version we will enable this by - allowing the user to select a different row format. + Check and repair the table if needed. */ -int ha_archive::delete_all_rows() +bool ha_archive::check_and_repair(THD *thd) { - DBUG_ENTER("ha_archive::delete_all_rows"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); + HA_CHECK_OPT check_opt; + DBUG_ENTER("ha_archive::check_and_repair"); + + check_opt.init(); + + DBUG_RETURN(repair(thd, &check_opt)); } #endif /* HAVE_ARCHIVE_DB */ diff --git a/sql/examples/ha_archive.h b/sql/ha_archive.h index 3c5dccfdb6f..76765b98bc9 100644 --- a/sql/examples/ha_archive.h +++ b/sql/ha_archive.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -32,10 +31,13 @@ typedef struct st_archive_share { uint table_name_length,use_count; pthread_mutex_t mutex; THR_LOCK lock; - File meta_file; /* Meta file we use */ - gzFile archive_write; /* Archive file we are working with */ - bool dirty; /* Flag for if a flush should occur */ - ulonglong rows_recorded; /* Number of rows in tables */ + File meta_file; /* Meta file we use */ + gzFile archive_write; /* Archive file we are working with */ + bool archive_write_open; + bool dirty; /* Flag for if a flush should occur */ + bool crashed; /* Meta file is crashed */ + ha_rows rows_recorded; /* Number of rows in tables */ + z_off_t approx_file_size; /* Approximate archive data file size */ } ARCHIVE_SHARE; /* @@ -52,17 +54,12 @@ class ha_archive: public handler z_off_t current_position; /* The position of the row we just read */ byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */ String buffer; /* Buffer used for blob storage */ - ulonglong scan_rows; /* Number of rows left in scan */ + ha_rows scan_rows; /* Number of rows left in scan */ + bool delayed_insert; /* If the insert is delayed */ + bool bulk_insert; /* If we are performing a bulk insert */ public: - ha_archive(TABLE *table): handler(table) - { - /* Set our original buffer from pre-allocated memory */ - buffer.set((char*)byte_buffer, IO_SIZE, system_charset_info); - - /* The size of the offset value we will use for position() */ - ref_length = sizeof(z_off_t); - } + ha_archive(TABLE *table_arg); ~ha_archive() { } @@ -72,59 +69,45 @@ public: ulong table_flags() const { return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT | - HA_FILE_BASED); + HA_FILE_BASED | HA_CAN_INSERT_DELAYED | HA_CAN_GEOMETRY); } ulong index_flags(uint idx, uint part, bool all_parts) const { return 0; } - /* - Have to put something here, there is no real limit as far as - archive is 
concerned. - */ - uint max_supported_record_length() const { return UINT_MAX; } - /* - Called in test_quick_select to determine if indexes should be used. - */ - virtual double scan_time() { return (double) (records) / 20.0+10; } - /* The next method will never be called */ - virtual double read_time(uint index, uint ranges, ha_rows rows) - { return (double) rows / 20.0+1; } int open(const char *name, int mode, uint test_if_locked); int close(void); int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); + int real_write_row(byte *buf, gzFile writer); int delete_all_rows(); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); int rnd_init(bool scan=1); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); int get_row(gzFile file_to_read, byte *buf); - int read_meta_file(File meta_file, ulonglong *rows); - int write_meta_file(File meta_file, ulonglong rows, bool dirty); - ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table); - int free_share(ARCHIVE_SHARE *share); - int rebuild_meta_file(char *table_name, File meta_file); + int read_meta_file(File meta_file, ha_rows *rows); + int write_meta_file(File meta_file, ha_rows rows, bool dirty); + ARCHIVE_SHARE *get_share(const char *table_name, int *rc); + int free_share(); + int init_archive_writer(); + bool auto_repair() const { return 1; } // For the moment we just do this int read_data_header(gzFile file_to_read); int write_data_header(gzFile file_to_write); void position(const byte *record); int info(uint); - int extra(enum ha_extra_function operation); - int reset(void); - int external_lock(THD *thd, int lock_type); - ha_rows records_in_range(uint inx, key_range 
*min_key, key_range *max_key); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); int optimize(THD* thd, HA_CHECK_OPT* check_opt); + int repair(THD* thd, HA_CHECK_OPT* check_opt); + void start_bulk_insert(ha_rows rows); + int end_bulk_insert(); + enum row_type get_row_type() const + { + return ROW_TYPE_COMPRESSED; + } THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); + bool is_crashed() const; + int check(THD* thd, HA_CHECK_OPT* check_opt); + bool check_and_repair(THD *thd); }; bool archive_db_init(void); diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index e8dbf7ab12d..2a5fe775ca6 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -103,7 +102,39 @@ static int write_status(DB *status_block, char *buff, uint length); static void update_status(BDB_SHARE *share, TABLE *table); static void berkeley_noticecall(DB_ENV *db_env, db_notices notice); - +static int berkeley_close_connection(THD *thd); +static int berkeley_commit(THD *thd, bool all); +static int berkeley_rollback(THD *thd, bool all); + +handlerton berkeley_hton = { + "BerkeleyDB", + SHOW_OPTION_YES, + "Supports transactions and page-level locking", + DB_TYPE_BERKELEY_DB, + berkeley_init, + 0, /* slot */ + 0, /* savepoint size */ + berkeley_close_connection, + NULL, /* savepoint_set */ + NULL, /* savepoint_rollback */ + NULL, /* savepoint_release */ + berkeley_commit, + berkeley_rollback, + NULL, /* prepare */ + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + HTON_CLOSE_CURSORS_AT_COMMIT +}; + +typedef struct st_berkeley_trx_data { + DB_TXN *all; + DB_TXN *stmt; + uint bdb_lock_count; +} berkeley_trx_data; /* General functions */ @@ -111,6 +142,9 @@ bool berkeley_init(void) { DBUG_ENTER("berkeley_init"); + if (have_berkeley_db != SHOW_OPTION_YES) + goto error; + if (!berkeley_tmpdir) berkeley_tmpdir=mysql_tmpdir; if (!berkeley_home) @@ -136,7 +170,7 @@ bool berkeley_init(void) berkeley_log_file_size= max(berkeley_log_file_size, 10*1024*1024L); if (db_env_create(&db_env,0)) - DBUG_RETURN(1); /* purecov: inspected */ + goto error; db_env->set_errcall(db_env,berkeley_print_error); db_env->set_errpfx(db_env,"bdb"); db_env->set_noticecall(db_env, berkeley_noticecall); @@ -164,16 +198,18 @@ bool berkeley_init(void) DB_INIT_LOG | DB_INIT_MPOOL | DB_INIT_TXN | DB_CREATE | DB_THREAD, 0666)) { - db_env->close(db_env,0); /* purecov: inspected */ - db_env=0; /* purecov: inspected */ 
- goto err; + db_env->close(db_env,0); + db_env=0; + goto error; } (void) hash_init(&bdb_open_tables,system_charset_info,32,0,0, (hash_get_key) bdb_get_key,0,0); pthread_mutex_init(&bdb_mutex,MY_MUTEX_INIT_FAST); -err: - DBUG_RETURN(db_env == 0); + DBUG_RETURN(FALSE); +error: + have_berkeley_db= SHOW_OPTION_DISABLED; // If we couldn't use handler + DBUG_RETURN(TRUE); } @@ -191,6 +227,13 @@ bool berkeley_end(void) DBUG_RETURN(error != 0); } +static int berkeley_close_connection(THD *thd) +{ + my_free((gptr)thd->ha_data[berkeley_hton.slot], MYF(0)); + return 0; +} + + bool berkeley_flush_logs() { int error; @@ -209,26 +252,29 @@ bool berkeley_flush_logs() DBUG_RETURN(result); } - -int berkeley_commit(THD *thd, void *trans) +static int berkeley_commit(THD *thd, bool all) { DBUG_ENTER("berkeley_commit"); - DBUG_PRINT("trans",("ending transaction %s", - trans == thd->transaction.stmt.bdb_tid ? "stmt" : "all")); - int error=txn_commit((DB_TXN*) trans,0); + DBUG_PRINT("trans",("ending transaction %s", all ? "all" : "stmt")); + berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; + DB_TXN **txn= all ? &trx->all : &trx->stmt; + int error=txn_commit(*txn,0); + *txn=0; #ifndef DBUG_OFF if (error) - DBUG_PRINT("error",("error: %d",error)); /* purecov: inspected */ + DBUG_PRINT("error",("error: %d",error)); #endif DBUG_RETURN(error); } -int berkeley_rollback(THD *thd, void *trans) +static int berkeley_rollback(THD *thd, bool all) { DBUG_ENTER("berkeley_rollback"); - DBUG_PRINT("trans",("aborting transaction %s", - trans == thd->transaction.stmt.bdb_tid ? "stmt" : "all")); - int error=txn_abort((DB_TXN*) trans); + DBUG_PRINT("trans",("aborting transaction %s", all ? "all" : "stmt")); + berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; + DB_TXN **txn= all ? 
&trx->all : &trx->stmt; + int error=txn_abort(*txn); + *txn=0; DBUG_RETURN(error); } @@ -262,7 +308,7 @@ int berkeley_show_logs(Protocol *protocol) { protocol->prepare_for_resend(); protocol->store(*a, system_charset_info); - protocol->store("BDB", 3, system_charset_info); + protocol->store(STRING_WITH_LEN("BDB"), system_charset_info); if (f && *f && strcmp(*a, *f) == 0) { f++; @@ -341,11 +387,27 @@ void berkeley_cleanup_log_files(void) ** Berkeley DB tables *****************************************************************************/ -static const char *ha_bdb_bas_exts[]= { ha_berkeley_ext, NullS }; -const char **ha_berkeley::bas_ext() const -{ return ha_bdb_bas_exts; } +ha_berkeley::ha_berkeley(TABLE *table_arg) + :handler(&berkeley_hton, table_arg), alloc_ptr(0), rec_buff(0), file(0), + int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ | + HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT | + HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | + HA_CAN_GEOMETRY | + HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX), + changed_rows(0), last_dup_key((uint) -1), version(0), using_ignore(0) +{} +static const char *ha_berkeley_exts[] = { + ha_berkeley_ext, + NullS +}; + +const char **ha_berkeley::bas_ext() const +{ + return ha_berkeley_exts; +} + ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const { ulong flags= (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_KEYREAD_ONLY @@ -360,7 +422,8 @@ ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const } switch (table->key_info[idx].key_part[i].field->key_type()) { case HA_KEYTYPE_TEXT: - case HA_KEYTYPE_VARTEXT: + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: /* As BDB stores only one copy of equal strings, we can't use key read on these. Binary collations do support key read though. 
@@ -377,6 +440,15 @@ ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const } +void ha_berkeley::get_auto_primary_key(byte *to) +{ + pthread_mutex_lock(&share->mutex); + share->auto_ident++; + int5store(to,share->auto_ident); + pthread_mutex_unlock(&share->mutex); +} + + static int berkeley_cmp_hidden_key(DB* file, const DBT *new_key, const DBT *saved_key) { @@ -395,9 +467,11 @@ berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key) KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts; uint key_length=new_key->size; + DBUG_DUMP("key_in_index", saved_key_ptr, saved_key->size); for (; key_part != end && (int) key_length > 0; key_part++) { int cmp; + uint length; if (key_part->null_bit) { if (*new_key_ptr != *saved_key_ptr++) @@ -406,11 +480,12 @@ berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key) if (!*new_key_ptr++) continue; } - if ((cmp=key_part->field->pack_cmp(new_key_ptr,saved_key_ptr, - key_part->length))) + if ((cmp= key_part->field->pack_cmp(new_key_ptr,saved_key_ptr, + key_part->length, + key->table->insert_or_update))) return cmp; - uint length=key_part->field->packed_col_length(new_key_ptr, - key_part->length); + length= key_part->field->packed_col_length(new_key_ptr, + key_part->length); new_key_ptr+=length; key_length-=length; saved_key_ptr+=key_part->field->packed_col_length(saved_key_ptr, @@ -436,7 +511,7 @@ berkeley_cmp_fix_length_key(DB *file, const DBT *new_key, const DBT *saved_key) for (; key_part != end && (int) key_length > 0 ; key_part++) { int cmp; - if ((cmp=key_part->field->pack_cmp(new_key_ptr,saved_key_ptr,0))) + if ((cmp=key_part->field->pack_cmp(new_key_ptr,saved_key_ptr,0,0))) return cmp; new_key_ptr+=key_part->length; key_length-= key_part->length; @@ -446,6 +521,7 @@ berkeley_cmp_fix_length_key(DB *file, const DBT *new_key, const DBT *saved_key) } #endif + /* Compare key against row */ static bool @@ -457,6 +533,7 @@ berkeley_key_cmp(TABLE *table, KEY 
*key_info, const char *key, uint key_length) for (; key_part != end && (int) key_length > 0; key_part++) { int cmp; + uint length; if (key_part->null_bit) { key_length--; @@ -470,27 +547,34 @@ berkeley_key_cmp(TABLE *table, KEY *key_info, const char *key, uint key_length) if (!*key++) // Null value continue; } - if ((cmp=key_part->field->pack_cmp(key,key_part->length))) + /* + Last argument has to be 0 as we are also using this to function to see + if a key like 'a ' matched a row with 'a' + */ + if ((cmp= key_part->field->pack_cmp(key, key_part->length, 0))) return cmp; - uint length=key_part->field->packed_col_length(key,key_part->length); - key+=length; - key_length-=length; + length= key_part->field->packed_col_length(key,key_part->length); + key+= length; + key_length-= length; } return 0; // Identical keys } + int ha_berkeley::open(const char *name, int mode, uint test_if_locked) { char name_buff[FN_REFLEN]; uint open_mode=(mode == O_RDONLY ? DB_RDONLY : 0) | DB_THREAD; + uint max_key_length; int error; + TABLE_SHARE *table_share= table->s; DBUG_ENTER("ha_berkeley::open"); /* Open primary key */ hidden_primary_key=0; - if ((primary_key=table->primary_key) >= MAX_KEY) + if ((primary_key= table_share->primary_key) >= MAX_KEY) { // No primary key - primary_key=table->keys; + primary_key= table_share->keys; key_used_on_scan=MAX_KEY; ref_length=hidden_primary_key=BDB_HIDDEN_PRIMARY_KEY_LENGTH; } @@ -498,18 +582,18 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) key_used_on_scan=primary_key; /* Need some extra memory in case of packed keys */ - uint max_key_length= table->max_key_length + MAX_REF_PARTS*3; + max_key_length= table_share->max_key_length + MAX_REF_PARTS*3; if (!(alloc_ptr= my_multi_malloc(MYF(MY_WME), &key_buff, max_key_length, &key_buff2, max_key_length, &primary_key_buff, (hidden_primary_key ? 
0 : - table->key_info[table->primary_key].key_length), + table->key_info[table_share->primary_key].key_length), NullS))) DBUG_RETURN(1); /* purecov: inspected */ if (!(rec_buff= (byte*) my_malloc((alloced_rec_buff_length= - table->rec_buff_length), + table_share->rec_buff_length), MYF(MY_WME)))) { my_free(alloc_ptr,MYF(0)); /* purecov: inspected */ @@ -517,7 +601,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) } /* Init shared structure */ - if (!(share=get_share(name,table))) + if (!(share= get_share(name,table))) { my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */ my_free(alloc_ptr,MYF(0)); /* purecov: inspected */ @@ -530,7 +614,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) /* Fill in shared structure, if needed */ pthread_mutex_lock(&share->mutex); - file = share->file; + file= share->file; if (!share->use_count++) { if ((error=db_create(&file, db_env, 0))) @@ -541,13 +625,13 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) my_errno=error; /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } - share->file = file; + share->file= file; file->set_bt_compare(file, (hidden_primary_key ? 
berkeley_cmp_hidden_key : berkeley_cmp_packed_key)); if (!hidden_primary_key) - file->app_private= (void*) (table->key_info+table->primary_key); + file->app_private= (void*) (table->key_info + table_share->primary_key); if ((error= txn_begin(db_env, 0, (DB_TXN**) &transaction, 0)) || (error= (file->open(file, transaction, fn_format(name_buff, name, "", ha_berkeley_ext, @@ -555,7 +639,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) "main", DB_BTREE, open_mode, 0))) || (error= transaction->commit(transaction, 0))) { - free_share(share,table, hidden_primary_key,1); /* purecov: inspected */ + free_share(share, table, hidden_primary_key,1); /* purecov: inspected */ my_free((char*) rec_buff,MYF(0)); /* purecov: inspected */ my_free(alloc_ptr,MYF(0)); /* purecov: inspected */ my_errno=error; /* purecov: inspected */ @@ -567,7 +651,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) key_type[primary_key]=DB_NOOVERWRITE; DB **ptr=key_file; - for (uint i=0, used_keys=0; i < table->keys ; i++, ptr++) + for (uint i=0, used_keys=0; i < table_share->keys ; i++, ptr++) { char part[7]; if (i != primary_key) @@ -599,7 +683,7 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) } } /* Calculate pack_length of primary key */ - share->fixed_length_primary_key=1; + share->fixed_length_primary_key= 1; if (!hidden_primary_key) { ref_length=0; @@ -609,18 +693,19 @@ int ha_berkeley::open(const char *name, int mode, uint test_if_locked) ref_length+= key_part->field->max_packed_col_length(key_part->length); share->fixed_length_primary_key= (ref_length == table->key_info[primary_key].key_length); - share->status|=STATUS_PRIMARY_KEY_INIT; + share->status|= STATUS_PRIMARY_KEY_INIT; } - share->ref_length=ref_length; + share->ref_length= ref_length; } - ref_length=share->ref_length; // If second open + ref_length= share->ref_length; // If second open pthread_mutex_unlock(&share->mutex); transaction=0; cursor=0; key_read=0; 
block_size=8192; // Berkeley DB block size - share->fixed_length_row=!(table->db_create_options & HA_OPTION_PACK_RECORD); + share->fixed_length_row= !(table_share->db_create_options & + HA_OPTION_PACK_RECORD); get_status(); info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); @@ -660,9 +745,15 @@ bool ha_berkeley::fix_rec_buff_for_blob(ulong length) ulong ha_berkeley::max_row_length(const byte *buf) { - ulong length=table->reclength + table->fields*2; - for (Field_blob **ptr=table->blob_field ; *ptr ; ptr++) - length+= (*ptr)->get_length((char*) buf+(*ptr)->offset())+2; + ulong length= table->s->reclength + table->s->fields*2; + uint *ptr, *end; + for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; + ptr != end ; + ptr++) + { + Field_blob *blob= ((Field_blob*) table->field[*ptr]); + length+= blob->get_length((char*) buf + blob->offset())+2; + } return length; } @@ -678,29 +769,30 @@ ulong ha_berkeley::max_row_length(const byte *buf) int ha_berkeley::pack_row(DBT *row, const byte *record, bool new_row) { + byte *ptr; bzero((char*) row,sizeof(*row)); if (share->fixed_length_row) { row->data=(void*) record; - row->size=table->reclength+hidden_primary_key; + row->size= table->s->reclength+hidden_primary_key; if (hidden_primary_key) { if (new_row) get_auto_primary_key(current_ident); - memcpy_fixed((char*) record+table->reclength, (char*) current_ident, + memcpy_fixed((char*) record+table->s->reclength, (char*) current_ident, BDB_HIDDEN_PRIMARY_KEY_LENGTH); } return 0; } - if (table->blob_fields) + if (table->s->blob_fields) { if (fix_rec_buff_for_blob(max_row_length(record))) return HA_ERR_OUT_OF_MEM; /* purecov: inspected */ } /* Copy null bits */ - memcpy(rec_buff, record, table->null_bytes); - byte *ptr=rec_buff + table->null_bytes; + memcpy(rec_buff, record, table->s->null_bytes); + ptr= rec_buff + table->s->null_bytes; for (Field **field=table->field ; *field ; field++) ptr=(byte*) (*field)->pack((char*) ptr, @@ -715,7 +807,7 @@ int 
ha_berkeley::pack_row(DBT *row, const byte *record, bool new_row) ptr+=BDB_HIDDEN_PRIMARY_KEY_LENGTH; } row->data=rec_buff; - row->size= (size_t) (ptr - rec_buff); + row->size= (u_int32_t) (ptr - rec_buff); return 0; } @@ -723,13 +815,13 @@ int ha_berkeley::pack_row(DBT *row, const byte *record, bool new_row) void ha_berkeley::unpack_row(char *record, DBT *row) { if (share->fixed_length_row) - memcpy(record,(char*) row->data,table->reclength+hidden_primary_key); + memcpy(record,(char*) row->data,table->s->reclength+hidden_primary_key); else { /* Copy null bits */ const char *ptr= (const char*) row->data; - memcpy(record, ptr, table->null_bytes); - ptr+=table->null_bytes; + memcpy(record, ptr, table->s->null_bytes); + ptr+= table->s->null_bytes; for (Field **field=table->field ; *field ; field++) ptr= (*field)->unpack(record + (*field)->offset(), ptr); } @@ -740,11 +832,11 @@ void ha_berkeley::unpack_row(char *record, DBT *row) void ha_berkeley::unpack_key(char *record, DBT *key, uint index) { - KEY *key_info=table->key_info+index; + KEY *key_info= table->key_info+index; KEY_PART_INFO *key_part= key_info->key_part, - *end=key_part+key_info->key_parts; + *end= key_part+key_info->key_parts; + char *pos= (char*) key->data; - char *pos=(char*) key->data; for (; key_part != end; key_part++) { if (key_part->null_bit) @@ -768,8 +860,10 @@ void ha_berkeley::unpack_key(char *record, DBT *key, uint index) /* - Create a packed key from from a row - This will never fail as the key buffer is pre allocated. + Create a packed key from a row. This key will be written as such + to the index tree. + + This will never fail as the key buffer is pre-allocated. 
*/ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, @@ -808,14 +902,17 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, key_part->length); key_length-=key_part->length; } - key->size= (buff - (char*) key->data); + key->size= (u_int32_t) (buff - (char*) key->data); DBUG_DUMP("key",(char*) key->data, key->size); DBUG_RETURN(key); } /* - Create a packed key from from a MySQL unpacked key + Create a packed key from from a MySQL unpacked key (like the one that is + sent from the index_read() + + This key is to be used to read a row */ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff, @@ -849,7 +946,7 @@ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff, key_ptr+=key_part->store_length; key_length-=key_part->store_length; } - key->size= (buff - (char*) key->data); + key->size= (u_int32_t) (buff - (char*) key->data); DBUG_DUMP("key",(char*) key->data, key->size); DBUG_RETURN(key); } @@ -861,15 +958,19 @@ int ha_berkeley::write_row(byte * record) int error; DBUG_ENTER("write_row"); - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && record == table->record[0]) - update_auto_increment(); + { + if ((error= update_auto_increment())) + DBUG_RETURN(error); + } if ((error=pack_row(&row, record,1))) DBUG_RETURN(error); /* purecov: inspected */ - if (table->keys + test(hidden_primary_key) == 1) + table->insert_or_update= 1; // For handling of VARCHAR + if (table->s->keys + test(hidden_primary_key) == 1) { error=file->put(file, transaction, create_key(&prim_key, primary_key, key_buff, record), @@ -880,22 +981,15 @@ int ha_berkeley::write_row(byte * record) { DB_TXN *sub_trans = transaction; /* Don't use sub transactions in temporary tables */ - ulong thd_options = table->tmp_table == NO_TMP_TABLE ? 
table->in_use->options : 0; for (uint retry=0 ; retry < berkeley_trans_retry ; retry++) { key_map changed_keys(0); - if (using_ignore && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)) - { - if ((error=txn_begin(db_env, transaction, &sub_trans, 0))) /* purecov: deadcode */ - break; /* purecov: deadcode */ - DBUG_PRINT("trans",("starting subtransaction")); /* purecov: deadcode */ - } if (!(error=file->put(file, sub_trans, create_key(&prim_key, primary_key, key_buff, record), &row, key_type[primary_key]))) { changed_keys.set_bit(primary_key); - for (uint keynr=0 ; keynr < table->keys ; keynr++) + for (uint keynr=0 ; keynr < table->s->keys ; keynr++) { if (keynr == primary_key) continue; @@ -919,15 +1013,11 @@ int ha_berkeley::write_row(byte * record) if (using_ignore) { int new_error = 0; - if (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS) - { - DBUG_PRINT("trans",("aborting subtransaction")); /* purecov: deadcode */ - new_error=txn_abort(sub_trans); /* purecov: deadcode */ - } - else if (!changed_keys.is_clear_all()) + if (!changed_keys.is_clear_all()) { new_error = 0; - for (uint keynr=0 ; keynr < table->keys+test(hidden_primary_key) ; + for (uint keynr=0; + keynr < table->s->keys+test(hidden_primary_key); keynr++) { if (changed_keys.is_set(keynr)) @@ -945,15 +1035,11 @@ int ha_berkeley::write_row(byte * record) } } } - else if (using_ignore && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)) - { - DBUG_PRINT("trans",("committing subtransaction")); /* purecov: deadcode */ - error=txn_commit(sub_trans, 0); /* purecov: deadcode */ - } if (error != DB_LOCK_DEADLOCK) break; } } + table->insert_or_update= 0; if (error == DB_KEYEXIST) error=HA_ERR_FOUND_DUPP_KEY; else if (!error) @@ -978,7 +1064,7 @@ int ha_berkeley::key_cmp(uint keynr, const byte * old_row, (new_row[key_part->null_offset] & key_part->null_bit)) return 1; } - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH)) + if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) { if 
(key_part->field->cmp_binary((char*) (old_row + key_part->offset), @@ -1005,7 +1091,7 @@ int ha_berkeley::key_cmp(uint keynr, const byte * old_row, int ha_berkeley::update_primary_key(DB_TXN *trans, bool primary_key_changed, const byte * old_row, DBT *old_key, const byte * new_row, DBT *new_key, - ulong thd_options, bool local_using_ignore) + bool local_using_ignore) { DBT row; int error; @@ -1024,8 +1110,7 @@ int ha_berkeley::update_primary_key(DB_TXN *trans, bool primary_key_changed, { // Probably a duplicated key; restore old key and row if needed last_dup_key=primary_key; - if (local_using_ignore && - !(thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)) + if (local_using_ignore) { int new_error; if ((new_error=pack_row(&row, old_row, 0)) || @@ -1055,8 +1140,7 @@ int ha_berkeley::update_primary_key(DB_TXN *trans, bool primary_key_changed, int ha_berkeley::restore_keys(DB_TXN *trans, key_map *changed_keys, uint primary_key, const byte *old_row, DBT *old_key, - const byte *new_row, DBT *new_key, - ulong thd_options) + const byte *new_row, DBT *new_key) { int error; DBT tmp_key; @@ -1066,7 +1150,7 @@ int ha_berkeley::restore_keys(DB_TXN *trans, key_map *changed_keys, /* Restore the old primary key, and the old row, but don't ignore duplicate key failure */ if ((error=update_primary_key(trans, TRUE, new_row, new_key, - old_row, old_key, thd_options, FALSE))) + old_row, old_key, FALSE))) goto err; /* purecov: inspected */ /* Remove the new key, and put back the old key @@ -1076,7 +1160,7 @@ int ha_berkeley::restore_keys(DB_TXN *trans, key_map *changed_keys, that one just put back the old value. 
*/ if (!changed_keys->is_clear_all()) { - for (keynr=0 ; keynr < table->keys+test(hidden_primary_key) ; keynr++) + for (keynr=0 ; keynr < table->s->keys+test(hidden_primary_key) ; keynr++) { if (changed_keys->is_set(keynr)) { @@ -1103,15 +1187,15 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) DBT prim_key, key, old_prim_key; int error; DB_TXN *sub_trans; - ulong thd_options = table->tmp_table == NO_TMP_TABLE ? table->in_use->options : 0; bool primary_key_changed; DBUG_ENTER("update_row"); LINT_INIT(error); - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); + table->insert_or_update= 1; // For handling of VARCHAR if (hidden_primary_key) { primary_key_changed=0; @@ -1134,20 +1218,14 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) for (uint retry=0 ; retry < berkeley_trans_retry ; retry++) { key_map changed_keys(0); - if (using_ignore && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)) - { - if ((error=txn_begin(db_env, transaction, &sub_trans, 0))) /* purecov: deadcode */ - break; /* purecov: deadcode */ - DBUG_PRINT("trans",("starting subtransaction")); /* purecov: deadcode */ - } /* Start by updating the primary key */ if (!(error=update_primary_key(sub_trans, primary_key_changed, old_row, &old_prim_key, new_row, &prim_key, - thd_options, using_ignore))) + using_ignore))) { // Update all other keys - for (uint keynr=0 ; keynr < table->keys ; keynr++) + for (uint keynr=0 ; keynr < table->s->keys ; keynr++) { if (keynr == primary_key) continue; @@ -1155,15 +1233,7 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) { if ((error=remove_key(sub_trans, keynr, old_row, &old_prim_key))) { - if (using_ignore && /* purecov: inspected */ - (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)) - { - int new_error; - 
DBUG_PRINT("trans",("aborting subtransaction")); - new_error=txn_abort(sub_trans); - if (new_error) - error = new_error; - } + table->insert_or_update= 0; DBUG_RETURN(error); // Fatal error /* purecov: inspected */ } changed_keys.set_bit(keynr); @@ -1185,30 +1255,21 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) if (using_ignore) { int new_error = 0; - if (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS) - { - DBUG_PRINT("trans",("aborting subtransaction")); /* purecov: deadcode */ - new_error=txn_abort(sub_trans); /* purecov: deadcode */ - } - else if (!changed_keys.is_clear_all()) + if (!changed_keys.is_clear_all()) new_error=restore_keys(transaction, &changed_keys, primary_key, - old_row, &old_prim_key, new_row, &prim_key, - thd_options); + old_row, &old_prim_key, new_row, &prim_key); if (new_error) { - error=new_error; // This shouldn't happen /* purecov: inspected */ - break; /* purecov: inspected */ + /* This shouldn't happen */ + error=new_error; /* purecov: inspected */ + break; /* purecov: inspected */ } } } - else if (using_ignore && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)) - { - DBUG_PRINT("trans",("committing subtransaction")); /* purecov: deadcode */ - error=txn_commit(sub_trans, 0); /* purecov: deadcode */ - } if (error != DB_LOCK_DEADLOCK) break; } + table->insert_or_update= 0; if (error == DB_KEYEXIST) error=HA_ERR_FOUND_DUPP_KEY; DBUG_RETURN(error); @@ -1275,7 +1336,9 @@ int ha_berkeley::remove_keys(DB_TXN *trans, const byte *record, DBT *new_record, DBT *prim_key, key_map *keys) { int result = 0; - for (uint keynr=0 ; keynr < table->keys+test(hidden_primary_key) ; keynr++) + for (uint keynr=0; + keynr < table->s->keys+test(hidden_primary_key); + keynr++) { if (keys->is_set(keynr)) { @@ -1295,10 +1358,9 @@ int ha_berkeley::delete_row(const byte * record) { int error; DBT row, prim_key; - key_map keys=table->keys_in_use; - ulong thd_options = table->tmp_table == NO_TMP_TABLE ? 
table->in_use->options : 0; + key_map keys= table->s->keys_in_use; DBUG_ENTER("delete_row"); - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); if ((error=pack_row(&row, record, 0))) DBUG_RETURN((error)); /* purecov: inspected */ @@ -1311,34 +1373,11 @@ int ha_berkeley::delete_row(const byte * record) DB_TXN *sub_trans = transaction; for (uint retry=0 ; retry < berkeley_trans_retry ; retry++) { - if (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS) - { - if ((error=txn_begin(db_env, transaction, &sub_trans, 0))) /* purecov: deadcode */ - break; /* purecov: deadcode */ - DBUG_PRINT("trans",("starting sub transaction")); /* purecov: deadcode */ - } error=remove_keys(sub_trans, record, &row, &prim_key, &keys); - if (!error && (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS)) - { - DBUG_PRINT("trans",("ending sub transaction")); /* purecov: deadcode */ - error=txn_commit(sub_trans, 0); /* purecov: deadcode */ - } if (error) { /* purecov: inspected */ DBUG_PRINT("error",("Got error %d",error)); - if (thd_options & OPTION_INTERNAL_SUBTRANSACTIONS) - { - /* retry */ - int new_error; - DBUG_PRINT("trans",("aborting subtransaction")); - if ((new_error=txn_abort(sub_trans))) - { - error=new_error; // This shouldn't happen - break; - } - } - else - break; // No retry - return error + break; // No retry - return error } if (error != DB_LOCK_DEADLOCK) break; @@ -1355,7 +1394,7 @@ int ha_berkeley::index_init(uint keynr) { int error; DBUG_ENTER("ha_berkeley::index_init"); - DBUG_PRINT("enter",("table: '%s' key: %d", table->real_name, keynr)); + DBUG_PRINT("enter",("table: '%s' key: %d", table->s->table_name, keynr)); /* Under some very rare conditions (like full joins) we may already have @@ -1382,7 +1421,7 @@ int ha_berkeley::index_end() DBUG_ENTER("ha_berkely::index_end"); if (cursor) { - DBUG_PRINT("enter",("table: '%s'", table->real_name)); + DBUG_PRINT("enter",("table: '%s'", 
table->s->table_name)); error=cursor->c_close(cursor); cursor=0; } @@ -1445,7 +1484,7 @@ int ha_berkeley::read_row(int error, char *buf, uint keynr, DBT *row, int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + table->in_use->status_var.ha_read_key_count++; DBUG_ENTER("index_read_idx"); current_row.flags=DB_DBT_REALLOC; active_index=MAX_KEY; @@ -1466,7 +1505,7 @@ int ha_berkeley::index_read(byte * buf, const byte * key, int do_prev= 0; DBUG_ENTER("ha_berkeley::index_read"); - statistic_increment(ha_read_key_count,&LOCK_status); + table->in_use->status_var.ha_read_key_count++; bzero((char*) &row,sizeof(row)); if (find_flag == HA_READ_BEFORE_KEY) { @@ -1478,7 +1517,8 @@ int ha_berkeley::index_read(byte * buf, const byte * key, find_flag= HA_READ_AFTER_KEY; do_prev= 1; } - if (key_len == key_info->key_length) + if (key_len == key_info->key_length && + !(table->key_info[active_index].flags & HA_END_SPACE_KEY)) { if (find_flag == HA_READ_AFTER_KEY) key_info->handler.bdb_return_if_eq= 1; @@ -1535,7 +1575,8 @@ int ha_berkeley::index_read_last(byte * buf, const byte * key, uint key_len) KEY *key_info= &table->key_info[active_index]; DBUG_ENTER("ha_berkeley::index_read"); - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); /* read of partial key */ @@ -1559,7 +1600,8 @@ int ha_berkeley::index_next(byte * buf) { DBT row; DBUG_ENTER("index_next"); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT), (char*) buf, active_index, &row, &last_key, 1)); @@ -1570,9 +1612,11 @@ int ha_berkeley::index_next_same(byte * buf, const byte *key, uint keylen) 
DBT row; int error; DBUG_ENTER("index_next_same"); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); - if (keylen == table->key_info[active_index].key_length) + if (keylen == table->key_info[active_index].key_length && + !(table->key_info[active_index].flags & HA_END_SPACE_KEY)) error=read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT_DUP), (char*) buf, active_index, &row, &last_key, 1); else @@ -1590,7 +1634,8 @@ int ha_berkeley::index_prev(byte * buf) { DBT row; DBUG_ENTER("index_prev"); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_prev_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV), (char*) buf, active_index, &row, &last_key, 1)); @@ -1601,7 +1646,8 @@ int ha_berkeley::index_first(byte * buf) { DBT row; DBUG_ENTER("index_first"); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_first_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_FIRST), (char*) buf, active_index, &row, &last_key, 1)); @@ -1611,7 +1657,8 @@ int ha_berkeley::index_last(byte * buf) { DBT row; DBUG_ENTER("index_last"); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_last_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_LAST), (char*) buf, active_index, &row, &last_key, 0)); @@ -1633,7 +1680,8 @@ int ha_berkeley::rnd_next(byte *buf) { DBT row; DBUG_ENTER("rnd_next"); - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); 
DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT), (char*) buf, primary_key, &row, &last_key, 1)); @@ -1657,6 +1705,7 @@ DBT *ha_berkeley::get_pos(DBT *to, byte *pos) pos+=key_part->field->packed_col_length((char*) pos,key_part->length); to->size= (uint) (pos- (byte*) to->data); } + DBUG_DUMP("key", (char*) to->data, to->size); return to; } @@ -1664,9 +1713,10 @@ DBT *ha_berkeley::get_pos(DBT *to, byte *pos) int ha_berkeley::rnd_pos(byte * buf, byte *pos) { DBT db_pos; - statistic_increment(ha_read_rnd_count,&LOCK_status); + DBUG_ENTER("ha_berkeley::rnd_pos"); - + statistic_increment(table->in_use->status_var.ha_read_rnd_count, + &LOCK_status); active_index= MAX_KEY; DBUG_RETURN(read_row(file->get(file, transaction, get_pos(&db_pos, pos), @@ -1726,14 +1776,14 @@ int ha_berkeley::info(uint flag) if ((flag & HA_STATUS_CONST) || version != share->version) { version=share->version; - for (uint i=0 ; i < table->keys ; i++) + for (uint i=0 ; i < table->s->keys ; i++) { table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= share->rec_per_key[i]; } } /* Don't return key if we got an error for the internal primary key */ - if (flag & HA_STATUS_ERRKEY && last_dup_key < table->keys) + if (flag & HA_STATUS_ERRKEY && last_dup_key < table->s->keys) errkey= last_dup_key; DBUG_RETURN(0); } @@ -1795,61 +1845,65 @@ int ha_berkeley::reset(void) int ha_berkeley::external_lock(THD *thd, int lock_type) { int error=0; + berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; DBUG_ENTER("ha_berkeley::external_lock"); + if (!trx) + { + thd->ha_data[berkeley_hton.slot]= trx= (berkeley_trx_data *) + my_malloc(sizeof(*trx), MYF(MY_ZEROFILL)); + if (!trx) + DBUG_RETURN(1); + } if (lock_type != F_UNLCK) { - if (!thd->transaction.bdb_lock_count++) + if (!trx->bdb_lock_count++) { - DBUG_ASSERT(thd->transaction.stmt.bdb_tid == 0); + DBUG_ASSERT(trx->stmt == 0); transaction=0; // Safety /* First table lock, start transaction */ if ((thd->options & 
(OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN | - OPTION_TABLE_LOCK)) && - !thd->transaction.all.bdb_tid) + OPTION_TABLE_LOCK)) && !trx->all) { /* We have to start a master transaction */ - DBUG_PRINT("trans",("starting transaction all")); - if ((error=txn_begin(db_env, 0, - (DB_TXN**) &thd->transaction.all.bdb_tid, - 0))) + DBUG_PRINT("trans",("starting transaction all: options: 0x%lx", + (ulong) thd->options)); + if ((error=txn_begin(db_env, 0, &trx->all, 0))) { - thd->transaction.bdb_lock_count--; // We didn't get the lock /* purecov: inspected */ - DBUG_RETURN(error); /* purecov: inspected */ + trx->bdb_lock_count--; // We didn't get the lock + DBUG_RETURN(error); } + trans_register_ha(thd, TRUE, &berkeley_hton); if (thd->in_lock_tables) DBUG_RETURN(0); // Don't create stmt trans } DBUG_PRINT("trans",("starting transaction stmt")); - if ((error=txn_begin(db_env, - (DB_TXN*) thd->transaction.all.bdb_tid, - (DB_TXN**) &thd->transaction.stmt.bdb_tid, - 0))) + if ((error=txn_begin(db_env, trx->all, &trx->stmt, 0))) { /* We leave the possible master transaction open */ - thd->transaction.bdb_lock_count--; // We didn't get the lock /* purecov: inspected */ - DBUG_RETURN(error); /* purecov: inspected */ + trx->bdb_lock_count--; // We didn't get the lock + DBUG_RETURN(error); } + trans_register_ha(thd, FALSE, &berkeley_hton); } - transaction= (DB_TXN*) thd->transaction.stmt.bdb_tid; + transaction= trx->stmt; } else { lock.type=TL_UNLOCK; // Unlocked thread_safe_add(share->rows, changed_rows, &share->mutex); changed_rows=0; - if (!--thd->transaction.bdb_lock_count) + if (!--trx->bdb_lock_count) { - if (thd->transaction.stmt.bdb_tid) + if (trx->stmt) { /* - F_UNLOCK is done without a transaction commit / rollback. + F_UNLCK is done without a transaction commit / rollback. 
This happens if the thread didn't update any rows We must in this case commit the work to keep the row locks */ DBUG_PRINT("trans",("commiting non-updating transaction")); - error=txn_commit((DB_TXN*) thd->transaction.stmt.bdb_tid,0); - thd->transaction.stmt.bdb_tid=0; - transaction=0; + error= txn_commit(trx->stmt,0); + trx->stmt= transaction= 0; } } } @@ -1863,18 +1917,24 @@ int ha_berkeley::external_lock(THD *thd, int lock_type) Under LOCK TABLES, each used tables will force a call to start_stmt. */ -int ha_berkeley::start_stmt(THD *thd) +int ha_berkeley::start_stmt(THD *thd, thr_lock_type lock_type) { int error=0; DBUG_ENTER("ha_berkeley::start_stmt"); - if (!thd->transaction.stmt.bdb_tid) + berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; + DBUG_ASSERT(trx); + /* + note that trx->stmt may have been already initialized as start_stmt() + is called for *each table* not for each storage engine, + and there could be many bdb tables referenced in the query + */ + if (!trx->stmt) { DBUG_PRINT("trans",("starting transaction stmt")); - error=txn_begin(db_env, (DB_TXN*) thd->transaction.all.bdb_tid, - (DB_TXN**) &thd->transaction.stmt.bdb_tid, - 0); + error=txn_begin(db_env, trx->all, &trx->stmt, 0); + trans_register_ha(thd, FALSE, &berkeley_hton); } - transaction= (DB_TXN*) thd->transaction.stmt.bdb_tid; + transaction= trx->stmt; DBUG_RETURN(error); } @@ -1918,9 +1978,7 @@ THR_LOCK_DATA **ha_berkeley::store_lock(THD *thd, THR_LOCK_DATA **to, lock_type <= TL_WRITE) && !thd->in_lock_tables) lock_type = TL_WRITE_ALLOW_WRITE; - lock.type=lock_type; - lock_on_read= ((table->reginfo.lock_type > TL_WRITE_ALLOW_READ) ? 
DB_RMW : - 0); + lock.type= lock_type; } *to++= &lock; return to; @@ -1974,9 +2032,9 @@ int ha_berkeley::create(const char *name, register TABLE *form, if ((error= create_sub_table(name_buff,"main",DB_BTREE,0))) DBUG_RETURN(error); /* purecov: inspected */ - primary_key=table->primary_key; + primary_key= table->s->primary_key; /* Create the keys */ - for (uint i=0; i < form->keys; i++) + for (uint i=0; i < form->s->keys; i++) { if (i != primary_key) { @@ -1998,7 +2056,7 @@ int ha_berkeley::create(const char *name, register TABLE *form, "status", DB_BTREE, DB_CREATE, 0)))) { char rec_buff[4+MAX_KEY*4]; - uint length= 4+ table->keys*4; + uint length= 4+ table->s->keys*4; bzero(rec_buff, length); error= write_status(status_block, rec_buff, length); status_block->close(status_block,0); @@ -2062,19 +2120,35 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key, DB_KEY_RANGE start_range, end_range; DB *kfile=key_file[keynr]; double start_pos,end_pos,rows; - DBUG_ENTER("records_in_range"); - - if ((start_key && kfile->key_range(kfile,transaction, - pack_key(&key, keynr, key_buff, - start_key->key, - start_key->length), - &start_range,0)) || - (end_key && kfile->key_range(kfile,transaction, - pack_key(&key, keynr, key_buff, - end_key->key, - end_key->length), - &end_range,0))) - DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); // Better than returning an error /* purecov: inspected */ + bool error; + KEY *key_info= &table->key_info[keynr]; + DBUG_ENTER("ha_berkeley::records_in_range"); + + /* Ensure we get maximum range, even for varchar keys with different space */ + key_info->handler.bdb_return_if_eq= -1; + error= ((start_key && kfile->key_range(kfile,transaction, + pack_key(&key, keynr, key_buff, + start_key->key, + start_key->length), + &start_range,0))); + if (error) + { + key_info->handler.bdb_return_if_eq= 0; + // Better than returning an error + DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); /* purecov: inspected */ + } + key_info->handler.bdb_return_if_eq= 1; + 
error= (end_key && kfile->key_range(kfile,transaction, + pack_key(&key, keynr, key_buff, + end_key->key, + end_key->length), + &end_range,0)); + key_info->handler.bdb_return_if_eq= 0; + if (error) + { + // Better than returning an error + DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); /* purecov: inspected */ + } if (!start_key) start_pos= 0.0; @@ -2091,20 +2165,20 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key, end_pos=end_range.less+end_range.equal; rows=(end_pos-start_pos)*records; DBUG_PRINT("exit",("rows: %g",rows)); - DBUG_RETURN(rows <= 1.0 ? (ha_rows) 1 : (ha_rows) rows); + DBUG_RETURN((ha_rows)(rows <= 1.0 ? 1 : rows)); } -longlong ha_berkeley::get_auto_increment() +ulonglong ha_berkeley::get_auto_increment() { - longlong nr=1; // Default if error or new key + ulonglong nr=1; // Default if error or new key int error; (void) ha_berkeley::extra(HA_EXTRA_KEYREAD); /* Set 'active_index' */ - ha_berkeley::index_init(table->next_number_index); + ha_berkeley::index_init(table->s->next_number_index); - if (!table->next_number_key_offset) + if (!table->s->next_number_key_offset) { // Autoincrement at key-start error=ha_berkeley::index_last(table->record[1]); } @@ -2117,7 +2191,7 @@ longlong ha_berkeley::get_auto_increment() /* Reading next available number for a sub key */ ha_berkeley::create_key(&last_key, active_index, key_buff, table->record[0], - table->next_number_key_offset); + table->s->next_number_key_offset); /* Store for compare */ memcpy(old_key.data=key_buff2, key_buff, (old_key.size=last_key.size)); old_key.app_private=(void*) key_info; @@ -2146,8 +2220,8 @@ longlong ha_berkeley::get_auto_increment() } } if (!error) - nr=(longlong) - table->next_number_field->val_int_offset(table->rec_buff_length)+1; + nr= (ulonglong) + table->next_number_field->val_int_offset(table->s->rec_buff_length)+1; ha_berkeley::index_end(); (void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD); return nr; @@ -2185,7 +2259,7 @@ static void print_msg(THD *thd, const 
char *table_name, const char *op_name, protocol->store(msg_type); protocol->store(msgbuf); if (protocol->write()) - thd->killed=1; + thd->killed=THD::KILL_CONNECTION; } #endif @@ -2194,6 +2268,8 @@ int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt) uint i; DB_BTREE_STAT *stat=0; DB_TXN_STAT *txn_stat_ptr= 0; + berkeley_trx_data *trx=(berkeley_trx_data *)thd->ha_data[berkeley_hton.slot]; + DBUG_ASSERT(trx); /* Original bdb documentation says: @@ -2208,13 +2284,10 @@ int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt) txn_stat_ptr && txn_stat_ptr->st_nactive>=2) { DB_TXN_ACTIVE *atxn_stmt= 0, *atxn_all= 0; - - DB_TXN *txn_all= (DB_TXN*) thd->transaction.all.bdb_tid; - u_int32_t all_id= txn_all->id(txn_all); - - DB_TXN *txn_stmt= (DB_TXN*) thd->transaction.stmt.bdb_tid; - u_int32_t stmt_id= txn_stmt->id(txn_stmt); - + + u_int32_t all_id= trx->all->id(trx->all); + u_int32_t stmt_id= trx->stmt->id(trx->stmt); + DB_TXN_ACTIVE *cur= txn_stat_ptr->st_txnarray; DB_TXN_ACTIVE *end= cur + txn_stat_ptr->st_nactive; for (; cur!=end && (!atxn_stmt || !atxn_all); cur++) @@ -2222,7 +2295,7 @@ int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt) if (cur->txnid==all_id) atxn_all= cur; if (cur->txnid==stmt_id) atxn_stmt= cur; } - + if (atxn_stmt && atxn_all && log_compare(&atxn_stmt->lsn,&atxn_all->lsn)) { @@ -2232,7 +2305,7 @@ int ha_berkeley::analyze(THD* thd, HA_CHECK_OPT* check_opt) free(txn_stat_ptr); } - for (i=0 ; i < table->keys ; i++) + for (i=0 ; i < table->s->keys ; i++) { if (stat) { @@ -2367,14 +2440,15 @@ static BDB_SHARE *get_share(const char *table_name, TABLE *table) char *tmp_name; DB **key_file; u_int32_t *key_type; + uint keys= table->s->keys; if ((share=(BDB_SHARE *) my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), &share, sizeof(*share), - &rec_per_key, table->keys * sizeof(ha_rows), + &rec_per_key, keys * sizeof(ha_rows), &tmp_name, length+1, - &key_file, (table->keys+1) * sizeof(*key_file), - &key_type, (table->keys+1) * sizeof(u_int32_t), + 
&key_file, (keys+1) * sizeof(*key_file), + &key_type, (keys+1) * sizeof(u_int32_t), NullS))) { share->rec_per_key = rec_per_key; @@ -2401,7 +2475,7 @@ static int free_share(BDB_SHARE *share, TABLE *table, uint hidden_primary_key, bool mutex_is_locked) { int error, result = 0; - uint keys=table->keys + test(hidden_primary_key); + uint keys= table->s->keys + test(hidden_primary_key); pthread_mutex_lock(&bdb_mutex); if (mutex_is_locked) pthread_mutex_unlock(&share->mutex); /* purecov: inspected */ @@ -2465,8 +2539,8 @@ void ha_berkeley::get_status() } if (!(share->status & STATUS_ROW_COUNT_INIT) && share->status_block) { - share->org_rows=share->rows= - table->max_rows ? table->max_rows : HA_BERKELEY_MAX_ROWS; + share->org_rows= share->rows= + table->s->max_rows ? table->s->max_rows : HA_BERKELEY_MAX_ROWS; if (!share->status_block->cursor(share->status_block, 0, &cursor, 0)) { DBT row; @@ -2481,9 +2555,10 @@ void ha_berkeley::get_status() uint i; uchar *pos=(uchar*) row.data; share->org_rows=share->rows=uint4korr(pos); pos+=4; - for (i=0 ; i < table->keys ; i++) + for (i=0 ; i < table->s->keys ; i++) { - share->rec_per_key[i]=uint4korr(pos); pos+=4; + share->rec_per_key[i]=uint4korr(pos); + pos+=4; } } cursor->c_close(cursor); @@ -2541,7 +2616,7 @@ static void update_status(BDB_SHARE *share, TABLE *table) { char rec_buff[4+MAX_KEY*4], *pos=rec_buff; int4store(pos,share->rows); pos+=4; - for (uint i=0 ; i < table->keys ; i++) + for (uint i=0 ; i < table->s->keys ; i++) { int4store(pos,share->rec_per_key[i]); pos+=4; } @@ -2557,6 +2632,7 @@ end: DBUG_VOID_RETURN; } + /* Return an estimated of the number of rows in the table. Used when sorting to allocate buffers and by the optimizer. 
@@ -2567,4 +2643,29 @@ ha_rows ha_berkeley::estimate_rows_upper_bound() return share->rows + HA_BERKELEY_EXTRA_ROWS; } +int ha_berkeley::cmp_ref(const byte *ref1, const byte *ref2) +{ + if (hidden_primary_key) + return memcmp(ref1, ref2, BDB_HIDDEN_PRIMARY_KEY_LENGTH); + + int result; + Field *field; + KEY *key_info=table->key_info+table->s->primary_key; + KEY_PART_INFO *key_part=key_info->key_part; + KEY_PART_INFO *end=key_part+key_info->key_parts; + + for (; key_part != end; key_part++) + { + field= key_part->field; + result= field->pack_cmp((const char*)ref1, (const char*)ref2, + key_part->length, 0); + if (result) + return result; + ref1+= field->packed_col_length((const char*)ref1, key_part->length); + ref2+= field->packed_col_length((const char*)ref2, key_part->length); + } + + return 0; +} + #endif /* HAVE_BERKELEY_DB */ diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h index 50d5d537663..336c90f009a 100644 --- a/sql/ha_berkeley.h +++ b/sql/ha_berkeley.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -57,7 +56,6 @@ class ha_berkeley: public handler ulong alloced_rec_buff_length; ulong changed_rows; uint primary_key,last_dup_key, hidden_primary_key, version; - u_int32_t lock_on_read; bool key_read, using_ignore; bool fix_rec_buff_for_blob(ulong length); byte current_ident[BDB_HIDDEN_PRIMARY_KEY_LENGTH]; @@ -75,23 +73,17 @@ class ha_berkeley: public handler DBT *prim_key, key_map *keys); int restore_keys(DB_TXN *trans, key_map *changed_keys, uint primary_key, const byte *old_row, DBT *old_key, - const byte *new_row, DBT *new_key, - ulong thd_options); + const byte *new_row, DBT *new_key); int key_cmp(uint keynr, const byte * old_row, const byte * new_row); int update_primary_key(DB_TXN *trans, bool primary_key_changed, const byte * old_row, DBT *old_key, const byte * new_row, DBT *prim_key, - ulong thd_options, bool local_using_ignore); + bool local_using_ignore); int read_row(int error, char *buf, uint keynr, DBT *row, DBT *key, bool); DBT *get_pos(DBT *to, byte *pos); public: - ha_berkeley(TABLE *table): handler(table), alloc_ptr(0),rec_buff(0), file(0), - int_table_flags(HA_REC_NOT_IN_SEQ | HA_FAST_KEY_READ | - HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_NOT_EXACT_COUNT | - HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | - HA_AUTO_PART_KEY | HA_TABLE_SCAN_ON_INDEX), - changed_rows(0),last_dup_key((uint) -1),version(0),using_ignore(0) {} + ha_berkeley(TABLE *table_arg); ~ha_berkeley() {} const char *table_type() const { return "BerkeleyDB"; } ulong index_flags(uint idx, uint part, bool all_parts) const; @@ -101,6 +93,9 @@ class ha_berkeley: public handler uint max_supported_keys() const { return MAX_KEY-1; } uint extra_rec_buf_length() { return BDB_HIDDEN_PRIMARY_KEY_LENGTH; } ha_rows estimate_rows_upper_bound(); + uint max_supported_key_length() const { return UINT_MAX32; } + uint max_supported_key_part_length() const { return 
UINT_MAX32; } + const key_map *keys_to_use_for_scanning() { return &key_map_full; } bool has_transactions() { return 1;} @@ -131,7 +126,7 @@ class ha_berkeley: public handler int extra(enum ha_extra_function operation); int reset(void); int external_lock(THD *thd, int lock_type); - int start_stmt(THD *thd); + int start_stmt(THD *thd, thr_lock_type lock_type); void position(byte *record); int analyze(THD* thd,HA_CHECK_OPT* check_opt); int optimize(THD* thd, HA_CHECK_OPT* check_opt); @@ -146,16 +141,12 @@ class ha_berkeley: public handler enum thr_lock_type lock_type); void get_status(); - inline void get_auto_primary_key(byte *to) - { - pthread_mutex_lock(&share->mutex); - share->auto_ident++; - int5store(to,share->auto_ident); - pthread_mutex_unlock(&share->mutex); - } - longlong get_auto_increment(); + void get_auto_primary_key(byte *to); + ulonglong get_auto_increment(); void print_error(int error, myf errflag); uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; } + bool primary_key_is_clustered() { return true; } + int cmp_ref(const byte *ref1, const byte *ref2); }; extern bool berkeley_shared_data; @@ -169,6 +160,4 @@ extern TYPELIB berkeley_lock_typelib; bool berkeley_init(void); bool berkeley_end(void); bool berkeley_flush_logs(void); -int berkeley_commit(THD *thd, void *trans); -int berkeley_rollback(THD *thd, void *trans); int berkeley_show_logs(Protocol *protocol); diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc index 8c76b860e50..3f4285ec595 100644 --- a/sql/ha_blackhole.cc +++ b/sql/ha_blackhole.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -24,10 +23,48 @@ #include "ha_blackhole.h" +/* Blackhole storage engine handlerton */ + +handlerton blackhole_hton= { + "BLACKHOLE", + SHOW_OPTION_YES, + "/dev/null storage engine (anything you write to it disappears)", + DB_TYPE_BLACKHOLE_DB, + NULL, + 0, /* slot */ + 0, /* savepoint size. */ + NULL, /* close_connection */ + NULL, /* savepoint */ + NULL, /* rollback to savepoint */ + NULL, /* release savepoint */ + NULL, /* commit */ + NULL, /* rollback */ + NULL, /* prepare */ + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + HTON_CAN_RECREATE +}; + +/***************************************************************************** +** BLACKHOLE tables +*****************************************************************************/ + +ha_blackhole::ha_blackhole(TABLE *table_arg) + :handler(&blackhole_hton, table_arg) +{} + + +static const char *ha_blackhole_exts[] = { + NullS +}; + const char **ha_blackhole::bas_ext() const -{ - static const char *ext[]= { NullS }; - return ext; +{ + return ha_blackhole_exts; } int ha_blackhole::open(const char *name, int mode, uint test_if_locked) @@ -143,7 +180,7 @@ int ha_blackhole::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { DBUG_ENTER("ha_blackhole::index_read"); - DBUG_RETURN(0); + DBUG_RETURN(HA_ERR_END_OF_FILE); } diff --git a/sql/ha_blackhole.h b/sql/ha_blackhole.h index 177b59fa970..0046a57d10a 100644 --- a/sql/ha_blackhole.h +++ b/sql/ha_blackhole.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
+ the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -28,9 +27,7 @@ class ha_blackhole: public handler THR_LOCK thr_lock; public: - ha_blackhole(TABLE *table): handler(table) - { - } + ha_blackhole(TABLE *table_arg); ~ha_blackhole() { } diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc new file mode 100644 index 00000000000..dd4dd725be4 --- /dev/null +++ b/sql/ha_federated.cc @@ -0,0 +1,2641 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + + MySQL Federated Storage Engine + + ha_federated.cc - MySQL Federated Storage Engine + Patrick Galbraith and Brian Aker, 2004 + + This is a handler which uses a foreign database as the data file, as + opposed to a handler like MyISAM, which uses .MYD files locally. + + How this handler works + ---------------------------------- + Normal database files are local and as such: You create a table called + 'users', a file such as 'users.MYD' is created. A handler reads, inserts, + deletes, updates data in this file. The data is stored in particular format, + so to read, that data has to be parsed into fields, to write, fields have to + be stored in this format to write to this data file. 
+ + With MySQL Federated storage engine, there will be no local files + for each table's data (such as .MYD). A foreign database will store + the data that would normally be in this file. This will necessitate + the use of MySQL client API to read, delete, update, insert this + data. The data will have to be retrieve via an SQL call "SELECT * + FROM users". Then, to read this data, it will have to be retrieved + via mysql_fetch_row one row at a time, then converted from the + column in this select into the format that the handler expects. + + The create table will simply create the .frm file, and within the + "CREATE TABLE" SQL, there SHALL be any of the following : + + comment=scheme://username:password@hostname:port/database/tablename + comment=scheme://username@hostname/database/tablename + comment=scheme://username:password@hostname/database/tablename + comment=scheme://username:password@hostname/database/tablename + + An example would be: + + comment=mysql://username:password@hostname:port/database/tablename + + ***IMPORTANT*** + + This is a first release, conceptual release + Only 'mysql://' is supported at this release. + + + This comment connection string is necessary for the handler to be + able to connect to the foreign server. + + + The basic flow is this: + + SQL calls issues locally -> + mysql handler API (data in handler format) -> + mysql client API (data converted to SQL calls) -> + foreign database -> mysql client API -> + convert result sets (if any) to handler format -> + handler API -> results or rows affected to local + + What this handler does and doesn't support + ------------------------------------------ + * Tables MUST be created on the foreign server prior to any action on those + tables via the handler, first version. IMPORTANT: IF you MUST use the + federated storage engine type on the REMOTE end, MAKE SURE [ :) ] That + the table you connect to IS NOT a table pointing BACK to your ORIGNAL + table! 
You know and have heard the screaching of audio feedback? You + know putting two mirror in front of each other how the reflection + continues for eternity? Well, need I say more?! + * There will not be support for transactions. + * There is no way for the handler to know if the foreign database or table + has changed. The reason for this is that this database has to work like a + data file that would never be written to by anything other than the + database. The integrity of the data in the local table could be breached + if there was any change to the foreign database. + * Support for SELECT, INSERT, UPDATE , DELETE, indexes. + * No ALTER TABLE, DROP TABLE or any other Data Definition Language calls. + * Prepared statements will not be used in the first implementation, it + remains to to be seen whether the limited subset of the client API for the + server supports this. + * This uses SELECT, INSERT, UPDATE, DELETE and not HANDLER for its + implementation. + * This will not work with the query cache. 
+ + Method calls + + A two column table, with one record: + + (SELECT) + + "SELECT * FROM foo" + ha_federated::info + ha_federated::scan_time: + ha_federated::rnd_init: share->select_query SELECT * FROM foo + ha_federated::extra + + <for every row of data retrieved> + ha_federated::rnd_next + ha_federated::convert_row_to_internal_format + ha_federated::rnd_next + </for every row of data retrieved> + + ha_federated::rnd_end + ha_federated::extra + ha_federated::reset + + (INSERT) + + "INSERT INTO foo (id, ts) VALUES (2, now());" + + ha_federated::write_row + + ha_federated::reset + + (UPDATE) + + "UPDATE foo SET ts = now() WHERE id = 1;" + + ha_federated::index_init + ha_federated::index_read + ha_federated::index_read_idx + ha_federated::rnd_next + ha_federated::convert_row_to_internal_format + ha_federated::update_row + + ha_federated::extra + ha_federated::extra + ha_federated::extra + ha_federated::external_lock + ha_federated::reset + + + How do I use this handler? + -------------------------- + First of all, you need to build this storage engine: + + ./configure --with-federated-storage-engine + make + + Next, to use this handler, it's very simple. You must + have two databases running, either both on the same host, or + on different hosts. + + One the server that will be connecting to the foreign + host (client), you create your table as such: + + CREATE TABLE test_table ( + id int(20) NOT NULL auto_increment, + name varchar(32) NOT NULL default '', + other int(20) NOT NULL default '0', + PRIMARY KEY (id), + KEY name (name), + KEY other_key (other)) + ENGINE="FEDERATED" + DEFAULT CHARSET=latin1 + COMMENT='root@127.0.0.1:9306/federated/test_federated'; + + Notice the "COMMENT" and "ENGINE" field? This is where you + respectively set the engine type, "FEDERATED" and foreign + host information, this being the database your 'client' database + will connect to and use as the "data file". 
Obviously, the foreign + database is running on port 9306, so you want to start up your other + database so that it is indeed on port 9306, and your federated + database on a port other than that. In my setup, I use port 5554 + for federated, and port 5555 for the foreign database. + + Then, on the foreign database: + + CREATE TABLE test_table ( + id int(20) NOT NULL auto_increment, + name varchar(32) NOT NULL default '', + other int(20) NOT NULL default '0', + PRIMARY KEY (id), + KEY name (name), + KEY other_key (other)) + ENGINE="<NAME>" <-- whatever you want, or not specify + DEFAULT CHARSET=latin1 ; + + This table is exactly the same (and must be exactly the same), + except that it is not using the federated handler and does + not need the URL. + + + How to see the handler in action + -------------------------------- + + When developing this handler, I compiled the federated database with + debugging: + + ./configure --with-federated-storage-engine + --prefix=/home/mysql/mysql-build/federated/ --with-debug + + Once compiled, I did a 'make install' (not for the purpose of installing + the binary, but to install all the files the binary expects to see in the + diretory I specified in the build with --prefix, + "/home/mysql/mysql-build/federated". + + Then, I started the foreign server: + + /usr/local/mysql/bin/mysqld_safe + --user=mysql --log=/tmp/mysqld.5555.log -P 5555 + + Then, I went back to the directory containing the newly compiled mysqld, + <builddir>/sql/, started up gdb: + + gdb ./mysqld + + Then, withn the (gdb) prompt: + (gdb) run --gdb --port=5554 --socket=/tmp/mysqld.5554 --skip-innodb --debug + + Next, I open several windows for each: + + 1. Tail the debug trace: tail -f /tmp/mysqld.trace|grep ha_fed + 2. Tail the SQL calls to the foreign database: tail -f /tmp/mysqld.5555.log + 3. A window with a client open to the federated server on port 5554 + 4. 
A window with a client open to the federated server on port 5555 + + I would create a table on the client to the foreign server on port + 5555, and then to the federated server on port 5554. At this point, + I would run whatever queries I wanted to on the federated server, + just always remembering that whatever changes I wanted to make on + the table, or if I created new tables, that I would have to do that + on the foreign server. + + Another thing to look for is 'show variables' to show you that you have + support for federated handler support: + + show variables like '%federat%' + + and: + + show storage engines; + + Both should display the federated storage handler. + + + Testing + ------- + + There is a test for MySQL Federated Storage Handler in ./mysql-test/t, + federatedd.test It starts both a slave and master database using + the same setup that the replication tests use, with the exception that + it turns off replication, and sets replication to ignore the test tables. + After ensuring that you actually do have support for the federated storage + handler, numerous queries/inserts/updates/deletes are run, many derived + from the MyISAM tests, plus som other tests which were meant to reveal + any issues that would be most likely to affect this handler. All tests + should work! 
;) + + To run these tests, go into ./mysql-test (based in the directory you + built the server in) + + ./mysql-test-run federatedd + + To run the test, or if you want to run the test and have debug info: + + ./mysql-test-run --debug federated + + This will run the test in debug mode, and you can view the trace and + log files in the ./mysql-test/var/log directory + + ls -l mysql-test/var/log/ + -rw-r--r-- 1 patg patg 17 4 Dec 12:27 current_test + -rw-r--r-- 1 patg patg 692 4 Dec 12:52 manager.log + -rw-rw---- 1 patg patg 21246 4 Dec 12:51 master-bin.000001 + -rw-rw---- 1 patg patg 68 4 Dec 12:28 master-bin.index + -rw-r--r-- 1 patg patg 1620 4 Dec 12:51 master.err + -rw-rw---- 1 patg patg 23179 4 Dec 12:51 master.log + -rw-rw---- 1 patg patg 16696550 4 Dec 12:51 master.trace + -rw-r--r-- 1 patg patg 0 4 Dec 12:28 mysqltest-time + -rw-r--r-- 1 patg patg 2024051 4 Dec 12:51 mysqltest.trace + -rw-rw---- 1 patg patg 94992 4 Dec 12:51 slave-bin.000001 + -rw-rw---- 1 patg patg 67 4 Dec 12:28 slave-bin.index + -rw-rw---- 1 patg patg 249 4 Dec 12:52 slave-relay-bin.000003 + -rw-rw---- 1 patg patg 73 4 Dec 12:28 slave-relay-bin.index + -rw-r--r-- 1 patg patg 1349 4 Dec 12:51 slave.err + -rw-rw---- 1 patg patg 96206 4 Dec 12:52 slave.log + -rw-rw---- 1 patg patg 15706355 4 Dec 12:51 slave.trace + -rw-r--r-- 1 patg patg 0 4 Dec 12:51 warnings + + Of course, again, you can tail the trace log: + + tail -f mysql-test/var/log/master.trace |grep ha_fed + + As well as the slave query log: + + tail -f mysql-test/var/log/slave.log + + Files that comprise the test suit + --------------------------------- + mysql-test/t/federated.test + mysql-test/r/federated.result + mysql-test/r/have_federated_db.require + mysql-test/include/have_federated_db.inc + + + Other tidbits + ------------- + + These were the files that were modified or created for this + Federated handler to work: + + ./configure.in + ./sql/Makefile.am + ./config/ac_macros/ha_federated.m4 + ./sql/handler.cc + ./sql/mysqld.cc 
+ ./sql/set_var.cc + ./sql/field.h + ./sql/sql_string.h + ./mysql-test/mysql-test-run(.sh) + ./mysql-test/t/federated.test + ./mysql-test/r/federated.result + ./mysql-test/r/have_federated_db.require + ./mysql-test/include/have_federated_db.inc + ./sql/ha_federated.cc + ./sql/ha_federated.h + +*/ + + +#include "mysql_priv.h" +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation // gcc: Class implementation +#endif + +#ifdef HAVE_FEDERATED_DB +#include "ha_federated.h" + +#include "m_string.h" +/* Variables for federated share methods */ +static HASH federated_open_tables; // Hash used to track open + // tables +pthread_mutex_t federated_mutex; // This is the mutex we use to + // init the hash +static int federated_init= FALSE; // Variable for checking the + // init state of hash + +/* Federated storage engine handlerton */ + +handlerton federated_hton= { + "FEDERATED", + SHOW_OPTION_YES, + "Federated MySQL storage engine", + DB_TYPE_FEDERATED_DB, + federated_db_init, + 0, /* slot */ + 0, /* savepoint size. */ + NULL, /* close_connection */ + NULL, /* savepoint */ + NULL, /* rollback to savepoint */ + NULL, /* release savepoint */ + NULL, /* commit */ + NULL, /* rollback */ + NULL, /* prepare */ + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + HTON_ALTER_NOT_SUPPORTED +}; + + +/* Function we use in the creation of our hash to get key. */ + +static byte *federated_get_key(FEDERATED_SHARE *share, uint *length, + my_bool not_used __attribute__ ((unused))) +{ + *length= share->connect_string_length; + return (byte*) share->scheme; +} + +/* + Initialize the federated handler. 
+ + SYNOPSIS + federated_db_init() + void + + RETURN + FALSE OK + TRUE Error +*/ + +bool federated_db_init() +{ + DBUG_ENTER("federated_db_init"); + if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST)) + goto error; + if (hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0, + (hash_get_key) federated_get_key, 0, 0)) + { + VOID(pthread_mutex_destroy(&federated_mutex)); + } + else + { + federated_init= TRUE; + DBUG_RETURN(FALSE); + } +error: + have_federated_db= SHOW_OPTION_DISABLED; // If we couldn't use handler + DBUG_RETURN(TRUE); +} + + +/* + Release the federated handler. + + SYNOPSIS + federated_db_end() + void + + RETURN + FALSE OK +*/ + +bool federated_db_end() +{ + if (federated_init) + { + hash_free(&federated_open_tables); + VOID(pthread_mutex_destroy(&federated_mutex)); + } + federated_init= 0; + return FALSE; +} + +/* + Check (in create) whether the tables exists, and that it can be connected to + + SYNOPSIS + check_foreign_data_source() + share pointer to FEDERATED share + table_create_flag tells us that ::create is the caller, + therefore, return CANT_CREATE_FEDERATED_TABLE + + DESCRIPTION + This method first checks that the connection information that parse url + has populated into the share will be sufficient to connect to the foreign + table, and if so, does the foreign table exist. 
+*/ + +static int check_foreign_data_source(FEDERATED_SHARE *share, + bool table_create_flag) +{ + char escaped_table_name[NAME_LEN*2]; + char query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + uint error_code; + String query(query_buffer, sizeof(query_buffer), &my_charset_bin); + MYSQL *mysql; + DBUG_ENTER("ha_federated::check_foreign_data_source"); + + /* Zero the length, otherwise the string will have misc chars */ + query.length(0); + + /* error out if we can't alloc memory for mysql_init(NULL) (per Georg) */ + if (!(mysql= mysql_init(NULL))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + /* check if we can connect */ + if (!mysql_real_connect(mysql, + share->hostname, + share->username, + share->password, + share->database, + share->port, + share->socket, 0)) + { + /* + we want the correct error message, but it to return + ER_CANT_CREATE_FEDERATED_TABLE if called by ::create + */ + error_code= (table_create_flag ? + ER_CANT_CREATE_FEDERATED_TABLE : + ER_CONNECT_TO_FOREIGN_DATA_SOURCE); + + my_sprintf(error_buffer, + (error_buffer, + "database: '%s' username: '%s' hostname: '%s'", + share->database, share->username, share->hostname)); + + my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), error_buffer); + goto error; + } + else + { + int escaped_table_name_length= 0; + /* + Since we do not support transactions at this version, we can let the + client API silently reconnect. 
For future versions, we will need more + logic to deal with transactions + */ + mysql->reconnect= 1; + /* + Note: I am not using INORMATION_SCHEMA because this needs to work with + versions prior to 5.0 + + if we can connect, then make sure the table exists + + the query will be: SELECT * FROM `tablename` WHERE 1=0 + */ + query.append(FEDERATED_SELECT); + query.append(FEDERATED_STAR); + query.append(FEDERATED_FROM); + query.append(FEDERATED_BTICK); + escaped_table_name_length= + escape_string_for_mysql(&my_charset_bin, (char*)escaped_table_name, + sizeof(escaped_table_name), + share->table_name, + share->table_name_length); + query.append(escaped_table_name, escaped_table_name_length); + query.append(FEDERATED_BTICK); + query.append(FEDERATED_WHERE); + query.append(FEDERATED_FALSE); + + if (mysql_real_query(mysql, query.ptr(), query.length())) + { + error_code= table_create_flag ? + ER_CANT_CREATE_FEDERATED_TABLE : ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST; + my_sprintf(error_buffer, (error_buffer, "error: %d '%s'", + mysql_errno(mysql), mysql_error(mysql))); + + my_error(error_code, MYF(0), error_buffer); + goto error; + } + } + error_code=0; + +error: + mysql_close(mysql); + DBUG_RETURN(error_code); +} + + +static int parse_url_error(FEDERATED_SHARE *share, TABLE *table, int error_num) +{ + char buf[FEDERATED_QUERY_BUFFER_SIZE]; + int buf_len; + DBUG_ENTER("ha_federated parse_url_error"); + + if (share->scheme) + { + DBUG_PRINT("info", + ("error: parse_url. 
Returning error code %d freeing share->scheme 0x%lx", + error_num, (long) share->scheme)); + my_free((gptr) share->scheme, MYF(0)); + share->scheme= 0; + } + buf_len= min(table->s->connect_string.length, + FEDERATED_QUERY_BUFFER_SIZE-1); + strmake(buf, table->s->connect_string.str, buf_len); + my_error(error_num, MYF(0), buf); + DBUG_RETURN(error_num); +} + +/* + Parse connection info from table->s->connect_string + + SYNOPSIS + parse_url() + share pointer to FEDERATED share + table pointer to current TABLE class + table_create_flag determines what error to throw + + DESCRIPTION + populates the share with information about the connection + to the foreign database that will serve as the data source. + This string must be specified (currently) in the "comment" field, + listed in the CREATE TABLE statement. + + This string MUST be in the format of any of these: + + scheme://username:password@hostname:port/database/table + scheme://username@hostname/database/table + scheme://username@hostname:port/database/table + scheme://username:password@hostname/database/table + + An Example: + + mysql://joe:joespass@192.168.1.111:9308/federated/testtable + + ***IMPORTANT*** + Currently, only "mysql://" is supported. + + 'password' and 'port' are both optional. + + RETURN VALUE + 0 success + error_num particular error code + +*/ + +static int parse_url(FEDERATED_SHARE *share, TABLE *table, + uint table_create_flag) +{ + uint error_num= (table_create_flag ? 
+ ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE : + ER_FOREIGN_DATA_STRING_INVALID); + DBUG_ENTER("ha_federated::parse_url"); + + share->port= 0; + share->socket= 0; + DBUG_PRINT("info", ("Length %d \n", table->s->connect_string.length)); + DBUG_PRINT("info", ("String %.*s \n", table->s->connect_string.length, + table->s->connect_string.str)); + share->scheme= my_strdup_with_length(table->s->connect_string.str, + table->s->connect_string.length, + MYF(0)); + + share->connect_string_length= table->s->connect_string.length; + DBUG_PRINT("info",("parse_url alloced share->scheme 0x%lx", (long) share->scheme)); + + /* + remove addition of null terminator and store length + for each string in share + */ + if (!(share->username= strstr(share->scheme, "://"))) + goto error; + share->scheme[share->username - share->scheme]= '\0'; + + if (strcmp(share->scheme, "mysql") != 0) + goto error; + + share->username+= 3; + + if (!(share->hostname= strchr(share->username, '@'))) + goto error; + + share->username[share->hostname - share->username]= '\0'; + share->hostname++; + + if ((share->password= strchr(share->username, ':'))) + { + share->username[share->password - share->username]= '\0'; + share->password++; + share->username= share->username; + /* make sure there isn't an extra / or @ */ + if ((strchr(share->password, '/') || strchr(share->hostname, '@'))) + goto error; + /* + Found that if the string is: + user:@hostname:port/database/table + Then password is a null string, so set to NULL + */ + if ((share->password[0] == '\0')) + share->password= NULL; + } + else + share->username= share->username; + + /* make sure there isn't an extra / or @ */ + if ((strchr(share->username, '/')) || (strchr(share->hostname, '@'))) + goto error; + + if (!(share->database= strchr(share->hostname, '/'))) + goto error; + share->hostname[share->database - share->hostname]= '\0'; + share->database++; + + if ((share->sport= strchr(share->hostname, ':'))) + { + share->hostname[share->sport - 
share->hostname]= '\0'; + share->sport++; + if (share->sport[0] == '\0') + share->sport= NULL; + else + share->port= atoi(share->sport); + } + + if (!(share->table_name= strchr(share->database, '/'))) + goto error; + share->database[share->table_name - share->database]= '\0'; + share->table_name++; + + share->table_name_length= strlen(share->table_name); + + /* make sure there's not an extra / */ + if ((strchr(share->table_name, '/'))) + goto error; + + if (share->hostname[0] == '\0') + share->hostname= NULL; + + if (!share->port) + { + if (strcmp(share->hostname, my_localhost) == 0) + share->socket= my_strdup(MYSQL_UNIX_ADDR, MYF(0)); + else + share->port= MYSQL_PORT; + } + + DBUG_PRINT("info", + ("scheme %s username %s password %s \ + hostname %s port %d database %s tablename %s\n", + share->scheme, share->username, share->password, + share->hostname, share->port, share->database, + share->table_name)); + + DBUG_RETURN(0); + +error: + DBUG_RETURN(parse_url_error(share, table, error_num)); +} + + +/***************************************************************************** +** FEDERATED tables +*****************************************************************************/ + +ha_federated::ha_federated(TABLE *table_arg) + :handler(&federated_hton, table_arg), + mysql(0), stored_result(0) +{} + + +/* + Convert MySQL result set row to handler internal format + + SYNOPSIS + convert_row_to_internal_format() + record Byte pointer to record + row MySQL result set row from fetchrow() + result Result set to use + + DESCRIPTION + This method simply iterates through a row returned via fetchrow with + values from a successful SELECT , and then stores each column's value + in the field object via the field object pointer (pointing to the table's + array of field object pointers). 
This is how the handler needs the data + to be stored to then return results back to the user + + RETURN VALUE + 0 After fields have had field values stored from record + */ + +uint ha_federated::convert_row_to_internal_format(byte *record, + MYSQL_ROW row, + MYSQL_RES *result) +{ + ulong *lengths; + Field **field; + DBUG_ENTER("ha_federated::convert_row_to_internal_format"); + + lengths= mysql_fetch_lengths(result); + + for (field= table->field; *field; field++) + { + /* + index variable to move us through the row at the + same iterative step as the field + */ + int x= field - table->field; + my_ptrdiff_t old_ptr; + old_ptr= (my_ptrdiff_t) (record - table->record[0]); + (*field)->move_field(old_ptr); + if (!row[x]) + (*field)->set_null(); + else + { + (*field)->set_notnull(); + (*field)->store(row[x], lengths[x], &my_charset_bin); + } + (*field)->move_field(-old_ptr); + } + + DBUG_RETURN(0); +} + +static bool emit_key_part_name(String *to, KEY_PART_INFO *part) +{ + DBUG_ENTER("emit_key_part_name"); + if (to->append(FEDERATED_BTICK) || + to->append(part->field->field_name) || + to->append(FEDERATED_BTICK)) + DBUG_RETURN(1); // Out of memory + DBUG_RETURN(0); +} + +static bool emit_key_part_element(String *to, KEY_PART_INFO *part, + bool needs_quotes, bool is_like, + const byte *ptr, uint len) +{ + Field *field= part->field; + DBUG_ENTER("emit_key_part_element"); + + if (needs_quotes && to->append(FEDERATED_SQUOTE)) + DBUG_RETURN(1); + + if (part->type == HA_KEYTYPE_BIT) + { + char buff[STRING_BUFFER_USUAL_SIZE], *buf= buff; + + *buf++= '0'; + *buf++= 'x'; + buf= octet2hex(buf, (char*) ptr, len); + if (to->append((char*) buff, (uint)(buf - buff))) + DBUG_RETURN(1); + } + else if (part->key_part_flag & HA_BLOB_PART) + { + String blob; + uint blob_length= uint2korr(ptr); + blob.set_quick((char*) ptr+HA_KEY_BLOB_LENGTH, + blob_length, &my_charset_bin); + if (append_escaped(to, &blob)) + DBUG_RETURN(1); + } + else if (part->key_part_flag & HA_VAR_LENGTH_PART) + { + 
String varchar; + uint var_length= uint2korr(ptr); + varchar.set_quick((char*) ptr+HA_KEY_BLOB_LENGTH, + var_length, &my_charset_bin); + if (append_escaped(to, &varchar)) + DBUG_RETURN(1); + } + else + { + char strbuff[MAX_FIELD_WIDTH]; + String str(strbuff, sizeof(strbuff), part->field->charset()), *res; + + res= field->val_str(&str, (char *)ptr); + + if (field->result_type() == STRING_RESULT) + { + if (append_escaped(to, res)) + DBUG_RETURN(1); + } + else if (to->append(res->ptr(), res->length())) + DBUG_RETURN(1); + } + + if (is_like && to->append(FEDERATED_PERCENT)) + DBUG_RETURN(1); + + if (needs_quotes && to->append(FEDERATED_SQUOTE)) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + +/* + Create a WHERE clause based off of values in keys + Note: This code was inspired by key_copy from key.cc + + SYNOPSIS + create_where_from_key () + to String object to store WHERE clause + key_info KEY struct pointer + key byte pointer containing key + key_length length of key + range_type 0 - no range, 1 - min range, 2 - max range + (see enum range_operation) + + DESCRIPTION + Using iteration through all the keys via a KEY_PART_INFO pointer, + This method 'extracts' the value of each key in the byte pointer + *key, and for each key found, constructs an appropriate WHERE clause + + RETURN VALUE + 0 After all keys have been accounted for to create the WHERE clause + 1 No keys found + + Range flags Table per Timour: + + ----------------- + - start_key: + * ">" -> HA_READ_AFTER_KEY + * ">=" -> HA_READ_KEY_OR_NEXT + * "=" -> HA_READ_KEY_EXACT + + - end_key: + * "<" -> HA_READ_BEFORE_KEY + * "<=" -> HA_READ_AFTER_KEY + + records_in_range: + ----------------- + - start_key: + * ">" -> HA_READ_AFTER_KEY + * ">=" -> HA_READ_KEY_EXACT + * "=" -> HA_READ_KEY_EXACT + + - end_key: + * "<" -> HA_READ_BEFORE_KEY + * "<=" -> HA_READ_AFTER_KEY + * "=" -> HA_READ_AFTER_KEY + +0 HA_READ_KEY_EXACT, Find first record else error +1 HA_READ_KEY_OR_NEXT, Record or next record +2 HA_READ_KEY_OR_PREV, 
Record or previous +3 HA_READ_AFTER_KEY, Find next rec. after key-record +4 HA_READ_BEFORE_KEY, Find next rec. before key-record +5 HA_READ_PREFIX, Key which as same prefix +6 HA_READ_PREFIX_LAST, Last key with the same prefix +7 HA_READ_PREFIX_LAST_OR_PREV, Last or prev key with the same prefix + +Flags that I've found: + +id, primary key, varchar + +id = 'ccccc' +records_in_range: start_key 0 end_key 3 +read_range_first: start_key 0 end_key NULL + +id > 'ccccc' +records_in_range: start_key 3 end_key NULL +read_range_first: start_key 3 end_key NULL + +id < 'ccccc' +records_in_range: start_key NULL end_key 4 +read_range_first: start_key NULL end_key 4 + +id <= 'ccccc' +records_in_range: start_key NULL end_key 3 +read_range_first: start_key NULL end_key 3 + +id >= 'ccccc' +records_in_range: start_key 0 end_key NULL +read_range_first: start_key 1 end_key NULL + +id like 'cc%cc' +records_in_range: start_key 0 end_key 3 +read_range_first: start_key 1 end_key 3 + +id > 'aaaaa' and id < 'ccccc' +records_in_range: start_key 3 end_key 4 +read_range_first: start_key 3 end_key 4 + +id >= 'aaaaa' and id < 'ccccc'; +records_in_range: start_key 0 end_key 4 +read_range_first: start_key 1 end_key 4 + +id >= 'aaaaa' and id <= 'ccccc'; +records_in_range: start_key 0 end_key 3 +read_range_first: start_key 1 end_key 3 + +id > 'aaaaa' and id <= 'ccccc'; +records_in_range: start_key 3 end_key 3 +read_range_first: start_key 3 end_key 3 + +numeric keys: + +id = 4 +index_read_idx: start_key 0 end_key NULL + +id > 4 +records_in_range: start_key 3 end_key NULL +read_range_first: start_key 3 end_key NULL + +id >= 4 +records_in_range: start_key 0 end_key NULL +read_range_first: start_key 1 end_key NULL + +id < 4 +records_in_range: start_key NULL end_key 4 +read_range_first: start_key NULL end_key 4 + +id <= 4 +records_in_range: start_key NULL end_key 3 +read_range_first: start_key NULL end_key 3 + +id like 4 +full table scan, select * from + +id > 2 and id < 8 +records_in_range: start_key 3 
end_key 4 +read_range_first: start_key 3 end_key 4 + +id >= 2 and id < 8 +records_in_range: start_key 0 end_key 4 +read_range_first: start_key 1 end_key 4 + +id >= 2 and id <= 8 +records_in_range: start_key 0 end_key 3 +read_range_first: start_key 1 end_key 3 + +id > 2 and id <= 8 +records_in_range: start_key 3 end_key 3 +read_range_first: start_key 3 end_key 3 + +multi keys (id int, name varchar, other varchar) + +id = 1; +records_in_range: start_key 0 end_key 3 +read_range_first: start_key 0 end_key NULL + +id > 4; +id > 2 and name = '333'; remote: id > 2 +id > 2 and name > '333'; remote: id > 2 +id > 2 and name > '333' and other < 'ddd'; remote: id > 2 no results +id > 2 and name >= '333' and other < 'ddd'; remote: id > 2 1 result +id >= 4 and name = 'eric was here' and other > 'eeee'; +records_in_range: start_key 3 end_key NULL +read_range_first: start_key 3 end_key NULL + +id >= 4; +id >= 2 and name = '333' and other < 'ddd'; +remote: `id` >= 2 AND `name` >= '333'; +records_in_range: start_key 0 end_key NULL +read_range_first: start_key 1 end_key NULL + +id < 4; +id < 3 and name = '222' and other <= 'ccc'; remote: id < 3 +records_in_range: start_key NULL end_key 4 +read_range_first: start_key NULL end_key 4 + +id <= 4; +records_in_range: start_key NULL end_key 3 +read_range_first: start_key NULL end_key 3 + +id like 4; +full table scan + +id > 2 and id < 4; +records_in_range: start_key 3 end_key 4 +read_range_first: start_key 3 end_key 4 + +id >= 2 and id < 4; +records_in_range: start_key 0 end_key 4 +read_range_first: start_key 1 end_key 4 + +id >= 2 and id <= 4; +records_in_range: start_key 0 end_key 3 +read_range_first: start_key 1 end_key 3 + +id > 2 and id <= 4; +id = 6 and name = 'eric was here' and other > 'eeee'; +remote: (`id` > 6 AND `name` > 'eric was here' AND `other` > 'eeee') +AND (`id` <= 6) AND ( AND `name` <= 'eric was here') +no results +records_in_range: start_key 3 end_key 3 +read_range_first: start_key 3 end_key 3 + +Summary: + +* If the 
start key flag is 0 the max key flag shouldn't even be set, + and if it is, the query produced would be invalid. +* Multipart keys, even if containing some or all numeric columns, + are treated the same as non-numeric keys + + If the query is " = " (quotes or not): + - records in range start key flag HA_READ_KEY_EXACT, + end key flag HA_READ_AFTER_KEY (incorrect) + - any other: start key flag HA_READ_KEY_OR_NEXT, + end key flag HA_READ_AFTER_KEY (correct) + +* 'like' queries (of key) + - Numeric, full table scan + - Non-numeric + records_in_range: start_key 0 end_key 3 + other : start_key 1 end_key 3 + +* If the key flag is HA_READ_AFTER_KEY: + if start_key, append > + if end_key, append <= + +* If create_where_key was called by records_in_range: + + - if the key is numeric: + start key flag is 0 when end key is NULL, end key flag is 3 or 4 + - if create_where_key was called by any other function: + start key flag is 1 when end key is NULL, end key flag is 3 or 4 + - if the key is non-numeric, or multipart + When the query is an exact match, the start key flag is 0, + end key flag is 3 for what should be a no-range condition where + you should have 0 and max key NULL, which it is if called by + read_range_first + +Conclusion: + +1. Need logic to determin if a key is min or max when the flag is +HA_READ_AFTER_KEY, and handle appending correct operator accordingly + +2. Need a boolean flag to pass to create_where_from_key, used in the +switch statement. Add 1 to the flag if: + - start key flag is HA_READ_KEY_EXACT and the end key is NULL + +*/ + +bool ha_federated::create_where_from_key(String *to, + KEY *key_info, + const key_range *start_key, + const key_range *end_key, + bool from_records_in_range) +{ + bool both_not_null= + (start_key != NULL && end_key != NULL) ? 
TRUE : FALSE; + const byte *ptr; + uint remainder, length; + char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE]; + String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info); + const key_range *ranges[2]= { start_key, end_key }; + DBUG_ENTER("ha_federated::create_where_from_key"); + + tmp.length(0); + if (start_key == NULL && end_key == NULL) + DBUG_RETURN(1); + + for (int i= 0; i <= 1; i++) + { + bool needs_quotes; + KEY_PART_INFO *key_part; + if (ranges[i] == NULL) + continue; + + if (both_not_null) + { + if (i > 0) + tmp.append(FEDERATED_CONJUNCTION); + else + tmp.append(FEDERATED_OPENPAREN); + } + + for (key_part= key_info->key_part, + remainder= key_info->key_parts, + length= ranges[i]->length, + ptr= ranges[i]->key; ; + remainder--, + key_part++) + { + Field *field= key_part->field; + uint store_length= key_part->store_length; + uint part_length= min(store_length, length); + needs_quotes= 1; + DBUG_DUMP("key, start of loop", (char *) ptr, length); + + if (key_part->null_bit) + { + if (*ptr++) + { + if (emit_key_part_name(&tmp, key_part) || + tmp.append(FEDERATED_ISNULL)) + DBUG_RETURN(1); + continue; + } + } + + if (tmp.append(FEDERATED_OPENPAREN)) + DBUG_RETURN(1); + + switch(ranges[i]->flag) { + case(HA_READ_KEY_EXACT): + DBUG_PRINT("info", ("federated HA_READ_KEY_EXACT %d", i)); + if (store_length >= length || + !needs_quotes || + key_part->type == HA_KEYTYPE_BIT || + field->result_type() != STRING_RESULT) + { + if (emit_key_part_name(&tmp, key_part)) + DBUG_RETURN(1); + + if (from_records_in_range) + { + if (tmp.append(FEDERATED_GE)) + DBUG_RETURN(1); + } + else + { + if (tmp.append(FEDERATED_EQ)) + DBUG_RETURN(1); + } + + if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr, + part_length)) + DBUG_RETURN(1); + } + else + /* LIKE */ + { + if (emit_key_part_name(&tmp, key_part) || + tmp.append(FEDERATED_LIKE) || + emit_key_part_element(&tmp, key_part, needs_quotes, 1, ptr, + part_length)) + DBUG_RETURN(1); + } + break; + case(HA_READ_AFTER_KEY): + 
DBUG_PRINT("info", ("federated HA_READ_AFTER_KEY %d", i)); + if (store_length >= length) /* end key */ + { + if (emit_key_part_name(&tmp, key_part)) + DBUG_RETURN(1); + + if (i > 0) /* end key */ + { + if (tmp.append(FEDERATED_LE)) + DBUG_RETURN(1); + } + else /* start key */ + { + if (tmp.append(FEDERATED_GT)) + DBUG_RETURN(1); + } + + if (emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr, + part_length)) + { + DBUG_RETURN(1); + } + break; + } + case(HA_READ_KEY_OR_NEXT): + DBUG_PRINT("info", ("federated HA_READ_KEY_OR_NEXT %d", i)); + if (emit_key_part_name(&tmp, key_part) || + tmp.append(FEDERATED_GE) || + emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr, + part_length)) + DBUG_RETURN(1); + break; + case(HA_READ_BEFORE_KEY): + DBUG_PRINT("info", ("federated HA_READ_BEFORE_KEY %d", i)); + if (store_length >= length) + { + if (emit_key_part_name(&tmp, key_part) || + tmp.append(FEDERATED_LT) || + emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr, + part_length)) + DBUG_RETURN(1); + break; + } + case(HA_READ_KEY_OR_PREV): + DBUG_PRINT("info", ("federated HA_READ_KEY_OR_PREV %d", i)); + if (emit_key_part_name(&tmp, key_part) || + tmp.append(FEDERATED_LE) || + emit_key_part_element(&tmp, key_part, needs_quotes, 0, ptr, + part_length)) + DBUG_RETURN(1); + break; + default: + DBUG_PRINT("info",("cannot handle flag %d", ranges[i]->flag)); + DBUG_RETURN(1); + } + if (tmp.append(FEDERATED_CLOSEPAREN)) + DBUG_RETURN(1); + + if (store_length >= length) + break; + DBUG_PRINT("info", ("remainder %d", remainder)); + DBUG_ASSERT(remainder > 1); + length-= store_length; + ptr+= store_length; + if (tmp.append(FEDERATED_AND)) + DBUG_RETURN(1); + + DBUG_PRINT("info", + ("create_where_from_key WHERE clause: %s", + tmp.c_ptr_quick())); + } + } + if (both_not_null) + if (tmp.append(FEDERATED_CLOSEPAREN)) + DBUG_RETURN(1); + + if (to->append(FEDERATED_WHERE)) + DBUG_RETURN(1); + + if (to->append(tmp)) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + +/* + Example 
of simple lock controls. The "share" it creates is structure we will + pass to each federated handler. Do you have to have one of these? Well, you + have pieces that are used for locking, and they are needed to function. +*/ + +static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) +{ + char *select_query; + char query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + Field **field; + String query(query_buffer, sizeof(query_buffer), &my_charset_bin); + FEDERATED_SHARE *share= NULL, tmp_share; + /* + In order to use this string, we must first zero it's length, + or it will contain garbage + */ + query.length(0); + + pthread_mutex_lock(&federated_mutex); + + if (parse_url(&tmp_share, table, 0)) + goto error; + + /* TODO: change tmp_share.scheme to LEX_STRING object */ + if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables, + (byte*) tmp_share.scheme, + tmp_share. + connect_string_length))) + { + query.set_charset(system_charset_info); + query.append(FEDERATED_SELECT); + for (field= table->field; *field; field++) + { + query.append(FEDERATED_BTICK); + query.append((*field)->field_name); + query.append(FEDERATED_BTICK); + query.append(FEDERATED_COMMA); + } + query.length(query.length()- strlen(FEDERATED_COMMA)); + query.append(FEDERATED_FROM); + query.append(FEDERATED_BTICK); + + if (!(share= (FEDERATED_SHARE *) + my_multi_malloc(MYF(MY_WME), + &share, sizeof(*share), + &select_query, + query.length()+table->s->connect_string.length+1, + NullS))) + goto error; + + memcpy(share, &tmp_share, sizeof(tmp_share)); + + share->table_name_length= strlen(share->table_name); + /* TODO: share->table_name to LEX_STRING object */ + query.append(share->table_name, share->table_name_length); + query.append(FEDERATED_BTICK); + share->select_query= select_query; + strmov(share->select_query, query.ptr()); + share->use_count= 0; + DBUG_PRINT("info", + ("share->select_query %s", share->select_query)); + + if (my_hash_insert(&federated_open_tables, (byte*) share)) + 
goto error; + thr_lock_init(&share->lock); + pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST); + } + share->use_count++; + pthread_mutex_unlock(&federated_mutex); + + return share; + +error: + pthread_mutex_unlock(&federated_mutex); + my_free((gptr) tmp_share.scheme, MYF(MY_ALLOW_ZERO_PTR)); + my_free((gptr) share, MYF(MY_ALLOW_ZERO_PTR)); + return NULL; +} + + +/* + Free lock controls. We call this whenever we close a table. + If the table had the last reference to the share then we + free memory associated with it. +*/ + +static int free_share(FEDERATED_SHARE *share) +{ + DBUG_ENTER("free_share"); + + pthread_mutex_lock(&federated_mutex); + if (!--share->use_count) + { + hash_delete(&federated_open_tables, (byte*) share); + my_free((gptr) share->scheme, MYF(MY_ALLOW_ZERO_PTR)); + my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR)); + thr_lock_delete(&share->lock); + VOID(pthread_mutex_destroy(&share->mutex)); + my_free((gptr) share, MYF(0)); + } + pthread_mutex_unlock(&federated_mutex); + + DBUG_RETURN(0); +} + + +ha_rows ha_federated::records_in_range(uint inx, key_range *start_key, + key_range *end_key) +{ + /* + + We really want indexes to be used as often as possible, therefore + we just need to hard-code the return value to a very low number to + force the issue + +*/ + DBUG_ENTER("ha_federated::records_in_range"); + DBUG_RETURN(FEDERATED_RECORDS_IN_RANGE); +} +/* + If frm_error() is called then we will use this to to find out + what file extentions exist for the storage engine. This is + also used by the default rename_table and delete_table method + in handler.cc. +*/ + +const char **ha_federated::bas_ext() const +{ + static const char *ext[]= + { + NullS + }; + return ext; +} + + +/* + Used for opening tables. The name will be the name of the file. + A table is opened when it needs to be opened. For instance + when a request comes in for a select on the table (tables are not + open and closed for each request, they are cached). 
+ + Called from handler.cc by handler::ha_open(). The server opens + all tables by calling ha_open() which then calls the handler + specific open(). +*/ + +int ha_federated::open(const char *name, int mode, uint test_if_locked) +{ + DBUG_ENTER("ha_federated::open"); + + if (!(share= get_share(name, table))) + DBUG_RETURN(1); + thr_lock_data_init(&share->lock, &lock, NULL); + + /* Connect to foreign database mysql_real_connect() */ + mysql= mysql_init(0); + + /* + BUG# 17044 Federated Storage Engine is not UTF8 clean + Add set names to whatever charset the table is at open + of table + */ + /* this sets the csname like 'set names utf8' */ + mysql_options(mysql,MYSQL_SET_CHARSET_NAME, + this->table->s->table_charset->csname); + + if (!mysql || !mysql_real_connect(mysql, + share->hostname, + share->username, + share->password, + share->database, + share->port, + share->socket, 0)) + { + free_share(share); + DBUG_RETURN(stash_remote_error()); + } + /* + Since we do not support transactions at this version, we can let the client + API silently reconnect. For future versions, we will need more logic to + deal with transactions + */ + + mysql->reconnect= 1; + + ref_length= (table->s->primary_key != MAX_KEY ? + table->key_info[table->s->primary_key].key_length : + table->s->reclength); + DBUG_PRINT("info", ("ref_length: %u", ref_length)); + + DBUG_RETURN(0); +} + + +/* + Closes a table. We call the free_share() function to free any resources + that we have allocated in the "shared" structure. + + Called from sql_base.cc, sql_select.cc, and table.cc. + In sql_select.cc it is only used to close up temporary tables or during + the process where a temporary table is converted over to being a + myisam table. + For sql_base.cc look at close_data_tables(). 
+*/ + +int ha_federated::close(void) +{ + int retval; + DBUG_ENTER("ha_federated::close"); + + /* free the result set */ + if (stored_result) + { + mysql_free_result(stored_result); + stored_result= 0; + } + /* Disconnect from mysql */ + if (mysql) // QQ is this really needed + mysql_close(mysql); + retval= free_share(share); + DBUG_RETURN(retval); + +} + +/* + + Checks if a field in a record is SQL NULL. + + SYNOPSIS + field_in_record_is_null() + table TABLE pointer, MySQL table object + field Field pointer, MySQL field object + record char pointer, contains record + + DESCRIPTION + This method uses the record format information in table to track + the null bit in record. + + RETURN VALUE + 1 if NULL + 0 otherwise +*/ + +inline uint field_in_record_is_null(TABLE *table, + Field *field, + char *record) +{ + int null_offset; + DBUG_ENTER("ha_federated::field_in_record_is_null"); + + if (!field->null_ptr) + DBUG_RETURN(0); + + null_offset= (uint) ((char*)field->null_ptr - (char*)table->record[0]); + + if (record[null_offset] & field->null_bit) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + +/* + write_row() inserts a row. No extra() hint is given currently if a bulk load + is happeneding. buf() is a byte array of data. You can use the field + information to extract the data from the native byte array type. + Example of this would be: + for (Field **field=table->field ; *field ; field++) + { + ... + } + + Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc, + sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc. 
+*/ + +int ha_federated::write_row(byte *buf) +{ + char insert_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + char values_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + char insert_field_value_buffer[STRING_BUFFER_USUAL_SIZE]; + Field **field; + + /* The main insert query string */ + String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin); + /* The string containing the values to be added to the insert */ + String values_string(values_buffer, sizeof(values_buffer), &my_charset_bin); + /* The actual value of the field, to be added to the values_string */ + String insert_field_value_string(insert_field_value_buffer, + sizeof(insert_field_value_buffer), + &my_charset_bin); + values_string.length(0); + insert_string.length(0); + insert_field_value_string.length(0); + DBUG_ENTER("ha_federated::write_row"); + + statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); + + /* + start both our field and field values strings + */ + insert_string.append(FEDERATED_INSERT); + insert_string.append(FEDERATED_BTICK); + insert_string.append(share->table_name, share->table_name_length); + insert_string.append(FEDERATED_BTICK); + insert_string.append(FEDERATED_OPENPAREN); + + values_string.append(FEDERATED_VALUES); + values_string.append(FEDERATED_OPENPAREN); + + /* + loop through the field pointer array, add any fields to both the values + list and the fields list that match the current query id + */ + for (field= table->field; *field; field++) + { + if ((*field)->is_null()) + insert_field_value_string.append(FEDERATED_NULL); + else + { + (*field)->val_str(&insert_field_value_string); + values_string.append('\''); + insert_field_value_string.print(&values_string); + values_string.append('\''); + + insert_field_value_string.length(0); + } + /* append the field name */ + insert_string.append((*field)->field_name); + + /* append the value */ + 
values_string.append(insert_field_value_string); + insert_field_value_string.length(0); + + /* append commas between both fields and fieldnames */ + /* + unfortunately, we can't use the logic + if *(fields + 1) to make the following + appends conditional because we may not append + if the next field doesn't match the condition: + (((*field)->query_id && (*field)->query_id == current_query_id) + */ + insert_string.append(FEDERATED_COMMA); + values_string.append(FEDERATED_COMMA); + } + + /* + remove trailing comma + */ + insert_string.length(insert_string.length() - strlen(FEDERATED_COMMA)); + /* + if there were no fields, we don't want to add a closing paren + AND, we don't want to chop off the last char '(' + insert will be "INSERT INTO t1 VALUES ();" + */ + if (table->s->fields) + { + /* chops off leading commas */ + values_string.length(values_string.length() - strlen(FEDERATED_COMMA)); + insert_string.append(FEDERATED_CLOSEPAREN); + } + /* we always want to append this, even if there aren't any fields */ + values_string.append(FEDERATED_CLOSEPAREN); + + /* add the values */ + insert_string.append(values_string); + + if (mysql_real_query(mysql, insert_string.ptr(), insert_string.length())) + { + DBUG_RETURN(stash_remote_error()); + } + /* + If the table we've just written a record to contains an auto_increment + field, then store the last_insert_id() value from the foreign server + */ + if (table->next_number_field) + update_auto_increment(); + + DBUG_RETURN(0); +} + +/* + ha_federated::update_auto_increment + + This method ensures that last_insert_id() works properly. 
What it simply does + is calls last_insert_id() on the foreign database immediately after insert + (if the table has an auto_increment field) and sets the insert id via + thd->insert_id(ID) (as well as storing thd->prev_insert_id) +*/ +void ha_federated::update_auto_increment(void) +{ + THD *thd= current_thd; + DBUG_ENTER("ha_federated::update_auto_increment"); + + thd->insert_id(mysql->last_used_con->insert_id); + DBUG_PRINT("info",("last_insert_id: %ld", (long) auto_increment_value)); + + DBUG_VOID_RETURN; +} + +int ha_federated::optimize(THD* thd, HA_CHECK_OPT* check_opt) +{ + char query_buffer[STRING_BUFFER_USUAL_SIZE]; + String query(query_buffer, sizeof(query_buffer), &my_charset_bin); + DBUG_ENTER("ha_federated::optimize"); + + query.length(0); + + query.set_charset(system_charset_info); + query.append(FEDERATED_OPTIMIZE); + query.append(FEDERATED_BTICK); + query.append(share->table_name, share->table_name_length); + query.append(FEDERATED_BTICK); + + if (mysql_real_query(mysql, query.ptr(), query.length())) + { + DBUG_RETURN(stash_remote_error()); + } + + DBUG_RETURN(0); +} + + +int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt) +{ + char query_buffer[STRING_BUFFER_USUAL_SIZE]; + String query(query_buffer, sizeof(query_buffer), &my_charset_bin); + DBUG_ENTER("ha_federated::repair"); + + query.length(0); + + query.set_charset(system_charset_info); + query.append(FEDERATED_REPAIR); + query.append(FEDERATED_BTICK); + query.append(share->table_name, share->table_name_length); + query.append(FEDERATED_BTICK); + if (check_opt->flags & T_QUICK) + query.append(FEDERATED_QUICK); + if (check_opt->flags & T_EXTEND) + query.append(FEDERATED_EXTENDED); + if (check_opt->sql_flags & TT_USEFRM) + query.append(FEDERATED_USE_FRM); + + if (mysql_real_query(mysql, query.ptr(), query.length())) + { + DBUG_RETURN(stash_remote_error()); + } + + DBUG_RETURN(0); +} + + +/* + Yes, update_row() does what you expect, it updates a row. 
old_data will have + the previous row record in it, while new_data will have the newest data in + it. + + Keep in mind that the server can do updates based on ordering if an ORDER BY + clause was used. Consecutive ordering is not guarenteed. + Currently new_data will not have an updated auto_increament record, or + and updated timestamp field. You can do these for federated by doing these: + if (table->timestamp_on_update_now) + update_timestamp(new_row+table->timestamp_on_update_now-1); + if (table->next_number_field && record == table->record[0]) + update_auto_increment(); + + Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc. +*/ + +int ha_federated::update_row(const byte *old_data, byte *new_data) +{ + /* + This used to control how the query was built. If there was a + primary key, the query would be built such that there was a where + clause with only that column as the condition. This is flawed, + because if we have a multi-part primary key, it would only use the + first part! We don't need to do this anyway, because + read_range_first will retrieve the correct record, which is what + is used to build the WHERE clause. We can however use this to + append a LIMIT to the end if there is NOT a primary key. Why do + this? Because we only are updating one record, and LIMIT enforces + this. + */ + bool has_a_primary_key= (table->s->primary_key == 0 ? 
TRUE : FALSE); + /* + buffers for following strings + */ + char field_value_buffer[STRING_BUFFER_USUAL_SIZE]; + char update_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + char where_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + + /* Work area for field values */ + String field_value(field_value_buffer, sizeof(field_value_buffer), + &my_charset_bin); + /* stores the update query */ + String update_string(update_buffer, + sizeof(update_buffer), + &my_charset_bin); + /* stores the WHERE clause */ + String where_string(where_buffer, + sizeof(where_buffer), + &my_charset_bin); + DBUG_ENTER("ha_federated::update_row"); + /* + set string lengths to 0 to avoid misc chars in string + */ + field_value.length(0); + update_string.length(0); + where_string.length(0); + + update_string.append(FEDERATED_UPDATE); + update_string.append(FEDERATED_BTICK); + update_string.append(share->table_name); + update_string.append(FEDERATED_BTICK); + update_string.append(FEDERATED_SET); + +/* + In this loop, we want to match column names to values being inserted + (while building INSERT statement). + + Iterate through table->field (new data) and share->old_field (old_data) + using the same index to create an SQL UPDATE statement. 
New data is + used to create SET field=value and old data is used to create WHERE + field=oldvalue + */ + + for (Field **field= table->field; *field; field++) + { + where_string.append((*field)->field_name); + update_string.append((*field)->field_name); + update_string.append(FEDERATED_EQ); + + if ((*field)->is_null()) + update_string.append(FEDERATED_NULL); + else + { + /* otherwise = */ + (*field)->val_str(&field_value); + update_string.append('\''); + field_value.print(&update_string); + update_string.append('\''); + field_value.length(0); + } + + if (field_in_record_is_null(table, *field, (char*) old_data)) + where_string.append(FEDERATED_ISNULL); + else + { + where_string.append(FEDERATED_EQ); + (*field)->val_str(&field_value, + (char*) (old_data + (*field)->offset())); + where_string.append('\''); + field_value.print(&where_string); + where_string.append('\''); + field_value.length(0); + } + + /* + Only append conjunctions if we have another field in which + to iterate + */ + if (*(field + 1)) + { + update_string.append(FEDERATED_COMMA); + where_string.append(FEDERATED_AND); + } + } + update_string.append(FEDERATED_WHERE); + update_string.append(where_string); + /* + If this table has not a primary key, then we could possibly + update multiple rows. We want to make sure to only update one! + */ + if (!has_a_primary_key) + update_string.append(FEDERATED_LIMIT1); + + if (mysql_real_query(mysql, update_string.ptr(), update_string.length())) + { + DBUG_RETURN(stash_remote_error()); + } + DBUG_RETURN(0); +} + +/* + This will delete a row. 'buf' will contain a copy of the row to be =deleted. + The server will call this right after the current row has been called (from + either a previous rnd_next() or index call). + If you keep a pointer to the last row or can access a primary key it will + make doing the deletion quite a bit easier. + Keep in mind that the server does no guarentee consecutive deletions. + ORDER BY clauses can be used. 
+ + Called in sql_acl.cc and sql_udf.cc to manage internal table information. + Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select + it is used for removing duplicates while in insert it is used for REPLACE + calls. +*/ + +int ha_federated::delete_row(const byte *buf) +{ + char delete_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + char data_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + + String delete_string(delete_buffer, sizeof(delete_buffer), &my_charset_bin); + String data_string(data_buffer, sizeof(data_buffer), &my_charset_bin); + DBUG_ENTER("ha_federated::delete_row"); + + delete_string.length(0); + delete_string.append(FEDERATED_DELETE); + delete_string.append(FEDERATED_FROM); + delete_string.append(FEDERATED_BTICK); + delete_string.append(share->table_name); + delete_string.append(FEDERATED_BTICK); + delete_string.append(FEDERATED_WHERE); + + for (Field **field= table->field; *field; field++) + { + Field *cur_field= *field; + data_string.length(0); + delete_string.append(cur_field->field_name); + + if (cur_field->is_null()) + { + delete_string.append(FEDERATED_ISNULL); + } + else + { + delete_string.append(FEDERATED_EQ); + cur_field->val_str(&data_string); + delete_string.append('\''); + data_string.print(&delete_string); + delete_string.append('\''); + } + + delete_string.append(FEDERATED_AND); + } + delete_string.length(delete_string.length()-5); // Remove trailing AND + + delete_string.append(FEDERATED_LIMIT1); + DBUG_PRINT("info", + ("Delete sql: %s", delete_string.c_ptr_quick())); + if (mysql_real_query(mysql, delete_string.ptr(), delete_string.length())) + { + DBUG_RETURN(stash_remote_error()); + } + deleted+= (ha_rows) mysql->affected_rows; + records-= (ha_rows) mysql->affected_rows; + DBUG_PRINT("info", + ("rows deleted %ld rows deleted for all time %ld", + (long) mysql->affected_rows, (long) deleted)); + + DBUG_RETURN(0); +} + + +/* + Positions an index cursor to the index specified in the handle. Fetches the + row if available. 
If the key value is null, begin at the first key of the + index. This method, which is called in the case of an SQL statement having + a WHERE clause on a non-primary key index, simply calls index_read_idx. +*/ + +int ha_federated::index_read(byte *buf, const byte *key, + uint key_len, ha_rkey_function find_flag) +{ + DBUG_ENTER("ha_federated::index_read"); + + if (stored_result) + mysql_free_result(stored_result); + DBUG_RETURN(index_read_idx_with_result_set(buf, active_index, key, + key_len, find_flag, + &stored_result)); +} + + +/* + Positions an index cursor to the index specified in key. Fetches the + row if any. This is only used to read whole keys. + + This method is called via index_read in the case of a WHERE clause using + a primary key index OR is called DIRECTLY when the WHERE clause + uses a PRIMARY KEY index. + + NOTES + This uses an internal result set that is deleted before function + returns. We need to be able to be calable from ha_rnd_pos() +*/ + +int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, + uint key_len, enum ha_rkey_function find_flag) +{ + int retval; + MYSQL_RES *mysql_result; + DBUG_ENTER("ha_federated::index_read_idx"); + + if ((retval= index_read_idx_with_result_set(buf, index, key, + key_len, find_flag, + &mysql_result))) + DBUG_RETURN(retval); + mysql_free_result(mysql_result); + DBUG_RETURN(retval); +} + + +/* + Create result set for rows matching query and return first row + + RESULT + 0 ok In this case *result will contain the result set + table->status == 0 + # error In this case *result will contain 0 + table->status == STATUS_NOT_FOUND +*/ + +int ha_federated::index_read_idx_with_result_set(byte *buf, uint index, + const byte *key, + uint key_len, + ha_rkey_function find_flag, + MYSQL_RES **result) +{ + int retval; + char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + char index_value[STRING_BUFFER_USUAL_SIZE]; + char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + String index_string(index_value, + 
sizeof(index_value), + &my_charset_bin); + String sql_query(sql_query_buffer, + sizeof(sql_query_buffer), + &my_charset_bin); + key_range range; + DBUG_ENTER("ha_federated::index_read_idx_with_result_set"); + + *result= 0; // In case of errors + index_string.length(0); + sql_query.length(0); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); + + sql_query.append(share->select_query); + + range.key= key; + range.length= key_len; + range.flag= find_flag; + create_where_from_key(&index_string, + &table->key_info[index], + &range, + NULL, 0); + sql_query.append(index_string); + + if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) + { + my_sprintf(error_buffer, (error_buffer, "error: %d '%s'", + mysql_errno(mysql), mysql_error(mysql))); + retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE; + goto error; + } + if (!(*result= mysql_store_result(mysql))) + { + retval= HA_ERR_END_OF_FILE; + goto error; + } + if (!(retval= read_next(buf, *result))) + DBUG_RETURN(retval); + + mysql_free_result(*result); + *result= 0; + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(retval); + +error: + table->status= STATUS_NOT_FOUND; + my_error(retval, MYF(0), error_buffer); + DBUG_RETURN(retval); +} + + +/* Initialized at each key walk (called multiple times unlike rnd_init()) */ + +int ha_federated::index_init(uint keynr) +{ + DBUG_ENTER("ha_federated::index_init"); + DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr)); + active_index= keynr; + DBUG_RETURN(0); +} + + +/* + Read first range +*/ + +int ha_federated::read_range_first(const key_range *start_key, + const key_range *end_key, + bool eq_range_arg, bool sorted) +{ + char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + int retval; + String sql_query(sql_query_buffer, + sizeof(sql_query_buffer), + &my_charset_bin); + DBUG_ENTER("ha_federated::read_range_first"); + + DBUG_ASSERT(!(start_key == NULL && end_key == NULL)); + + sql_query.length(0); + 
sql_query.append(share->select_query); + create_where_from_key(&sql_query, + &table->key_info[active_index], + start_key, end_key, 0); + + if (stored_result) + { + mysql_free_result(stored_result); + stored_result= 0; + } + if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) + { + retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE; + goto error; + } + sql_query.length(0); + + if (!(stored_result= mysql_store_result(mysql))) + { + retval= HA_ERR_END_OF_FILE; + goto error; + } + + retval= read_next(table->record[0], stored_result); + DBUG_RETURN(retval); + +error: + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(retval); +} + + +int ha_federated::read_range_next() +{ + int retval; + DBUG_ENTER("ha_federated::read_range_next"); + retval= rnd_next(table->record[0]); + DBUG_RETURN(retval); +} + + +/* Used to read forward through the index. */ +int ha_federated::index_next(byte *buf) +{ + DBUG_ENTER("ha_federated::index_next"); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); + DBUG_RETURN(read_next(buf, stored_result)); +} + + +/* + rnd_init() is called when the system wants the storage engine to do a table + scan. + + This is the method that gets data for the SELECT calls. + + See the federated in the introduction at the top of this file to see when + rnd_init() is called. + + Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, + sql_table.cc, and sql_update.cc. +*/ + +int ha_federated::rnd_init(bool scan) +{ + DBUG_ENTER("ha_federated::rnd_init"); + /* + The use of the 'scan' flag is incredibly important for this handler + to work properly, especially with updates containing WHERE clauses + using indexed columns. + + When the initial query contains a WHERE clause of the query using an + indexed column, it's index_read_idx that selects the exact record from + the foreign database. 
+ + When there is NO index in the query, either due to not having a WHERE + clause, or the WHERE clause is using columns that are not indexed, a + 'full table scan' is done by rnd_init, which in this situation simply means + a 'select * from ...' on the foreign table. + + In other words, this 'scan' flag gives us the means to ensure that if + there is an index involved in the query, we want index_read_idx to + retrieve the exact record (scan flag is 0), and do not want rnd_init + to do a 'full table scan' and wipe out that result set. + + Prior to using this flag, the problem was most apparent with updates. + + An initial query like 'UPDATE tablename SET anything = whatever WHERE + indexedcol = someval', index_read_idx would get called, using a query + constructed with a WHERE clause built from the values of index ('indexedcol' + in this case, having a value of 'someval'). mysql_store_result would + then get called (this would be the result set we want to use). + + After this rnd_init (from sql_update.cc) would be called, it would then + unnecessarily call "select * from table" on the foreign table, then call + mysql_store_result, which would wipe out the correct previous result set + from the previous call of index_read_idx's that had the result set + containing the correct record, hence update the wrong row! 
+ + */ + + if (scan) + { + if (stored_result) + { + mysql_free_result(stored_result); + stored_result= 0; + } + + if (mysql_real_query(mysql, + share->select_query, + strlen(share->select_query))) + goto error; + + stored_result= mysql_store_result(mysql); + if (!stored_result) + goto error; + } + DBUG_RETURN(0); + +error: + DBUG_RETURN(stash_remote_error()); +} + + +int ha_federated::rnd_end() +{ + DBUG_ENTER("ha_federated::rnd_end"); + DBUG_RETURN(index_end()); +} + + +int ha_federated::index_end(void) +{ + DBUG_ENTER("ha_federated::index_end"); + if (stored_result) + { + mysql_free_result(stored_result); + stored_result= 0; + } + active_index= MAX_KEY; + DBUG_RETURN(0); +} + +/* + This is called for each row of the table scan. When you run out of records + you should return HA_ERR_END_OF_FILE. Fill buff up with the row information. + The Field structure for the table is the key to getting data into buf + in a manner that will allow the server to understand it. + + Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, + sql_table.cc, and sql_update.cc. +*/ + +int ha_federated::rnd_next(byte *buf) +{ + DBUG_ENTER("ha_federated::rnd_next"); + + if (stored_result == 0) + { + /* + Return value of rnd_init is not always checked (see records.cc), + so we can get here _even_ if there is _no_ pre-fetched result-set! + TODO: fix it. We can delete this in 5.1 when rnd_init() is checked. 
+ */ + DBUG_RETURN(1); + } + DBUG_RETURN(read_next(buf, stored_result)); +} + + +/* + ha_federated::read_next + + reads from a result set and converts to mysql internal + format + + SYNOPSIS + field_in_record_is_null() + buf byte pointer to record + result mysql result set + + DESCRIPTION + This method is a wrapper method that reads one record from a result + set and converts it to the internal table format + + RETURN VALUE + 1 error + 0 no error +*/ + +int ha_federated::read_next(byte *buf, MYSQL_RES *result) +{ + int retval; + MYSQL_ROW row; + DBUG_ENTER("ha_federated::read_next"); + + table->status= STATUS_NOT_FOUND; // For easier return + + /* Fetch a row, insert it back in a row format. */ + if (!(row= mysql_fetch_row(result))) + DBUG_RETURN(HA_ERR_END_OF_FILE); + + if (!(retval= convert_row_to_internal_format(buf, row, result))) + table->status= 0; + + DBUG_RETURN(retval); +} + + +/* + store reference to current row so that we can later find it for + a re-read, update or delete. + + In case of federated, a reference is either a primary key or + the whole record. + + Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc. +*/ + +void ha_federated::position(const byte *record) +{ + DBUG_ENTER("ha_federated::position"); + if (table->s->primary_key != MAX_KEY) + key_copy(ref, (byte *)record, table->key_info + table->s->primary_key, + ref_length); + else + memcpy(ref, record, ref_length); + DBUG_VOID_RETURN; +} + + +/* + This is like rnd_next, but you are given a position to use to determine the + row. The position will be of the type that you stored in ref. + + This method is required for an ORDER BY + + Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc. 
+*/ + +int ha_federated::rnd_pos(byte *buf, byte *pos) +{ + int result; + DBUG_ENTER("ha_federated::rnd_pos"); + statistic_increment(table->in_use->status_var.ha_read_rnd_count, + &LOCK_status); + if (table->s->primary_key != MAX_KEY) + { + /* We have a primary key, so use index_read_idx to find row */ + result= index_read_idx(buf, table->s->primary_key, pos, + ref_length, HA_READ_KEY_EXACT); + } + else + { + /* otherwise, get the old record ref as obtained in ::position */ + memcpy(buf, pos, ref_length); + result= 0; + } + table->status= result ? STATUS_NOT_FOUND : 0; + DBUG_RETURN(result); +} + + +/* + ::info() is used to return information to the optimizer. + Currently this table handler doesn't implement most of the fields + really needed. SHOW also makes use of this data + Another note, you will probably want to have the following in your + code: + if (records < 2) + records = 2; + The reason is that the server will optimize for cases of only a single + record. If in a table scan you don't know the number of records + it will probably be better to set records to two so you can return + as many records as you need. + Along with records a few more variables you may wish to set are: + records + deleted + data_file_length + index_file_length + delete_length + check_time + Take a look at the public variables in handler.h for more information. 
+ + Called in: + filesort.cc + ha_heap.cc + item_sum.cc + opt_sum.cc + sql_delete.cc + sql_delete.cc + sql_derived.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_show.cc + sql_show.cc + sql_show.cc + sql_show.cc + sql_table.cc + sql_union.cc + sql_update.cc + +*/ + +int ha_federated::info(uint flag) +{ + char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + char status_buf[FEDERATED_QUERY_BUFFER_SIZE]; + char escaped_table_name[FEDERATED_QUERY_BUFFER_SIZE]; + int error; + uint error_code; + MYSQL_RES *result= 0; + MYSQL_ROW row; + String status_query_string(status_buf, sizeof(status_buf), &my_charset_bin); + DBUG_ENTER("ha_federated::info"); + + error_code= ER_QUERY_ON_FOREIGN_DATA_SOURCE; + /* we want not to show table status if not needed to do so */ + if (flag & (HA_STATUS_VARIABLE | HA_STATUS_CONST)) + { + status_query_string.length(0); + status_query_string.append(FEDERATED_INFO); + status_query_string.append(FEDERATED_SQUOTE); + + escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name, + sizeof(escaped_table_name), + share->table_name, + share->table_name_length); + status_query_string.append(escaped_table_name); + status_query_string.append(FEDERATED_SQUOTE); + + if (mysql_real_query(mysql, status_query_string.ptr(), + status_query_string.length())) + goto error; + + status_query_string.length(0); + + result= mysql_store_result(mysql); + if (!result) + goto error; + + if (!mysql_num_rows(result)) + goto error; + + if (!(row= mysql_fetch_row(result))) + goto error; + + if (flag & HA_STATUS_VARIABLE | HA_STATUS_CONST) + { + /* + deleted is set in ha_federated::info + */ + /* + need to figure out what this means as far as federated is concerned, + since we don't have a "file" + + data_file_length = ? + index_file_length = ? + delete_length = ? 
+ */ + if (row[4] != NULL) + records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error); + + mean_rec_length= table->s->reclength; + data_file_length= records * mean_rec_length; + + if (row[12] != NULL) + update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error); + if (row[13] != NULL) + check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error); + } + + /* + size of IO operations (This is based on a good guess, no high science + involved) + */ + block_size= 4096; + } + + if (result) + mysql_free_result(result); + + DBUG_RETURN(0); + +error: + if (result) + mysql_free_result(result); + + my_sprintf(error_buffer, (error_buffer, ": %d : %s", + mysql_errno(mysql), mysql_error(mysql))); + my_error(error_code, MYF(0), error_buffer); + DBUG_RETURN(error_code); +} + + +/* + Used to delete all rows in a table. Both for cases of truncate and + for cases where the optimizer realizes that all rows will be + removed as a result of a SQL statement. + + Called from item_sum.cc by Item_func_group_concat::clear(), + Item_sum_count_distinct::clear(), and Item_func_group_concat::clear(). + Called from sql_delete.cc by mysql_delete(). + Called from sql_select.cc by JOIN::reinit(). + Called from sql_union.cc by st_select_lex_unit::exec(). 
+*/ + +int ha_federated::delete_all_rows() +{ + char query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; + String query(query_buffer, sizeof(query_buffer), &my_charset_bin); + DBUG_ENTER("ha_federated::delete_all_rows"); + + query.length(0); + + query.set_charset(system_charset_info); + query.append(FEDERATED_TRUNCATE); + query.append(FEDERATED_BTICK); + query.append(share->table_name); + query.append(FEDERATED_BTICK); + + /* + TRUNCATE won't return anything in mysql_affected_rows + */ + if (mysql_real_query(mysql, query.ptr(), query.length())) + { + DBUG_RETURN(stash_remote_error()); + } + deleted+= records; + records= 0; + DBUG_RETURN(0); +} + + +/* + The idea with handler::store_lock() is the following: + + The statement decided which locks we should need for the table + for updates/deletes/inserts we get WRITE locks, for SELECT... we get + read locks. + + Before adding the lock into the table lock handler (see thr_lock.c) + mysqld calls store lock with the requested locks. Store lock can now + modify a write lock to a read lock (or some other lock), ignore the + lock (if we don't want to use MySQL table locks at all) or add locks + for many tables (like we do when we are using a MERGE handler). + + Berkeley DB for federated changes all WRITE locks to TL_WRITE_ALLOW_WRITE + (which signals that we are doing WRITES, but we are still allowing other + reader's and writer's. + + When releasing locks, store_lock() are also called. In this case one + usually doesn't have to do anything. + + In some exceptional cases MySQL may send a request for a TL_IGNORE; + This means that we are requesting the same lock as last time and this + should also be ignored. (This may happen when someone does a flush + table when we have opened a part of the tables, in which case mysqld + closes and reopens the tables and tries to get the same locks at last + time). In the future we will probably try to remove this. + + Called from lock.cc by get_lock_data(). 
+*/ + +THR_LOCK_DATA **ha_federated::store_lock(THD *thd, + THR_LOCK_DATA **to, + enum thr_lock_type lock_type) +{ + DBUG_ENTER("ha_federated::store_lock"); + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) + { + /* + Here is where we get into the guts of a row level lock. + If TL_UNLOCK is set + If we are not doing a LOCK TABLE or DISCARD/IMPORT + TABLESPACE, then allow multiple writers + */ + + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && + lock_type <= TL_WRITE) && !thd->in_lock_tables) + lock_type= TL_WRITE_ALLOW_WRITE; + + /* + In queries of type INSERT INTO t1 SELECT ... FROM t2 ... + MySQL would use the lock TL_READ_NO_INSERT on t2, and that + would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts + to t2. Convert the lock to a normal read lock to allow + concurrent inserts to t2. + */ + + if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables) + lock_type= TL_READ; + + lock.type= lock_type; + } + + *to++= &lock; + + DBUG_RETURN(to); +} + +/* + create() does nothing, since we have no local setup of our own. 
+ FUTURE: We should potentially connect to the foreign database and +*/ + +int ha_federated::create(const char *name, TABLE *table_arg, + HA_CREATE_INFO *create_info) +{ + int retval; + FEDERATED_SHARE tmp_share; // Only a temporary share, to test the url + DBUG_ENTER("ha_federated::create"); + + if (!(retval= parse_url(&tmp_share, table_arg, 1))) + retval= check_foreign_data_source(&tmp_share, 1); + + my_free((gptr) tmp_share.scheme, MYF(MY_ALLOW_ZERO_PTR)); + DBUG_RETURN(retval); + +} + + +int ha_federated::stash_remote_error() +{ + DBUG_ENTER("ha_federated::stash_remote_error()"); + remote_error_number= mysql_errno(mysql); + strmake(remote_error_buf, mysql_error(mysql), sizeof(remote_error_buf)-1); + DBUG_RETURN(HA_FEDERATED_ERROR_WITH_REMOTE_SYSTEM); +} + + +bool ha_federated::get_error_message(int error, String* buf) +{ + DBUG_ENTER("ha_federated::get_error_message"); + DBUG_PRINT("enter", ("error: %d", error)); + if (error == HA_FEDERATED_ERROR_WITH_REMOTE_SYSTEM) + { + buf->append(STRING_WITH_LEN("Error on remote system: ")); + buf->qs_append(remote_error_number); + buf->append(STRING_WITH_LEN(": ")); + buf->append(remote_error_buf); + + remote_error_number= 0; + remote_error_buf[0]= '\0'; + } + DBUG_PRINT("exit", ("message: %s", buf->ptr())); + DBUG_RETURN(FALSE); +} + +#endif /* HAVE_FEDERATED_DB */ diff --git a/sql/ha_federated.h b/sql/ha_federated.h new file mode 100644 index 00000000000..09c934cb493 --- /dev/null +++ b/sql/ha_federated.h @@ -0,0 +1,312 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Please read ha_example.cc before reading this file. + Please keep in mind that the federated storage engine implements all methods + that are required to be implemented. handler.h has a full list of methods + that you can implement. +*/ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +#include <mysql.h> + +/* + handler::print_error has a case statement for error numbers. + This value (10000) is far out of range and will invoke the + default: case. + (Current error range is 120-159 from include/my_base.h) +*/ +#define HA_FEDERATED_ERROR_WITH_REMOTE_SYSTEM 10000 + +#define FEDERATED_QUERY_BUFFER_SIZE STRING_BUFFER_USUAL_SIZE * 5 +#define FEDERATED_RECORDS_IN_RANGE 2 + +#define FEDERATED_INFO " SHOW TABLE STATUS LIKE " +#define FEDERATED_INFO_LEN sizeof(FEDERATED_INFO) +#define FEDERATED_SELECT "SELECT " +#define FEDERATED_SELECT_LEN sizeof(FEDERATED_SELECT) +#define FEDERATED_WHERE " WHERE " +#define FEDERATED_WHERE_LEN sizeof(FEDERATED_WHERE) +#define FEDERATED_FROM " FROM " +#define FEDERATED_FROM_LEN sizeof(FEDERATED_FROM) +#define FEDERATED_PERCENT "%" +#define FEDERATED_PERCENT_LEN sizeof(FEDERATED_PERCENT) +#define FEDERATED_IS " IS " +#define FEDERATED_IS_LEN sizeof(FEDERATED_IS) +#define FEDERATED_NULL " NULL " +#define FEDERATED_NULL_LEN sizeof(FEDERATED_NULL) +#define FEDERATED_ISNULL " IS NULL " +#define FEDERATED_ISNULL_LEN sizeof(FEDERATED_ISNULL) +#define FEDERATED_LIKE " LIKE " +#define FEDERATED_LIKE_LEN sizeof(FEDERATED_LIKE) +#define FEDERATED_TRUNCATE "TRUNCATE " +#define FEDERATED_TRUNCATE_LEN sizeof(FEDERATED_TRUNCATE) +#define FEDERATED_DELETE "DELETE " +#define FEDERATED_DELETE_LEN sizeof(FEDERATED_DELETE) +#define FEDERATED_INSERT "INSERT INTO " +#define FEDERATED_INSERT_LEN 
sizeof(FEDERATED_INSERT) +#define FEDERATED_OPTIMIZE "OPTIMIZE TABLE " +#define FEDERATED_OPTIMIZE_LEN sizeof(FEDERATED_OPTIMIZE) +#define FEDERATED_REPAIR "REPAIR TABLE " +#define FEDERATED_REPAIR_LEN sizeof(FEDERATED_REPAIR) +#define FEDERATED_QUICK " QUICK" +#define FEDERATED_QUICK_LEN sizeof(FEDERATED_QUICK) +#define FEDERATED_EXTENDED " EXTENDED" +#define FEDERATED_EXTENDED_LEN sizeof(FEDERATED_EXTENDED) +#define FEDERATED_USE_FRM " USE_FRM" +#define FEDERATED_USE_FRM_LEN sizeof(FEDERATED_USE_FRM) +#define FEDERATED_LIMIT1 " LIMIT 1" +#define FEDERATED_LIMIT1_LEN sizeof(FEDERATED_LIMIT1) +#define FEDERATED_VALUES "VALUES " +#define FEDERATED_VALUES_LEN sizeof(FEDERATED_VALUES) +#define FEDERATED_UPDATE "UPDATE " +#define FEDERATED_UPDATE_LEN sizeof(FEDERATED_UPDATE) +#define FEDERATED_SET " SET " +#define FEDERATED_SET_LEN sizeof(FEDERATED_SET) +#define FEDERATED_AND " AND " +#define FEDERATED_AND_LEN sizeof(FEDERATED_AND) +#define FEDERATED_CONJUNCTION ") AND (" +#define FEDERATED_CONJUNCTION_LEN sizeof(FEDERATED_CONJUNCTION) +#define FEDERATED_OR " OR " +#define FEDERATED_OR_LEN sizeof(FEDERATED_OR) +#define FEDERATED_NOT " NOT " +#define FEDERATED_NOT_LEN sizeof(FEDERATED_NOT) +#define FEDERATED_STAR "* " +#define FEDERATED_STAR_LEN sizeof(FEDERATED_STAR) +#define FEDERATED_SPACE " " +#define FEDERATED_SPACE_LEN sizeof(FEDERATED_SPACE) +#define FEDERATED_SQUOTE "'" +#define FEDERATED_SQUOTE_LEN sizeof(FEDERATED_SQUOTE) +#define FEDERATED_COMMA ", " +#define FEDERATED_COMMA_LEN sizeof(FEDERATED_COMMA) +#define FEDERATED_BTICK "`" +#define FEDERATED_BTICK_LEN sizeof(FEDERATED_BTICK) +#define FEDERATED_OPENPAREN " (" +#define FEDERATED_OPENPAREN_LEN sizeof(FEDERATED_OPENPAREN) +#define FEDERATED_CLOSEPAREN ") " +#define FEDERATED_CLOSEPAREN_LEN sizeof(FEDERATED_CLOSEPAREN) +#define FEDERATED_NE " != " +#define FEDERATED_NE_LEN sizeof(FEDERATED_NE) +#define FEDERATED_GT " > " +#define FEDERATED_GT_LEN sizeof(FEDERATED_GT) +#define FEDERATED_LT " < " +#define 
FEDERATED_LT_LEN sizeof(FEDERATED_LT) +#define FEDERATED_LE " <= " +#define FEDERATED_LE_LEN sizeof(FEDERATED_LE) +#define FEDERATED_GE " >= " +#define FEDERATED_GE_LEN sizeof(FEDERATED_GE) +#define FEDERATED_EQ " = " +#define FEDERATED_EQ_LEN sizeof(FEDERATED_EQ) +#define FEDERATED_FALSE " 1=0" +#define FEDERATED_FALSE_LEN sizeof(FEDERATED_FALSE) + +/* + FEDERATED_SHARE is a structure that will be shared amoung all open handlers + The example implements the minimum of what you will probably need. +*/ +typedef struct st_federated_share { + /* + the primary select query to be used in rnd_init + */ + char *select_query; + /* + remote host info, parse_url supplies + */ + char *scheme; + char *connect_string; + char *hostname; + char *username; + char *password; + char *database; + char *table_name; + char *table; + char *socket; + char *sport; + ushort port; + uint table_name_length, connect_string_length, use_count; + pthread_mutex_t mutex; + THR_LOCK lock; +} FEDERATED_SHARE; + +/* + Class definition for the storage engine +*/ +class ha_federated: public handler +{ + THR_LOCK_DATA lock; /* MySQL lock */ + FEDERATED_SHARE *share; /* Shared lock info */ + MYSQL *mysql; /* MySQL connection */ + MYSQL_RES *stored_result; + uint fetch_num; // stores the fetch num + MYSQL_ROW_OFFSET current_position; // Current position used by ::position() + int remote_error_number; + char remote_error_buf[FEDERATED_QUERY_BUFFER_SIZE]; + +private: + /* + return 0 on success + return errorcode otherwise + */ + uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row, + MYSQL_RES *result); + bool create_where_from_key(String *to, KEY *key_info, + const key_range *start_key, + const key_range *end_key, + bool records_in_range); + int stash_remote_error(); + +public: + ha_federated(TABLE *table_arg); + ~ha_federated() + { + } + /* The name that will be used for display purposes */ + const char *table_type() const { return "FEDERATED"; } + /* + The name of the index type that will be used 
for display + don't implement this method unless you really have indexes + */ + // perhaps get index type + const char *index_type(uint inx) { return "REMOTE"; } + const char **bas_ext() const; + /* + This is a list of flags that says what the storage engine + implements. The current table flags are documented in + handler.h + */ + ulong table_flags() const + { + /* fix server to be able to get remote server table flags */ + return (HA_NOT_EXACT_COUNT | + HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED | HA_REC_NOT_IN_SEQ | + HA_AUTO_PART_KEY | HA_CAN_INDEX_BLOBS| HA_NO_PREFIX_CHAR_KEYS | + HA_NULL_IN_KEY + ); + } + /* + This is a bitmap of flags that says how the storage engine + implements indexes. The current index flags are documented in + handler.h. If you do not implement indexes, just return zero + here. + + part is the key part to check. First key part is 0 + If all_parts it's set, MySQL want to know the flags for the combined + index up to and including 'part'. + */ + /* fix server to be able to get remote server index flags */ + ulong index_flags(uint inx, uint part, bool all_parts) const + { + return (HA_READ_NEXT | HA_READ_RANGE | HA_READ_AFTER_KEY); + } + uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; } + uint max_supported_keys() const { return MAX_KEY; } + uint max_supported_key_parts() const { return MAX_REF_PARTS; } + uint max_supported_key_length() const { return MAX_KEY_LENGTH; } + /* + Called in test_quick_select to determine if indexes should be used. + Normally, we need to know number of blocks . For federated we need to + know number of blocks on remote side, and number of packets and blocks + on the network side (?) + Talk to Kostja about this - how to get the + number of rows * ... + disk scan time on other side (block size, size of the row) + network time ... 
+ The reason for "records * 1000" is that such a large number forces + this to use indexes + */ + double scan_time() + { + DBUG_PRINT("info", ("records %ld", (long) records)); + return (double)(records*1000); + } + /* + The next method will never be called if you do not implement indexes. + */ + double read_time(uint index, uint ranges, ha_rows rows) + { + /* + Per Brian, this number is bogus, but this method must be implemented, + and at a later date, he intends to document this issue for handler code + */ + return (double) rows / 20.0+1; + } + + const key_map *keys_to_use_for_scanning() { return &key_map_full; } + /* + Everything below are methods that we implement in ha_federated.cc. + + Most of these methods are not obligatory, skip them and + MySQL will treat them as not implemented + */ + int open(const char *name, int mode, uint test_if_locked); // required + int close(void); // required + + int write_row(byte *buf); + int update_row(const byte *old_data, byte *new_data); + int delete_row(const byte *buf); + int index_init(uint keynr); + int index_read(byte *buf, const byte *key, + uint key_len, enum ha_rkey_function find_flag); + int index_read_idx(byte *buf, uint idx, const byte *key, + uint key_len, enum ha_rkey_function find_flag); + int index_next(byte *buf); + int index_end(); + int read_range_first(const key_range *start_key, + const key_range *end_key, + bool eq_range, bool sorted); + int read_range_next(); + /* + unlike index_init(), rnd_init() can be called two times + without rnd_end() in between (it only makes sense if scan=1). 
+ then the second call should prepare for the new table scan + (e.g if rnd_init allocates the cursor, second call should + position it to the start of the table, no need to deallocate + and allocate it again + */ + int rnd_init(bool scan); //required + int rnd_end(); + int rnd_next(byte *buf); //required + int rnd_pos(byte *buf, byte *pos); //required + void position(const byte *record); //required + int info(uint); //required + + void update_auto_increment(void); + int repair(THD* thd, HA_CHECK_OPT* check_opt); + int optimize(THD* thd, HA_CHECK_OPT* check_opt); + + int delete_all_rows(void); + int create(const char *name, TABLE *form, + HA_CREATE_INFO *create_info); //required + ha_rows records_in_range(uint inx, key_range *start_key, + key_range *end_key); + uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; } + + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type); //required + virtual bool get_error_message(int error, String *buf); + + int read_next(byte *buf, MYSQL_RES *result); + int index_read_idx_with_result_set(byte *buf, uint index, + const byte *key, + uint key_len, + ha_rkey_function find_flag, + MYSQL_RES **result); +}; + +bool federated_db_init(void); +bool federated_db_end(void); diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index 3e981087df7..d1a931b07f2 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000,2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -23,12 +22,48 @@ #include <myisampack.h> #include "ha_heap.h" +handlerton heap_hton= { + "MEMORY", + SHOW_OPTION_YES, + "Hash based, stored in memory, useful for temporary tables", + DB_TYPE_HEAP, + NULL, + 0, /* slot */ + 0, /* savepoint size. */ + NULL, /* close_connection */ + NULL, /* savepoint */ + NULL, /* rollback to savepoint */ + NULL, /* release savepoint */ + NULL, /* commit */ + NULL, /* rollback */ + NULL, /* prepare */ + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + HTON_CAN_RECREATE +}; + /***************************************************************************** ** HEAP tables *****************************************************************************/ +ha_heap::ha_heap(TABLE *table_arg) + :handler(&heap_hton, table_arg), file(0), records_changed(0), + key_stat_version(0) +{} + + +static const char *ha_heap_exts[] = { + NullS +}; + const char **ha_heap::bas_ext() const -{ static const char *ext[1]= { NullS }; return ext; } +{ + return ha_heap_exts; +} /* Hash index statistics is updated (copied from HP_KEYDEF::hash_buckets to @@ -98,16 +133,17 @@ int ha_heap::close(void) void ha_heap::set_keys_for_scanning(void) { btree_keys.clear_all(); - for (uint i= 0 ; i < table->keys ; i++) + for (uint i= 0 ; i < table->s->keys ; i++) { if (table->key_info[i].algorithm == HA_KEY_ALG_BTREE) btree_keys.set_bit(i); } } + void ha_heap::update_key_stats() { - for (uint i= 0; i < table->keys; i++) + for (uint i= 0; i < table->s->keys; i++) { KEY *key=table->key_info+i; if (!key->rec_per_key) @@ -131,17 +167,21 @@ void ha_heap::update_key_stats() key_stat_version= file->s->key_stat_version; } + int ha_heap::write_row(byte * buf) { int res; - statistic_increment(ha_write_count,&LOCK_status); + 
statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) - update_auto_increment(); + { + if ((res= update_auto_increment())) + return res; + } res= heap_write(file,buf); - if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > - file->s->records) + if (!res && (++records_changed*HEAP_STATS_UPDATE_THRESHOLD > + file->s->records)) { /* We can perform this safely since only one writer at the time is @@ -155,7 +195,7 @@ int ha_heap::write_row(byte * buf) int ha_heap::update_row(const byte * old_data, byte * new_data) { int res; - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); res= heap_update(file,old_data,new_data); @@ -174,9 +214,9 @@ int ha_heap::update_row(const byte * old_data, byte * new_data) int ha_heap::delete_row(const byte * buf) { int res; - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); res= heap_delete(file,buf); - if (!res && table->tmp_table == NO_TMP_TABLE && + if (!res && table->s->tmp_table == NO_TMP_TABLE && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records) { /* @@ -192,7 +232,8 @@ int ha_heap::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error = heap_rkey(file,buf,active_index, key, key_len, find_flag); table->status = error ? 
STATUS_NOT_FOUND : 0; return error; @@ -201,7 +242,8 @@ int ha_heap::index_read(byte * buf, const byte * key, uint key_len, int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error= heap_rkey(file, buf, active_index, key, key_len, HA_READ_PREFIX_LAST); table->status= error ? STATUS_NOT_FOUND : 0; @@ -211,7 +253,8 @@ int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len) int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error = heap_rkey(file, buf, index, key, key_len, find_flag); table->status = error ? STATUS_NOT_FOUND : 0; return error; @@ -220,7 +263,8 @@ int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, int ha_heap::index_next(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); int error=heap_rnext(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -229,7 +273,8 @@ int ha_heap::index_next(byte * buf) int ha_heap::index_prev(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_prev_count, + &LOCK_status); int error=heap_rprev(file,buf); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; @@ -238,7 +283,8 @@ int ha_heap::index_prev(byte * buf) int ha_heap::index_first(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_first_count, + &LOCK_status); int error=heap_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -247,7 +293,8 @@ int ha_heap::index_first(byte * buf) int ha_heap::index_last(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_last_count, + &LOCK_status); int error=heap_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -260,7 +307,8 @@ int ha_heap::rnd_init(bool scan) int ha_heap::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=heap_scan(file, buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -269,10 +317,11 @@ int ha_heap::rnd_next(byte *buf) int ha_heap::rnd_pos(byte * buf, byte *pos) { int error; - HEAP_PTR position; - statistic_increment(ha_read_rnd_count,&LOCK_status); - memcpy_fixed((char*) &position,pos,sizeof(HEAP_PTR)); - error=heap_rrnd(file, buf, position); + HEAP_PTR heap_position; + statistic_increment(table->in_use->status_var.ha_read_rnd_count, + &LOCK_status); + memcpy_fixed((char*) &heap_position, pos, sizeof(HEAP_PTR)); + error=heap_rrnd(file, buf, heap_position); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; } @@ -284,19 +333,19 @@ void ha_heap::position(const byte *record) int ha_heap::info(uint flag) { - HEAPINFO info; - (void) heap_info(file,&info,flag); - - records = info.records; - deleted = info.deleted; - errkey = info.errkey; - mean_rec_length=info.reclength; - data_file_length=info.data_length; - index_file_length=info.index_length; - max_data_file_length= info.max_records* info.reclength; - delete_length= info.deleted * info.reclength; + HEAPINFO hp_info; + (void) heap_info(file,&hp_info,flag); + + records= hp_info.records; + deleted= hp_info.deleted; + errkey= hp_info.errkey; + mean_rec_length= hp_info.reclength; + data_file_length= hp_info.data_length; + index_file_length= hp_info.index_length; + max_data_file_length= hp_info.max_records* hp_info.reclength; + delete_length= hp_info.deleted * hp_info.reclength; if (flag & HA_STATUS_AUTO) - auto_increment_value= info.auto_increment; + auto_increment_value= hp_info.auto_increment; /* If info() is called for the first time after open(), we will still have to update the key statistics. 
Hoping that a table lock is now @@ -315,7 +364,7 @@ int ha_heap::extra(enum ha_extra_function operation) int ha_heap::delete_all_rows() { heap_clear(file); - if (table->tmp_table == NO_TMP_TABLE) + if (table->s->tmp_table == NO_TMP_TABLE) { /* We can perform this safely since only one writer at the time is @@ -494,23 +543,25 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key, int ha_heap::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { - uint key, parts, mem_per_row= 0; + uint key, parts, mem_per_row= 0, keys= table_arg->s->keys; uint auto_key= 0, auto_key_type= 0; ha_rows max_rows; HP_KEYDEF *keydef; HA_KEYSEG *seg; char buff[FN_REFLEN]; int error; + TABLE_SHARE *share= table_arg->s; + bool found_real_auto_increment= 0; - for (key= parts= 0; key < table_arg->keys; key++) + for (key= parts= 0; key < keys; key++) parts+= table_arg->key_info[key].key_parts; - if (!(keydef= (HP_KEYDEF*) my_malloc(table_arg->keys * sizeof(HP_KEYDEF) + + if (!(keydef= (HP_KEYDEF*) my_malloc(keys * sizeof(HP_KEYDEF) + parts * sizeof(HA_KEYSEG), MYF(MY_WME)))) return my_errno; - seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + table_arg->keys); - for (key= 0; key < table_arg->keys; key++) + seg= my_reinterpret_cast(HA_KEYSEG*) (keydef + keys); + for (key= 0; key < keys; key++) { KEY *pos= table_arg->key_info+key; KEY_PART_INFO *key_part= pos->key_part; @@ -533,22 +584,26 @@ int ha_heap::create(const char *name, TABLE *table_arg, default: DBUG_ASSERT(0); // cannot happen } - keydef[key].algorithm= ((pos->algorithm == HA_KEY_ALG_UNDEF) ? 
- HA_KEY_ALG_HASH : pos->algorithm); for (; key_part != key_part_end; key_part++, seg++) { Field *field= key_part->field; + if (pos->algorithm == HA_KEY_ALG_BTREE) seg->type= field->key_type(); else { - if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT) + if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT && + seg->type != HA_KEYTYPE_VARTEXT1 && + seg->type != HA_KEYTYPE_VARTEXT2 && + seg->type != HA_KEYTYPE_VARBINARY1 && + seg->type != HA_KEYTYPE_VARBINARY2) seg->type= HA_KEYTYPE_BINARY; } seg->start= (uint) key_part->offset; seg->length= (uint) key_part->length; - seg->flag = 0; + seg->flag= key_part->key_part_flag; + seg->charset= field->charset(); if (field->null_ptr) { @@ -562,7 +617,7 @@ int ha_heap::create(const char *name, TABLE *table_arg, } if (field->flags & AUTO_INCREMENT_FLAG && table_arg->found_next_number_field && - key == table_arg->next_number_index) + key == share->next_number_index) { /* Store key number and type for found auto_increment key @@ -573,21 +628,29 @@ int ha_heap::create(const char *name, TABLE *table_arg, } } } - mem_per_row+= MY_ALIGN(table_arg->reclength + 1, sizeof(char*)); + mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*)); + max_rows = (ha_rows) (table->in_use->variables.max_heap_table_size / + (ulonglong) mem_per_row); + if (table_arg->found_next_number_field) + { + keydef[share->next_number_index].flag|= HA_AUTO_KEY; + found_real_auto_increment= share->next_number_key_offset == 0; + } HP_CREATE_INFO hp_create_info; hp_create_info.auto_key= auto_key; hp_create_info.auto_key_type= auto_key_type; hp_create_info.auto_increment= (create_info->auto_increment_value ? 
create_info->auto_increment_value - 1 : 0); hp_create_info.max_table_size=current_thd->variables.max_heap_table_size; + hp_create_info.with_auto_increment= found_real_auto_increment; max_rows = (ha_rows) (hp_create_info.max_table_size / mem_per_row); error= heap_create(fn_format(buff,name,"","", MY_REPLACE_EXT|MY_UNPACK_FILENAME), - table_arg->keys,keydef, table_arg->reclength, - (ulong) ((table_arg->max_rows < max_rows && - table_arg->max_rows) ? - table_arg->max_rows : max_rows), - (ulong) table_arg->min_rows, &hp_create_info); + keys, keydef, share->reclength, + (ulong) ((share->max_rows < max_rows && + share->max_rows) ? + share->max_rows : max_rows), + (ulong) share->min_rows, &hp_create_info); my_free((gptr) keydef, MYF(0)); if (file) info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE); @@ -602,7 +665,7 @@ void ha_heap::update_create_info(HA_CREATE_INFO *create_info) create_info->auto_increment_value= auto_increment_value; } -longlong ha_heap::get_auto_increment() +ulonglong ha_heap::get_auto_increment() { ha_heap::info(HA_STATUS_AUTO); return auto_increment_value; diff --git a/sql/ha_heap.h b/sql/ha_heap.h index 3598a270efd..18389c1298d 100644 --- a/sql/ha_heap.h +++ b/sql/ha_heap.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000,2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -31,15 +30,20 @@ class ha_heap: public handler uint records_changed; uint key_stat_version; public: - ha_heap(TABLE *table): handler(table), file(0), records_changed(0), - key_stat_version(0) {} + ha_heap(TABLE *table); ~ha_heap() {} - const char *table_type() const { return "HEAP"; } + const char *table_type() const + { + return (table->in_use->variables.sql_mode & MODE_MYSQL323) ? + "HEAP" : "MEMORY"; + } const char *index_type(uint inx) { return ((table->key_info[inx].algorithm == HA_KEY_ALG_BTREE) ? "BTREE" : "HASH"); } + /* Rows also use a fixed-size format */ + enum row_type get_row_type() const { return ROW_TYPE_FIXED; } const char **bas_ext() const; ulong table_flags() const { @@ -66,7 +70,7 @@ public: int write_row(byte * buf); int update_row(const byte * old_data, byte * new_data); int delete_row(const byte * buf); - longlong get_auto_increment(); + ulonglong get_auto_increment(); int index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag); int index_read_idx(byte * buf, uint idx, const byte * key, @@ -95,6 +99,12 @@ public: THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); + int cmp_ref(const byte *ref1, const byte *ref2) + { + HEAP_PTR ptr1=*(HEAP_PTR*)ref1; + HEAP_PTR ptr2=*(HEAP_PTR*)ref2; + return ptr1 < ptr2? -1 : (ptr1 > ptr2? 
1 : 0); + } private: void update_key_stats(); }; diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 3d1724efb91..cbefa9d3949 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & Innobase Oy +/* Copyright (C) 2000-2005 MySQL AB & Innobase Oy This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -14,15 +13,14 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -/* This file defines the InnoDB handler: the interface between MySQL and -InnoDB +/* This file defines the InnoDB handler: the interface between MySQL and InnoDB NOTE: You can only use noninlined InnoDB functions in this file, because we have disables the InnoDB inlining in this file. */ -/* TODO list for the InnoDB handler in 4.1: - - Remove the flag innodb_active_trans from thd and replace it with a - function call innodb_active_trans(thd), which looks at the InnoDB +/* TODO list for the InnoDB handler in 5.0: + - Remove the flag trx->active_trans and look at the InnoDB trx struct state field + - fix savepoint functions to use savepoint storage area - Find out what kind of problems the OS X case-insensitivity causes to table and database names; should we 'normalize' the names like we do in Windows? @@ -46,9 +44,62 @@ have disables the InnoDB inlining in this file. 
*/ #include "ha_innodb.h" -pthread_mutex_t innobase_mutex; +pthread_mutex_t innobase_share_mutex, /* to protect innobase_open_files */ + prepare_commit_mutex; /* to force correct commit order in + binlog */ +ulong commit_threads= 0; +pthread_mutex_t commit_threads_m; +pthread_cond_t commit_cond; +pthread_mutex_t commit_cond_m; bool innodb_inited= 0; +/*-----------------------------------------------------------------*/ +/* These variables are used to implement (semi-)synchronous MySQL binlog +replication for InnoDB tables. */ + +pthread_cond_t innobase_repl_cond; /* Posix cond variable; + this variable is signaled + when enough binlog has been + sent to slave, so that a + waiting trx can return the + 'ok' message to the client + for a commit */ +pthread_mutex_t innobase_repl_cond_mutex; /* Posix cond variable mutex + that also protects the next + innobase_repl_... variables */ +uint innobase_repl_state; /* 1 if synchronous replication + is switched on and is working + ok; else 0 */ +uint innobase_repl_file_name_inited = 0; /* This is set to 1 when + innobase_repl_file_name + contains meaningful data */ +char* innobase_repl_file_name; /* The binlog name up to which + we have sent some binlog to + the slave */ +my_off_t innobase_repl_pos; /* The position in that file + up to which we have sent the + binlog to the slave */ +uint innobase_repl_n_wait_threads = 0; /* This tells how many + transactions currently are + waiting for the binlog to be + sent to the client */ +uint innobase_repl_wait_file_name_inited = 0; /* This is set to 1 + when we know the 'smallest' + wait position */ +char* innobase_repl_wait_file_name; /* NULL, or the 'smallest' + innobase_repl_file_name that + a transaction is waiting for */ +my_off_t innobase_repl_wait_pos; /* The smallest position in + that file that a trx is + waiting for: the trx can + proceed and send an 'ok' to + the client when MySQL has sent + the binlog up to this position + to the slave */ 
+/*-----------------------------------------------------------------*/ + + + /* Store MySQL definition of 'byte': in Linux it is char while InnoDB uses unsigned char; the header univ.i which we include next defines 'byte' as a macro which expands to 'unsigned char' */ @@ -80,28 +131,28 @@ extern "C" { #include "../innobase/include/fsp0fsp.h" #include "../innobase/include/sync0sync.h" #include "../innobase/include/fil0fil.h" +#include "../innobase/include/trx0xa.h" } #define HA_INNOBASE_ROWS_IN_TABLE 10000 /* to get optimization right */ #define HA_INNOBASE_RANGE_COUNT 100 -uint innobase_init_flags = 0; -ulong innobase_cache_size = 0; +ulong innobase_large_page_size = 0; -/* The default values for the following, type long, start-up parameters -are declared in mysqld.cc: */ +/* The default values for the following, type long or longlong, start-up +parameters are declared in mysqld.cc: */ long innobase_mirrored_log_groups, innobase_log_files_in_group, - innobase_log_file_size, innobase_log_buffer_size, - innobase_buffer_pool_awe_mem_mb, - innobase_buffer_pool_size, innobase_additional_mem_pool_size, - innobase_file_io_threads, innobase_lock_wait_timeout, - innobase_thread_concurrency, innobase_force_recovery, + innobase_log_buffer_size, innobase_buffer_pool_awe_mem_mb, + innobase_additional_mem_pool_size, innobase_file_io_threads, + innobase_lock_wait_timeout, innobase_force_recovery, innobase_open_files; +longlong innobase_buffer_pool_size, innobase_log_file_size; + /* The default values for the following char* start-up parameters are determined in innobase_init below: */ - + char* innobase_data_home_dir = NULL; char* innobase_data_file_path = NULL; char* innobase_log_group_home_dir = NULL; @@ -113,15 +164,15 @@ char* innobase_unix_file_flush_method = NULL; /* Below we have boolean-valued start-up parameters, and their default values */ -uint innobase_flush_log_at_trx_commit = 1; +ulong innobase_fast_shutdown = 1; my_bool innobase_log_archive = FALSE;/* unused */ 
+my_bool innobase_use_doublewrite = TRUE; +my_bool innobase_use_checksums = TRUE; +my_bool innobase_use_large_pages = FALSE; my_bool innobase_use_native_aio = FALSE; -my_bool innobase_fast_shutdown = TRUE; -my_bool innobase_very_fast_shutdown = FALSE; /* this can be set to - 1 just prior calling - innobase_end() */ my_bool innobase_file_per_table = FALSE; my_bool innobase_locks_unsafe_for_binlog = FALSE; +my_bool innobase_rollback_on_timeout = FALSE; my_bool innobase_create_status_file = FALSE; static char *internal_innobase_data_file_path = NULL; @@ -134,10 +185,6 @@ it every INNOBASE_WAKE_INTERVAL'th step. */ #define INNOBASE_WAKE_INTERVAL 32 ulong innobase_active_counter = 0; -char* innobase_home = NULL; - -char innodb_dummy_stmt_trx_handle = 'D'; - static HASH innobase_open_tables; #ifdef __NETWARE__ /* some special cleanup for NetWare */ @@ -148,6 +195,133 @@ static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length, my_bool not_used __attribute__((unused))); static INNOBASE_SHARE *get_share(const char *table_name); static void free_share(INNOBASE_SHARE *share); +static int innobase_close_connection(THD* thd); +static int innobase_commit(THD* thd, bool all); +static int innobase_rollback(THD* thd, bool all); +static int innobase_rollback_to_savepoint(THD* thd, void *savepoint); +static int innobase_savepoint(THD* thd, void *savepoint); +static int innobase_release_savepoint(THD* thd, void *savepoint); + +handlerton innobase_hton = { + "InnoDB", + SHOW_OPTION_YES, + "Supports transactions, row-level locking, and foreign keys", + DB_TYPE_INNODB, + innobase_init, + 0, /* slot */ + sizeof(trx_named_savept_t), /* savepoint size. 
TODO: use it */ + innobase_close_connection, + innobase_savepoint, + innobase_rollback_to_savepoint, + innobase_release_savepoint, + innobase_commit, /* commit */ + innobase_rollback, /* rollback */ + innobase_xa_prepare, /* prepare */ + innobase_xa_recover, /* recover */ + innobase_commit_by_xid, /* commit_by_xid */ + innobase_rollback_by_xid, /* rollback_by_xid */ + innobase_create_cursor_view, + innobase_set_cursor_view, + innobase_close_cursor_view, + HTON_NO_FLAGS +}; + +/********************************************************************* +Commits a transaction in an InnoDB database. */ + +void +innobase_commit_low( +/*================*/ + trx_t* trx); /* in: transaction handle */ + +struct show_var_st innodb_status_variables[]= { + {"buffer_pool_pages_data", + (char*) &export_vars.innodb_buffer_pool_pages_data, SHOW_LONG}, + {"buffer_pool_pages_dirty", + (char*) &export_vars.innodb_buffer_pool_pages_dirty, SHOW_LONG}, + {"buffer_pool_pages_flushed", + (char*) &export_vars.innodb_buffer_pool_pages_flushed, SHOW_LONG}, + {"buffer_pool_pages_free", + (char*) &export_vars.innodb_buffer_pool_pages_free, SHOW_LONG}, + {"buffer_pool_pages_latched", + (char*) &export_vars.innodb_buffer_pool_pages_latched, SHOW_LONG}, + {"buffer_pool_pages_misc", + (char*) &export_vars.innodb_buffer_pool_pages_misc, SHOW_LONG}, + {"buffer_pool_pages_total", + (char*) &export_vars.innodb_buffer_pool_pages_total, SHOW_LONG}, + {"buffer_pool_read_ahead_rnd", + (char*) &export_vars.innodb_buffer_pool_read_ahead_rnd, SHOW_LONG}, + {"buffer_pool_read_ahead_seq", + (char*) &export_vars.innodb_buffer_pool_read_ahead_seq, SHOW_LONG}, + {"buffer_pool_read_requests", + (char*) &export_vars.innodb_buffer_pool_read_requests, SHOW_LONG}, + {"buffer_pool_reads", + (char*) &export_vars.innodb_buffer_pool_reads, SHOW_LONG}, + {"buffer_pool_wait_free", + (char*) &export_vars.innodb_buffer_pool_wait_free, SHOW_LONG}, + {"buffer_pool_write_requests", + (char*) 
&export_vars.innodb_buffer_pool_write_requests, SHOW_LONG}, + {"data_fsyncs", + (char*) &export_vars.innodb_data_fsyncs, SHOW_LONG}, + {"data_pending_fsyncs", + (char*) &export_vars.innodb_data_pending_fsyncs, SHOW_LONG}, + {"data_pending_reads", + (char*) &export_vars.innodb_data_pending_reads, SHOW_LONG}, + {"data_pending_writes", + (char*) &export_vars.innodb_data_pending_writes, SHOW_LONG}, + {"data_read", + (char*) &export_vars.innodb_data_read, SHOW_LONG}, + {"data_reads", + (char*) &export_vars.innodb_data_reads, SHOW_LONG}, + {"data_writes", + (char*) &export_vars.innodb_data_writes, SHOW_LONG}, + {"data_written", + (char*) &export_vars.innodb_data_written, SHOW_LONG}, + {"dblwr_pages_written", + (char*) &export_vars.innodb_dblwr_pages_written, SHOW_LONG}, + {"dblwr_writes", + (char*) &export_vars.innodb_dblwr_writes, SHOW_LONG}, + {"log_waits", + (char*) &export_vars.innodb_log_waits, SHOW_LONG}, + {"log_write_requests", + (char*) &export_vars.innodb_log_write_requests, SHOW_LONG}, + {"log_writes", + (char*) &export_vars.innodb_log_writes, SHOW_LONG}, + {"os_log_fsyncs", + (char*) &export_vars.innodb_os_log_fsyncs, SHOW_LONG}, + {"os_log_pending_fsyncs", + (char*) &export_vars.innodb_os_log_pending_fsyncs, SHOW_LONG}, + {"os_log_pending_writes", + (char*) &export_vars.innodb_os_log_pending_writes, SHOW_LONG}, + {"os_log_written", + (char*) &export_vars.innodb_os_log_written, SHOW_LONG}, + {"page_size", + (char*) &export_vars.innodb_page_size, SHOW_LONG}, + {"pages_created", + (char*) &export_vars.innodb_pages_created, SHOW_LONG}, + {"pages_read", + (char*) &export_vars.innodb_pages_read, SHOW_LONG}, + {"pages_written", + (char*) &export_vars.innodb_pages_written, SHOW_LONG}, + {"row_lock_current_waits", + (char*) &export_vars.innodb_row_lock_current_waits, SHOW_LONG}, + {"row_lock_time", + (char*) &export_vars.innodb_row_lock_time, SHOW_LONGLONG}, + {"row_lock_time_avg", + (char*) &export_vars.innodb_row_lock_time_avg, SHOW_LONG}, + {"row_lock_time_max", + 
(char*) &export_vars.innodb_row_lock_time_max, SHOW_LONG}, + {"row_lock_waits", + (char*) &export_vars.innodb_row_lock_waits, SHOW_LONG}, + {"rows_deleted", + (char*) &export_vars.innodb_rows_deleted, SHOW_LONG}, + {"rows_inserted", + (char*) &export_vars.innodb_rows_inserted, SHOW_LONG}, + {"rows_read", + (char*) &export_vars.innodb_rows_read, SHOW_LONG}, + {"rows_updated", + (char*) &export_vars.innodb_rows_updated, SHOW_LONG}, + {NullS, NullS, SHOW_LONG}}; /* General functions */ @@ -160,7 +334,7 @@ innodb_srv_conc_enter_innodb( /*=========================*/ trx_t* trx) /* in: transaction handle */ { - if (srv_thread_concurrency >= 500) { + if (UNIV_LIKELY(!srv_thread_concurrency)) { return; } @@ -177,7 +351,7 @@ innodb_srv_conc_exit_innodb( /*========================*/ trx_t* trx) /* in: transaction handle */ { - if (srv_thread_concurrency >= 500) { + if (UNIV_LIKELY(!srv_thread_concurrency)) { return; } @@ -215,9 +389,20 @@ documentation, see handler.cc. */ void innobase_release_temporary_latches( /*===============================*/ - void* innobase_tid) + THD *thd) { - innobase_release_stat_resources((trx_t*)innobase_tid); + trx_t* trx; + + if (!innodb_inited) { + + return; + } + + trx = (trx_t*) thd->ha_data[innobase_hton.slot]; + + if (trx) { + innobase_release_stat_resources(trx); + } } /************************************************************************ @@ -278,11 +463,11 @@ convert_error_code_to_mysql( } else if (error == (int) DB_LOCK_WAIT_TIMEOUT) { - /* Since we rolled back the whole transaction, we must - tell it also to MySQL so that MySQL knows to empty the - cached binlog for this transaction */ + /* Starting from 5.0.13, we let MySQL just roll back the + latest SQL statement in a lock wait timeout. Previously, we + rolled back the whole transaction. 
*/ - if (thd) { + if (thd && row_rollback_on_timeout) { ha_rollback(thd); } @@ -332,13 +517,13 @@ convert_error_code_to_mysql( return(HA_ERR_NO_SAVEPOINT); } else if (error == (int) DB_LOCK_TABLE_FULL) { - /* Since we rolled back the whole transaction, we must - tell it also to MySQL so that MySQL knows to empty the - cached binlog for this transaction */ + /* Since we rolled back the whole transaction, we must + tell it also to MySQL so that MySQL knows to empty the + cached binlog for this transaction */ - if (thd) { - ha_rollback(thd); - } + if (thd) { + ha_rollback(thd); + } return(HA_ERR_LOCK_TABLE_FULL); } else { @@ -364,7 +549,7 @@ innobase_mysql_prepare_print_arbitrary_thd(void) } /***************************************************************** -Relases the mutex reserved by innobase_mysql_prepare_print_arbitrary_thd(). +Releases the mutex reserved by innobase_mysql_prepare_print_arbitrary_thd(). NOTE that /mysql/innobase/lock/lock0lock.c must contain the prototype for this function! */ extern "C" @@ -376,37 +561,42 @@ innobase_mysql_end_print_arbitrary_thd(void) } /***************************************************************** -Prints info of a THD object (== user session thread) to the -standard output. NOTE that /mysql/innobase/trx/trx0trx.c must contain -the prototype for this function! */ +Prints info of a THD object (== user session thread) to the given file. +NOTE that /mysql/innobase/trx/trx0trx.c must contain the prototype for +this function! 
*/ extern "C" void innobase_mysql_print_thd( /*=====================*/ - FILE* f, /* in: output stream */ - void* input_thd)/* in: pointer to a MySQL THD object */ + FILE* f, /* in: output stream */ + void* input_thd, /* in: pointer to a MySQL THD object */ + uint max_query_len) /* in: max query length to print, or 0 to + use the default max length */ { const THD* thd; + const Security_context *sctx; const char* s; - char buf[301]; thd = (const THD*) input_thd; + /* We probably want to have original user as part of debug output. */ + sctx = &thd->main_security_ctx; + fprintf(f, "MySQL thread id %lu, query id %lu", - thd->thread_id, thd->query_id); - if (thd->host) { + thd->thread_id, (ulong) thd->query_id); + if (sctx->host) { putc(' ', f); - fputs(thd->host, f); + fputs(sctx->host, f); } - if (thd->ip) { + if (sctx->ip) { putc(' ', f); - fputs(thd->ip, f); + fputs(sctx->ip, f); } - if (thd->user) { + if (sctx->user) { putc(' ', f); - fputs(thd->user, f); + fputs(sctx->user, f); } if ((s = thd->proc_info)) { @@ -415,31 +605,81 @@ innobase_mysql_print_thd( } if ((s = thd->query)) { - /* determine the length of the query string */ - uint32 i, len; - - len = thd->query_length; - - if (len > 300) { - len = 300; /* ADDITIONAL SAFETY: print at most - 300 chars to reduce the probability of - a seg fault if there is a race in - thd->query_length in MySQL; after - May 14, 2004 probably no race any more, - but better be safe */ + /* 3100 is chosen because currently 3000 is the maximum + max_query_len we ever give this. */ + char buf[3100]; + uint len; + + /* If buf is too small, we dynamically allocate storage + in this. */ + char* dyn_str = NULL; + + /* Points to buf or dyn_str. 
*/ + char* str = buf; + + if (max_query_len == 0) + { + /* ADDITIONAL SAFETY: the default is to print at + most 300 chars to reduce the probability of a + seg fault if there is a race in + thd->query_length in MySQL; after May 14, 2004 + probably no race any more, but better be + safe */ + max_query_len = 300; } - /* Use strmake to reduce the timeframe - for a race, compared to fwrite() */ - i= (uint) (strmake(buf, s, len) - buf); + len = min(thd->query_length, max_query_len); + + if (len > (sizeof(buf) - 1)) + { + dyn_str = my_malloc(len + 1, MYF(0)); + str = dyn_str; + } + + /* Use strmake to reduce the timeframe for a race, + compared to fwrite() */ + len = (uint) (strmake(str, s, len) - str); putc('\n', f); - fwrite(buf, 1, i, f); + fwrite(str, 1, len, f); + + if (dyn_str) + { + my_free(dyn_str, MYF(0)); + } } putc('\n', f); } /********************************************************************** +Get the variable length bounds of the given character set. + +NOTE that the exact prototype of this function has to be in +/innobase/data/data0type.ic! */ +extern "C" +void +innobase_get_cset_width( +/*====================*/ + ulint cset, /* in: MySQL charset-collation code */ + ulint* mbminlen, /* out: minimum length of a char (in bytes) */ + ulint* mbmaxlen) /* out: maximum length of a char (in bytes) */ +{ + CHARSET_INFO* cs; + ut_ad(cset < 256); + ut_ad(mbminlen); + ut_ad(mbmaxlen); + + cs = all_charsets[cset]; + if (cs) { + *mbminlen = cs->mbminlen; + *mbmaxlen = cs->mbmaxlen; + } else { + ut_a(cset == 0); + *mbminlen = *mbmaxlen = 0; + } +} + +/********************************************************************** Compares NUL-terminated UTF-8 strings case insensitively. 
NOTE that the exact prototype of this function has to be in @@ -505,9 +745,10 @@ innobase_mysql_tmpfile(void) if (fd2 < 0) { DBUG_PRINT("error",("Got error %d on dup",fd2)); my_errno=errno; - my_error(EE_OUT_OF_FILERESOURCES, - MYF(ME_BELL+ME_WAITTANG), filename, my_errno); - } + my_error(EE_OUT_OF_FILERESOURCES, + MYF(ME_BELL+ME_WAITTANG), + filename, my_errno); + } my_close(fd, MYF(MY_WME)); } return(fd2); @@ -528,25 +769,21 @@ check_trx_exists( ut_ad(thd == current_thd); - trx = (trx_t*) thd->transaction.all.innobase_tid; + trx = (trx_t*) thd->ha_data[innobase_hton.slot]; if (trx == NULL) { DBUG_ASSERT(thd != NULL); trx = trx_allocate_for_mysql(); trx->mysql_thd = thd; - trx->mysql_query_str = &((*thd).query); - - thd->transaction.all.innobase_tid = trx; - - /* The execution of a single SQL statement is denoted by - a 'transaction' handle which is a dummy pointer: InnoDB - remembers internally where the latest SQL statement - started, and if error handling requires rolling back the - latest statement, InnoDB does a rollback to a savepoint. */ - - thd->transaction.stmt.innobase_tid = - (void*)&innodb_dummy_stmt_trx_handle; + trx->mysql_query_str = &(thd->query); + trx->active_trans = 0; + + /* Update the info whether we should skip XA steps that eat + CPU time */ + trx->support_xa = (ibool)(thd->variables.innodb_support_xa); + + thd->ha_data[innobase_hton.slot] = trx; } else { if (trx->magic_n != TRX_MAGIC_N) { mem_analyze_corruption((byte*)trx); @@ -570,6 +807,24 @@ check_trx_exists( return(trx); } + +/************************************************************************* +Construct ha_innobase handler. 
*/ + +ha_innobase::ha_innobase(TABLE *table_arg) + :handler(&innobase_hton, table_arg), + int_table_flags(HA_REC_NOT_IN_SEQ | + HA_NULL_IN_KEY | + HA_CAN_INDEX_BLOBS | + HA_CAN_SQL_HANDLER | + HA_NOT_EXACT_COUNT | + HA_PRIMARY_KEY_IN_READ_INDEX | + HA_CAN_GEOMETRY | + HA_TABLE_SCAN_ON_INDEX), + start_of_scan(0), + num_write_row(0) +{} + /************************************************************************* Updates the user_thd field in a handle and also allocates a new InnoDB transaction handle if needed, and updates the transaction fields in the @@ -583,7 +838,7 @@ ha_innobase::update_thd( { row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; trx_t* trx; - + trx = check_trx_exists(thd); if (prebuilt->trx != trx) { @@ -596,6 +851,45 @@ ha_innobase::update_thd( return(0); } +/************************************************************************* +Registers that InnoDB takes part in an SQL statement, so that MySQL knows to +roll back the statement if the statement results in an error. This MUST be +called for every SQL statement that may be rolled back by MySQL. Calling this +several times to register the same statement is allowed, too. */ +inline +void +innobase_register_stmt( +/*===================*/ + THD* thd) /* in: MySQL thd (connection) object */ +{ + /* Register the statement */ + trans_register_ha(thd, FALSE, &innobase_hton); +} + +/************************************************************************* +Registers an InnoDB transaction in MySQL, so that the MySQL XA code knows +to call the InnoDB prepare and commit, or rollback for the transaction. This +MUST be called for every transaction for which the user may call commit or +rollback. Calling this several times to register the same transaction is +allowed, too. +This function also registers the current SQL statement. 
*/ +inline +void +innobase_register_trx_and_stmt( +/*===========================*/ + THD* thd) /* in: MySQL thd (connection) object */ +{ + /* NOTE that actually innobase_register_stmt() registers also + the transaction in the AUTOCOMMIT=1 mode. */ + + innobase_register_stmt(thd); + + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + + /* No autocommit mode, register for a transaction */ + trans_register_ha(thd, TRUE, &innobase_hton); + } +} /* BACKGROUND INFO: HOW THE MYSQL QUERY CACHE WORKS WITH INNODB ------------------------------------------------------------ @@ -650,7 +944,14 @@ returns TRUE for all tables in the query. If thd is not in the autocommit state, this function also starts a new transaction for thd if there is no active trx yet, and assigns a consistent -read view to it if there is no read view yet. */ +read view to it if there is no read view yet. + +Why a deadlock of threads is not possible: the query cache calls this function +at the start of a SELECT processing. Then the calling thread cannot be +holding any InnoDB semaphores. The calling thread is holding the +query cache mutex, and this function will reserver the InnoDB kernel mutex. +Thus, the 'rank' in sync0sync.h of the MySQL query cache mutex is above +the InnoDB kernel mutex. */ my_bool innobase_query_caching_of_table_permitted( @@ -665,8 +966,9 @@ innobase_query_caching_of_table_permitted( char* full_name, /* in: concatenation of database name, the null character '\0', and the table name */ - uint full_name_len) /* in: length of the full name, i.e. + uint full_name_len, /* in: length of the full name, i.e. len(dbname) + len(tablename) + 1 */ + ulonglong *unused) /* unused for this engine */ { ibool is_autocommit; trx_t* trx; @@ -677,14 +979,20 @@ innobase_query_caching_of_table_permitted( if (thd->variables.tx_isolation == ISO_SERIALIZABLE) { /* In the SERIALIZABLE mode we add LOCK IN SHARE MODE to every plain SELECT if AUTOCOMMIT is not on. 
*/ - + return((my_bool)FALSE); } - trx = (trx_t*) thd->transaction.all.innobase_tid; + trx = check_trx_exists(thd); + if (trx->has_search_latch) { + ut_print_timestamp(stderr); + sql_print_error("The calling thread is holding the adaptive " + "search, latch though calling " + "innobase_query_caching_of_table_permitted."); - if (trx == NULL) { - trx = check_trx_exists(thd); + mutex_enter_noninline(&kernel_mutex); + trx_print(stderr, trx, 1024); + mutex_exit_noninline(&kernel_mutex); } innobase_release_stat_resources(trx); @@ -718,7 +1026,7 @@ innobase_query_caching_of_table_permitted( return((my_bool)TRUE); } - + /* Normalize the table name to InnoDB format */ memcpy(norm_name, full_name, full_name_len); @@ -732,7 +1040,11 @@ innobase_query_caching_of_table_permitted( /* The call of row_search_.. will start a new transaction if it is not yet started */ - thd->transaction.all.innodb_active_trans = 1; + if (trx->active_trans == 0) { + + innobase_register_trx_and_stmt(thd); + trx->active_trans = 1; + } if (row_search_check_if_query_cache_permitted(trx, norm_name)) { @@ -762,6 +1074,10 @@ innobase_invalidate_query_cache( ulint full_name_len) /* in: full name length where also the null chars count */ { + /* Note that the sync0sync.h rank of the query cache mutex is just + above the InnoDB kernel mutex. The caller of this function must not + have latches of a lower rank. */ + /* Argument TRUE below means we are using transactions */ #ifdef HAVE_QUERY_CACHE query_cache.invalidate((THD*)(trx->mysql_thd), @@ -788,7 +1104,19 @@ mysql_get_identifier_quote_char( return(EOF); } return(get_quote_char_for_identifier((THD*) trx->mysql_thd, - name, namelen)); + name, (int) namelen)); +} + +/************************************************************************** +Determines if the currently running transaction has been interrupted. 
*/ +extern "C" +ibool +trx_is_interrupted( +/*===============*/ + /* out: TRUE if interrupted */ + trx_t* trx) /* in: transaction */ +{ + return(trx && trx->mysql_thd && ((THD*) trx->mysql_thd)->killed); } /************************************************************************** @@ -840,7 +1168,12 @@ ha_innobase::init_table_handle_for_HANDLER(void) /* Set the MySQL flag to mark that there is an active transaction */ - current_thd->transaction.all.innodb_active_trans = 1; + if (prebuilt->trx->active_trans == 0) { + + innobase_register_trx_and_stmt(current_thd); + + prebuilt->trx->active_trans = 1; + } /* We did the necessary inits in this function, no need to repeat them in row_search_for_mysql */ @@ -863,6 +1196,8 @@ ha_innobase::init_table_handle_for_HANDLER(void) prebuilt->read_just_key = FALSE; prebuilt->used_in_HANDLER = TRUE; + + prebuilt->keep_other_fields_on_keyread = FALSE; } /************************************************************************* @@ -871,7 +1206,7 @@ Opens an InnoDB database. */ bool innobase_init(void) /*===============*/ - /* out: TRUE if error */ + /* out: &innobase_hton, or NULL on error */ { static char current_dir[3]; /* Set if using current lib */ int err; @@ -880,6 +1215,30 @@ innobase_init(void) DBUG_ENTER("innobase_init"); + if (have_innodb != SHOW_OPTION_YES) + goto error; + + ut_a(DATA_MYSQL_TRUE_VARCHAR == (ulint)MYSQL_TYPE_VARCHAR); + + /* Check that values don't overflow on 32-bit systems. 
*/ + if (sizeof(ulint) == 4) { + if (innobase_buffer_pool_size > UINT_MAX32) { + sql_print_error( + "innobase_buffer_pool_size can't be over 4GB" + " on 32-bit systems"); + + goto error; + } + + if (innobase_log_file_size > UINT_MAX32) { + sql_print_error( + "innobase_log_file_size can't be over 4GB" + " on 32-bit systems"); + + goto error; + } + } + os_innodb_umask = (ulint)my_umask; /* First calculate the default path for innodb_data_home_dir etc., @@ -930,7 +1289,7 @@ innobase_init(void) copy of it: */ internal_innobase_data_file_path = my_strdup(innobase_data_file_path, - MYF(MY_WME)); + MYF(MY_FAE)); ret = (bool) srv_parse_data_file_paths_and_sizes( internal_innobase_data_file_path, @@ -943,18 +1302,20 @@ innobase_init(void) if (ret == FALSE) { sql_print_error( "InnoDB: syntax error in innodb_data_file_path"); - DBUG_RETURN(TRUE); + my_free(internal_innobase_data_file_path, + MYF(MY_ALLOW_ZERO_PTR)); + goto error; } /* -------------- Log files ---------------------------*/ /* The default dir for log files is the datadir of MySQL */ - + if (!innobase_log_group_home_dir) { innobase_log_group_home_dir = default_path; } -#ifdef UNIV_LOG_ARCHIVE +#ifdef UNIV_LOG_ARCHIVE /* Since innodb_log_arch_dir has no relevance under MySQL, starting from 4.0.6 we always set it the same as innodb_log_group_home_dir: */ @@ -969,11 +1330,12 @@ innobase_init(void) &srv_log_group_home_dirs); if (ret == FALSE || innobase_mirrored_log_groups != 1) { - fprintf(stderr, - "InnoDB: syntax error in innodb_log_group_home_dir\n" - "InnoDB: or a wrong number of mirrored log groups\n"); + sql_print_error("syntax error in innodb_log_group_home_dir, or a " + "wrong number of mirrored log groups"); - DBUG_RETURN(TRUE); + my_free(internal_innobase_data_file_path, + MYF(MY_ALLOW_ZERO_PTR)); + goto error; } /* --------------------------------------------------*/ @@ -988,7 +1350,6 @@ innobase_init(void) srv_log_archive_on = (ulint) innobase_log_archive; #endif /* UNIV_LOG_ARCHIVE */ 
srv_log_buffer_size = (ulint) innobase_log_buffer_size; - srv_flush_log_at_trx_commit = (ulint) innobase_flush_log_at_trx_commit; /* We set srv_pool_size here in units of 1 kB. InnoDB internally changes the value so that it becomes the number of database pages. */ @@ -996,14 +1357,14 @@ innobase_init(void) if (innobase_buffer_pool_awe_mem_mb == 0) { /* Careful here: we first convert the signed long int to ulint and only after that divide */ - + srv_pool_size = ((ulint) innobase_buffer_pool_size) / 1024; } else { srv_use_awe = TRUE; srv_pool_size = (ulint) (1024 * innobase_buffer_pool_awe_mem_mb); srv_awe_window_size = (ulint) innobase_buffer_pool_size; - + /* Note that what the user specified as innodb_buffer_pool_size is actually the AWE memory window size in this case, and the real buffer pool size is @@ -1015,10 +1376,15 @@ innobase_init(void) srv_n_file_io_threads = (ulint) innobase_file_io_threads; srv_lock_wait_timeout = (ulint) innobase_lock_wait_timeout; - srv_thread_concurrency = (ulint) innobase_thread_concurrency; srv_force_recovery = (ulint) innobase_force_recovery; - srv_fast_shutdown = (ibool) innobase_fast_shutdown; + srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite; + srv_use_checksums = (ibool) innobase_use_checksums; + + os_use_large_pages = (ibool) innobase_use_large_pages; + os_large_page_size = (ulint) innobase_large_page_size; + + row_rollback_on_timeout = (ibool) innobase_rollback_on_timeout; srv_file_per_table = (ibool) innobase_file_per_table; srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog; @@ -1028,13 +1394,14 @@ innobase_init(void) srv_print_verbose_log = mysqld_embedded ? 
0 : 1; - /* Store the default charset-collation number of this MySQL + /* Store the default charset-collation number of this MySQL installation */ data_mysql_default_charset_coll = (ulint)default_charset_info->number; - data_mysql_latin1_swedish_charset_coll = - (ulint)my_charset_latin1.number; + ut_a(DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL == + my_charset_latin1.number); + ut_a(DATA_MYSQL_BINARY_CHARSET_COLL == my_charset_bin.number); /* Store the latin1_swedish_ci character ordering table to InnoDB. For non-latin1_swedish_ci charsets we use the MySQL comparison functions, @@ -1056,13 +1423,18 @@ innobase_init(void) err = innobase_start_or_create_for_mysql(); if (err != DB_SUCCESS) { - - DBUG_RETURN(1); + my_free(internal_innobase_data_file_path, + MYF(MY_ALLOW_ZERO_PTR)); + goto error; } (void) hash_init(&innobase_open_tables,system_charset_info, 32, 0, 0, (hash_get_key) innobase_get_key, 0, 0); - pthread_mutex_init(&innobase_mutex, MY_MUTEX_INIT_FAST); + pthread_mutex_init(&innobase_share_mutex, MY_MUTEX_INIT_FAST); + pthread_mutex_init(&prepare_commit_mutex, MY_MUTEX_INIT_FAST); + pthread_mutex_init(&commit_threads_m, MY_MUTEX_INIT_FAST); + pthread_mutex_init(&commit_cond_m, MY_MUTEX_INIT_FAST); + pthread_cond_init(&commit_cond, NULL); innodb_inited= 1; /* If this is a replication slave and we needed to do a crash recovery, @@ -1073,14 +1445,17 @@ innobase_init(void) THIS DOES NOT WORK CURRENTLY because replication seems to initialize glob_mi also after innobase_init. 
*/ - + /* if (trx_sys_mysql_master_log_pos != -1) { ut_memcpy(glob_mi.log_file_name, trx_sys_mysql_master_log_name, 1 + ut_strlen(trx_sys_mysql_master_log_name)); glob_mi.pos = trx_sys_mysql_master_log_pos; } */ - DBUG_RETURN(0); + DBUG_RETURN(FALSE); +error: + have_innodb= SHOW_OPTION_DISABLED; // If we couldn't use handler + DBUG_RETURN(TRUE); } /*********************************************************************** @@ -1100,23 +1475,21 @@ innobase_end(void) set_panic_flag_for_netware(); } #endif - if (innodb_inited) - { - if (innobase_very_fast_shutdown) { - srv_very_fast_shutdown = TRUE; - fprintf(stderr, -"InnoDB: MySQL has requested a very fast shutdown without flushing\n" -"InnoDB: the InnoDB buffer pool to data files. At the next mysqld startup\n" -"InnoDB: InnoDB will do a crash recovery!\n"); + if (innodb_inited) { - } - - innodb_inited= 0; - if (innobase_shutdown_for_mysql() != DB_SUCCESS) - err= 1; - hash_free(&innobase_open_tables); - my_free(internal_innobase_data_file_path,MYF(MY_ALLOW_ZERO_PTR)); - pthread_mutex_destroy(&innobase_mutex); + srv_fast_shutdown = (ulint) innobase_fast_shutdown; + innodb_inited = 0; + if (innobase_shutdown_for_mysql() != DB_SUCCESS) { + err = 1; + } + hash_free(&innobase_open_tables); + my_free(internal_innobase_data_file_path, + MYF(MY_ALLOW_ZERO_PTR)); + pthread_mutex_destroy(&innobase_share_mutex); + pthread_mutex_destroy(&prepare_commit_mutex); + pthread_mutex_destroy(&commit_threads_m); + pthread_mutex_destroy(&commit_cond_m); + pthread_cond_destroy(&commit_cond); } DBUG_RETURN(err); @@ -1154,13 +1527,15 @@ innobase_commit_low( } #ifdef HAVE_REPLICATION - if (current_thd->slave_thread) { + THD *thd=current_thd; + + if (thd && thd->slave_thread) { /* Update the replication position info inside InnoDB */ trx->mysql_master_log_file_name = active_mi->rli.group_master_log_name; - trx->mysql_master_log_pos= ((ib_longlong) - active_mi->rli.future_group_master_log_pos); + trx->mysql_master_log_pos = ((ib_longlong) + 
active_mi->rli.future_group_master_log_pos); } #endif /* HAVE_REPLICATION */ @@ -1204,7 +1579,12 @@ innobase_start_trx_and_assign_read_view( /* Set the MySQL flag to mark that there is an active transaction */ - current_thd->transaction.all.innodb_active_trans = 1; + if (trx->active_trans == 0) { + + innobase_register_trx_and_stmt(current_thd); + + trx->active_trans = 1; + } DBUG_RETURN(0); } @@ -1212,16 +1592,15 @@ innobase_start_trx_and_assign_read_view( /********************************************************************* Commits a transaction in an InnoDB database or marks an SQL statement ended. */ - +static int innobase_commit( /*============*/ /* out: 0 */ THD* thd, /* in: MySQL thread handle of the user for whom the transaction should be committed */ - void* trx_handle)/* in: InnoDB trx handle or - &innodb_dummy_stmt_trx_handle: the latter means - that the current SQL statement ended */ + bool all) /* in: TRUE - commit transaction + FALSE - the current SQL statement ended */ { trx_t* trx; @@ -1230,43 +1609,83 @@ innobase_commit( trx = check_trx_exists(thd); + /* Update the info whether we should skip XA steps that eat CPU time */ + trx->support_xa = (ibool)(thd->variables.innodb_support_xa); + /* Release a possible FIFO ticket and search latch. Since we will reserve the kernel mutex, we have to release the search system latch first to obey the latching order. */ - innobase_release_stat_resources(trx); + if (trx->has_search_latch) { + trx_search_latch_release_if_reserved(trx); + } - /* The flag thd->transaction.all.innodb_active_trans is set to 1 in + /* The flag trx->active_trans is set to 1 in 1. ::external_lock(), 2. ::start_stmt(), 3. innobase_query_caching_of_table_permitted(), 4. innobase_savepoint(), 5. ::init_table_handle_for_HANDLER(), - 6. innobase_start_trx_and_assign_read_view() + 6. innobase_start_trx_and_assign_read_view(), + 7. ::transactional_table_lock() and it is only set to 0 in a commit or a rollback. 
If it is 0 we know there cannot be resources to be freed and we could return immediately. For the time being, we play safe and do the cleanup though there should be nothing to clean up. */ - if (thd->transaction.all.innodb_active_trans == 0 + if (trx->active_trans == 0 && trx->conc_state != TRX_NOT_STARTED) { - - fprintf(stderr, -"InnoDB: Error: thd->transaction.all.innodb_active_trans == 0\n" -"InnoDB: but trx->conc_state != TRX_NOT_STARTED\n"); - } - if (trx_handle != (void*)&innodb_dummy_stmt_trx_handle + sql_print_error("trx->active_trans == 0, but trx->conc_state != " + "TRX_NOT_STARTED"); + } + if (all || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { - - /* We were instructed to commit the whole transaction, or + + /* We were instructed to commit the whole transaction, or this is an SQL statement end and autocommit is on */ + /* We need current binlog position for ibbackup to work. + Note, the position is current because of prepare_commit_mutex */ +retry: + if (srv_commit_concurrency > 0) + { + pthread_mutex_lock(&commit_cond_m); + commit_threads++; + if (commit_threads > srv_commit_concurrency) + { + commit_threads--; + pthread_cond_wait(&commit_cond, &commit_cond_m); + pthread_mutex_unlock(&commit_cond_m); + goto retry; + } + else + pthread_mutex_unlock(&commit_cond_m); + } + + trx->mysql_log_file_name = mysql_bin_log.get_log_fname(); + trx->mysql_log_offset = + (ib_longlong)mysql_bin_log.get_log_file()->pos_in_file; + innobase_commit_low(trx); - thd->transaction.all.innodb_active_trans = 0; + if (srv_commit_concurrency > 0) + { + pthread_mutex_lock(&commit_cond_m); + commit_threads--; + pthread_cond_signal(&commit_cond); + pthread_mutex_unlock(&commit_cond_m); + } + + if (trx->active_trans == 2) { + + pthread_mutex_unlock(&prepare_commit_mutex); + } + + trx->active_trans = 0; + } else { /* We just mark the SQL statement ended and do not do a transaction commit */ @@ -1274,7 +1693,7 @@ innobase_commit( if (trx->auto_inc_lock) { /* If we had 
reserved the auto-inc lock for some table in this SQL statement we release it now */ - + row_unlock_table_autoinc_for_mysql(trx); } /* Store the current undo_no of the transaction so that we @@ -1286,12 +1705,20 @@ innobase_commit( /* Tell the InnoDB server that there might be work for utility threads: */ + if (trx->declared_to_be_inside_innodb) { + /* Release our possible ticket in the FIFO */ + srv_conc_force_exit_innodb(trx); + } srv_active_wake_master_thread(); DBUG_RETURN(0); } +/* TODO: put the +MySQL-4.1 functionality back to 5.0. This is needed to get InnoDB Hot Backup +to work. */ + /********************************************************************* This is called when MySQL writes the binlog entry for the current transaction. Writes to the InnoDB tablespace info which tells where the @@ -1317,18 +1744,51 @@ innobase_report_binlog_offset_and_commit( ut_a(trx != NULL); - trx->mysql_log_file_name = log_file_name; + trx->mysql_log_file_name = log_file_name; trx->mysql_log_offset = (ib_longlong)end_offset; - + trx->flush_log_later = TRUE; - innobase_commit(thd, trx_handle); + innobase_commit(thd, TRUE); trx->flush_log_later = FALSE; return(0); } +#if 0 +/*********************************************************************** +This function stores the binlog offset and flushes logs. 
*/ + +void +innobase_store_binlog_offset_and_flush_log( +/*=======================================*/ + char *binlog_name, /* in: binlog name */ + longlong offset) /* in: binlog offset */ +{ + mtr_t mtr; + + assert(binlog_name != NULL); + + /* Start a mini-transaction */ + mtr_start_noninline(&mtr); + + /* Update the latest MySQL binlog name and offset info + in trx sys header */ + + trx_sys_update_mysql_binlog_offset( + binlog_name, + offset, + TRX_SYS_MYSQL_LOG_INFO, &mtr); + + /* Commits the mini-transaction */ + mtr_commit(&mtr); + + /* Synchronous flush of the log buffer to disk */ + log_buffer_flush_to_disk(); +} +#endif + /********************************************************************* This is called after MySQL has written the binlog entry for the current transaction. Flushes the InnoDB log files to disk if required. */ @@ -1337,20 +1797,23 @@ int innobase_commit_complete( /*=====================*/ /* out: 0 */ - void* trx_handle) /* in: InnoDB trx handle */ + THD* thd) /* in: user thread */ { trx_t* trx; - if (srv_flush_log_at_trx_commit == 0) { + trx = (trx_t*) thd->ha_data[innobase_hton.slot]; - return(0); - } + if (trx && trx->active_trans) { - trx = (trx_t*)trx_handle; + trx->active_trans = 0; - ut_a(trx != NULL); + if (UNIV_UNLIKELY(srv_flush_log_at_trx_commit == 0)) { - trx_commit_complete_for_mysql(trx); + return(0); + } + + trx_commit_complete_for_mysql(trx); + } return(0); } @@ -1358,15 +1821,14 @@ innobase_commit_complete( /********************************************************************* Rolls back a transaction or the latest SQL statement. 
*/ -int +static int innobase_rollback( /*==============*/ /* out: 0 or error number */ THD* thd, /* in: handle to the MySQL thread of the user whose transaction should be rolled back */ - void* trx_handle)/* in: InnoDB trx handle or a dummy stmt handle; - the latter means we roll back the latest SQL - statement */ + bool all) /* in: TRUE - commit transaction + FALSE - the current SQL statement ended */ { int error = 0; trx_t* trx; @@ -1376,6 +1838,9 @@ innobase_rollback( trx = check_trx_exists(thd); + /* Update the info whether we should skip XA steps that eat CPU time */ + trx->support_xa = (ibool)(thd->variables.innodb_support_xa); + /* Release a possible FIFO ticket and search latch. Since we will reserve the kernel mutex, we have to release the search system latch first to obey the latching order. */ @@ -1386,15 +1851,15 @@ innobase_rollback( /* If we had reserved the auto-inc lock for some table (if we come here to roll back the latest SQL statement) we release it now before a possibly lengthy rollback */ - + row_unlock_table_autoinc_for_mysql(trx); } - if (trx_handle != (void*)&innodb_dummy_stmt_trx_handle + if (all || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { error = trx_rollback_for_mysql(trx); - thd->transaction.all.innodb_active_trans = 0; + trx->active_trans = 0; } else { error = trx_rollback_last_sql_stat_for_mysql(trx); } @@ -1403,23 +1868,54 @@ innobase_rollback( } /********************************************************************* -Rolls back a transaction to a savepoint. */ +Rolls back a transaction */ int +innobase_rollback_trx( +/*==================*/ + /* out: 0 or error number */ + trx_t* trx) /* in: transaction */ +{ + int error = 0; + + DBUG_ENTER("innobase_rollback_trx"); + DBUG_PRINT("trans", ("aborting transaction")); + + /* Release a possible FIFO ticket and search latch. Since we will + reserve the kernel mutex, we have to release the search system latch + first to obey the latching order. 
*/ + + innobase_release_stat_resources(trx); + + if (trx->auto_inc_lock) { + /* If we had reserved the auto-inc lock for some table (if + we come here to roll back the latest SQL statement) we + release it now before a possibly lengthy rollback */ + + row_unlock_table_autoinc_for_mysql(trx); + } + + error = trx_rollback_for_mysql(trx); + + DBUG_RETURN(convert_error_code_to_mysql(error, NULL)); +} + +/********************************************************************* +Rolls back a transaction to a savepoint. */ + +static int innobase_rollback_to_savepoint( /*===========================*/ /* out: 0 if success, HA_ERR_NO_SAVEPOINT if no savepoint with the given name */ THD* thd, /* in: handle to the MySQL thread of the user whose transaction should be rolled back */ - char* savepoint_name, /* in: savepoint name */ - my_off_t* binlog_cache_pos)/* out: position which corresponds to the - savepoint in the binlog cache of this - transaction, not defined if error */ + void *savepoint) /* in: savepoint data */ { ib_longlong mysql_binlog_cache_pos; int error = 0; trx_t* trx; + char name[64]; DBUG_ENTER("innobase_rollback_to_savepoint"); @@ -1431,38 +1927,66 @@ innobase_rollback_to_savepoint( innobase_release_stat_resources(trx); - error = trx_rollback_to_savepoint_for_mysql(trx, savepoint_name, + /* TODO: use provided savepoint data area to store savepoint data */ + + longlong2str((ulint)savepoint, name, 36); + + error = (int) trx_rollback_to_savepoint_for_mysql(trx, name, &mysql_binlog_cache_pos); - *binlog_cache_pos = (my_off_t)mysql_binlog_cache_pos; + DBUG_RETURN(convert_error_code_to_mysql(error, NULL)); +} + +/********************************************************************* +Release transaction savepoint name. 
*/ +static +int +innobase_release_savepoint( +/*=======================*/ + /* out: 0 if success, HA_ERR_NO_SAVEPOINT if + no savepoint with the given name */ + THD* thd, /* in: handle to the MySQL thread of the user + whose transaction should be rolled back */ + void* savepoint) /* in: savepoint data */ +{ + int error = 0; + trx_t* trx; + char name[64]; + + DBUG_ENTER("innobase_release_savepoint"); + + trx = check_trx_exists(thd); + + /* TODO: use provided savepoint data area to store savepoint data */ + + longlong2str((ulint)savepoint, name, 36); + + error = (int) trx_release_savepoint_for_mysql(trx, name); DBUG_RETURN(convert_error_code_to_mysql(error, NULL)); } /********************************************************************* Sets a transaction savepoint. */ - +static int innobase_savepoint( /*===============*/ /* out: always 0, that is, always succeeds */ THD* thd, /* in: handle to the MySQL thread */ - char* savepoint_name, /* in: savepoint name */ - my_off_t binlog_cache_pos)/* in: offset up to which the current - transaction has cached log entries to its - binlog cache, not defined if no transaction - active, or we are in the autocommit state, or - binlogging is not switched on */ + void* savepoint) /* in: savepoint data */ { int error = 0; trx_t* trx; DBUG_ENTER("innobase_savepoint"); - if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { - /* In the autocommit state there is no sense to set a - savepoint: we return immediate success */ - DBUG_RETURN(0); - } + /* + In the autocommit mode there is no sense to set a savepoint + (unless we are in sub-statement), so SQL layer ensures that + this method is never called in such situation. 
+ */ + DBUG_ASSERT(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) || + thd->in_sub_stmt); trx = check_trx_exists(thd); @@ -1472,40 +1996,53 @@ innobase_savepoint( innobase_release_stat_resources(trx); - /* Setting a savepoint starts a transaction inside InnoDB since - it allocates resources for it (memory to store the savepoint name, - for example) */ + /* cannot happen outside of transaction */ + DBUG_ASSERT(trx->active_trans); - thd->transaction.all.innodb_active_trans = 1; + /* TODO: use provided savepoint data area to store savepoint data */ + char name[64]; + longlong2str((ulint)savepoint,name,36); - error = trx_savepoint_for_mysql(trx, savepoint_name, - (ib_longlong)binlog_cache_pos); + error = (int) trx_savepoint_for_mysql(trx, name, (ib_longlong)0); DBUG_RETURN(convert_error_code_to_mysql(error, NULL)); } /********************************************************************* Frees a possible InnoDB trx object associated with the current THD. */ - +static int innobase_close_connection( /*======================*/ /* out: 0 or error number */ THD* thd) /* in: handle to the MySQL thread of the user - whose transaction should be rolled back */ + whose resources should be free'd */ { trx_t* trx; - trx = (trx_t*)thd->transaction.all.innobase_tid; + trx = (trx_t*)thd->ha_data[innobase_hton.slot]; - if (NULL != trx) { - innobase_rollback(thd, (void*)trx); + ut_a(trx); - trx_free_for_mysql(trx); + if (trx->active_trans == 0 + && trx->conc_state != TRX_NOT_STARTED) { - thd->transaction.all.innobase_tid = NULL; + sql_print_error("trx->active_trans == 0, but trx->conc_state != " + "TRX_NOT_STARTED"); } + + if (trx->conc_state != TRX_NOT_STARTED && + global_system_variables.log_warnings) + sql_print_warning("MySQL is closing a connection that has an active " + "InnoDB transaction. 
%lu row modifications will " + "roll back.", + (ulong)trx->undo_no.low); + + innobase_rollback_trx(trx); + + trx_free_for_mysql(trx); + return(0); } @@ -1515,18 +2052,41 @@ innobase_close_connection( *****************************************************************************/ /******************************************************************** +Get the record format from the data dictionary. */ +enum row_type +ha_innobase::get_row_type() const +/*=============================*/ + /* out: ROW_TYPE_REDUNDANT or ROW_TYPE_COMPACT */ +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + + if (prebuilt && prebuilt->table) { + if (prebuilt->table->comp) { + return(ROW_TYPE_COMPACT); + } else { + return(ROW_TYPE_REDUNDANT); + } + } + ut_ad(0); + return(ROW_TYPE_NOT_USED); +} + +/******************************************************************** Gives the file extension of an InnoDB single-table tablespace. */ +static const char* ha_innobase_exts[] = { + ".ibd", + NullS +}; const char** ha_innobase::bas_ext() const /*========================*/ /* out: file extension string */ { - static const char* ext[] = {".ibd", NullS}; - - return(ext); + return ha_innobase_exts; } + /********************************************************************* Normalizes a table name string. A normalized name consists of the database name catenated to '/' and table name. An example: @@ -1611,7 +2171,8 @@ ha_innobase::open( fields when packed actually became 1 byte longer, when we also stored the string length as the first byte. 
*/ - upd_and_key_val_buff_len = table->reclength + table->max_key_length + upd_and_key_val_buff_len = + table->s->reclength + table->s->max_key_length + MAX_REF_PARTS * 3; if (!(mysql_byte*) my_multi_malloc(MYF(MY_WME), &upd_buff, upd_and_key_val_buff_len, @@ -1628,48 +2189,49 @@ ha_innobase::open( norm_name, NULL); if (NULL == ib_table) { ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB error:\n" -"Cannot find table %s from the internal data dictionary\n" -"of InnoDB though the .frm file for the table exists. Maybe you\n" -"have deleted and recreated InnoDB data files but have forgotten\n" -"to delete the corresponding .frm files of InnoDB tables, or you\n" -"have moved .frm files to another database?\n" -"Look from section 15.1 of http://www.innodb.com/ibman.html\n" -"how you can resolve the problem.\n", - norm_name); + sql_print_error("Cannot find table %s from the internal data " + "dictionary\nof InnoDB though the .frm file " + "for the table exists. Maybe you\nhave " + "deleted and recreated InnoDB data files but " + "have forgotten\nto delete the corresponding " + ".frm files of InnoDB tables, or you\n" + "have moved .frm files to another database?\n" + "See http://dev.mysql.com/doc/refman/5.0/en/innodb-troubleshooting.html\n" + "how you can resolve the problem.\n", + norm_name); free_share(share); - my_free((char*) upd_buff, MYF(0)); + my_free((gptr) upd_buff, MYF(0)); my_errno = ENOENT; - DBUG_RETURN(1); + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); } if (ib_table->ibd_file_missing && !thd->tablespace_op) { ut_print_timestamp(stderr); - fprintf(stderr, " InnoDB error:\n" -"MySQL is trying to open a table handle but the .ibd file for\n" -"table %s does not exist.\n" -"Have you deleted the .ibd file from the database directory under\n" -"the MySQL datadir, or have you used DISCARD TABLESPACE?\n" -"Look from section 15.1 of http://www.innodb.com/ibman.html\n" -"how you can resolve the problem.\n", - norm_name); + sql_print_error("MySQL is trying to open a table 
handle but " + "the .ibd file for\ntable %s does not exist.\n" + "Have you deleted the .ibd file from the " + "database directory under\nthe MySQL datadir, " + "or have you used DISCARD TABLESPACE?\n" + "See http://dev.mysql.com/doc/refman/5.0/en/innodb-troubleshooting.html\n" + "how you can resolve the problem.\n", + norm_name); free_share(share); - my_free((char*) upd_buff, MYF(0)); + my_free((gptr) upd_buff, MYF(0)); my_errno = ENOENT; dict_table_decrement_handle_count(ib_table); - - DBUG_RETURN(1); + DBUG_RETURN(HA_ERR_NO_SUCH_TABLE); } innobase_prebuilt = row_create_prebuilt(ib_table); - ((row_prebuilt_t*)innobase_prebuilt)->mysql_row_len = table->reclength; + ((row_prebuilt_t*)innobase_prebuilt)->mysql_row_len = + table->s->reclength; /* Looks like MySQL-3.23 sometimes has primary key number != 0 */ - primary_key = table->primary_key; + primary_key = table->s->primary_key; key_used_on_scan = primary_key; /* Allocate a buffer for a 'row reference'. A row reference is @@ -1680,34 +2242,30 @@ ha_innobase::open( if (!row_table_got_default_clust_index(ib_table)) { if (primary_key >= MAX_KEY) { - fprintf(stderr, - "InnoDB: Error: table %s has a primary key in InnoDB\n" - "InnoDB: data dictionary, but not in MySQL!\n", name); + sql_print_error("Table %s has a primary key in InnoDB data " + "dictionary, but not in MySQL!", name); } ((row_prebuilt_t*)innobase_prebuilt) ->clust_index_was_generated = FALSE; - /* - MySQL allocates the buffer for ref. key_info->key_length - includes space for all key columns + one byte for each column - that may be NULL. ref_length must be as exact as possible to - save space, because all row reference buffers are allocated - based on ref_length. - */ - + /* MySQL allocates the buffer for ref. key_info->key_length + includes space for all key columns + one byte for each column + that may be NULL. ref_length must be as exact as possible to + save space, because all row reference buffers are allocated + based on ref_length. 
*/ + ref_length = table->key_info[primary_key].key_length; } else { if (primary_key != MAX_KEY) { - fprintf(stderr, - "InnoDB: Error: table %s has no primary key in InnoDB\n" - "InnoDB: data dictionary, but has one in MySQL!\n" - "InnoDB: If you created the table with a MySQL\n" - "InnoDB: version < 3.23.54 and did not define a primary\n" - "InnoDB: key, but defined a unique key with all non-NULL\n" - "InnoDB: columns, then MySQL internally treats that key\n" - "InnoDB: as the primary key. You can fix this error by\n" - "InnoDB: dump + DROP + CREATE + reimport of the table.\n", - name); + sql_print_error("Table %s has no primary key in InnoDB data " + "dictionary, but has one in MySQL! If you " + "created the table with a MySQL version < " + "3.23.54 and did not define a primary key, " + "but defined a unique key with all non-NULL " + "columns, then MySQL internally treats that " + "key as the primary key. You can fix this " + "error by dump + DROP + CREATE + reimport " + "of the table.", name); } ((row_prebuilt_t*)innobase_prebuilt) @@ -1715,26 +2273,21 @@ ha_innobase::open( ref_length = DATA_ROW_ID_LEN; - /* - If we automatically created the clustered index, then - MySQL does not know about it, and MySQL must NOT be aware - of the index used on scan, to make it avoid checking if we - update the column of the index. That is why we assert below - that key_used_on_scan is the undefined value MAX_KEY. - The column is the row id in the automatical generation case, - and it will never be updated anyway. - */ - + /* If we automatically created the clustered index, then + MySQL does not know about it, and MySQL must NOT be aware + of the index used on scan, to make it avoid checking if we + update the column of the index. That is why we assert below + that key_used_on_scan is the undefined value MAX_KEY. + The column is the row id in the automatical generation case, + and it will never be updated anyway. 
*/ + if (key_used_on_scan != MAX_KEY) { - fprintf(stderr, -"InnoDB: Warning: table %s key_used_on_scan is %lu even though there is no\n" -"InnoDB: primary key inside InnoDB.\n", - name, (ulong)key_used_on_scan); + sql_print_warning("Table %s key_used_on_scan is %lu even " + "though there is no primary key inside " + "InnoDB.", name, (ulong) key_used_on_scan); } } - auto_inc_counter_for_this_stat = 0; - block_size = 16 * 1024; /* Index block size in InnoDB: used by MySQL in query optimization */ @@ -1746,19 +2299,25 @@ ha_innobase::open( DBUG_RETURN(0); } +uint +ha_innobase::max_supported_key_part_length() const +{ + return(DICT_MAX_INDEX_COL_LEN - 1); +} + /********************************************************************** Closes a handle to an InnoDB table. */ int ha_innobase::close(void) /*====================*/ - /* out: error number */ + /* out: 0 */ { DBUG_ENTER("ha_innobase::close"); row_prebuilt_free((row_prebuilt_t*) innobase_prebuilt); - my_free((char*) upd_buff, MYF(0)); + my_free((gptr) upd_buff, MYF(0)); free_share(share); /* Tell InnoDB server that there might be work for @@ -1833,18 +2392,6 @@ set_field_in_record_to_null( record[null_offset] = record[null_offset] | field->null_bit; } -/****************************************************************** -Resets SQL NULL bits in a record to zero. 
*/ -inline -void -reset_null_bits( -/*============*/ - TABLE* table, /* in: MySQL table object */ - char* record) /* in: a row in MySQL format */ -{ - bzero(record, table->null_bytes); -} - extern "C" { /***************************************************************** InnoDB uses this function to compare two data fields for which the data type @@ -1877,12 +2424,14 @@ innobase_mysql_cmp( switch (mysql_tp) { - case FIELD_TYPE_STRING: - case FIELD_TYPE_VAR_STRING: + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VAR_STRING: case FIELD_TYPE_TINY_BLOB: case FIELD_TYPE_MEDIUM_BLOB: case FIELD_TYPE_BLOB: case FIELD_TYPE_LONG_BLOB: + case MYSQL_TYPE_VARCHAR: /* Use the charset number to pick the right charset struct for the comparison. Since the MySQL function get_charset may be slow before Bar removes the mutex operation there, we first @@ -1896,9 +2445,10 @@ innobase_mysql_cmp( charset = get_charset(charset_number, MYF(MY_WME)); if (charset == NULL) { - fprintf(stderr, -"InnoDB: fatal error: InnoDB needs charset %lu for doing a comparison,\n" -"InnoDB: but MySQL cannot find that charset.\n", (ulong)charset_number); + sql_print_error("InnoDB needs charset %lu for doing " + "a comparison, but MySQL cannot " + "find that charset.", + (ulong) charset_number); ut_a(0); } } @@ -1910,7 +2460,7 @@ innobase_mysql_cmp( ret = charset->coll->strnncollsp(charset, a, a_length, - b, b_length); + b, b_length, 0); if (ret < 0) { return(-1); } else if (ret > 0) { @@ -1927,7 +2477,9 @@ innobase_mysql_cmp( } /****************************************************************** -Converts a MySQL type to an InnoDB type. */ +Converts a MySQL type to an InnoDB type. Note that this function returns +the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1 +VARCHAR and the new true VARCHAR in >= 5.0.3 by the 'prtype'. 
*/ inline ulint get_innobase_type_from_mysql_type( @@ -1972,8 +2524,9 @@ get_innobase_type_from_mysql_type( switch (field->type()) { /* NOTE that we only allow string types in DATA_MYSQL and DATA_VARMYSQL */ - case FIELD_TYPE_VAR_STRING: if (field->binary()) { - + case MYSQL_TYPE_VAR_STRING: /* old <= 4.1 VARCHAR */ + case MYSQL_TYPE_VARCHAR: /* new >= 5.0.3 true VARCHAR */ + if (field->binary()) { return(DATA_BINARY); } else if (strcmp( field->charset()->name, @@ -1982,7 +2535,8 @@ get_innobase_type_from_mysql_type( } else { return(DATA_VARMYSQL); } - case FIELD_TYPE_STRING: if (field->binary()) { + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_STRING: if (field->binary()) { return(DATA_FIXBINARY); } else if (strcmp( @@ -1992,6 +2546,8 @@ get_innobase_type_from_mysql_type( } else { return(DATA_MYSQL); } + case FIELD_TYPE_NEWDECIMAL: + return(DATA_FIXBINARY); case FIELD_TYPE_LONG: case FIELD_TYPE_LONGLONG: case FIELD_TYPE_TINY: @@ -2010,6 +2566,7 @@ get_innobase_type_from_mysql_type( return(DATA_DOUBLE); case FIELD_TYPE_DECIMAL: return(DATA_DECIMAL); + case FIELD_TYPE_GEOMETRY: case FIELD_TYPE_TINY_BLOB: case FIELD_TYPE_MEDIUM_BLOB: case FIELD_TYPE_BLOB: @@ -2039,6 +2596,19 @@ innobase_write_to_2_little_endian( } /*********************************************************************** +Reads an unsigned integer value < 64k from 2 bytes, in the little-endian +storage format. */ +inline +uint +innobase_read_from_2_little_endian( +/*===============================*/ + /* out: value */ + const mysql_byte* buf) /* in: from where to read */ +{ + return (uint) ((ulint)(buf[0]) + 256 * ((ulint)(buf[1]))); +} + +/*********************************************************************** Stores a key value for a row to a buffer. */ uint @@ -2075,9 +2645,14 @@ ha_innobase::store_key_val_for_row( 3. In a column prefix field, prefix_len next bytes are reserved for data. In a normal field the max field length next bytes are reserved for data. For a VARCHAR(n) the max field length is n. 
If the stored - value is the SQL NULL then these data bytes are set to 0. */ + value is the SQL NULL then these data bytes are set to 0. + + 4. We always use a 2 byte length for a true >= 5.0.3 VARCHAR. Note that + in the MySQL row format, the length is stored in 1 or 2 bytes, + depending on the maximum allowed length. But in the MySQL key value + format, the length always takes 2 bytes. - /* We have to zero-fill the buffer so that MySQL is able to use a + We have to zero-fill the buffer so that MySQL is able to use a simple memcmp to compare two key values to determine if they are equal. MySQL does this to compare contents of two 'ref' values. */ @@ -2100,14 +2675,77 @@ ha_innobase::store_key_val_for_row( field = key_part->field; mysql_type = field->type(); - if (mysql_type == FIELD_TYPE_TINY_BLOB + if (mysql_type == MYSQL_TYPE_VARCHAR) { + /* >= 5.0.3 true VARCHAR */ + ulint lenlen; + ulint len; + byte* data; + ulint key_len; + ulint true_len; + CHARSET_INFO* cs; + int error=0; + + key_len = key_part->length; + + if (is_null) { + buff += key_len + 2; + + continue; + } + cs = field->charset(); + + lenlen = (ulint) + (((Field_varstring*)field)->length_bytes); + + data = row_mysql_read_true_varchar(&len, + (byte*) (record + + (ulint)get_field_offset(table, field)), + lenlen); + + true_len = len; + + /* For multi byte character sets we need to calculate + the true length of the key */ + + if (len > 0 && cs->mbmaxlen > 1) { + true_len = (ulint) cs->cset->well_formed_len(cs, + (const char *) data, + (const char *) data + len, + (uint) (key_len / + cs->mbmaxlen), + &error); + } + + /* In a column prefix index, we may need to truncate + the stored value: */ + + if (true_len > key_len) { + true_len = key_len; + } + + /* The length in a key value is always stored in 2 + bytes */ + + row_mysql_store_true_var_len((byte*)buff, true_len, 2); + buff += 2; + + memcpy(buff, data, true_len); + + /* Note that we always reserve the maximum possible + length of the true VARCHAR in 
the key value, though + only len first bytes after the 2 length bytes contain + actual data. The rest of the space was reset to zero + in the bzero() call above. */ + + buff += key_len; + + } else if (mysql_type == FIELD_TYPE_TINY_BLOB || mysql_type == FIELD_TYPE_MEDIUM_BLOB || mysql_type == FIELD_TYPE_BLOB || mysql_type == FIELD_TYPE_LONG_BLOB) { CHARSET_INFO* cs; ulint key_len; - ulint len; ulint true_len; int error=0; ulint blob_len; @@ -2118,32 +2756,33 @@ ha_innobase::store_key_val_for_row( key_len = key_part->length; if (is_null) { - buff += key_len + 2; - - continue; + buff += key_len + 2; + + continue; } cs = field->charset(); - + blob_data = row_mysql_read_blob_ref(&blob_len, (byte*) (record + (ulint)get_field_offset(table, field)), (ulint) field->pack_length()); + true_len = blob_len; + ut_a(get_field_offset(table, field) == key_part->offset); - true_len = blob_len; - /* For multi byte character sets we need to calculate the true length of the key */ - - if (key_len > 0 && cs->mbmaxlen > 1) { + + if (blob_len > 0 && cs->mbmaxlen > 1) { true_len = (ulint) cs->cset->well_formed_len(cs, (const char *) blob_data, - (const char *) blob_data + (const char *) blob_data + blob_len, - key_len / cs->mbmaxlen, + (uint) (key_len / + cs->mbmaxlen), &error); } @@ -2185,7 +2824,7 @@ ha_innobase::store_key_val_for_row( if (is_null) { buff += key_len; - + continue; } @@ -2198,24 +2837,25 @@ ha_innobase::store_key_val_for_row( type is not enum or set. For these fields check if character set is multi byte. 
*/ - if (real_type != FIELD_TYPE_ENUM + if (real_type != FIELD_TYPE_ENUM && real_type != FIELD_TYPE_SET && ( mysql_type == MYSQL_TYPE_VAR_STRING || mysql_type == MYSQL_TYPE_STRING)) { cs = field->charset(); - /* For multi byte character sets we need to + /* For multi byte character sets we need to calculate the true length of the key */ if (key_len > 0 && cs->mbmaxlen > 1) { - true_len = (ulint) + true_len = (ulint) cs->cset->well_formed_len(cs, (const char *)src_start, - (const char *)src_start + (const char *)src_start + key_len, - key_len / cs->mbmaxlen, + (uint) (key_len / + cs->mbmaxlen), &error); } } @@ -2223,9 +2863,9 @@ ha_innobase::store_key_val_for_row( memcpy(buff, src_start, true_len); buff += true_len; - /* Pad the unused space with spaces. Note that no - padding is ever needed for UCS-2 because in MySQL, - all UCS2 characters are 2 bytes, as MySQL does not + /* Pad the unused space with spaces. Note that no + padding is ever needed for UCS-2 because in MySQL, + all UCS2 characters are 2 bytes, as MySQL does not support surrogate pairs, which are needed to represent characters in the range U+10000 to U+10FFFF. 
*/ @@ -2266,6 +2906,8 @@ build_template( ibool fetch_all_in_key = FALSE; ibool fetch_primary_key_cols = FALSE; ulint i; + /* byte offset of the end of last requested column */ + ulint mysql_prefix_len = 0; if (prebuilt->select_lock_type == LOCK_X) { /* We always retrieve the whole clustered index record if we @@ -2322,7 +2964,7 @@ build_template( the clustered index */ } - n_fields = (ulint)table->fields; /* number of columns */ + n_fields = (ulint)table->s->fields; /* number of columns */ if (!prebuilt->mysql_template) { prebuilt->mysql_template = (mysql_row_templ_t*) @@ -2331,7 +2973,7 @@ build_template( } prebuilt->template_type = templ_type; - prebuilt->null_bitmap_len = table->null_bytes; + prebuilt->null_bitmap_len = table->s->null_bytes; prebuilt->templ_contains_blob = FALSE; @@ -2341,18 +2983,44 @@ build_template( templ = prebuilt->mysql_template + n_requested_fields; field = table->field[i]; - if (templ_type == ROW_MYSQL_REC_FIELDS - && !(fetch_all_in_key - && dict_index_contains_col_or_prefix(index, i)) - && !(fetch_primary_key_cols - && dict_table_col_in_clustered_key(index->table, i)) - && thd->query_id != field->query_id) { + if (UNIV_LIKELY(templ_type == ROW_MYSQL_REC_FIELDS)) { + /* Decide which columns we should fetch + and which we can skip. 
*/ + register const ibool index_contains_field = + dict_index_contains_col_or_prefix(index, i); + + if (!index_contains_field && prebuilt->read_just_key) { + /* If this is a 'key read', we do not need + columns that are not in the key */ + + goto skip_field; + } + + if (index_contains_field && fetch_all_in_key) { + /* This field is needed in the query */ + + goto include_field; + } + + if (thd->query_id == field->query_id) { + /* This field is needed in the query */ + + goto include_field; + } + + if (fetch_primary_key_cols + && dict_table_col_in_clustered_key(index->table, + i)) { + /* This field is needed in the query */ + + goto include_field; + } /* This field is not needed in the query, skip it */ goto skip_field; } - +include_field: n_requested_fields++; templ->col_no = i; @@ -2383,12 +3051,25 @@ build_template( get_field_offset(table, field); templ->mysql_col_len = (ulint) field->pack_length(); + if (mysql_prefix_len < templ->mysql_col_offset + + templ->mysql_col_len) { + mysql_prefix_len = templ->mysql_col_offset + + templ->mysql_col_len; + } templ->type = index->table->cols[i].type.mtype; - templ->is_unsigned = index->table->cols[i].type.prtype - & DATA_UNSIGNED; + templ->mysql_type = (ulint)field->type(); + + if (templ->mysql_type == DATA_MYSQL_TRUE_VARCHAR) { + templ->mysql_length_bytes = (ulint) + (((Field_varstring*)field)->length_bytes); + } + templ->charset = dtype_get_charset_coll_noninline( index->table->cols[i].type.prtype); - + templ->mbminlen = index->table->cols[i].type.mbminlen; + templ->mbmaxlen = index->table->cols[i].type.mbmaxlen; + templ->is_unsigned = index->table->cols[i].type.prtype + & DATA_UNSIGNED; if (templ->type == DATA_BLOB) { prebuilt->templ_contains_blob = TRUE; } @@ -2397,6 +3078,7 @@ skip_field: } prebuilt->n_template = n_requested_fields; + prebuilt->mysql_prefix_len = mysql_prefix_len; if (index != clust_index && prebuilt->need_to_access_clustered) { /* Change rec_field_no's to correspond to the clustered index @@ -2424,31 
+3106,31 @@ ha_innobase::write_row( int error; longlong auto_inc; longlong dummy; - ibool incremented_auto_inc_for_stat = FALSE; - ibool incremented_auto_inc_counter = FALSE; - ibool skip_auto_inc_decr; + ibool auto_inc_used= FALSE; DBUG_ENTER("ha_innobase::write_row"); if (prebuilt->trx != - (trx_t*) current_thd->transaction.all.innobase_tid) { - fprintf(stderr, -"InnoDB: Error: the transaction object for the table handle is at\n" -"InnoDB: %p, but for the current thread it is at %p\n", - prebuilt->trx, - current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]) { + sql_print_error("The transaction object for the table handle is at " + "%p, but for the current thread it is at %p", + prebuilt->trx, + (trx_t*) current_thd->ha_data[innobase_hton.slot]); + fputs("InnoDB: Dump of 200 bytes around prebuilt: ", stderr); ut_print_buf(stderr, ((const byte*)prebuilt) - 100, 200); fputs("\n" "InnoDB: Dump of 200 bytes around transaction.all: ", stderr); ut_print_buf(stderr, - ((byte*)(&(current_thd->transaction.all))) - 100, 200); + ((byte*)(&(current_thd->ha_data[innobase_hton.slot]))) - 100, + 200); putc('\n', stderr); ut_error; } - statistic_increment(ha_write_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_write_count, + &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); @@ -2468,7 +3150,7 @@ ha_innobase::write_row( being blocked by a MySQL table lock TL_WRITE_ALLOW_READ. */ dict_table_t* src_table; - ibool mode; + ulint mode; num_write_row = 0; @@ -2480,7 +3162,7 @@ ha_innobase::write_row( src_table = lock_get_src_table( prebuilt->trx, prebuilt->table, &mode); if (!src_table) { - no_commit: +no_commit: /* Unknown situation: do not commit */ /* ut_print_timestamp(stderr); @@ -2495,14 +3177,15 @@ ha_innobase::write_row( no need to re-acquire locks on it. 
*/ /* Altering to InnoDB format */ - innobase_commit(user_thd, prebuilt->trx); + innobase_commit(user_thd, 1); /* Note that this transaction is still active. */ - user_thd->transaction.all.innodb_active_trans = 1; + prebuilt->trx->active_trans = 1; /* We will need an IX lock on the destination table. */ prebuilt->sql_stat_start = TRUE; } else { /* Ensure that there are no other table locks than LOCK_IX and LOCK_AUTO_INC on the destination table. */ + if (!lock_is_table_exclusive(prebuilt->table, prebuilt->trx)) { goto no_commit; @@ -2510,9 +3193,9 @@ ha_innobase::write_row( /* Commit the transaction. This will release the table locks, so they have to be acquired again. */ - innobase_commit(user_thd, prebuilt->trx); + innobase_commit(user_thd, 1); /* Note that this transaction is still active. */ - user_thd->transaction.all.innodb_active_trans = 1; + prebuilt->trx->active_trans = 1; /* Re-acquire the table lock on the source table. */ row_lock_table_for_mysql(prebuilt, src_table, mode); /* We will need an IX lock on the destination table. */ @@ -2557,99 +3240,30 @@ ha_innobase::write_row( prebuilt->sql_stat_start = TRUE; } - /* Fetch the value the user possibly has set in the - autoincrement field */ + /* We have to use the transactional lock mechanism on the + auto-inc counter of the table to ensure that replication and + roll-forward of the binlog exactly imitates also the given + auto-inc values. The lock is released at each SQL statement's + end. This lock also prevents a race where two threads would + call ::get_auto_increment() simultaneously. */ - auto_inc = table->next_number_field->val_int(); + error = row_lock_table_autoinc_for_mysql(prebuilt); - /* In replication and also otherwise the auto-inc column - can be set with SET INSERT_ID. Then we must look at - user_thd->next_insert_id. If it is nonzero and the user - has not supplied a value, we must use it, and use values - incremented by 1 in all subsequent inserts within the - same SQL statement! 
*/ + if (error != DB_SUCCESS) { + /* Deadlock or lock wait timeout */ - if (auto_inc == 0 && user_thd->next_insert_id != 0) { + error = convert_error_code_to_mysql(error, user_thd); - auto_inc_counter_for_this_stat - = user_thd->next_insert_id; + goto func_exit; } - if (auto_inc == 0 && auto_inc_counter_for_this_stat) { - /* The user set the auto-inc counter for - this SQL statement with SET INSERT_ID. We must - assign sequential values from the counter. */ + /* We must use the handler code to update the auto-increment + value to be sure that we increment it correctly. */ - auto_inc = auto_inc_counter_for_this_stat; + if ((error= update_auto_increment())) + goto func_exit; + auto_inc_used = 1; - /* We give MySQL a new value to place in the - auto-inc column */ - user_thd->next_insert_id = auto_inc; - - auto_inc_counter_for_this_stat++; - incremented_auto_inc_for_stat = TRUE; - } - - if (auto_inc != 0) { - /* This call will calculate the max of the current - value and the value supplied by the user and - update the counter accordingly */ - - /* We have to use the transactional lock mechanism - on the auto-inc counter of the table to ensure - that replication and roll-forward of the binlog - exactly imitates also the given auto-inc values. - The lock is released at each SQL statement's - end. 
*/ - - innodb_srv_conc_enter_innodb(prebuilt->trx); - error = row_lock_table_autoinc_for_mysql(prebuilt); - innodb_srv_conc_exit_innodb(prebuilt->trx); - - if (error != DB_SUCCESS) { - - error = convert_error_code_to_mysql(error, - user_thd); - goto func_exit; - } - - dict_table_autoinc_update(prebuilt->table, auto_inc); - } else { - innodb_srv_conc_enter_innodb(prebuilt->trx); - - if (!prebuilt->trx->auto_inc_lock) { - - error = row_lock_table_autoinc_for_mysql( - prebuilt); - if (error != DB_SUCCESS) { - innodb_srv_conc_exit_innodb( - prebuilt->trx); - - error = convert_error_code_to_mysql( - error, user_thd); - goto func_exit; - } - } - - /* The following call gets the value of the auto-inc - counter of the table and increments it by 1 */ - - auto_inc = dict_table_autoinc_get(prebuilt->table); - incremented_auto_inc_counter = TRUE; - - innodb_srv_conc_exit_innodb(prebuilt->trx); - - /* We can give the new value for MySQL to place in - the field */ - - user_thd->next_insert_id = auto_inc; - } - - /* This call of a handler.cc function places - user_thd->next_insert_id to the column value, if the column - value was not set by the user */ - - update_auto_increment(); } if (prebuilt->mysql_template == NULL @@ -2664,122 +3278,47 @@ ha_innobase::write_row( error = row_insert_for_mysql((byte*) record, prebuilt); - innodb_srv_conc_exit_innodb(prebuilt->trx); + if (error == DB_SUCCESS && auto_inc_used) { - if (error != DB_SUCCESS) { - /* If the insert did not succeed we restore the value of - the auto-inc counter we used; note that this behavior was - introduced only in version 4.0.4. - NOTE that a REPLACE command and LOAD DATA INFILE REPLACE - handles a duplicate key error - itself, and we must not decrement the autoinc counter - if we are performing those statements. 
- NOTE 2: if there was an error, for example a deadlock, - which caused InnoDB to roll back the whole transaction - already in the call of row_insert_for_mysql(), we may no - longer have the AUTO-INC lock, and cannot decrement - the counter here. */ - - skip_auto_inc_decr = FALSE; - - if (error == DB_DUPLICATE_KEY - && (user_thd->lex->sql_command == SQLCOM_REPLACE - || user_thd->lex->sql_command - == SQLCOM_REPLACE_SELECT - || (user_thd->lex->sql_command == SQLCOM_LOAD - && user_thd->lex->duplicates == DUP_REPLACE))) { - - skip_auto_inc_decr= TRUE; - } + /* Fetch the value that was set in the autoincrement field */ - if (!skip_auto_inc_decr && incremented_auto_inc_counter - && prebuilt->trx->auto_inc_lock) { - dict_table_autoinc_decrement(prebuilt->table); - } + auto_inc = table->next_number_field->val_int(); - if (!skip_auto_inc_decr && incremented_auto_inc_for_stat - && prebuilt->trx->auto_inc_lock) { - auto_inc_counter_for_this_stat--; - } - } - - error = convert_error_code_to_mysql(error, user_thd); + if (auto_inc != 0) { + /* This call will update the counter according to the + value that was inserted in the table */ - /* Tell InnoDB server that there might be work for - utility threads: */ -func_exit: - innobase_active_small(); + dict_table_autoinc_update(prebuilt->table, auto_inc); + } + } - DBUG_RETURN(error); -} + /* A REPLACE command and LOAD DATA INFILE REPLACE handle a duplicate + key error themselves, and we must update the autoinc counter if we are + performing those statements. */ -/****************************************************************** -Converts field data for storage in an InnoDB update vector. 
*/ -inline -mysql_byte* -innobase_convert_and_store_changed_col( -/*===================================*/ - /* out: pointer to the end of the converted - data in the buffer */ - upd_field_t* ufield, /* in/out: field in the update vector */ - mysql_byte* buf, /* in: buffer we can use in conversion */ - mysql_byte* data, /* in: column data to store */ - ulint len, /* in: data len */ - ulint col_type,/* in: data type in InnoDB type numbers */ - ulint prtype) /* InnoDB precise data type and flags */ -{ - uint i; - - if (len == UNIV_SQL_NULL) { - data = NULL; - } else if (col_type == DATA_VARCHAR || col_type == DATA_BINARY - || col_type == DATA_VARMYSQL) { - /* Remove trailing spaces. */ - - /* Handle UCS2 strings differently. As no new - collations will be introduced in 4.1, we hardcode the - charset-collation codes here. In 5.0, the logic will - be based on mbminlen. */ - ulint cset = dtype_get_charset_coll_noninline(prtype); - if (cset == 35/*ucs2_general_ci*/ - || cset == 90/*ucs2_bin*/ - || (cset >= 128/*ucs2_unicode_ci*/ - && cset <= 144/*ucs2_persian_ci*/)) { - /* space=0x0020 */ - /* Trim "half-chars", just in case. 
*/ - len = len - (len % 2); /* len &= ~1; */ - - while (len && data[len - 2] == 0x00 - && data[len - 1] == 0x20) { - len -= 2; - } - } else { - /* space=0x20 */ - while (len && data[len - 1] == 0x20) { - len--; - } - } - } else if (col_type == DATA_INT) { - /* Store integer data in InnoDB in a big-endian - format, sign bit negated, if signed */ + if (error == DB_DUPLICATE_KEY && auto_inc_used + && (user_thd->lex->sql_command == SQLCOM_REPLACE + || user_thd->lex->sql_command == SQLCOM_REPLACE_SELECT + || (user_thd->lex->sql_command == SQLCOM_LOAD + && user_thd->lex->duplicates == DUP_REPLACE))) { - for (i = 0; i < len; i++) { - buf[len - 1 - i] = data[i]; - } + auto_inc = table->next_number_field->val_int(); - if (!(prtype & DATA_UNSIGNED)) { - buf[0] = buf[0] ^ 128; - } + if (auto_inc != 0) { + dict_table_autoinc_update(prebuilt->table, auto_inc); + } + } - data = buf; + innodb_srv_conc_exit_innodb(prebuilt->trx); - buf += len; - } + error = convert_error_code_to_mysql(error, user_thd); - ufield->new_val.data = data; - ufield->new_val.len = len; + /* Tell InnoDB server that there might be work for + utility threads: */ +func_exit: + innobase_active_small(); - return(buf); + DBUG_RETURN(error); } /************************************************************************** @@ -2802,19 +3341,22 @@ calc_row_difference( { mysql_byte* original_upd_buff = upd_buff; Field* field; + enum_field_types field_mysql_type; uint n_fields; ulint o_len; ulint n_len; + ulint col_pack_len; + byte* new_mysql_row_col; byte* o_ptr; byte* n_ptr; byte* buf; upd_field_t* ufield; ulint col_type; - ulint prtype; ulint n_changed = 0; + dfield_t dfield; uint i; - n_fields = table->fields; + n_fields = table->s->fields; /* We use upd_buff to convert changed fields */ buf = (byte*) upd_buff; @@ -2831,24 +3373,50 @@ calc_row_difference( o_ptr = (byte*) old_row + get_field_offset(table, field); n_ptr = (byte*) new_row + get_field_offset(table, field); - o_len = field->pack_length(); - n_len = 
field->pack_length(); + + /* Use new_mysql_row_col and col_pack_len save the values */ + + new_mysql_row_col = n_ptr; + col_pack_len = field->pack_length(); + + o_len = col_pack_len; + n_len = col_pack_len; + + /* We use o_ptr and n_ptr to dig up the actual data for + comparison. */ + + field_mysql_type = field->type(); col_type = prebuilt->table->cols[i].type.mtype; - prtype = prebuilt->table->cols[i].type.prtype; + switch (col_type) { case DATA_BLOB: o_ptr = row_mysql_read_blob_ref(&o_len, o_ptr, o_len); n_ptr = row_mysql_read_blob_ref(&n_len, n_ptr, n_len); + break; + case DATA_VARCHAR: case DATA_BINARY: case DATA_VARMYSQL: - o_ptr = row_mysql_read_var_ref_noninline(&o_len, - o_ptr); - n_ptr = row_mysql_read_var_ref_noninline(&n_len, - n_ptr); + if (field_mysql_type == MYSQL_TYPE_VARCHAR) { + /* This is a >= 5.0.3 type true VARCHAR where + the real payload data length is stored in + 1 or 2 bytes */ + + o_ptr = row_mysql_read_true_varchar( + &o_len, o_ptr, + (ulint) + (((Field_varstring*)field)->length_bytes)); + + n_ptr = row_mysql_read_true_varchar( + &n_len, n_ptr, + (ulint) + (((Field_varstring*)field)->length_bytes)); + } + + break; default: ; } @@ -2871,11 +3439,26 @@ calc_row_difference( ufield = uvect->fields + n_changed; - buf = (byte*) - innobase_convert_and_store_changed_col(ufield, - (mysql_byte*)buf, - (mysql_byte*)n_ptr, n_len, col_type, - prtype); + /* Let us use a dummy dfield to make the conversion + from the MySQL column format to the InnoDB format */ + + dfield.type = (prebuilt->table->cols + i)->type; + + if (n_len != UNIV_SQL_NULL) { + buf = row_mysql_store_col_in_innobase_format( + &dfield, + (byte*)buf, + TRUE, + new_mysql_row_col, + col_pack_len, + prebuilt->table->comp); + ufield->new_val.data = dfield.data; + ufield->new_val.len = dfield.len; + } else { + ufield->new_val.data = NULL; + ufield->new_val.len = UNIV_SQL_NULL; + } + ufield->exp = NULL; ufield->field_no = prebuilt->table->cols[i].clust_pos; n_changed++; @@ -2912,7 +3495,7 @@ 
ha_innobase::update_row( DBUG_ENTER("ha_innobase::update_row"); ut_ad(prebuilt->trx == - (trx_t*) current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); @@ -2973,7 +3556,7 @@ ha_innobase::delete_row( DBUG_ENTER("ha_innobase::delete_row"); ut_ad(prebuilt->trx == - (trx_t*) current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]); if (last_query_id != user_thd->query_id) { prebuilt->sql_stat_start = TRUE; @@ -3006,6 +3589,42 @@ ha_innobase::delete_row( DBUG_RETURN(error); } +/************************************************************************** +Removes a new lock set on a row. This method does nothing unless the +option innodb_locks_unsafe_for_binlog is set.*/ + +void +ha_innobase::unlock_row(void) +/*=========================*/ +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + + DBUG_ENTER("ha_innobase::unlock_row"); + + if (last_query_id != user_thd->query_id) { + ut_print_timestamp(stderr); + sql_print_error("last_query_id is %lu != user_thd_query_id is " + "%lu", (ulong) last_query_id, + (ulong) user_thd->query_id); + mem_analyze_corruption((byte *) prebuilt->trx); + ut_error; + } + + /* Consistent read does not take any locks, thus there is + nothing to unlock. */ + + if (prebuilt->select_lock_type == LOCK_NONE) { + DBUG_VOID_RETURN; + } + + if (srv_locks_unsafe_for_binlog) { + row_unlock_for_mysql(prebuilt, FALSE); + } + + DBUG_VOID_RETURN; + +} + /********************************************************************** Initializes a handle to use an index. 
*/ @@ -3157,9 +3776,10 @@ ha_innobase::index_read( DBUG_ENTER("index_read"); ut_ad(prebuilt->trx == - (trx_t*) current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]); - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, + &LOCK_status); if (last_query_id != user_thd->query_id) { prebuilt->sql_stat_start = TRUE; @@ -3207,7 +3827,7 @@ ha_innobase::index_read( match_mode = ROW_SEL_EXACT_PREFIX; } - last_match_mode = match_mode; + last_match_mode = (uint) match_mode; innodb_srv_conc_enter_innodb(prebuilt->trx); @@ -3227,7 +3847,7 @@ ha_innobase::index_read( error = HA_ERR_KEY_NOT_FOUND; table->status = STATUS_NOT_FOUND; } else { - error = convert_error_code_to_mysql(ret, user_thd); + error = convert_error_code_to_mysql((int) ret, user_thd); table->status = STATUS_NOT_FOUND; } @@ -3265,16 +3885,17 @@ ha_innobase::change_active_index( { row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; KEY* key=0; - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, + &LOCK_status); DBUG_ENTER("change_active_index"); ut_ad(user_thd == current_thd); ut_ad(prebuilt->trx == - (trx_t*) current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]); active_index = keynr; - if (keynr != MAX_KEY && table->keys > 0) { + if (keynr != MAX_KEY && table->s->keys > 0) { key = table->key_info + active_index; prebuilt->index = dict_table_get_index_noninline( @@ -3286,9 +3907,10 @@ ha_innobase::change_active_index( } if (!prebuilt->index) { - sql_print_error( -"Innodb could not find key n:o %u with name %s from dict cache for table %s", - keynr, key ? key->name : "NULL", prebuilt->table->name); + sql_print_error("Innodb could not find key n:o %u with name %s " + "from dict cache for table %s", + keynr, key ? 
key->name : "NULL", + prebuilt->table->name); DBUG_RETURN(1); } @@ -3358,7 +3980,7 @@ ha_innobase::general_fetch( DBUG_ENTER("general_fetch"); ut_ad(prebuilt->trx == - (trx_t*) current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]); innodb_srv_conc_enter_innodb(prebuilt->trx); @@ -3378,7 +4000,7 @@ ha_innobase::general_fetch( error = HA_ERR_END_OF_FILE; table->status = STATUS_NOT_FOUND; } else { - error = convert_error_code_to_mysql(ret, user_thd); + error = convert_error_code_to_mysql((int) ret, user_thd); table->status = STATUS_NOT_FOUND; } @@ -3397,7 +4019,8 @@ ha_innobase::index_next( mysql_byte* buf) /* in/out: buffer for next row in MySQL format */ { - statistic_increment(ha_read_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); return(general_fetch(buf, ROW_SEL_NEXT, 0)); } @@ -3414,7 +4037,8 @@ ha_innobase::index_next_same( const mysql_byte* key, /* in: key value */ uint keylen) /* in: key value length */ { - statistic_increment(ha_read_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); return(general_fetch(buf, ROW_SEL_NEXT, last_match_mode)); } @@ -3431,7 +4055,8 @@ ha_innobase::index_prev( mysql_byte* buf) /* in/out: buffer for previous row in MySQL format */ { - statistic_increment(ha_read_prev_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_prev_count, + &LOCK_status); return(general_fetch(buf, ROW_SEL_PREV, 0)); } @@ -3450,7 +4075,8 @@ ha_innobase::index_first( int error; DBUG_ENTER("index_first"); - statistic_increment(ha_read_first_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count, + &LOCK_status); error = index_read(buf, NULL, 0, HA_READ_AFTER_KEY); @@ -3476,7 +4102,8 @@ ha_innobase::index_last( int error; DBUG_ENTER("index_last"); - statistic_increment(ha_read_last_count, &LOCK_status); + 
statistic_increment(current_thd->status_var.ha_read_last_count, + &LOCK_status); error = index_read(buf, NULL, 0, HA_READ_BEFORE_KEY); @@ -3541,7 +4168,8 @@ ha_innobase::rnd_next( int error; DBUG_ENTER("rnd_next"); - statistic_increment(ha_read_rnd_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); if (start_of_scan) { error = index_first(buf); @@ -3577,10 +4205,11 @@ ha_innobase::rnd_pos( DBUG_ENTER("rnd_pos"); DBUG_DUMP("key", (char*) pos, ref_length); - statistic_increment(ha_read_rnd_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_count, + &LOCK_status); ut_ad(prebuilt->trx == - (trx_t*) current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]); if (prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we @@ -3594,7 +4223,7 @@ ha_innobase::rnd_pos( } if (error) { - DBUG_PRINT("error",("Got error: %ld",error)); + DBUG_PRINT("error", ("Got error: %d", error)); DBUG_RETURN(error); } @@ -3602,10 +4231,11 @@ ha_innobase::rnd_pos( for the table, and it is == ref_length */ error = index_read(buf, pos, ref_length, HA_READ_KEY_EXACT); - if (error) - { - DBUG_PRINT("error",("Got error: %ld",error)); + + if (error) { + DBUG_PRINT("error", ("Got error: %d", error)); } + change_active_index(keynr); DBUG_RETURN(error); @@ -3629,7 +4259,7 @@ ha_innobase::position( uint len; ut_ad(prebuilt->trx == - (trx_t*) current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]); if (prebuilt->clust_index_was_generated) { /* No primary key was defined for the table and we @@ -3645,14 +4275,12 @@ ha_innobase::position( ref_length, record); } - /* Since we do not store len to the buffer 'ref', we must assume - that len is always fixed for this table. The following assertion - checks this. */ - + /* We assume that the 'ref' value len is always fixed for the same + table. 
*/ + if (len != ref_length) { - fprintf(stderr, - "InnoDB: Error: stored ref len is %lu, but table ref len is %lu\n", - (ulong)len, (ulong)ref_length); + sql_print_error("Stored ref len is %lu, but table ref len is %lu", + (ulong) len, (ulong) ref_length); } } @@ -3666,7 +4294,7 @@ create_table_def( TABLE* form, /* in: information on table columns and indexes */ const char* table_name, /* in: table name */ - const char* path_of_temp_table)/* in: if this is a table explicitly + const char* path_of_temp_table,/* in: if this is a table explicitly created by the user with the TEMPORARY keyword, then this parameter is the dir path where the @@ -3674,27 +4302,30 @@ create_table_def( an .ibd file for it (no .ibd extension in the path, though); otherwise this is NULL */ + ibool comp) /* in: TRUE=compact record format */ { Field* field; dict_table_t* table; ulint n_cols; int error; ulint col_type; + ulint col_len; ulint nulls_allowed; ulint unsigned_type; ulint binary_type; + ulint long_true_varchar; ulint charset_no; ulint i; DBUG_ENTER("create_table_def"); DBUG_PRINT("enter", ("table_name: %s", table_name)); - n_cols = form->fields; + n_cols = form->s->fields; /* We pass 0 as the space id, and determine at a lower level the space id where to store the table */ - table = dict_mem_table_create((char*) table_name, 0, n_cols); + table = dict_mem_table_create(table_name, 0, n_cols, comp); if (path_of_temp_table) { table->dir_path_of_temp_table = @@ -3705,7 +4336,7 @@ create_table_def( field = form->field[i]; col_type = get_innobase_type_from_mysql_type(&unsigned_type, - field); + field); if (field->null_ptr) { nulls_allowed = 0; } else { @@ -3718,23 +4349,46 @@ create_table_def( binary_type = 0; } - charset_no = 0; + charset_no = 0; if (dtype_is_string_type(col_type)) { charset_no = (ulint)field->charset()->number; - ut_a(charset_no < 256); /* in ut0type.h we assume that - the number fits in one byte */ + ut_a(charset_no < 256); /* in data0type.h we assume + that the number 
fits in one + byte */ + } + + ut_a(field->type() < 256); /* we assume in dtype_form_prtype() + that this fits in one byte */ + col_len = field->pack_length(); + + /* The MySQL pack length contains 1 or 2 bytes length field + for a true VARCHAR. Let us subtract that, so that the InnoDB + column length in the InnoDB data dictionary is the real + maximum byte length of the actual data. */ + + long_true_varchar = 0; + + if (field->type() == MYSQL_TYPE_VARCHAR) { + col_len -= ((Field_varstring*)field)->length_bytes; + + if (((Field_varstring*)field)->length_bytes == 2) { + long_true_varchar = DATA_LONG_TRUE_VARCHAR; + } } - dict_mem_table_add_col(table, (char*) field->field_name, - col_type, dtype_form_prtype( - (ulint)field->type() - | nulls_allowed | unsigned_type - | binary_type, - + charset_no), - field->pack_length(), 0); + dict_mem_table_add_col(table, + (char*) field->field_name, + col_type, + dtype_form_prtype( + (ulint)field->type() + | nulls_allowed | unsigned_type + | binary_type | long_true_varchar, + charset_no), + col_len, + 0); } error = row_create_table_for_mysql(table, trx); @@ -3768,6 +4422,7 @@ create_index( ulint is_unsigned; ulint i; ulint j; + ulint* field_lengths; DBUG_ENTER("create_index"); @@ -3777,7 +4432,7 @@ create_index( ind_type = 0; - if (key_num == form->primary_key) { + if (key_num == form->s->primary_key) { ind_type = ind_type | DICT_CLUSTERED; } @@ -3790,6 +4445,10 @@ create_index( index = dict_mem_index_create((char*) table_name, key->name, 0, ind_type, n_fields); + + field_lengths = (ulint*) my_malloc(sizeof(ulint) * n_fields, + MYF(MY_FAE)); + for (i = 0; i < n_fields; i++) { key_part = key->key_part + i; @@ -3798,9 +4457,9 @@ create_index( bytes of the column to the index field.) The flag does not seem to be properly set by MySQL. Let us fall back on testing the length of the key part versus the column. 
*/ - + field = NULL; - for (j = 0; j < form->fields; j++) { + for (j = 0; j < form->s->fields; j++) { field = form->field[j]; @@ -3813,13 +4472,17 @@ create_index( } } - ut_a(j < form->fields); + ut_a(j < form->s->fields); col_type = get_innobase_type_from_mysql_type( &is_unsigned, key_part->field); if (DATA_BLOB == col_type - || key_part->length < field->pack_length()) { + || (key_part->length < field->pack_length() + && field->type() != MYSQL_TYPE_VARCHAR) + || (field->type() == MYSQL_TYPE_VARCHAR + && key_part->length < field->pack_length() + - ((Field_varstring*)field)->length_bytes)) { prefix_len = key_part->length; @@ -3827,17 +4490,21 @@ create_index( || col_type == DATA_FLOAT || col_type == DATA_DOUBLE || col_type == DATA_DECIMAL) { - fprintf(stderr, -"InnoDB: error: MySQL is trying to create a column prefix index field\n" -"InnoDB: on an inappropriate data type. Table name %s, column name %s.\n", - table_name, key_part->field->field_name); - + sql_print_error("MySQL is trying to create a column " + "prefix index field, on an " + "inappropriate data type. Table " + "name %s, column name %s.", + table_name, + key_part->field->field_name); + prefix_len = 0; } } else { prefix_len = 0; } + field_lengths[i] = key_part->length; + /* We assume all fields should be sorted in ascending order, hence the '0': */ @@ -3846,10 +4513,15 @@ create_index( 0, prefix_len); } - error = row_create_index_for_mysql(index, trx); + /* Even though we've defined max_supported_key_part_length, we + still do our own checking using field_lengths to be absolutely + sure we don't create too long indexes. 
*/ + error = row_create_index_for_mysql(index, trx, field_lengths); error = convert_error_code_to_mysql(error, NULL); + my_free((gptr) field_lengths, MYF(0)); + DBUG_RETURN(error); } @@ -3872,7 +4544,7 @@ create_clustered_index_when_no_primary( index = dict_mem_index_create((char*) table_name, (char*) "GEN_CLUST_INDEX", 0, DICT_CLUSTERED, 0); - error = row_create_index_for_mysql(index, trx); + error = row_create_index_for_mysql(index, trx, NULL); error = convert_error_code_to_mysql(error, NULL); @@ -3908,25 +4580,25 @@ ha_innobase::create( DBUG_ASSERT(thd != NULL); - if (form->fields > 1000) { + if (form->s->fields > 1000) { /* The limit probably should be REC_MAX_N_FIELDS - 3 = 1020, but we play safe here */ DBUG_RETURN(HA_ERR_TO_BIG_ROW); - } + } /* Get the transaction associated with the current thd, or create one if not yet created */ - + parent_trx = check_trx_exists(current_thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ - trx_search_latch_release_if_reserved(parent_trx); - + trx_search_latch_release_if_reserved(parent_trx); + trx = trx_allocate_for_mysql(); - + trx->mysql_thd = thd; trx->mysql_query_str = &((*thd).query); @@ -3956,27 +4628,18 @@ ha_innobase::create( /* Create the table definition in InnoDB */ - if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { - - error = create_table_def(trx, form, norm_name, name2); - } else { - error = create_table_def(trx, form, norm_name, NULL); - } + error = create_table_def(trx, form, norm_name, + create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL, + form->s->row_type != ROW_TYPE_REDUNDANT); if (error) { - innobase_commit_low(trx); - - row_mysql_unlock_data_dictionary(trx); - - trx_free_for_mysql(trx); - - DBUG_RETURN(error); + goto cleanup; } /* Look for a primary key */ - primary_key_no= (table->primary_key != MAX_KEY ? - (int) table->primary_key : + primary_key_no= (table->s->primary_key != MAX_KEY ? 
+ (int) table->s->primary_key : -1); /* Our function row_get_mysql_key_number_for_index assumes @@ -3986,7 +4649,7 @@ ha_innobase::create( /* Create the keys */ - if (form->keys == 0 || primary_key_no == -1) { + if (form->s->keys == 0 || primary_key_no == -1) { /* Create an index which is used as the clustered index; order the rows by their row id which is internally generated by InnoDB */ @@ -3994,13 +4657,7 @@ ha_innobase::create( error = create_clustered_index_when_no_primary(trx, norm_name); if (error) { - innobase_commit_low(trx); - - row_mysql_unlock_data_dictionary(trx); - - trx_free_for_mysql(trx); - - DBUG_RETURN(error); + goto cleanup; } } @@ -4009,29 +4666,16 @@ ha_innobase::create( first */ if ((error = create_index(trx, form, norm_name, (uint) primary_key_no))) { - innobase_commit_low(trx); - - row_mysql_unlock_data_dictionary(trx); - - trx_free_for_mysql(trx); - - DBUG_RETURN(error); + goto cleanup; } } - for (i = 0; i < form->keys; i++) { + for (i = 0; i < form->s->keys; i++) { if (i != (uint) primary_key_no) { if ((error = create_index(trx, form, norm_name, i))) { - - innobase_commit_low(trx); - - row_mysql_unlock_data_dictionary(trx); - - trx_free_for_mysql(trx); - - DBUG_RETURN(error); + goto cleanup; } } } @@ -4044,21 +4688,18 @@ ha_innobase::create( current_thd->query_length, current_thd->charset())) { error = HA_ERR_OUT_OF_MEM; - } else { - error = row_table_add_foreign_constraints(trx, - q.str, norm_name); - error = convert_error_code_to_mysql(error, NULL); + goto cleanup; } - if (error) { - innobase_commit_low(trx); - - row_mysql_unlock_data_dictionary(trx); + error = row_table_add_foreign_constraints(trx, + q.str, norm_name, + create_info->options & HA_LEX_CREATE_TMP_TABLE); - trx_free_for_mysql(trx); + error = convert_error_code_to_mysql(error, NULL); - DBUG_RETURN(error); + if (error) { + goto cleanup; } } @@ -4079,7 +4720,7 @@ ha_innobase::create( if ((create_info->used_fields & HA_CREATE_USED_AUTO) && (create_info->auto_increment_value 
!= 0)) { - /* Query was ALTER TABLE...AUTO_INCREMENT = x; or + /* Query was ALTER TABLE...AUTO_INCREMENT = x; or CREATE TABLE ...AUTO_INCREMENT = x; Find out a table definition from the dictionary and get the current value of the auto increment field. Set a new value to the @@ -4098,6 +4739,15 @@ ha_innobase::create( trx_free_for_mysql(trx); DBUG_RETURN(0); + +cleanup: + innobase_commit_low(trx); + + row_mysql_unlock_data_dictionary(trx); + + trx_free_for_mysql(trx); + + DBUG_RETURN(error); } /********************************************************************* @@ -4118,7 +4768,7 @@ ha_innobase::discard_or_import_tablespace( ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); ut_a(prebuilt->trx == - (trx_t*) current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]); dict_table = prebuilt->table; trx = prebuilt->trx; @@ -4135,6 +4785,46 @@ ha_innobase::discard_or_import_tablespace( } /********************************************************************* +Deletes all rows of an InnoDB table. */ + +int +ha_innobase::delete_all_rows(void) +/*==============================*/ + /* out: error number */ +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; + int error; + trx_t* trx; + THD* thd = current_thd; + + DBUG_ENTER("ha_innobase::delete_all_rows"); + + if (thd->lex->sql_command != SQLCOM_TRUNCATE) { + fallback: + /* We only handle TRUNCATE TABLE t as a special case. + DELETE FROM t will have to use ha_innobase::delete_row(). 
*/ + DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND); + } + + /* Get the transaction associated with the current thd, or create one + if not yet created */ + + trx = check_trx_exists(thd); + + /* Truncate the table in InnoDB */ + + error = row_truncate_table_for_mysql(prebuilt->table, trx); + if (error == DB_ERROR) { + /* Cannot truncate; resort to ha_innobase::delete_row() */ + goto fallback; + } + + error = convert_error_code_to_mysql(error, NULL); + + DBUG_RETURN(error); +} + +/********************************************************************* Drops a table from an InnoDB database. Before calling this function, MySQL calls innobase_commit to commit the transaction of the current user. Then the current user cannot have locks set on the table. Drop table @@ -4158,13 +4848,13 @@ ha_innobase::delete_table( /* Get the transaction associated with the current thd, or create one if not yet created */ - + parent_trx = check_trx_exists(current_thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ - trx_search_latch_release_if_reserved(parent_trx); + trx_search_latch_release_if_reserved(parent_trx); if (lower_case_table_names) { srv_lower_case_table_names = TRUE; @@ -4240,13 +4930,13 @@ innobase_drop_database( /* Get the transaction associated with the current thd, or create one if not yet created */ - + parent_trx = check_trx_exists(current_thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ - trx_search_latch_release_if_reserved(parent_trx); + trx_search_latch_release_if_reserved(parent_trx); ptr = strend(path) - 2; @@ -4256,7 +4946,7 @@ innobase_drop_database( } ptr++; - namebuf = my_malloc(len + 2, MYF(0)); + namebuf = my_malloc((uint) len + 2, MYF(0)); memcpy(namebuf, ptr, len); namebuf[len] = '/'; @@ -4316,13 +5006,13 @@ ha_innobase::rename_table( /* Get the transaction associated with the current thd, 
or create one if not yet created */ - + parent_trx = check_trx_exists(current_thd); /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ - trx_search_latch_release_if_reserved(parent_trx); + trx_search_latch_release_if_reserved(parent_trx); if (lower_case_table_names) { srv_lower_case_table_names = TRUE; @@ -4388,11 +5078,11 @@ ha_innobase::records_in_range( KEY* key; dict_index_t* index; mysql_byte* key_val_buff2 = (mysql_byte*) my_malloc( - table->reclength - + table->max_key_length + 100, - MYF(MY_WME)); - ulint buff2_len = table->reclength - + table->max_key_length + 100; + table->s->reclength + + table->s->max_key_length + 100, + MYF(MY_FAE)); + ulint buff2_len = table->s->reclength + + table->s->max_key_length + 100; dtuple_t* range_start; dtuple_t* range_end; ib_longlong n_rows; @@ -4449,7 +5139,7 @@ ha_innobase::records_in_range( dtuple_free_for_mysql(heap1); dtuple_free_for_mysql(heap2); - my_free((char*) key_val_buff2, MYF(0)); + my_free((gptr) key_val_buff2, MYF(0)); prebuilt->trx->op_info = (char*)""; @@ -4530,7 +5220,7 @@ ha_innobase::scan_time() searches, we pretend that a sequential read takes the same time as a random disk read, that is, we do not divide the following by 10, which would be physically realistic. */ - + return((double) (prebuilt->table->stat_clustered_index_size)); } @@ -4548,22 +5238,28 @@ ha_innobase::read_time( { ha_rows total_rows; double time_for_scan; - - if (index != table->primary_key) - return handler::read_time(index, ranges, rows); // Not clustered - if (rows <= 2) - return (double) rows; + if (index != table->s->primary_key) { + /* Not clustered */ + return(handler::read_time(index, ranges, rows)); + } + + if (rows <= 2) { + + return((double) rows); + } /* Assume that the read time is proportional to the scan time for all rows + at most one seek per range. 
*/ time_for_scan = scan_time(); - if ((total_rows = estimate_rows_upper_bound()) < rows) - return time_for_scan; + if ((total_rows = estimate_rows_upper_bound()) < rows) { + + return(time_for_scan); + } - return (ranges + (double) rows / (double) total_rows * time_for_scan); + return(ranges + (double) rows / (double) total_rows * time_for_scan); } /************************************************************************* @@ -4621,21 +5317,13 @@ ha_innobase::info( prebuilt->trx->op_info = (char*) "returning various info to MySQL"; - - if (ib_table->space != 0) { - my_snprintf(path, sizeof(path), "%s/%s%s", - mysql_data_home, ib_table->name, - ".ibd"); - unpack_filename(path,path); - } else { - my_snprintf(path, sizeof(path), "%s/%s%s", + my_snprintf(path, sizeof(path), "%s/%s%s", mysql_data_home, ib_table->name, reg_ext); - - unpack_filename(path,path); - } - /* Note that we do not know the access time of the table, + unpack_filename(path,path); + + /* Note that we do not know the access time of the table, nor the CHECK TABLE time, nor the UPDATE or INSERT time. */ if (os_file_get_status(path,&stat_info)) { @@ -4654,7 +5342,7 @@ ha_innobase::info( is an accurate estimate if it is zero. Of course, it is not, since we do not have any locks on the rows yet at this phase. Since SHOW TABLE STATUS seems to call this function with the - HA_STATUS_TIME flag set, while the left join optizer does not + HA_STATUS_TIME flag set, while the left join optimizer does not set that flag, we add one to a zero value if the flag is not set. That way SHOW TABLE STATUS will show the best estimate, while the optimizer never sees the table empty. 
*/ @@ -4692,15 +5380,18 @@ ha_innobase::info( index = dict_table_get_next_index_noninline(index); } - for (i = 0; i < table->keys; i++) { + for (i = 0; i < table->s->keys; i++) { if (index == NULL) { ut_print_timestamp(stderr); - fprintf(stderr, -" InnoDB: Error: table %s contains less indexes inside InnoDB\n" -"InnoDB: than are defined in the MySQL .frm file. Have you mixed up\n" -"InnoDB: .frm files from different installations? See section\n" -"InnoDB: 15.1 at http://www.innodb.com/ibman.html\n", - ib_table->name); + sql_print_error("Table %s contains fewer " + "indexes inside InnoDB than " + "are defined in the MySQL " + ".frm file. Have you mixed up " + ".frm files from different " + "installations? See " +"http://dev.mysql.com/doc/refman/5.0/en/innodb-troubleshooting.html\n", + + ib_table->name); break; } @@ -4708,15 +5399,15 @@ ha_innobase::info( if (j + 1 > index->n_uniq) { ut_print_timestamp(stderr); - fprintf(stderr, -" InnoDB: Error: index %s of %s has %lu columns unique inside InnoDB\n" -"InnoDB: but MySQL is asking statistics for %lu columns. Have you mixed up\n" -"InnoDB: .frm files from different installations? See section\n" -"InnoDB: 15.1 at http://www.innodb.com/ibman.html\n", - index->name, - ib_table->name, - (unsigned long) index->n_uniq, - j + 1); + sql_print_error( +"Index %s of %s has %lu columns unique inside InnoDB, but MySQL is asking " +"statistics for %lu columns. Have you mixed up .frm files from different " +"installations? " +"See http://dev.mysql.com/doc/refman/5.0/en/innodb-troubleshooting.html\n", + index->name, + ib_table->name, + (unsigned long) + index->n_uniq, j + 1); break; } @@ -4756,6 +5447,33 @@ ha_innobase::info( trx_get_error_info(prebuilt->trx)); } + if (flag & HA_STATUS_AUTO && table->found_next_number_field) { + longlong auto_inc; + int ret; + + /* The following function call can the first time fail in + a lock wait timeout error because it reserves the auto-inc + lock on the table. 
If it fails, then someone is already initing + the auto-inc counter, and the second call is guaranteed to + succeed. */ + + ret = innobase_read_and_init_auto_inc(&auto_inc); + + if (ret != 0) { + ret = innobase_read_and_init_auto_inc(&auto_inc); + + if (ret != 0) { + ut_print_timestamp(stderr); + sql_print_error("Cannot get table %s auto-inc" + "counter value in ::info\n", + ib_table->name); + auto_inc = 0; + } + } + + auto_increment_value = auto_inc; + } + prebuilt->trx->op_info = (char*)""; DBUG_RETURN(0); @@ -4767,7 +5485,7 @@ each index tree. This does NOT calculate exact statistics on the table. */ int ha_innobase::analyze( -/*=================*/ +/*=================*/ /* out: returns always 0 (success) */ THD* thd, /* in: connection thread handle */ HA_CHECK_OPT* check_opt) /* in: currently ignored */ @@ -4810,7 +5528,7 @@ ha_innobase::check( ut_a(prebuilt->trx && prebuilt->trx->magic_n == TRX_MAGIC_N); ut_a(prebuilt->trx == - (trx_t*) current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]); if (prebuilt->mysql_template == NULL) { /* Build the template; we will use a dummy template @@ -4825,7 +5543,7 @@ ha_innobase::check( return(HA_ADMIN_OK); } - return(HA_ADMIN_CORRUPT); + return(HA_ADMIN_CORRUPT); } /***************************************************************** @@ -4840,7 +5558,7 @@ ha_innobase::update_table_comment( info on foreign keys */ const char* comment)/* in: table comment defined by user */ { - uint length = strlen(comment); + uint length = (uint) strlen(comment); char* str; row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; long flen; @@ -4849,7 +5567,7 @@ ha_innobase::update_table_comment( external_lock(). To be safe, update the thd of the current table handle. 
*/ - if(length > 64000 - 3) { + if (length > 64000 - 3) { return((char*)comment); /* string too long */ } @@ -4966,6 +5684,103 @@ ha_innobase::get_foreign_key_create_info(void) return(str); } + +int +ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) +{ + dict_foreign_t* foreign; + + DBUG_ENTER("get_foreign_key_list"); + row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; + ut_a(prebuilt != NULL); + update_thd(current_thd); + prebuilt->trx->op_info = (char*)"getting list of foreign keys"; + trx_search_latch_release_if_reserved(prebuilt->trx); + mutex_enter_noninline(&(dict_sys->mutex)); + foreign = UT_LIST_GET_FIRST(prebuilt->table->foreign_list); + + while (foreign != NULL) + { + uint i; + FOREIGN_KEY_INFO f_key_info; + LEX_STRING *name= 0; + const char *tmp_buff; + + tmp_buff= foreign->id; + i= 0; + while (tmp_buff[i] != '/') + i++; + tmp_buff+= i + 1; + f_key_info.forein_id= make_lex_string(thd, 0, tmp_buff, + (uint) strlen(tmp_buff), 1); + tmp_buff= foreign->referenced_table_name; + i= 0; + while (tmp_buff[i] != '/') + i++; + f_key_info.referenced_db= make_lex_string(thd, 0, + tmp_buff, i, 1); + tmp_buff+= i + 1; + f_key_info.referenced_table= make_lex_string(thd, 0, tmp_buff, + (uint) strlen(tmp_buff), 1); + + for (i= 0;;) + { + tmp_buff= foreign->foreign_col_names[i]; + name= make_lex_string(thd, name, tmp_buff, (uint) strlen(tmp_buff), 1); + f_key_info.foreign_fields.push_back(name); + tmp_buff= foreign->referenced_col_names[i]; + name= make_lex_string(thd, name, tmp_buff, (uint) strlen(tmp_buff), 1); + f_key_info.referenced_fields.push_back(name); + if (++i >= foreign->n_fields) + break; + } + + ulong length= 0; + if (foreign->type == DICT_FOREIGN_ON_DELETE_CASCADE) + { + length=17; + tmp_buff= "ON DELETE CASCADE"; + } + else if (foreign->type == DICT_FOREIGN_ON_DELETE_SET_NULL) + { + length=18; + tmp_buff= "ON DELETE SET NULL"; + } + else if (foreign->type == DICT_FOREIGN_ON_DELETE_NO_ACTION) + { + length=19; + 
tmp_buff= "ON DELETE NO ACTION"; + } + else if (foreign->type == DICT_FOREIGN_ON_UPDATE_CASCADE) + { + length=17; + tmp_buff= "ON UPDATE CASCADE"; + } + else if (foreign->type == DICT_FOREIGN_ON_UPDATE_SET_NULL) + { + length=18; + tmp_buff= "ON UPDATE SET NULL"; + } + else if (foreign->type == DICT_FOREIGN_ON_UPDATE_NO_ACTION) + { + length=19; + tmp_buff= "ON UPDATE NO ACTION"; + } + f_key_info.constraint_method= make_lex_string(thd, + f_key_info.constraint_method, + tmp_buff, length, 1); + + FOREIGN_KEY_INFO *pf_key_info= ((FOREIGN_KEY_INFO *) + thd->memdup((gptr) &f_key_info, + sizeof(FOREIGN_KEY_INFO))); + f_key_list->push_back(pf_key_info); + foreign = UT_LIST_GET_NEXT(foreign_list, foreign); + } + mutex_exit_noninline(&(dict_sys->mutex)); + prebuilt->trx->op_info = (char*)""; + DBUG_RETURN(0); +} + /********************************************************************* Checks if ALTER TABLE may change the storage engine of the table. Changing storage engines is not allowed for tables for which there @@ -5054,9 +5869,11 @@ ha_innobase::extra( if (prebuilt->blob_heap) { row_mysql_prebuilt_free_blob_heap(prebuilt); } + prebuilt->keep_other_fields_on_keyread = 0; prebuilt->read_just_key = 0; break; case HA_EXTRA_RESET_STATE: + prebuilt->keep_other_fields_on_keyread = 0; prebuilt->read_just_key = 0; break; case HA_EXTRA_NO_KEYREAD: @@ -5075,6 +5892,9 @@ ha_innobase::extra( case HA_EXTRA_KEYREAD: prebuilt->read_just_key = 1; break; + case HA_EXTRA_KEYREAD_PRESERVE_FIELDS: + prebuilt->keep_other_fields_on_keyread = 1; + break; default:/* Do nothing */ ; } @@ -5087,13 +5907,19 @@ MySQL calls this function at the start of each SQL statement inside LOCK TABLES. Inside LOCK TABLES the ::external_lock method does not work to mark SQL statement borders. Note also a special case: if a temporary table is created inside LOCK TABLES, MySQL has not called external_lock() at all -on that table. */ +on that table. 
+MySQL-5.0 also calls this before each statement in an execution of a stored +procedure. To make the execution more deterministic for binlogging, MySQL-5.0 +locks all tables involved in a stored procedure with full explicit table +locks (thd->in_lock_tables is true in ::store_lock()) before executing the +procedure. */ int ha_innobase::start_stmt( /*====================*/ /* out: 0 or error code */ - THD* thd) /* in: handle to the user thread */ + THD* thd, /* in: handle to the user thread */ + thr_lock_type lock_type) { row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; trx_t* trx; @@ -5111,31 +5937,23 @@ ha_innobase::start_stmt( innobase_release_stat_resources(trx); - if (trx->isolation_level <= TRX_ISO_READ_COMMITTED - && trx->read_view) { - /* At low transaction isolation levels we let - each consistent read set its own snapshot */ - - read_view_close_for_mysql(trx); - } - - auto_inc_counter_for_this_stat = 0; prebuilt->sql_stat_start = TRUE; prebuilt->hint_need_to_fetch_extra_cols = 0; prebuilt->read_just_key = 0; + prebuilt->keep_other_fields_on_keyread = FALSE; if (!prebuilt->mysql_has_locked) { /* This handle is for a temporary table created inside this same LOCK TABLES; since MySQL does NOT call external_lock in this case, we must use x-row locks inside InnoDB to be prepared for an update of a row */ - + prebuilt->select_lock_type = LOCK_X; } else { if (trx->isolation_level != TRX_ISO_SERIALIZABLE && thd->lex->sql_command == SQLCOM_SELECT - && thd->lex->lock_option == TL_READ) { - + && lock_type == TL_READ) { + /* For other than temporary tables, we obtain no lock for consistent read (plain SELECT). */ @@ -5145,30 +5963,26 @@ ha_innobase::start_stmt( select_lock_type value. The value of stored_select_lock_type was decided in: 1) ::store_lock(), - 2) ::external_lock(), and - 3) ::init_table_handle_for_HANDLER(). */ + 2) ::external_lock(), + 3) ::init_table_handle_for_HANDLER(), and + 4) :.transactional_table_lock(). 
*/ prebuilt->select_lock_type = prebuilt->stored_select_lock_type; } + } - if (prebuilt->stored_select_lock_type != LOCK_S - && prebuilt->stored_select_lock_type != LOCK_X) { - fprintf(stderr, -"InnoDB: Error: stored_select_lock_type is %lu inside ::start_stmt()!\n", - prebuilt->stored_select_lock_type); + trx->detailed_error[0] = '\0'; - /* Set the value to LOCK_X: this is just fault - tolerance, we do not know what the correct value - should be! */ + /* Set the MySQL flag to mark that there is an active transaction */ + if (trx->active_trans == 0) { - prebuilt->select_lock_type = LOCK_X; - } + innobase_register_trx_and_stmt(thd); + trx->active_trans = 1; + } else { + innobase_register_stmt(thd); } - /* Set the MySQL flag to mark that there is an active transaction */ - thd->transaction.all.innodb_active_trans = 1; - return(0); } @@ -5187,9 +6001,9 @@ innobase_map_isolation_level( case ISO_SERIALIZABLE: return(TRX_ISO_SERIALIZABLE); case ISO_READ_UNCOMMITTED: return(TRX_ISO_READ_UNCOMMITTED); default: ut_a(0); return(0); - } + } } - + /********************************************************************** As MySQL will execute an external lock for every new table it uses when it starts to process an SQL statement (an exception is when MySQL calls @@ -5220,6 +6034,7 @@ ha_innobase::external_lock( prebuilt->hint_need_to_fetch_extra_cols = 0; prebuilt->read_just_key = 0; + prebuilt->keep_other_fields_on_keyread = FALSE; if (lock_type == F_WRLCK) { @@ -5232,9 +6047,17 @@ ha_innobase::external_lock( if (lock_type != F_UNLCK) { /* MySQL is setting a new table lock */ + trx->detailed_error[0] = '\0'; + /* Set the MySQL flag to mark that there is an active transaction */ - thd->transaction.all.innodb_active_trans = 1; + if (trx->active_trans == 0) { + + innobase_register_trx_and_stmt(thd); + trx->active_trans = 1; + } else if (trx->n_mysql_tables_in_use == 0) { + innobase_register_stmt(thd); + } trx->n_mysql_tables_in_use++; prebuilt->mysql_has_locked = TRUE; @@ -5259,28 
+6082,35 @@ ha_innobase::external_lock( reads. */ prebuilt->select_lock_type = LOCK_S; + prebuilt->stored_select_lock_type = LOCK_S; } /* Starting from 4.1.9, no InnoDB table lock is taken in LOCK TABLES if AUTOCOMMIT=1. It does not make much sense to acquire an InnoDB table lock if it is released immediately at the end of LOCK TABLES, and InnoDB's table locks in that case cause - VERY easily deadlocks. */ + VERY easily deadlocks. + + We do not set InnoDB table locks if user has not explicitly + requested a table lock. Note that thd->in_lock_tables + can be TRUE on some cases e.g. at the start of a stored + procedure call (SQLCOM_CALL). */ if (prebuilt->select_lock_type != LOCK_NONE) { if (thd->in_lock_tables && + thd->lex->sql_command == SQLCOM_LOCK_TABLES && thd->variables.innodb_table_locks && (thd->options & OPTION_NOT_AUTOCOMMIT)) { ulint error; error = row_lock_table_for_mysql(prebuilt, - NULL, LOCK_TABLE_EXP); + NULL, 0); if (error != DB_SUCCESS) { error = convert_error_code_to_mysql( - error, user_thd); - DBUG_RETURN(error); + (int) error, user_thd); + DBUG_RETURN((int) error); } } @@ -5294,10 +6124,6 @@ ha_innobase::external_lock( trx->n_mysql_tables_in_use--; prebuilt->mysql_has_locked = FALSE; - auto_inc_counter_for_this_stat = 0; - if (trx->n_lock_table_exp) { - row_unlock_tables_for_mysql(trx); - } /* If the MySQL lock count drops to zero we know that the current SQL statement has ended */ @@ -5306,7 +6132,7 @@ ha_innobase::external_lock( trx->mysql_n_tables_locked = 0; prebuilt->used_in_HANDLER = FALSE; - + /* Release a possible FIFO ticket and search latch. Since we may reserve the kernel mutex, we have to release the search system latch first to obey the latching order. 
*/ @@ -5314,12 +6140,12 @@ ha_innobase::external_lock( innobase_release_stat_resources(trx); if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { - if (thd->transaction.all.innodb_active_trans != 0) { - innobase_commit(thd, trx); + if (trx->active_trans != 0) { + innobase_commit(thd, TRUE); } } else { if (trx->isolation_level <= TRX_ISO_READ_COMMITTED - && trx->read_view) { + && trx->global_read_view) { /* At low transaction isolation levels we let each consistent read set its own snapshot */ @@ -5332,11 +6158,112 @@ ha_innobase::external_lock( DBUG_RETURN(0); } +/********************************************************************** +With this function MySQL request a transactional lock to a table when +user issued query LOCK TABLES..WHERE ENGINE = InnoDB. */ + +int +ha_innobase::transactional_table_lock( +/*==================================*/ + /* out: error code */ + THD* thd, /* in: handle to the user thread */ + int lock_type) /* in: lock type */ +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + trx_t* trx; + + DBUG_ENTER("ha_innobase::transactional_table_lock"); + DBUG_PRINT("enter",("lock_type: %d", lock_type)); + + /* We do not know if MySQL can call this function before calling + external_lock(). To be safe, update the thd of the current table + handle. */ + + update_thd(thd); + + if (prebuilt->table->ibd_file_missing && !current_thd->tablespace_op) { + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB error:\n" +"MySQL is trying to use a table handle but the .ibd file for\n" +"table %s does not exist.\n" +"Have you deleted the .ibd file from the database directory under\n" +"the MySQL datadir?" 
+"See http://dev.mysql.com/doc/refman/5.0/en/innodb-troubleshooting.html\n" +"how you can resolve the problem.\n", + prebuilt->table->name); + DBUG_RETURN(HA_ERR_CRASHED); + } + + trx = prebuilt->trx; + + prebuilt->sql_stat_start = TRUE; + prebuilt->hint_need_to_fetch_extra_cols = 0; + + prebuilt->read_just_key = 0; + prebuilt->keep_other_fields_on_keyread = FALSE; + + if (lock_type == F_WRLCK) { + prebuilt->select_lock_type = LOCK_X; + prebuilt->stored_select_lock_type = LOCK_X; + } else if (lock_type == F_RDLCK) { + prebuilt->select_lock_type = LOCK_S; + prebuilt->stored_select_lock_type = LOCK_S; + } else { + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB error:\n" +"MySQL is trying to set transactional table lock with corrupted lock type\n" +"to table %s, lock type %d does not exist.\n", + prebuilt->table->name, lock_type); + DBUG_RETURN(HA_ERR_CRASHED); + } + + /* MySQL is setting a new transactional table lock */ + + /* Set the MySQL flag to mark that there is an active transaction */ + if (trx->active_trans == 0) { + + innobase_register_trx_and_stmt(thd); + trx->active_trans = 1; + } + + if (thd->in_lock_tables && thd->variables.innodb_table_locks) { + ulint error = DB_SUCCESS; + + error = row_lock_table_for_mysql(prebuilt, NULL, 0); + + if (error != DB_SUCCESS) { + error = convert_error_code_to_mysql((int) error, user_thd); + DBUG_RETURN((int) error); + } + + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + + /* Store the current undo_no of the transaction + so that we know where to roll back if we have + to roll back the next SQL statement */ + + trx_mark_sql_stat_end(trx); + } + } + + DBUG_RETURN(0); +} + +/**************************************************************************** +Here we export InnoDB status variables to MySQL. 
*/ + +void +innodb_export_status(void) +/*======================*/ +{ + srv_export_innodb_status(); +} + /**************************************************************************** Implements the SHOW INNODB STATUS command. Sends the output of the InnoDB Monitor to the client. */ -int +bool innodb_show_status( /*===============*/ THD* thd) /* in: the MySQL query thread of the caller */ @@ -5354,7 +6281,7 @@ innodb_show_status( my_message(ER_NOT_SUPPORTED_YET, "Cannot call SHOW INNODB STATUS because skip-innodb is defined", MYF(0)); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } trx = check_trx_exists(thd); @@ -5373,6 +6300,7 @@ innodb_show_status( &trx_list_start, &trx_list_end); flen = ftell(srv_monitor_file); os_file_set_eof(srv_monitor_file); + if (flen < 0) { flen = 0; } @@ -5389,28 +6317,28 @@ innodb_show_status( if (!(str = my_malloc(usable_len + 1, MYF(0)))) { mutex_exit_noninline(&srv_monitor_file_mutex); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } rewind(srv_monitor_file); if (flen < MAX_STATUS_SIZE) { /* Display the entire output. */ - flen = fread(str, 1, flen, srv_monitor_file); + flen = (long) fread(str, 1, flen, srv_monitor_file); } else if (trx_list_end < (ulint) flen && trx_list_start < trx_list_end && trx_list_start + (flen - trx_list_end) < MAX_STATUS_SIZE - sizeof truncated_msg - 1) { /* Omit the beginning of the list of active transactions. */ - long len = fread(str, 1, trx_list_start, srv_monitor_file); + long len = (long) fread(str, 1, trx_list_start, srv_monitor_file); memcpy(str + len, truncated_msg, sizeof truncated_msg - 1); len += sizeof truncated_msg - 1; usable_len = (MAX_STATUS_SIZE - 1) - len; fseek(srv_monitor_file, flen - usable_len, SEEK_SET); - len += fread(str + len, 1, usable_len, srv_monitor_file); + len += (long) fread(str + len, 1, usable_len, srv_monitor_file); flen = len; } else { /* Omit the end of the output. 
*/ - flen = fread(str, 1, MAX_STATUS_SIZE - 1, srv_monitor_file); + flen = (long) fread(str, 1, MAX_STATUS_SIZE - 1, srv_monitor_file); } mutex_exit_noninline(&srv_monitor_file_mutex); @@ -5419,22 +6347,140 @@ innodb_show_status( field_list.push_back(new Item_empty_string("Status", flen)); - if (protocol->send_fields(&field_list, 1)) { - + if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) { my_free(str, MYF(0)); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } protocol->prepare_for_resend(); protocol->store(str, flen, system_charset_info); my_free(str, MYF(0)); - if (protocol->write()) - DBUG_RETURN(-1); + if (protocol->write()) { + DBUG_RETURN(TRUE); + } send_eof(thd); - DBUG_RETURN(0); + + DBUG_RETURN(FALSE); +} + +/**************************************************************************** +Implements the SHOW MUTEX STATUS command. . */ + +bool +innodb_mutex_show_status( +/*===============*/ + THD* thd) /* in: the MySQL query thread of the caller */ +{ + Protocol *protocol= thd->protocol; + List<Item> field_list; + mutex_t* mutex; +#ifdef UNIV_DEBUG + ulint rw_lock_count= 0; + ulint rw_lock_count_spin_loop= 0; + ulint rw_lock_count_spin_rounds= 0; + ulint rw_lock_count_os_wait= 0; + ulint rw_lock_count_os_yield= 0; + ulonglong rw_lock_wait_time= 0; +#endif /* UNIV_DEBUG */ + DBUG_ENTER("innodb_mutex_show_status"); + +#ifdef UNIV_DEBUG + field_list.push_back(new Item_empty_string("Mutex", FN_REFLEN)); + field_list.push_back(new Item_empty_string("Module", FN_REFLEN)); + field_list.push_back(new Item_uint("Count", MY_INT64_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_uint("Spin_waits", MY_INT64_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_uint("Spin_rounds", MY_INT64_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_uint("OS_waits", MY_INT64_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_uint("OS_yields", MY_INT64_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_uint("OS_waits_time", 
MY_INT64_NUM_DECIMAL_DIGITS)); +#else /* UNIV_DEBUG */ + field_list.push_back(new Item_empty_string("File", FN_REFLEN)); + field_list.push_back(new Item_uint("Line", MY_INT64_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_uint("OS_waits", MY_INT64_NUM_DECIMAL_DIGITS)); +#endif /* UNIV_DEBUG */ + + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); + + mutex_enter_noninline(&mutex_list_mutex); + + mutex = UT_LIST_GET_FIRST(mutex_list); + + while ( mutex != NULL ) + { +#ifdef UNIV_DEBUG + if (mutex->mutex_type != 1) + { + if (mutex->count_using > 0) + { + protocol->prepare_for_resend(); + protocol->store(mutex->cmutex_name, system_charset_info); + protocol->store(mutex->cfile_name, system_charset_info); + protocol->store((ulonglong)mutex->count_using); + protocol->store((ulonglong)mutex->count_spin_loop); + protocol->store((ulonglong)mutex->count_spin_rounds); + protocol->store((ulonglong)mutex->count_os_wait); + protocol->store((ulonglong)mutex->count_os_yield); + protocol->store((ulonglong)mutex->lspent_time/1000); + + if (protocol->write()) + { + mutex_exit_noninline(&mutex_list_mutex); + DBUG_RETURN(1); + } + } + } + else + { + rw_lock_count += mutex->count_using; + rw_lock_count_spin_loop += mutex->count_spin_loop; + rw_lock_count_spin_rounds += mutex->count_spin_rounds; + rw_lock_count_os_wait += mutex->count_os_wait; + rw_lock_count_os_yield += mutex->count_os_yield; + rw_lock_wait_time += mutex->lspent_time; + } +#else /* UNIV_DEBUG */ + protocol->prepare_for_resend(); + protocol->store(mutex->cfile_name, system_charset_info); + protocol->store((ulonglong)mutex->cline); + protocol->store((ulonglong)mutex->count_os_wait); + + if (protocol->write()) + { + mutex_exit_noninline(&mutex_list_mutex); + DBUG_RETURN(1); + } +#endif /* UNIV_DEBUG */ + + mutex = UT_LIST_GET_NEXT(list, mutex); + } + + mutex_exit_noninline(&mutex_list_mutex); + +#ifdef UNIV_DEBUG + protocol->prepare_for_resend(); + 
protocol->store("rw_lock_mutexes", system_charset_info); + protocol->store("", system_charset_info); + protocol->store((ulonglong)rw_lock_count); + protocol->store((ulonglong)rw_lock_count_spin_loop); + protocol->store((ulonglong)rw_lock_count_spin_rounds); + protocol->store((ulonglong)rw_lock_count_os_wait); + protocol->store((ulonglong)rw_lock_count_os_yield); + protocol->store((ulonglong)rw_lock_wait_time/1000); + + if (protocol->write()) + { + DBUG_RETURN(1); + } +#endif /* UNIV_DEBUG */ + + send_eof(thd); + DBUG_RETURN(FALSE); } /**************************************************************************** @@ -5451,37 +6497,42 @@ static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length, static INNOBASE_SHARE *get_share(const char *table_name) { - INNOBASE_SHARE *share; - pthread_mutex_lock(&innobase_mutex); - uint length=(uint) strlen(table_name); - if (!(share=(INNOBASE_SHARE*) hash_search(&innobase_open_tables, - (mysql_byte*) table_name, - length))) - { - if ((share=(INNOBASE_SHARE *) my_malloc(sizeof(*share)+length+1, - MYF(MY_WME | MY_ZEROFILL)))) - { - share->table_name_length=length; - share->table_name=(char*) (share+1); - strmov(share->table_name,table_name); - if (my_hash_insert(&innobase_open_tables, (mysql_byte*) share)) - { - pthread_mutex_unlock(&innobase_mutex); - my_free((gptr) share,0); - return 0; - } - thr_lock_init(&share->lock); - pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); - } - } - share->use_count++; - pthread_mutex_unlock(&innobase_mutex); - return share; + INNOBASE_SHARE *share; + pthread_mutex_lock(&innobase_share_mutex); + uint length=(uint) strlen(table_name); + + if (!(share=(INNOBASE_SHARE*) hash_search(&innobase_open_tables, + (mysql_byte*) table_name, + length))) { + + share = (INNOBASE_SHARE *) my_malloc(sizeof(*share)+length+1, + MYF(MY_FAE | MY_ZEROFILL)); + + share->table_name_length=length; + share->table_name=(char*) (share+1); + strmov(share->table_name,table_name); + + if 
(my_hash_insert(&innobase_open_tables, + (mysql_byte*) share)) { + pthread_mutex_unlock(&innobase_share_mutex); + my_free((gptr) share,0); + + return 0; + } + + thr_lock_init(&share->lock); + pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); + } + + share->use_count++; + pthread_mutex_unlock(&innobase_share_mutex); + + return share; } static void free_share(INNOBASE_SHARE *share) { - pthread_mutex_lock(&innobase_mutex); + pthread_mutex_lock(&innobase_share_mutex); if (!--share->use_count) { hash_delete(&innobase_open_tables, (mysql_byte*) share); @@ -5489,7 +6540,7 @@ static void free_share(INNOBASE_SHARE *share) pthread_mutex_destroy(&share->mutex); my_free((gptr) share, MYF(0)); } - pthread_mutex_unlock(&innobase_mutex); + pthread_mutex_unlock(&innobase_share_mutex); } /********************************************************************* @@ -5513,10 +6564,15 @@ ha_innobase::store_lock( of current handle is stored next to this array */ enum thr_lock_type lock_type) /* in: lock type to store in - 'lock' */ + 'lock'; this may also be + TL_IGNORE */ { row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + /* NOTE: MySQL can call this function with lock 'type' TL_IGNORE! + Be careful to ignore TL_IGNORE if we are going to do something with + only 'real' locks! */ + if ((lock_type == TL_READ && thd->in_lock_tables) || (lock_type == TL_READ_HIGH_PRIORITY && thd->in_lock_tables) || lock_type == TL_READ_WITH_SHARED_LOCKS || @@ -5525,7 +6581,8 @@ ha_innobase::store_lock( && lock_type != TL_IGNORE)) { /* The OR cases above are in this order: - 1) MySQL is doing LOCK TABLES ... READ LOCAL, or + 1) MySQL is doing LOCK TABLES ... READ LOCAL, or we + are processing a stored procedure or function, or 2) (we do not know when TL_READ_HIGH_PRIORITY is used), or 3) this is a SELECT ... IN SHARE MODE, or 4) we are doing a complex SQL statement like @@ -5534,32 +6591,35 @@ ha_innobase::store_lock( MySQL is doing LOCK TABLES ... READ. 
5) we let InnoDB do locking reads for all SQL statements that are not simple SELECTs; note that select_lock_type in this - case may get strengthened in ::external_lock() to LOCK_X. */ + case may get strengthened in ::external_lock() to LOCK_X. + Note that we MUST use a locking read in all data modifying + SQL statements, because otherwise the execution would not be + serializable, and also the results from the update could be + unexpected if an obsolete consistent read view would be + used. */ if (srv_locks_unsafe_for_binlog && prebuilt->trx->isolation_level != TRX_ISO_SERIALIZABLE && (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) && - thd->lex->sql_command != SQLCOM_SELECT && - thd->lex->sql_command != SQLCOM_UPDATE_MULTI && - thd->lex->sql_command != SQLCOM_DELETE_MULTI && - thd->lex->sql_command != SQLCOM_LOCK_TABLES) { + (thd->lex->sql_command == SQLCOM_INSERT_SELECT || + thd->lex->sql_command == SQLCOM_UPDATE || + thd->lex->sql_command == SQLCOM_CREATE_TABLE)) { /* In case we have innobase_locks_unsafe_for_binlog option set and isolation level of the transaction is not set to serializable and MySQL is doing - INSERT INTO...SELECT or UPDATE ... = (SELECT ...) - without FOR UPDATE or IN SHARE MODE in select, then - we use consistent read for select. */ + INSERT INTO...SELECT or UPDATE ... = (SELECT ...) or + CREATE ... SELECT... without FOR UPDATE or + IN SHARE MODE in select, then we use consistent + read for select. 
*/ prebuilt->select_lock_type = LOCK_NONE; prebuilt->stored_select_lock_type = LOCK_NONE; } else if (thd->lex->sql_command == SQLCOM_CHECKSUM) { - /* Use consistent read for checksum table and - convert lock type to the TL_READ */ + /* Use consistent read for checksum table */ prebuilt->select_lock_type = LOCK_NONE; prebuilt->stored_select_lock_type = LOCK_NONE; - lock.type = TL_READ; } else { prebuilt->select_lock_type = LOCK_S; prebuilt->stored_select_lock_type = LOCK_S; @@ -5567,11 +6627,7 @@ ha_innobase::store_lock( } else if (lock_type != TL_IGNORE) { - /* In ha_berkeley.cc there is a comment that MySQL - may in exceptional cases call this with TL_IGNORE also - when it is NOT going to release the lock. */ - - /* We set possible LOCK_X value in external_lock, not yet + /* We set possible LOCK_X value in external_lock, not yet here even if this would be SELECT ... FOR UPDATE */ prebuilt->select_lock_type = LOCK_NONE; @@ -5580,7 +6636,16 @@ ha_innobase::store_lock( if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { - if (lock_type == TL_READ && thd->in_lock_tables) { + /* Starting from 5.0.7, we weaken also the table locks + set at the start of a MySQL stored procedure call, just like + we weaken the locks set at the start of an SQL statement. + MySQL does set thd->in_lock_tables TRUE there, but in reality + we do not need table locks to make the execution of a + single transaction stored procedure call deterministic + (if it does not use a consistent read). */ + + if (lock_type == TL_READ + && thd->lex->sql_command == SQLCOM_LOCK_TABLES) { /* We come here if MySQL is processing LOCK TABLES ... READ LOCAL. 
MyISAM under that table lock type reads the table as it was at the time the lock was @@ -5595,31 +6660,59 @@ ha_innobase::store_lock( lock_type = TL_READ_NO_INSERT; } - /* If we are not doing a LOCK TABLE or DISCARD/IMPORT - TABLESPACE, then allow multiple writers */ + /* If we are not doing a LOCK TABLE, DISCARD/IMPORT + TABLESPACE or TRUNCATE TABLE then allow multiple + writers. Note that ALTER TABLE uses a TL_WRITE_ALLOW_READ + < TL_WRITE_CONCURRENT_INSERT. + + We especially allow multiple writers if MySQL is at the + start of a stored procedure call (SQLCOM_CALL) or a + stored function call (MySQL does have thd->in_lock_tables + TRUE there). */ - if ((lock_type >= TL_WRITE_CONCURRENT_INSERT && - lock_type <= TL_WRITE) && !thd->in_lock_tables + if ((lock_type >= TL_WRITE_CONCURRENT_INSERT + && lock_type <= TL_WRITE) + && !(thd->in_lock_tables + && thd->lex->sql_command == SQLCOM_LOCK_TABLES) && !thd->tablespace_op - && thd->lex->sql_command != SQLCOM_CREATE_TABLE) { + && thd->lex->sql_command != SQLCOM_TRUNCATE + && thd->lex->sql_command != SQLCOM_OPTIMIZE - lock_type = TL_WRITE_ALLOW_WRITE; +#ifdef __WIN__ + /* For alter table on win32 for succesful operation + completion it is used TL_WRITE(=10) lock instead of + TL_WRITE_ALLOW_READ(=6), however here in innodb handler + TL_WRITE is lifted to TL_WRITE_ALLOW_WRITE, which causes + race condition when several clients do alter table + simultaneously (bug #17264). This fix avoids the problem. */ + && thd->lex->sql_command != SQLCOM_ALTER_TABLE +#endif + + && thd->lex->sql_command != SQLCOM_CREATE_TABLE) { + + lock_type = TL_WRITE_ALLOW_WRITE; } /* In queries of type INSERT INTO t1 SELECT ... FROM t2 ... MySQL would use the lock TL_READ_NO_INSERT on t2, and that would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts to t2. Convert the lock to a normal read lock to allow - concurrent inserts to t2. */ - - if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables) { + concurrent inserts to t2. 
+ + We especially allow concurrent inserts if MySQL is at the + start of a stored procedure call (SQLCOM_CALL) + (MySQL does have thd->in_lock_tables TRUE there). */ + + if (lock_type == TL_READ_NO_INSERT + && thd->lex->sql_command != SQLCOM_LOCK_TABLES) { + lock_type = TL_READ; } - - lock.type=lock_type; - } - *to++= &lock; + lock.type = lock_type; + } + + *to++= &lock; return(to); } @@ -5627,25 +6720,31 @@ ha_innobase::store_lock( /*********************************************************************** This function initializes the auto-inc counter if it has not been initialized yet. This function does not change the value of the auto-inc -counter if it already has been initialized. In paramete ret returns +counter if it already has been initialized. In parameter ret returns the value of the auto-inc counter. */ int ha_innobase::innobase_read_and_init_auto_inc( /*=========================================*/ - /* out: 0 or error code: deadlock or - lock wait timeout */ + /* out: 0 or error code: deadlock or lock wait + timeout */ longlong* ret) /* out: auto-inc value */ { row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; longlong auto_inc; + ulint old_select_lock_type; + ibool trx_was_not_started = FALSE; int error; ut_a(prebuilt); ut_a(prebuilt->trx == - (trx_t*) current_thd->transaction.all.innobase_tid); + (trx_t*) current_thd->ha_data[innobase_hton.slot]); ut_a(prebuilt->table); - + + if (prebuilt->trx->conc_state == TRX_NOT_STARTED) { + trx_was_not_started = TRUE; + } + /* In case MySQL calls this in the middle of a SELECT query, release possible adaptive hash latch to avoid deadlocks of threads */ @@ -5656,8 +6755,10 @@ ha_innobase::innobase_read_and_init_auto_inc( if (auto_inc != 0) { /* Already initialized */ *ret = auto_inc; - - return(0); + + error = 0; + + goto func_exit_early; } error = row_lock_table_autoinc_for_mysql(prebuilt); @@ -5665,39 +6766,46 @@ ha_innobase::innobase_read_and_init_auto_inc( if (error != DB_SUCCESS) { error = 
convert_error_code_to_mysql(error, user_thd); - goto func_exit; - } + goto func_exit_early; + } /* Check again if someone has initialized the counter meanwhile */ auto_inc = dict_table_autoinc_read(prebuilt->table); if (auto_inc != 0) { *ret = auto_inc; - - return(0); + + error = 0; + + goto func_exit_early; } (void) extra(HA_EXTRA_KEYREAD); - index_init(table->next_number_index); + index_init(table->s->next_number_index); - /* We use an exclusive lock when we read the max key value from the - auto-increment column index. This is because then build_template will - advise InnoDB to fetch all columns. In SHOW TABLE STATUS the query - id of the auto-increment column is not changed, and previously InnoDB - did not fetch it, causing SHOW TABLE STATUS to show wrong values - for the autoinc column. */ + /* Starting from 5.0.9, we use a consistent read to read the auto-inc + column maximum value. This eliminates the spurious deadlocks caused + by the row X-lock that we previously used. Note the following flaw + in our algorithm: if some other user meanwhile UPDATEs the auto-inc + column, our consistent read will not return the largest value. We + accept this flaw, since the deadlocks were a bigger trouble. */ - prebuilt->select_lock_type = LOCK_X; + /* Fetch all the columns in the key */ - /* Play safe and also give in another way the hint to fetch - all columns in the key: */ - prebuilt->hint_need_to_fetch_extra_cols = ROW_RETRIEVE_ALL_COLS; - prebuilt->trx->mysql_n_tables_locked += 1; - + old_select_lock_type = prebuilt->select_lock_type; + prebuilt->select_lock_type = LOCK_NONE; + + /* Eliminate an InnoDB error print that happens when we try to SELECT + from a table when no table has been locked in ::external_lock(). 
*/ + prebuilt->trx->n_mysql_tables_in_use++; + error = index_last(table->record[1]); + prebuilt->trx->n_mysql_tables_in_use--; + prebuilt->select_lock_type = old_select_lock_type; + if (error) { if (error == HA_ERR_END_OF_FILE) { /* The table was empty, initialize to 1 */ @@ -5705,15 +6813,22 @@ ha_innobase::innobase_read_and_init_auto_inc( error = 0; } else { - /* Deadlock or a lock wait timeout */ + /* This should not happen in a consistent read */ + sql_print_error("Consistent read of auto-inc column " + "returned %lu", (ulong) error); auto_inc = -1; goto func_exit; } } else { - /* Initialize to max(col) + 1 */ - auto_inc = (longlong) table->next_number_field-> - val_int_offset(table->rec_buff_length) + 1; + /* Initialize to max(col) + 1; we use + 'found_next_number_field' below because MySQL in SHOW TABLE + STATUS does not seem to set 'next_number_field'. The comment + in table.h says that 'next_number_field' is set when it is + 'active'. */ + + auto_inc = (longlong) table->found_next_number_field-> + val_int_offset(table->s->rec_buff_length) + 1; } dict_table_autoinc_initialize(prebuilt->table, auto_inc); @@ -5725,7 +6840,20 @@ func_exit: *ret = auto_inc; - return(error); +func_exit_early: + /* Since MySQL does not seem to call autocommit after SHOW TABLE + STATUS (even if we would register the trx here), we commit our + transaction here if it was started here. This is to eliminate a + dangling transaction. If the user had AUTOCOMMIT=0, then SHOW + TABLE STATUS does leave a dangling transaction if the user does not + himself call COMMIT. */ + + if (trx_was_not_started) { + + innobase_commit_low(prebuilt->trx); + } + + return(error); } /*********************************************************************** @@ -5734,7 +6862,7 @@ initialized yet. This function does not change the value of the auto-inc counter if it already has been initialized. Returns the value of the auto-inc counter. 
*/ -longlong +ulonglong ha_innobase::get_auto_increment() /*=============================*/ /* out: auto-increment column value, -1 if error @@ -5742,46 +6870,129 @@ ha_innobase::get_auto_increment() { longlong nr; int error; - + error = innobase_read_and_init_auto_inc(&nr); if (error) { + /* This should never happen in the current (5.0.6) code, since + we call this function only after the counter has been + initialized. */ + + ut_print_timestamp(stderr); + sql_print_error("Error %lu in ::get_auto_increment()", + (ulong) error); + return(~(ulonglong) 0); + } + + return((ulonglong) nr); +} + +/* See comment in handler.h */ +int +ha_innobase::reset_auto_increment(ulonglong value) +{ + DBUG_ENTER("ha_innobase::reset_auto_increment"); + + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + int error; + + error = row_lock_table_autoinc_for_mysql(prebuilt); - return(-1); + if (error != DB_SUCCESS) { + error = convert_error_code_to_mysql(error, user_thd); + + DBUG_RETURN(error); } - return(nr); + dict_table_autoinc_initialize(prebuilt->table, value); + + DBUG_RETURN(0); +} + +/* See comment in handler.cc */ +bool +ha_innobase::get_error_message(int error, String *buf) +{ + trx_t* trx = check_trx_exists(current_thd); + + buf->copy(trx->detailed_error, strlen(trx->detailed_error), + system_charset_info); + + return FALSE; } /*********************************************************************** -This function stores the binlog offset and flushes logs. */ +Compares two 'refs'. A 'ref' is the (internal) primary key value of the row. +If there is no explicitly declared non-null unique key or a primary key, then +InnoDB internally uses the row id as the primary key. 
*/ -void -innobase_store_binlog_offset_and_flush_log( -/*=======================================*/ - char *binlog_name, /* in: binlog name */ - longlong offset) /* in: binlog offset */ +int +ha_innobase::cmp_ref( +/*=================*/ + /* out: < 0 if ref1 < ref2, 0 if equal, else + > 0 */ + const mysql_byte* ref1, /* in: an (internal) primary key value in the + MySQL key value format */ + const mysql_byte* ref2) /* in: an (internal) primary key value in the + MySQL key value format */ { - mtr_t mtr; - - assert(binlog_name != NULL); + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + enum_field_types mysql_type; + Field* field; + KEY_PART_INFO* key_part; + KEY_PART_INFO* key_part_end; + uint len1; + uint len2; + int result; - /* Start a mini-transaction */ - mtr_start_noninline(&mtr); + if (prebuilt->clust_index_was_generated) { + /* The 'ref' is an InnoDB row id */ - /* Update the latest MySQL binlog name and offset info - in trx sys header */ + return(memcmp(ref1, ref2, DATA_ROW_ID_LEN)); + } - trx_sys_update_mysql_binlog_offset( - binlog_name, - offset, - TRX_SYS_MYSQL_LOG_INFO, &mtr); + /* Do a type-aware comparison of primary key fields. PK fields + are always NOT NULL, so no checks for NULL are performed. 
*/ - /* Commits the mini-transaction */ - mtr_commit(&mtr); - - /* Syncronous flush of the log buffer to disk */ - log_buffer_flush_to_disk(); + key_part = table->key_info[table->s->primary_key].key_part; + + key_part_end = key_part + + table->key_info[table->s->primary_key].key_parts; + + for (; key_part != key_part_end; ++key_part) { + field = key_part->field; + mysql_type = field->type(); + + if (mysql_type == FIELD_TYPE_TINY_BLOB + || mysql_type == FIELD_TYPE_MEDIUM_BLOB + || mysql_type == FIELD_TYPE_BLOB + || mysql_type == FIELD_TYPE_LONG_BLOB) { + + /* In the MySQL key value format, a column prefix of + a BLOB is preceded by a 2-byte length field */ + + len1 = innobase_read_from_2_little_endian(ref1); + len2 = innobase_read_from_2_little_endian(ref2); + + ref1 += 2; + ref2 += 2; + result = ((Field_blob*)field)->cmp( + (const char*)ref1, len1, + (const char*)ref2, len2); + } else { + result = field->key_cmp(ref1, ref2); + } + + if (result) { + + return(result); + } + + ref1 += key_part->store_length; + ref2 += key_part->store_length; + } + + return(0); } char* @@ -5825,7 +7036,7 @@ innobase_get_at_most_n_mbchars( ulint n_chars; /* number of characters in prefix */ CHARSET_INFO* charset; /* charset used in the field */ - charset = get_charset(charset_id, MYF(MY_WME)); + charset = get_charset((uint) charset_id, MYF(MY_WME)); ut_ad(charset); ut_ad(charset->mbmaxlen); @@ -5859,10 +7070,10 @@ innobase_get_at_most_n_mbchars( whole string. */ char_length = my_charpos(charset, str, - str + data_len, n_chars); + str + data_len, (int) n_chars); if (char_length > data_len) { char_length = data_len; - } + } } else { if (data_len < prefix_len) { char_length = data_len; @@ -5877,15 +7088,15 @@ innobase_get_at_most_n_mbchars( extern "C" { /********************************************************************** -This function returns true if +This function returns true if 1) SQL-query in the current thread -is either REPLACE or LOAD DATA INFILE REPLACE. 
+is either REPLACE or LOAD DATA INFILE REPLACE. 2) SQL-query in the current thread is INSERT ON DUPLICATE KEY UPDATE. -NOTE that /mysql/innobase/row/row0ins.c must contain the +NOTE that /mysql/innobase/row/row0ins.c must contain the prototype for this function ! */ ibool @@ -5893,18 +7104,20 @@ innobase_query_is_update(void) /*==========================*/ { THD* thd; - + thd = (THD *)innobase_current_thd(); - - if ( thd->lex->sql_command == SQLCOM_REPLACE || - thd->lex->sql_command == SQLCOM_REPLACE_SELECT || - ( thd->lex->sql_command == SQLCOM_LOAD && - thd->lex->duplicates == DUP_REPLACE )) { + + if (thd->lex->sql_command == SQLCOM_REPLACE || + thd->lex->sql_command == SQLCOM_REPLACE_SELECT || + (thd->lex->sql_command == SQLCOM_LOAD && + thd->lex->duplicates == DUP_REPLACE)) { + return(1); } - if ( thd->lex->sql_command == SQLCOM_INSERT && - thd->lex->duplicates == DUP_UPDATE ) { + if (thd->lex->sql_command == SQLCOM_INSERT && + thd->lex->duplicates == DUP_UPDATE) { + return(1); } @@ -5912,4 +7125,204 @@ innobase_query_is_update(void) } } +/*********************************************************************** +This function is used to prepare X/Open XA distributed transaction */ + +int +innobase_xa_prepare( +/*================*/ + /* out: 0 or error number */ + THD* thd, /* in: handle to the MySQL thread of the user + whose XA transaction should be prepared */ + bool all) /* in: TRUE - commit transaction + FALSE - the current SQL statement ended */ +{ + int error = 0; + trx_t* trx = check_trx_exists(thd); + + if (thd->lex->sql_command != SQLCOM_XA_PREPARE) { + + /* For ibbackup to work the order of transactions in binlog + and InnoDB must be the same. Consider the situation + + thread1> prepare; write to binlog; ... + <context switch> + thread2> prepare; write to binlog; commit + thread1> ... commit + + To ensure this will not happen we're taking the mutex on + prepare, and releasing it on commit. 
+ + Note: only do it for normal commits, done via ha_commit_trans. + If 2pc protocol is executed by external transaction + coordinator, it will be just a regular MySQL client + executing XA PREPARE and XA COMMIT commands. + In this case we cannot know how many minutes or hours + will be between XA PREPARE and XA COMMIT, and we don't want + to block for undefined period of time. + */ + pthread_mutex_lock(&prepare_commit_mutex); + trx->active_trans = 2; + } + + if (!thd->variables.innodb_support_xa) { + + return(0); + } + + trx->xid=thd->transaction.xid_state.xid; + + /* Release a possible FIFO ticket and search latch. Since we will + reserve the kernel mutex, we have to release the search system latch + first to obey the latching order. */ + + innobase_release_stat_resources(trx); + + if (trx->active_trans == 0 && trx->conc_state != TRX_NOT_STARTED) { + + sql_print_error("trx->active_trans == 0, but trx->conc_state != " + "TRX_NOT_STARTED"); + } + + if (all + || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { + + /* We were instructed to prepare the whole transaction, or + this is an SQL statement end and autocommit is on */ + + ut_ad(trx->active_trans); + + error = (int) trx_prepare_for_mysql(trx); + } else { + /* We just mark the SQL statement ended and do not do a + transaction prepare */ + + if (trx->auto_inc_lock) { + /* If we had reserved the auto-inc lock for some + table in this SQL statement we release it now */ + + row_unlock_table_autoinc_for_mysql(trx); + } + /* Store the current undo_no of the transaction so that we + know where to roll back if we have to roll back the next + SQL statement */ + + trx_mark_sql_stat_end(trx); + } + + /* Tell the InnoDB server that there might be work for utility + threads: */ + + srv_active_wake_master_thread(); + + return error; +} + +/*********************************************************************** +This function is used to recover X/Open XA distributed transactions */ + +int +innobase_xa_recover( 
+/*================*/ + /* out: number of prepared transactions + stored in xid_list */ + XID* xid_list, /* in/out: prepared transactions */ + uint len) /* in: number of slots in xid_list */ +{ + if (len == 0 || xid_list == NULL) { + + return(0); + } + + return(trx_recover_for_mysql(xid_list, len)); +} + +/*********************************************************************** +This function is used to commit one X/Open XA distributed transaction +which is in the prepared state */ + +int +innobase_commit_by_xid( +/*===================*/ + /* out: 0 or error number */ + XID* xid) /* in: X/Open XA transaction identification */ +{ + trx_t* trx; + + trx = trx_get_trx_by_xid(xid); + + if (trx) { + innobase_commit_low(trx); + + return(XA_OK); + } else { + return(XAER_NOTA); + } +} + +/*********************************************************************** +This function is used to rollback one X/Open XA distributed transaction +which is in the prepared state */ + +int +innobase_rollback_by_xid( +/*=====================*/ + /* out: 0 or error number */ + XID *xid) /* in: X/Open XA transaction identification */ +{ + trx_t* trx; + + trx = trx_get_trx_by_xid(xid); + + if (trx) { + return(innobase_rollback_trx(trx)); + } else { + return(XAER_NOTA); + } +} + +/*********************************************************************** +Create a consistent view for a cursor based on current transaction +which is created if the corresponding MySQL thread still lacks one. +This consistent view is then used inside of MySQL when accessing records +using a cursor. */ + +void* +innobase_create_cursor_view(void) +/*=============================*/ + /* out: Pointer to cursor view or NULL */ +{ + return(read_cursor_view_create_for_mysql( + check_trx_exists(current_thd))); +} + +/*********************************************************************** +Close the given consistent cursor view of a transaction and restore +global read view to a transaction read view. 
Transaction is created if the +corresponding MySQL thread still lacks one. */ + +void +innobase_close_cursor_view( +/*=======================*/ + void* curview)/* in: Consistent read view to be closed */ +{ + read_cursor_view_close_for_mysql(check_trx_exists(current_thd), + (cursor_view_t*) curview); +} + +/*********************************************************************** +Set the given consistent cursor view to a transaction which is created +if the corresponding MySQL thread still lacks one. If the given +consistent cursor view is NULL global read view of a transaction is +restored to a transaction read view. */ + +void +innobase_set_cursor_view( +/*=====================*/ + void* curview)/* in: Consistent cursor view to be set */ +{ + read_cursor_set_for_mysql(check_trx_exists(current_thd), + (cursor_view_t*) curview); +} + #endif /* HAVE_INNOBASE_DB */ diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index bbe226fcb19..9d18e22cc77 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB && Innobase Oy +/* Copyright (C) 2000-2005 MySQL AB && Innobase Oy This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -33,21 +32,25 @@ typedef struct st_innobase_share { } INNOBASE_SHARE; +my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, + uint full_name_len, + ulonglong *unused); + /* The class defining a handle to an Innodb table */ class ha_innobase: public handler { - void* innobase_prebuilt; /* (row_prebuilt_t*) prebuilt - struct in Innodb, used to save - CPU */ + void* innobase_prebuilt;/* (row_prebuilt_t*) prebuilt + struct in InnoDB, used to save + CPU time with prebuilt data + structures*/ THD* user_thd; /* the thread handle of the user currently using the handle; this is set in external_lock function */ - ulong last_query_id; /* the latest query id where the + query_id_t last_query_id; /* the latest query id where the handle was used */ THR_LOCK_DATA lock; INNOBASE_SHARE *share; - gptr alloc_ptr; byte* upd_buff; /* buffer used in updates */ byte* key_val_buff; /* buffer used in converting search key values from MySQL format @@ -57,7 +60,6 @@ class ha_innobase: public handler two buffers */ ulong int_table_flags; uint primary_key; - uint last_dup_key; ulong start_of_scan; /* this is set to 1 when we are starting a table scan but have not yet fetched any row, else 0 */ @@ -65,8 +67,6 @@ class ha_innobase: public handler ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX, or undefined */ uint num_write_row; /* number of write_row() calls */ - longlong auto_inc_counter_for_this_stat; - ulong max_supported_row_length(const byte *buf); uint store_key_val_for_row(uint keynr, char* buff, uint buff_len, const byte* record); @@ -77,20 +77,13 @@ class ha_innobase: public handler /* Init values for the class: */ public: - ha_innobase(TABLE *table): handler(table), - int_table_flags(HA_REC_NOT_IN_SEQ | - HA_NULL_IN_KEY | HA_FAST_KEY_READ | - HA_CAN_INDEX_BLOBS | - HA_CAN_SQL_HANDLER | - HA_NOT_EXACT_COUNT | - HA_PRIMARY_KEY_IN_READ_INDEX | - 
HA_TABLE_SCAN_ON_INDEX), - last_dup_key((uint) -1), - start_of_scan(0), - num_write_row(0) - { - } + ha_innobase(TABLE *table_arg); ~ha_innobase() {} + /* + Get the row type from the storage engine. If this method returns + ROW_TYPE_NOT_USED, the information in HA_CREATE_INFO should be used. + */ + enum row_type get_row_type() const; const char* table_type() const { return("InnoDB");} const char *index_type(uint key_number) { return "BTREE"; } @@ -98,7 +91,10 @@ class ha_innobase: public handler ulong table_flags() const { return int_table_flags; } ulong index_flags(uint idx, uint part, bool all_parts) const { - return (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER | HA_READ_RANGE | + return (HA_READ_NEXT | + HA_READ_PREV | + HA_READ_ORDER | + HA_READ_RANGE | HA_KEYREAD_ONLY); } uint max_supported_keys() const { return MAX_KEY; } @@ -110,7 +106,7 @@ class ha_innobase: public handler but currently MySQL does not work with keys whose size is > MAX_KEY_LENGTH */ uint max_supported_key_length() const { return 3500; } - uint max_supported_key_part_length() const { return 3500; } + uint max_supported_key_part_length() const; const key_map *keys_to_use_for_scanning() { return &key_map_full; } bool has_transactions() { return 1;} @@ -122,6 +118,7 @@ class ha_innobase: public handler int write_row(byte * buf); int update_row(const byte * old_data, byte * new_data); int delete_row(const byte * buf); + void unlock_row(); int index_init(uint index); int index_end(); @@ -148,51 +145,77 @@ class ha_innobase: public handler int discard_or_import_tablespace(my_bool discard); int extra(enum ha_extra_function operation); int external_lock(THD *thd, int lock_type); - int start_stmt(THD *thd); + int transactional_table_lock(THD *thd, int lock_type); + int start_stmt(THD *thd, thr_lock_type lock_type); void position(byte *record); - ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); + ha_rows records_in_range(uint inx, key_range *min_key, key_range + *max_key); 
ha_rows estimate_rows_upper_bound(); int create(const char *name, register TABLE *form, HA_CREATE_INFO *create_info); + int delete_all_rows(); int delete_table(const char *name); int rename_table(const char* from, const char* to); int check(THD* thd, HA_CHECK_OPT* check_opt); char* update_table_comment(const char* comment); char* get_foreign_key_create_info(); + int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list); bool can_switch_engines(); uint referenced_by_foreign_key(); - void free_foreign_key_create_info(char* str); + void free_foreign_key_create_info(char* str); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); - void init_table_handle_for_HANDLER(); - longlong get_auto_increment(); - uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; } + void init_table_handle_for_HANDLER(); + ulonglong get_auto_increment(); + int reset_auto_increment(ulonglong value); + + virtual bool get_error_message(int error, String *buf); - static char *get_mysql_bin_log_name(); + uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; } + /* + ask handler about permission to cache table during query registration + */ + my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *call_back, + ulonglong *engine_data) + { + *call_back= innobase_query_caching_of_table_permitted; + *engine_data= 0; + return innobase_query_caching_of_table_permitted(thd, table_key, + key_length, + engine_data); + } + static char *get_mysql_bin_log_name(); static ulonglong get_mysql_bin_log_pos(); + bool primary_key_is_clustered() { return true; } + int cmp_ref(const byte *ref1, const byte *ref2); }; -extern uint innobase_init_flags, innobase_lock_type; -extern uint innobase_flush_log_at_trx_commit; -extern ulong innobase_cache_size; -extern char *innobase_home, *innobase_tmpdir, *innobase_logdir; -extern long innobase_lock_scan_time; +extern struct show_var_st innodb_status_variables[]; +extern 
ulong innobase_fast_shutdown; +extern ulong innobase_large_page_size; extern long innobase_mirrored_log_groups, innobase_log_files_in_group; -extern long innobase_log_file_size, innobase_log_buffer_size; -extern long innobase_buffer_pool_size, innobase_additional_mem_pool_size; +extern longlong innobase_buffer_pool_size, innobase_log_file_size; +extern long innobase_log_buffer_size; +extern long innobase_additional_mem_pool_size; extern long innobase_buffer_pool_awe_mem_mb; extern long innobase_file_io_threads, innobase_lock_wait_timeout; -extern long innobase_force_recovery, innobase_thread_concurrency; +extern long innobase_force_recovery; extern long innobase_open_files; extern char *innobase_data_home_dir, *innobase_data_file_path; extern char *innobase_log_group_home_dir, *innobase_log_arch_dir; extern char *innobase_unix_file_flush_method; /* The following variables have to be my_bool for SHOW VARIABLES to work */ extern my_bool innobase_log_archive, - innobase_use_native_aio, innobase_fast_shutdown, + innobase_use_doublewrite, + innobase_use_checksums, + innobase_use_large_pages, + innobase_use_native_aio, innobase_file_per_table, innobase_locks_unsafe_for_binlog, + innobase_rollback_on_timeout, innobase_create_status_file; extern my_bool innobase_very_fast_shutdown; /* set this to 1 just before calling innobase_end() if you want @@ -203,40 +226,111 @@ extern "C" { extern ulong srv_max_buf_pool_modified_pct; extern ulong srv_max_purge_lag; extern ulong srv_auto_extend_increment; +extern ulong srv_n_spin_wait_rounds; +extern ulong srv_n_free_tickets_to_enter; +extern ulong srv_thread_sleep_delay; +extern ulong srv_thread_concurrency; +extern ulong srv_commit_concurrency; +extern ulong srv_flush_log_at_trx_commit; } -extern TYPELIB innobase_lock_typelib; - bool innobase_init(void); bool innobase_end(void); bool innobase_flush_logs(void); uint innobase_get_free_space(void); -int innobase_commit(THD *thd, void* trx_handle); +/* + don't delete it - it may be 
re-enabled later + as an optimization for the most common case InnoDB+binlog +*/ +#if 0 int innobase_report_binlog_offset_and_commit( THD* thd, void* trx_handle, char* log_file_name, my_off_t end_offset); -int innobase_commit_complete( - void* trx_handle); -int innobase_rollback(THD *thd, void* trx_handle); -int innobase_rollback_to_savepoint( - THD* thd, - char* savepoint_name, - my_off_t* binlog_cache_pos); -int innobase_savepoint( - THD* thd, - char* savepoint_name, - my_off_t binlog_cache_pos); -int innobase_close_connection(THD *thd); +int innobase_commit_complete(void* trx_handle); +void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); +#endif + int innobase_drop_database(char *path); -int innodb_show_status(THD* thd); +bool innodb_show_status(THD* thd); +bool innodb_mutex_show_status(THD* thd); +void innodb_export_status(void); -my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, - uint full_name_len); -void innobase_release_temporary_latches(void* innobase_tid); +void innobase_release_temporary_latches(THD *thd); void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); int innobase_start_trx_and_assign_read_view(THD* thd); + +/*********************************************************************** +This function is used to prepare X/Open XA distributed transaction */ + +int innobase_xa_prepare( +/*====================*/ + /* out: 0 or error number */ + THD* thd, /* in: handle to the MySQL thread of the user + whose XA transaction should be prepared */ + bool all); /* in: TRUE - commit transaction + FALSE - the current SQL statement ended */ + +/*********************************************************************** +This function is used to recover X/Open XA distributed transactions */ + +int innobase_xa_recover( +/*====================*/ + /* out: number of prepared transactions + stored in xid_list */ + XID* xid_list, /* in/out: prepared transactions */ + uint len); /* in: 
number of slots in xid_list */ + +/*********************************************************************** +This function is used to commit one X/Open XA distributed transaction +which is in the prepared state */ + +int innobase_commit_by_xid( +/*=======================*/ + /* out: 0 or error number */ + XID* xid); /* in : X/Open XA Transaction Identification */ + +/*********************************************************************** +This function is used to rollback one X/Open XA distributed transaction +which is in the prepared state */ + +int innobase_rollback_by_xid( + /* out: 0 or error number */ + XID *xid); /* in : X/Open XA Transaction Identification */ + + +/*********************************************************************** +Create a consistent view for a cursor based on current transaction +which is created if the corresponding MySQL thread still lacks one. +This consistent view is then used inside of MySQL when accessing records +using a cursor. */ + +void* +innobase_create_cursor_view(void); +/*=============================*/ + /* out: Pointer to cursor view or NULL */ + +/*********************************************************************** +Close the given consistent cursor view of a transaction and restore +global read view to a transaction read view. Transaction is created if the +corresponding MySQL thread still lacks one. */ + +void +innobase_close_cursor_view( +/*=======================*/ + void* curview); /* in: Consistent read view to be closed */ + +/*********************************************************************** +Set the given consistent cursor view to a transaction which is created +if the corresponding MySQL thread still lacks one. If the given +consistent cursor view is NULL global read view of a transaction is +restored to a transaction read view. 
*/ + +void +innobase_set_cursor_view( +/*=====================*/ + void* curview); /* in: Consistent read view to be set */ diff --git a/sql/ha_isam.cc b/sql/ha_isam.cc deleted file mode 100644 index 34d5440a0f8..00000000000 --- a/sql/ha_isam.cc +++ /dev/null @@ -1,402 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -#ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation -#endif - -#include "mysql_priv.h" -#ifdef HAVE_ISAM -#include <m_ctype.h> -#include <myisampack.h> -#include "ha_isam.h" -#ifndef MASTER -#include "../srclib/isam/isamdef.h" -#else -#include "../isam/isamdef.h" -#endif - -/***************************************************************************** -** isam tables -*****************************************************************************/ - - -const char **ha_isam::bas_ext() const -{ static const char *ext[]= { ".ISM",".ISD", NullS }; return ext; } - -int ha_isam::open(const char *name, int mode, uint test_if_locked) -{ - char name_buff[FN_REFLEN]; - if (!(file=nisam_open(fn_format(name_buff,name,"","",2 | 4), mode, - test_if_locked))) - return (my_errno ? 
my_errno : -1); - - if (!(test_if_locked == HA_OPEN_WAIT_IF_LOCKED || - test_if_locked == HA_OPEN_ABORT_IF_LOCKED)) - (void) nisam_extra(file,HA_EXTRA_NO_WAIT_LOCK); - info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); - if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED)) - (void) nisam_extra(file,HA_EXTRA_WAIT_LOCK); - if (!table->db_record_offset) - int_table_flags|=HA_REC_NOT_IN_SEQ; - return (0); -} - -int ha_isam::close(void) -{ - return !nisam_close(file) ? 0 : my_errno ? my_errno : -1; -} - -uint ha_isam::min_record_length(uint options) const -{ - return (options & HA_OPTION_PACK_RECORD) ? 1 : 5; -} - - -int ha_isam::write_row(byte * buf) -{ - statistic_increment(ha_write_count,&LOCK_status); - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) - table->timestamp_field->set_time(); - if (table->next_number_field && buf == table->record[0]) - update_auto_increment(); - return !nisam_write(file,buf) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::update_row(const byte * old_data, byte * new_data) -{ - statistic_increment(ha_update_count,&LOCK_status); - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) - table->timestamp_field->set_time(); - return !nisam_update(file,old_data,new_data) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::delete_row(const byte * buf) -{ - statistic_increment(ha_delete_count,&LOCK_status); - return !nisam_delete(file,buf) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) -{ - statistic_increment(ha_read_key_count,&LOCK_status); - int error=nisam_rkey(file, buf, active_index, key, key_len, find_flag); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? 
my_errno : -1; -} - -int ha_isam::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag) -{ - statistic_increment(ha_read_key_count,&LOCK_status); - int error=nisam_rkey(file, buf, index, key, key_len, find_flag); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::index_read_last(byte * buf, const byte * key, uint key_len) -{ - statistic_increment(ha_read_key_count,&LOCK_status); - int error=nisam_rkey(file, buf, active_index, key, key_len, - HA_READ_PREFIX_LAST); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::index_next(byte * buf) -{ - statistic_increment(ha_read_next_count,&LOCK_status); - int error=nisam_rnext(file,buf,active_index); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; -} - -int ha_isam::index_prev(byte * buf) -{ - statistic_increment(ha_read_prev_count,&LOCK_status); - int error=nisam_rprev(file,buf, active_index); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; -} - -int ha_isam::index_first(byte * buf) -{ - statistic_increment(ha_read_first_count,&LOCK_status); - int error=nisam_rfirst(file, buf, active_index); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; -} - -int ha_isam::index_last(byte * buf) -{ - statistic_increment(ha_read_last_count,&LOCK_status); - int error=nisam_rlast(file, buf, active_index); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; -} - -int ha_isam::rnd_init(bool scan) -{ - return nisam_extra(file,HA_EXTRA_RESET) ? 0 : my_errno ? 
my_errno : -1;; -} - -int ha_isam::rnd_next(byte *buf) -{ - statistic_increment(ha_read_rnd_next_count,&LOCK_status); - int error=nisam_rrnd(file, buf, NI_POS_ERROR); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::rnd_pos(byte * buf, byte *pos) -{ - statistic_increment(ha_read_rnd_count,&LOCK_status); - int error=nisam_rrnd(file, buf, (ulong) ha_get_ptr(pos,ref_length)); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : -1; -} - -void ha_isam::position(const byte *record) -{ - my_off_t position=nisam_position(file); - if (position == (my_off_t) ~ (ulong) 0) - position=HA_OFFSET_ERROR; - ha_store_ptr(ref, ref_length, position); -} - -int ha_isam::info(uint flag) -{ - N_ISAMINFO info; - (void) nisam_info(file,&info,flag); - if (flag & HA_STATUS_VARIABLE) - { - records = info.records; - deleted = info.deleted; - data_file_length=info.data_file_length; - index_file_length=info.index_file_length; - delete_length = info.delete_length; - check_time = info.isamchk_time; - mean_rec_length=info.mean_reclength; - } - if (flag & HA_STATUS_CONST) - { - max_data_file_length=info.max_data_file_length; - max_index_file_length=info.max_index_file_length; - create_time = info.create_time; - sortkey = info.sortkey; - block_size=nisam_block_size; - table->keys = min(table->keys,info.keys); - table->keys_in_use.set_prefix(table->keys); - table->db_options_in_use= info.options; - table->db_record_offset= - (table->db_options_in_use & - (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) ? 
0 : - table->reclength; - if (!table->tmp_table) - { - ulong *rec_per_key=info.rec_per_key; - for (uint i=0 ; i < table->keys ; i++) - { - table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= - *(rec_per_key++); - } - } - ref_length=4; - } - if (flag & HA_STATUS_ERRKEY) - { - errkey = info.errkey; - ha_store_ptr(dupp_ref, ref_length, info.dupp_key_pos); - } - if (flag & HA_STATUS_TIME) - update_time = info.update_time; - return 0; -} - - -int ha_isam::extra(enum ha_extra_function operation) -{ - if ((specialflag & SPECIAL_SAFE_MODE || test_flags & TEST_NO_EXTRA) && - (operation == HA_EXTRA_WRITE_CACHE || - operation == HA_EXTRA_KEYREAD)) - return 0; - return nisam_extra(file,operation); -} - -int ha_isam::external_lock(THD *thd, int lock_type) -{ - if (!table->tmp_table) - return nisam_lock_database(file,lock_type); - return 0; -} - - -THR_LOCK_DATA **ha_isam::store_lock(THD *thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type) -{ - if (lock_type != TL_IGNORE && file->lock.type == TL_UNLOCK) - file->lock.type=lock_type; - *to++= &file->lock; - return to; -} - - -int ha_isam::create(const char *name, register TABLE *form, - HA_CREATE_INFO *create_info) - -{ - uint options=form->db_options_in_use; - int error; - uint i,j,recpos,minpos,fieldpos,temp_length,length; - enum ha_base_keytype type; - char buff[FN_REFLEN]; - KEY *pos; - N_KEYDEF keydef[MAX_KEY]; - N_RECINFO *recinfo,*recinfo_pos; - DBUG_ENTER("ha_isam::create"); - - type=HA_KEYTYPE_BINARY; // Keep compiler happy - if (!(recinfo= (N_RECINFO*) my_malloc((form->fields*2+2)*sizeof(N_RECINFO), - MYF(MY_WME)))) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - - pos=form->key_info; - for (i=0; i < form->keys ; i++, pos++) - { - keydef[i].base.flag= (pos->flags & HA_NOSAME); - for (j=0 ; (int7) j < pos->key_parts ; j++) - { - keydef[i].seg[j].base.flag=pos->key_part[j].key_part_flag; - Field *field=pos->key_part[j].field; - type=field->key_type(); - - if ((options & HA_OPTION_PACK_KEYS || - (pos->flags & 
(HA_PACK_KEY | HA_BINARY_PACK_KEY | - HA_SPACE_PACK_USED))) && - pos->key_part[j].length > 8 && - (type == HA_KEYTYPE_TEXT || - type == HA_KEYTYPE_NUM || - (type == HA_KEYTYPE_BINARY && !field->zero_pack()))) - { - if (j == 0) - keydef[i].base.flag|=HA_PACK_KEY; - if (!(field->flags & ZEROFILL_FLAG) && - (field->type() == FIELD_TYPE_STRING || - field->type() == FIELD_TYPE_VAR_STRING || - ((int) (pos->key_part[j].length - field->decimals())) - >= 4)) - keydef[i].seg[j].base.flag|=HA_SPACE_PACK; - } - keydef[i].seg[j].base.type=(int) type; - keydef[i].seg[j].base.start= pos->key_part[j].offset; - keydef[i].seg[j].base.length= pos->key_part[j].length; - } - keydef[i].seg[j].base.type=(int) HA_KEYTYPE_END; /* End of key-parts */ - } - - recpos=0; recinfo_pos=recinfo; - while (recpos < (uint) form->reclength) - { - Field **field,*found=0; - minpos=form->reclength; length=0; - - for (field=form->field ; *field ; field++) - { - if ((fieldpos=(*field)->offset()) >= recpos && - fieldpos <= minpos) - { - /* skip null fields */ - if (!(temp_length= (*field)->pack_length())) - continue; /* Skip null-fields */ - if (! found || fieldpos < minpos || - (fieldpos == minpos && temp_length < length)) - { - minpos=fieldpos; found= *field; length=temp_length; - } - } - } - DBUG_PRINT("loop",("found: %lx recpos: %d minpos: %d length: %d", - found,recpos,minpos,length)); - if (recpos != minpos) - { // Reserved space (Null bits?) - recinfo_pos->base.type=(int) FIELD_NORMAL; - recinfo_pos++->base.length= (uint16) (minpos-recpos); - } - if (! 
found) - break; - - if (found->flags & BLOB_FLAG) - { - /* ISAM can only handle blob pointers of sizeof(char(*)) */ - recinfo_pos->base.type= (int) FIELD_BLOB; - if (options & HA_OPTION_LONG_BLOB_PTR) - length= length-portable_sizeof_char_ptr+sizeof(char*); - } - else if (!(options & HA_OPTION_PACK_RECORD)) - recinfo_pos->base.type= (int) FIELD_NORMAL; - else if (found->zero_pack()) - recinfo_pos->base.type= (int) FIELD_SKIP_ZERO; - else - recinfo_pos->base.type= (int) ((length <= 3 || - (found->flags & ZEROFILL_FLAG)) ? - FIELD_NORMAL : - found->type() == FIELD_TYPE_STRING || - found->type() == FIELD_TYPE_VAR_STRING ? - FIELD_SKIP_ENDSPACE : - FIELD_SKIP_PRESPACE); - recinfo_pos++ ->base.length=(uint16) length; - recpos=minpos+length; - DBUG_PRINT("loop",("length: %d type: %d", - recinfo_pos[-1].base.length,recinfo_pos[-1].base.type)); - - if ((found->flags & BLOB_FLAG) && (options & HA_OPTION_LONG_BLOB_PTR) && - sizeof(char*) != portable_sizeof_char_ptr) - { // Not used space - recinfo_pos->base.type=(int) FIELD_ZERO; - recinfo_pos++->base.length= - (uint16) (portable_sizeof_char_ptr-sizeof(char*)); - recpos+= (portable_sizeof_char_ptr-sizeof(char*)); - } - } - recinfo_pos->base.type= (int) FIELD_LAST; /* End of fieldinfo */ - error=nisam_create(fn_format(buff,name,"","",2+4+16),form->keys,keydef, - recinfo,(ulong) form->max_rows, (ulong) form->min_rows, - 0, 0, 0L); - my_free((gptr) recinfo,MYF(0)); - DBUG_RETURN(error); - -} - -static key_range no_range= { (byte*) 0, 0, HA_READ_KEY_EXACT }; - -ha_rows ha_isam::records_in_range(uint inx, key_range *min_key, - key_range *max_key) -{ - /* ISAM checks if 'key' pointer <> 0 to know if there is no range */ - if (!min_key) - min_key= &no_range; - if (!max_key) - max_key= &no_range; - return (ha_rows) nisam_records_in_range(file, - (int) inx, - min_key->key, min_key->length, - min_key->flag, - max_key->key, max_key->length, - max_key->flag); -} -#endif /* HAVE_ISAM */ diff --git a/sql/ha_isam.h b/sql/ha_isam.h deleted 
file mode 100644 index b94377c8e3d..00000000000 --- a/sql/ha_isam.h +++ /dev/null @@ -1,79 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -#ifdef USE_PRAGMA_INTERFACE -#pragma interface /* gcc class implementation */ -#endif - -/* class for the the myisam handler */ - -#include <nisam.h> - -class ha_isam: public handler -{ - N_INFO *file; - /* We need this as table_flags() may change after open() */ - ulong int_table_flags; - - public: - ha_isam(TABLE *table) - :handler(table), file(0), - int_table_flags(HA_READ_RND_SAME | - HA_DUPP_POS | HA_NOT_DELETE_WITH_CACHE | HA_FILE_BASED) - {} - ~ha_isam() {} - ulong index_flags(uint idx, uint part, bool all_parts) const - { return HA_READ_NEXT; } // but no HA_READ_PREV here!!! 
- const char *table_type() const { return "ISAM"; } - const char *index_type(uint key_number) { return "BTREE"; } - const char **bas_ext() const; - ulong table_flags() const { return int_table_flags; } - uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; } - uint max_supported_keys() const { return N_MAXKEY; } - uint max_supported_key_parts() const { return N_MAXKEY_SEG; } - uint max_supported_key_length() const { return N_MAX_KEY_LENGTH; } - uint min_record_length(uint options) const; - bool low_byte_first() const { return 0; } - - int open(const char *name, int mode, uint test_if_locked); - int close(void); - int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_last(byte * buf, const byte * key, uint key_len); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); - int rnd_init(bool scan); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); - void position(const byte *record); - int info(uint); - int extra(enum ha_extra_function operation); - int external_lock(THD *thd, int lock_type); - ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); - - int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); - THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, - enum thr_lock_type lock_type); -}; - diff --git a/sql/ha_isammrg.cc b/sql/ha_isammrg.cc deleted file mode 100644 index 8a03e09bcec..00000000000 --- a/sql/ha_isammrg.cc +++ /dev/null @@ -1,210 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General 
Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -#ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation -#endif - -#include "mysql_priv.h" -#ifdef HAVE_ISAM -#include <m_ctype.h> -#ifndef MASTER -#include "../srclib/merge/mrg_def.h" -#else -#include "../merge/mrg_def.h" -#endif -#include "ha_isammrg.h" - -/***************************************************************************** -** ISAM MERGE tables -*****************************************************************************/ - -const char **ha_isammrg::bas_ext() const -{ static const char *ext[]= { ".MRG", NullS }; return ext; } - -int ha_isammrg::open(const char *name, int mode, uint test_if_locked) -{ - char name_buff[FN_REFLEN]; - if (!(file=mrg_open(fn_format(name_buff,name,"","",2 | 4), mode, - test_if_locked))) - return (my_errno ? my_errno : -1); - - if (!(test_if_locked == HA_OPEN_WAIT_IF_LOCKED || - test_if_locked == HA_OPEN_ABORT_IF_LOCKED)) - mrg_extra(file,HA_EXTRA_NO_WAIT_LOCK); - info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); - if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED)) - mrg_extra(file,HA_EXTRA_WAIT_LOCK); - if (table->reclength != mean_rec_length) - { - DBUG_PRINT("error",("reclength: %d mean_rec_length: %d", - table->reclength, mean_rec_length)); - mrg_close(file); - file=0; - return ER_WRONG_MRG_TABLE; - } - return (0); -} - -int ha_isammrg::close(void) -{ - return !mrg_close(file) ? 0 : my_errno ? 
my_errno : -1; -} - -uint ha_isammrg::min_record_length(uint options) const -{ - return (options & HA_OPTION_PACK_RECORD) ? 1 : 5; -} - -int ha_isammrg::write_row(byte * buf) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::update_row(const byte * old_data, byte * new_data) -{ - statistic_increment(ha_update_count,&LOCK_status); - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) - table->timestamp_field->set_time(); - return !mrg_update(file,old_data,new_data) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isammrg::delete_row(const byte * buf) -{ - statistic_increment(ha_delete_count,&LOCK_status); - return !mrg_delete(file,buf) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isammrg::index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::index_next(byte * buf) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::index_prev(byte * buf) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::index_first(byte * buf) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::index_last(byte * buf) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::rnd_init(bool scan) -{ - return !mrg_extra(file,HA_EXTRA_RESET) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isammrg::rnd_next(byte *buf) -{ - statistic_increment(ha_read_rnd_next_count,&LOCK_status); - int error=mrg_rrnd(file, buf, ~(mrg_off_t) 0); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : -1; -} - -int ha_isammrg::rnd_pos(byte * buf, byte *pos) -{ - statistic_increment(ha_read_rnd_count,&LOCK_status); - int error=mrg_rrnd(file, buf, (ulong) ha_get_ptr(pos,ref_length)); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 
0 : my_errno ? my_errno : -1; -} - -void ha_isammrg::position(const byte *record) -{ - ulong position= mrg_position(file); - ha_store_ptr(ref, ref_length, (my_off_t) position); -} - - -int ha_isammrg::info(uint flag) -{ - MERGE_INFO info; - (void) mrg_info(file,&info,flag); - records = (ha_rows) info.records; - deleted = (ha_rows) info.deleted; - data_file_length=info.data_file_length; - errkey = info.errkey; - table->keys_in_use.clear_all(); // No keys yet - table->db_options_in_use = info.options; - mean_rec_length=info.reclength; - block_size=0; - update_time=0; - ref_length=4; // Should be big enough - return 0; -} - - -int ha_isammrg::extra(enum ha_extra_function operation) -{ - return !mrg_extra(file,operation) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isammrg::external_lock(THD *thd, int lock_type) -{ - return !mrg_lock_database(file,lock_type) ? 0 : my_errno ? my_errno : -1; -} - -uint ha_isammrg::lock_count(void) const -{ - return file->tables; -} - -THR_LOCK_DATA **ha_isammrg::store_lock(THD *thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type) -{ - MRG_TABLE *open_table; - - for (open_table=file->open_tables ; - open_table != file->end_table ; - open_table++) - { - *(to++)= &open_table->table->lock; - if (lock_type != TL_IGNORE && open_table->table->lock.type == TL_UNLOCK) - open_table->table->lock.type=lock_type; - } - return to; -} - - -int ha_isammrg::create(const char *name, register TABLE *form, - HA_CREATE_INFO *create_info) - -{ - char buff[FN_REFLEN]; - return mrg_create(fn_format(buff,name,"","",2+4+16),0); -} -#endif /* HAVE_ISAM */ diff --git a/sql/ha_isammrg.h b/sql/ha_isammrg.h deleted file mode 100644 index a0fcee8f5a6..00000000000 --- a/sql/ha_isammrg.h +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; 
either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -#ifdef USE_PRAGMA_INTERFACE -#pragma interface /* gcc class implementation */ -#endif - -/* class for the the myisam merge handler */ - -#include <merge.h> - -class ha_isammrg: public handler -{ - MRG_INFO *file; - - public: - ha_isammrg(TABLE *table): handler(table), file(0) {} - ~ha_isammrg() {} - const char *table_type() const { return "MRG_ISAM"; } - const char **bas_ext() const; - ulong table_flags() const { return (HA_READ_RND_SAME | - HA_REC_NOT_IN_SEQ | HA_FILE_BASED); } - ulong index_flags(uint idx, uint part, bool all_parts) const - { DBUG_ASSERT(0); return 0; } - - uint max_supported_keys() const { return 0; } - bool low_byte_first() const { return 0; } - uint min_record_length(uint options) const; - - int open(const char *name, int mode, uint test_if_locked); - int close(void); - int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint indx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); - int rnd_init(bool scan); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); - void position(const byte *record); - int info(uint); - int extra(enum ha_extra_function operation); - int 
external_lock(THD *thd, int lock_type); - uint lock_count(void) const; - int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); - THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, - enum thr_lock_type lock_type); - uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; } -}; diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index c6802ffe53c..10d3dbb2ec5 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000,2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -50,6 +49,36 @@ TYPELIB myisam_stats_method_typelib= { ** MyISAM tables *****************************************************************************/ +/* MyISAM handlerton */ + +handlerton myisam_hton= { + "MyISAM", + SHOW_OPTION_YES, + "Default engine as of MySQL 3.23 with great performance", + DB_TYPE_MYISAM, + NULL, + 0, /* slot */ + 0, /* savepoint size. */ + NULL, /* close_connection */ + NULL, /* savepoint */ + NULL, /* rollback to savepoint */ + NULL, /* release savepoint */ + NULL, /* commit */ + NULL, /* rollback */ + NULL, /* prepare */ + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + /* + MyISAM doesn't support transactions and doesn't have + transaction-dependent context: cursors can survive a commit. 
+ */ + HTON_CAN_RECREATE +}; + // collect errors printed by mi_check routines static void mi_check_print_msg(MI_CHECK *param, const char* msg_type, @@ -116,25 +145,25 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, MI_COLUMNDEF **recinfo_out, uint *records_out) { uint i, j, recpos, minpos, fieldpos, temp_length, length; - uint options= table_arg->db_options_in_use; enum ha_base_keytype type= HA_KEYTYPE_BINARY; KEY *pos; MI_KEYDEF *keydef; MI_COLUMNDEF *recinfo, *recinfo_pos; HA_KEYSEG *keyseg; - + TABLE_SHARE *share= table_arg->s; + uint options= share->db_options_in_use; DBUG_ENTER("table2myisam"); if (!(my_multi_malloc(MYF(MY_WME), - recinfo_out, (table_arg->fields * 2 + 2) * sizeof(MI_COLUMNDEF), - keydef_out, table_arg->keys * sizeof(MI_KEYDEF), + recinfo_out, (share->fields * 2 + 2) * sizeof(MI_COLUMNDEF), + keydef_out, share->keys * sizeof(MI_KEYDEF), &keyseg, - (table_arg->key_parts + table_arg->keys) * sizeof(HA_KEYSEG), + (share->key_parts + share->keys) * sizeof(HA_KEYSEG), NullS))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */ keydef= *keydef_out; recinfo= *recinfo_out; pos= table_arg->key_info; - for (i= 0; i < table_arg->keys; i++, pos++) + for (i= 0; i < share->keys; i++, pos++) { keydef[i].flag= (pos->flags & (HA_NOSAME | HA_FULLTEXT | HA_SPATIAL)); keydef[i].key_alg= pos->algorithm == HA_KEY_ALG_UNDEF ? 
@@ -144,9 +173,9 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, keydef[i].keysegs= pos->key_parts; for (j= 0; j < pos->key_parts; j++) { - keydef[i].seg[j].flag= pos->key_part[j].key_part_flag; Field *field= pos->key_part[j].field; type= field->key_type(); + keydef[i].seg[j].flag= pos->key_part[j].key_part_flag; if (options & HA_OPTION_PACK_KEYS || (pos->flags & (HA_PACK_KEY | HA_BINARY_PACK_KEY | @@ -161,8 +190,8 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, if (j == 0) keydef[i].flag|= HA_PACK_KEY; if (!(field->flags & ZEROFILL_FLAG) && - (field->type() == FIELD_TYPE_STRING || - field->type() == FIELD_TYPE_VAR_STRING || + (field->type() == MYSQL_TYPE_STRING || + field->type() == MYSQL_TYPE_VAR_STRING || ((int) (pos->key_part[j].length - field->decimals())) >= 4)) keydef[i].seg[j].flag|= HA_SPACE_PACK; } @@ -172,7 +201,9 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, keydef[i].seg[j].type= (int) type; keydef[i].seg[j].start= pos->key_part[j].offset; keydef[i].seg[j].length= pos->key_part[j].length; - keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end= 0; + keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end= + keydef[i].seg[j].bit_length= 0; + keydef[i].seg[j].bit_pos= 0; keydef[i].seg[j].language= field->charset()->number; if (field->null_ptr) @@ -192,19 +223,26 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, keydef[i].seg[j].flag|= HA_BLOB_PART; /* save number of bytes used to pack length */ keydef[i].seg[j].bit_start= (uint) (field->pack_length() - - table_arg->blob_ptr_size); + share->blob_ptr_size); + } + else if (field->type() == FIELD_TYPE_BIT) + { + keydef[i].seg[j].bit_length= ((Field_bit *) field)->bit_len; + keydef[i].seg[j].bit_start= ((Field_bit *) field)->bit_ofs; + keydef[i].seg[j].bit_pos= (uint) (((Field_bit *) field)->bit_ptr - + (uchar*) table_arg->record[0]); } } keyseg+= pos->key_parts; } if (table_arg->found_next_number_field) - keydef[table_arg->next_number_index].flag|= 
HA_AUTO_KEY; + keydef[share->next_number_index].flag|= HA_AUTO_KEY; recpos= 0; recinfo_pos= recinfo; - while (recpos < (uint) table_arg->reclength) + while (recpos < (uint) share->reclength) { Field **field, *found= 0; - minpos= table_arg->reclength; + minpos= share->reclength; length= 0; for (field= table_arg->field; *field; field++) @@ -213,7 +251,7 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, fieldpos <= minpos) { /* skip null fields */ - if (!(temp_length= (*field)->pack_length())) + if (!(temp_length= (*field)->pack_length_in_rec())) continue; /* Skip null-fields */ if (! found || fieldpos < minpos || (fieldpos == minpos && temp_length < length)) @@ -224,7 +262,7 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, } } } - DBUG_PRINT("loop", ("found: %lx recpos: %d minpos: %d length: %d", + DBUG_PRINT("loop", ("found: 0x%lx recpos: %d minpos: %d length: %d", (long) found, recpos, minpos, length)); if (recpos != minpos) { // Reserved space (Null bits?) @@ -236,9 +274,9 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, break; if (found->flags & BLOB_FLAG) - { recinfo_pos->type= (int) FIELD_BLOB; - } + else if (found->type() == MYSQL_TYPE_VARCHAR) + recinfo_pos->type= FIELD_VARCHAR; else if (!(options & HA_OPTION_PACK_RECORD)) recinfo_pos->type= (int) FIELD_NORMAL; else if (found->zero_pack()) @@ -247,8 +285,8 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, recinfo_pos->type= (int) ((length <= 3 || (found->flags & ZEROFILL_FLAG)) ? FIELD_NORMAL : - found->type() == FIELD_TYPE_STRING || - found->type() == FIELD_TYPE_VAR_STRING ? + found->type() == MYSQL_TYPE_STRING || + found->type() == MYSQL_TYPE_VAR_STRING ? 
FIELD_SKIP_ENDSPACE : FIELD_SKIP_PRESPACE); if (found->null_ptr) @@ -418,9 +456,10 @@ int check_definition(MI_KEYDEF *t1_keyinfo, MI_COLUMNDEF *t1_recinfo, extern "C" { -volatile my_bool *killed_ptr(MI_CHECK *param) +volatile int *killed_ptr(MI_CHECK *param) { - return &(((THD *)(param->thd))->killed); + /* In theory Unsafe conversion, but should be ok for now */ + return (int*) &(((THD *)(param->thd))->killed); } void mi_check_print_error(MI_CHECK *param, const char *fmt,...) @@ -453,8 +492,35 @@ void mi_check_print_warning(MI_CHECK *param, const char *fmt,...) } + +ha_myisam::ha_myisam(TABLE *table_arg) + :handler(&myisam_hton, table_arg), file(0), + int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | + HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | + HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME | + HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD | HA_CAN_RTREEKEYS), + can_enable_indexes(1) +{} + +handler *ha_myisam::clone(MEM_ROOT *mem_root) +{ + ha_myisam *new_handler= static_cast <ha_myisam *>(handler::clone(mem_root)); + if (new_handler) + new_handler->file->state= file->state; + return new_handler; +} + + +static const char *ha_myisam_exts[] = { + ".MYI", + ".MYD", + NullS +}; + const char **ha_myisam::bas_ext() const -{ static const char *ext[]= { ".MYI",".MYD", NullS }; return ext; } +{ + return ha_myisam_exts; +} const char *ha_myisam::index_type(uint key_number) @@ -541,7 +607,8 @@ int ha_myisam::dump(THD* thd, int fd) if (fd < 0) { - my_net_write(net, "", 0); + if (my_net_write(net, "", 0)) + error = errno ? 
errno : EPIPE; net_flush(net); } @@ -563,7 +630,7 @@ int ha_myisam::open(const char *name, int mode, uint test_if_locked) info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED)) VOID(mi_extra(file, HA_EXTRA_WAIT_LOCK, 0)); - if (!table->db_record_offset) + if (!table->s->db_record_offset) int_table_flags|=HA_REC_NOT_IN_SEQ; if (file->s->options & (HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD)) int_table_flags|=HA_HAS_CHECKSUM; @@ -579,7 +646,7 @@ int ha_myisam::close(void) int ha_myisam::write_row(byte * buf) { - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); /* If we have a timestamp column, update it to the current time */ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) @@ -590,7 +657,11 @@ int ha_myisam::write_row(byte * buf) or a new row, then update the auto_increment value in the record. */ if (table->next_number_field && buf == table->record[0]) - update_auto_increment(); + { + int error; + if ((error= update_auto_increment())) + return error; + } return mi_write(file,buf); } @@ -605,9 +676,9 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt) thd->proc_info="Checking table"; myisamchk_init(¶m); param.thd = thd; - param.op_name = (char*)"check"; - param.db_name = table->table_cache_key; - param.table_name = table->table_name; + param.op_name = "check"; + param.db_name= table->s->db; + param.table_name= table->alias; param.testflag = check_opt->flags | T_CHECK | T_SILENT; param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method; @@ -640,12 +711,14 @@ int ha_myisam::check(THD* thd, HA_CHECK_OPT* check_opt) { uint old_testflag=param.testflag; param.testflag|=T_MEDIUM; - init_io_cache(¶m.read_cache, file->dfile, - my_default_record_cache_size, READ_CACHE, - share->pack.header_length, 1, MYF(MY_WME)); - error |= chk_data_link(¶m, file, param.testflag & T_EXTEND); - 
end_io_cache(&(param.read_cache)); - param.testflag=old_testflag; + if (!(error= init_io_cache(¶m.read_cache, file->dfile, + my_default_record_cache_size, READ_CACHE, + share->pack.header_length, 1, MYF(MY_WME)))) + { + error= chk_data_link(¶m, file, param.testflag & T_EXTEND); + end_io_cache(&(param.read_cache)); + } + param.testflag= old_testflag; } } if (!error) @@ -693,11 +766,11 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt) myisamchk_init(¶m); param.thd = thd; - param.op_name = (char*) "analyze"; - param.db_name = table->table_cache_key; - param.table_name = table->table_name; - param.testflag=(T_FAST | T_CHECK | T_SILENT | T_STATISTICS | - T_DONT_CHECK_CHECKSUM); + param.op_name= "analyze"; + param.db_name= table->s->db; + param.table_name= table->alias; + param.testflag= (T_FAST | T_CHECK | T_SILENT | T_STATISTICS | + T_DONT_CHECK_CHECKSUM); param.using_global_keycache = 1; param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method; @@ -720,9 +793,9 @@ int ha_myisam::analyze(THD *thd, HA_CHECK_OPT* check_opt) int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) { HA_CHECK_OPT tmp_check_opt; - char* backup_dir= thd->lex->backup_dir; + char *backup_dir= thd->lex->backup_dir; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; - char* table_name = table->real_name; + const char *table_name= table->s->table_name; int error; const char* errmsg; DBUG_ENTER("restore"); @@ -731,11 +804,11 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) MI_NAME_DEXT)) DBUG_RETURN(HA_ADMIN_INVALID); - if (my_copy(src_path, fn_format(dst_path, table->path, "", + if (my_copy(src_path, fn_format(dst_path, table->s->path, "", MI_NAME_DEXT, 4), MYF(MY_WME))) { - error = HA_ADMIN_FAILED; - errmsg = "Failed in my_copy (Error %d)"; + error= HA_ADMIN_FAILED; + errmsg= "Failed in my_copy (Error %d)"; goto err; } @@ -747,11 +820,11 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) { MI_CHECK param; myisamchk_init(¶m); - param.thd = thd; - 
param.op_name = (char*)"restore"; - param.db_name = table->table_cache_key; - param.table_name = table->table_name; - param.testflag = 0; + param.thd= thd; + param.op_name= "restore"; + param.db_name= table->s->db; + param.table_name= table->s->table_name; + param.testflag= 0; mi_check_print_error(¶m, errmsg, my_errno); DBUG_RETURN(error); } @@ -760,9 +833,9 @@ int ha_myisam::restore(THD* thd, HA_CHECK_OPT *check_opt) int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt) { - char* backup_dir= thd->lex->backup_dir; + char *backup_dir= thd->lex->backup_dir; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; - char* table_name = table->real_name; + const char *table_name= table->s->table_name; int error; const char *errmsg; DBUG_ENTER("ha_myisam::backup"); @@ -770,12 +843,13 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt) if (fn_format_relative_to_data_home(dst_path, table_name, backup_dir, reg_ext)) { - errmsg = "Failed in fn_format() for .frm file (errno: %d)"; - error = HA_ADMIN_INVALID; + errmsg= "Failed in fn_format() for .frm file (errno: %d)"; + error= HA_ADMIN_INVALID; goto err; } - if (my_copy(fn_format(src_path, table->path,"", reg_ext, MY_UNPACK_FILENAME), + if (my_copy(fn_format(src_path, table->s->path, "", reg_ext, + MY_UNPACK_FILENAME), dst_path, MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_DONT_OVERWRITE_FILE))) { @@ -793,7 +867,7 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt) goto err; } - if (my_copy(fn_format(src_path, table->path,"", MI_NAME_DEXT, + if (my_copy(fn_format(src_path, table->s->path, "", MI_NAME_DEXT, MY_UNPACK_FILENAME), dst_path, MYF(MY_WME | MY_HOLD_ORIGINAL_MODES | MY_DONT_OVERWRITE_FILE))) @@ -808,11 +882,11 @@ int ha_myisam::backup(THD* thd, HA_CHECK_OPT *check_opt) { MI_CHECK param; myisamchk_init(¶m); - param.thd = thd; - param.op_name = (char*)"backup"; - param.db_name = table->table_cache_key; - param.table_name = table->table_name; - param.testflag = 0; + param.thd= thd; + param.op_name= "backup"; + 
param.db_name= table->s->db; + param.table_name= table->s->table_name; + param.testflag = 0; mi_check_print_error(¶m,errmsg, my_errno); DBUG_RETURN(error); } @@ -829,10 +903,10 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) myisamchk_init(¶m); param.thd = thd; - param.op_name = (char*) "repair"; - param.testflag = ((check_opt->flags & ~(T_EXTEND)) | - T_SILENT | T_FORCE_CREATE | T_CALC_CHECKSUM | - (check_opt->flags & T_EXTEND ? T_REP : T_REP_BY_SORT)); + param.op_name= "repair"; + param.testflag= ((check_opt->flags & ~(T_EXTEND)) | + T_SILENT | T_FORCE_CREATE | T_CALC_CHECKSUM | + (check_opt->flags & T_EXTEND ? T_REP : T_REP_BY_SORT)); param.sort_buffer_length= check_opt->sort_buffer_size; start_records=file->state->records; while ((error=repair(thd,param,0)) && param.retry_repair) @@ -843,7 +917,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) { param.testflag&= ~T_RETRY_WITHOUT_QUICK; sql_print_information("Retrying repair of: '%s' without quick", - table->path); + table->s->path); continue; } param.testflag&= ~T_QUICK; @@ -851,7 +925,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) { param.testflag= (param.testflag & ~T_REP_BY_SORT) | T_REP; sql_print_information("Retrying repair of: '%s' with keycache", - table->path); + table->s->path); continue; } break; @@ -863,7 +937,7 @@ int ha_myisam::repair(THD* thd, HA_CHECK_OPT *check_opt) sql_print_information("Found %s of %s rows when repairing '%s'", llstr(file->state->records, llbuff), llstr(start_records, llbuff2), - table->path); + table->s->path); } return error; } @@ -876,9 +950,9 @@ int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt) myisamchk_init(¶m); param.thd = thd; - param.op_name = (char*) "optimize"; - param.testflag = (check_opt->flags | T_SILENT | T_FORCE_CREATE | - T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX); + param.op_name= "optimize"; + param.testflag= (check_opt->flags | T_SILENT | T_FORCE_CREATE | + T_REP_BY_SORT | T_STATISTICS | T_SORT_INDEX); 
param.sort_buffer_length= check_opt->sort_buffer_size; if ((error= repair(thd,param,1)) && param.retry_repair) { @@ -891,41 +965,41 @@ int ha_myisam::optimize(THD* thd, HA_CHECK_OPT *check_opt) } -int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) +int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool do_optimize) { int error=0; uint local_testflag=param.testflag; - bool optimize_done= !optimize, statistics_done=0; + bool optimize_done= !do_optimize, statistics_done=0; const char *old_proc_info=thd->proc_info; char fixed_name[FN_REFLEN]; MYISAM_SHARE* share = file->s; ha_rows rows= file->state->records; DBUG_ENTER("ha_myisam::repair"); - param.db_name = table->table_cache_key; - param.table_name = table->table_name; + param.db_name= table->s->db; + param.table_name= table->alias; param.tmpfile_createflag = O_RDWR | O_TRUNC; param.using_global_keycache = 1; - param.thd=thd; - param.tmpdir=&mysql_tmpdir_list; - param.out_flag=0; + param.thd= thd; + param.tmpdir= &mysql_tmpdir_list; + param.out_flag= 0; strmov(fixed_name,file->filename); // Don't lock tables if we have used LOCK TABLE if (!thd->locked_tables && - mi_lock_database(file, table->tmp_table ? F_EXTRA_LCK : F_WRLCK)) + mi_lock_database(file, table->s->tmp_table ? F_EXTRA_LCK : F_WRLCK)) { mi_check_print_error(¶m,ER(ER_CANT_LOCK),my_errno); DBUG_RETURN(HA_ADMIN_FAILED); } - if (!optimize || + if (!do_optimize || ((file->state->del || share->state.split != file->state->records) && (!(param.testflag & T_QUICK) || !(share->state.changed & STATE_NOT_OPTIMIZED_KEYS)))) { ulonglong key_map= ((local_testflag & T_CREATE_MISSING_KEYS) ? 
- ((ulonglong) 1L << share->base.keys)-1 : + mi_get_mask_all_keys_active(share->base.keys) : share->state.key_map); uint testflag=param.testflag; if (mi_test_if_sort_rep(file,file->state->records,key_map,0) && @@ -934,7 +1008,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) local_testflag|= T_STATISTICS; param.testflag|= T_STATISTICS; // We get this for free statistics_done=1; - if (current_thd->variables.myisam_repair_threads>1) + if (thd->variables.myisam_repair_threads>1) { char buf[40]; /* TODO: respect myisam_repair_threads variable */ @@ -1058,7 +1132,7 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) if ((error= mi_assign_to_key_cache(file, map, new_key_cache))) { - char buf[80]; + char buf[STRING_BUFFER_USUAL_SIZE]; my_snprintf(buf, sizeof(buf), "Failed to flush to index file (errno: %d)", error); errmsg= buf; @@ -1072,9 +1146,9 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) MI_CHECK param; myisamchk_init(¶m); param.thd= thd; - param.op_name= (char*)"assign_to_keycache"; - param.db_name= table->table_cache_key; - param.table_name= table->table_name; + param.op_name= "assign_to_keycache"; + param.db_name= table->s->db; + param.table_name= table->s->table_name; param.testflag= 0; mi_check_print_error(¶m, errmsg); } @@ -1140,10 +1214,10 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt) MI_CHECK param; myisamchk_init(¶m); param.thd= thd; - param.op_name= (char*)"preload_keys"; - param.db_name= table->table_cache_key; - param.table_name= table->table_name; - param.testflag= 0; + param.op_name= "preload_keys"; + param.db_name= table->s->db; + param.table_name= table->s->table_name; + param.testflag= 0; mi_check_print_error(¶m, errmsg); DBUG_RETURN(error); } @@ -1226,7 +1300,7 @@ int ha_myisam::enable_indexes(uint mode) { int error; - if (file->s->state.key_map == set_bits(ulonglong, file->s->base.keys)) + if (mi_is_all_keys_active(file->s->state.key_map, file->s->base.keys)) { /* All 
indexes are enabled already. */ return 0; @@ -1248,9 +1322,9 @@ int ha_myisam::enable_indexes(uint mode) const char *save_proc_info=thd->proc_info; thd->proc_info="Creating index"; myisamchk_init(¶m); - param.op_name = (char*) "recreating_index"; - param.testflag = (T_SILENT | T_REP_BY_SORT | T_QUICK | - T_CREATE_MISSING_KEYS); + param.op_name= "recreating_index"; + param.testflag= (T_SILENT | T_REP_BY_SORT | T_QUICK | + T_CREATE_MISSING_KEYS); param.myf_rw&= ~MY_WAIT_IF_FULL; param.sort_buffer_length= thd->variables.myisam_sort_buff_size; param.stats_method= (enum_mi_stats_method)thd->variables.myisam_stats_method; @@ -1259,8 +1333,16 @@ int ha_myisam::enable_indexes(uint mode) { sql_print_warning("Warning: Enabling keys got errno %d, retrying", my_errno); + /* Repairing by sort failed. Now try standard repair method. */ param.testflag&= ~(T_REP_BY_SORT | T_QUICK); error= (repair(thd,param,0) != HA_ADMIN_OK); + /* + If the standard repair succeeded, clear all error messages which + might have been set by the first repair. They can still be seen + with SHOW WARNINGS then. + */ + if (! error) + thd->clear_error(); } info(HA_STATUS_CONST); thd->proc_info=save_proc_info; @@ -1313,8 +1395,9 @@ int ha_myisam::indexes_are_disabled(void) void ha_myisam::start_bulk_insert(ha_rows rows) { DBUG_ENTER("ha_myisam::start_bulk_insert"); - THD *thd=current_thd; - ulong size= min(thd->variables.read_buff_size, table->avg_row_length*rows); + THD *thd= current_thd; + ulong size= min(thd->variables.read_buff_size, + table->s->avg_row_length*rows); DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu", (ulong) rows, size)); @@ -1322,8 +1405,8 @@ void ha_myisam::start_bulk_insert(ha_rows rows) if (! 
rows || (rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE)) mi_extra(file, HA_EXTRA_WRITE_CACHE, (void*) &size); - can_enable_indexes= (file->s->state.key_map == - set_bits(ulonglong, file->s->base.keys)); + can_enable_indexes= mi_is_all_keys_active(file->s->state.key_map, + file->s->base.keys); if (!(specialflag & SPECIAL_SAFE_MODE)) { @@ -1382,18 +1465,18 @@ bool ha_myisam::check_and_repair(THD *thd) // Don't use quick if deleted rows if (!file->state->del && (myisam_recover_options & HA_RECOVER_QUICK)) check_opt.flags|=T_QUICK; - sql_print_warning("Checking table: '%s'",table->path); + sql_print_warning("Checking table: '%s'",table->s->path); old_query= thd->query; old_query_length= thd->query_length; pthread_mutex_lock(&LOCK_thread_count); - thd->query= table->real_name; - thd->query_length= strlen(table->real_name); + thd->query= (char*) table->s->table_name; + thd->query_length= (uint32) strlen(table->s->table_name); pthread_mutex_unlock(&LOCK_thread_count); if ((marked_crashed= mi_is_crashed(file)) || check(thd, &check_opt)) { - sql_print_warning("Recovering table: '%s'",table->path); + sql_print_warning("Recovering table: '%s'",table->s->path); check_opt.flags= ((myisam_recover_options & HA_RECOVER_BACKUP ? T_BACKUP_DATA : 0) | (marked_crashed ? 
0 : T_QUICK) | @@ -1417,7 +1500,7 @@ bool ha_myisam::is_crashed() const int ha_myisam::update_row(const byte * old_data, byte * new_data) { - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return mi_update(file,old_data,new_data); @@ -1425,7 +1508,7 @@ int ha_myisam::update_row(const byte * old_data, byte * new_data) int ha_myisam::delete_row(const byte * buf) { - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); return mi_delete(file,buf); } @@ -1433,7 +1516,8 @@ int ha_myisam::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=mi_rkey(file,buf,active_index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1442,7 +1526,8 @@ int ha_myisam::index_read(byte * buf, const byte * key, int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=mi_rkey(file,buf,index, key, key_len, find_flag); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; @@ -1451,7 +1536,8 @@ int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key, int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=mi_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1460,7 +1546,8 @@ int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len) int ha_myisam::index_next(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); int error=mi_rnext(file,buf,active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1469,7 +1556,8 @@ int ha_myisam::index_next(byte * buf) int ha_myisam::index_prev(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_prev_count, + &LOCK_status); int error=mi_rprev(file,buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1478,7 +1566,8 @@ int ha_myisam::index_prev(byte * buf) int ha_myisam::index_first(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_first_count, + &LOCK_status); int error=mi_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1487,7 +1576,8 @@ int ha_myisam::index_first(byte * buf) int ha_myisam::index_last(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_last_count, + &LOCK_status); int error=mi_rlast(file, buf, active_index); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; @@ -1498,7 +1588,8 @@ int ha_myisam::index_next_same(byte * buf, uint length __attribute__((unused))) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); int error=mi_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1514,7 +1605,8 @@ int ha_myisam::rnd_init(bool scan) int ha_myisam::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=mi_scan(file, buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1527,55 +1619,56 @@ int ha_myisam::restart_rnd_next(byte *buf, byte *pos) int ha_myisam::rnd_pos(byte * buf, byte *pos) { - statistic_increment(ha_read_rnd_count,&LOCK_status); - int error=mi_rrnd(file, buf, ha_get_ptr(pos,ref_length)); + statistic_increment(table->in_use->status_var.ha_read_rnd_count, + &LOCK_status); + int error=mi_rrnd(file, buf, my_get_ptr(pos,ref_length)); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; } void ha_myisam::position(const byte* record) { - my_off_t position=mi_position(file); - ha_store_ptr(ref, ref_length, position); + my_off_t row_position= mi_position(file); + my_store_ptr(ref, ref_length, row_position); } int ha_myisam::info(uint flag) { - MI_ISAMINFO info; + MI_ISAMINFO misam_info; char name_buff[FN_REFLEN]; - (void) mi_status(file,&info,flag); + (void) mi_status(file,&misam_info,flag); if (flag & HA_STATUS_VARIABLE) { - records = info.records; - deleted = info.deleted; - data_file_length=info.data_file_length; - index_file_length=info.index_file_length; - delete_length = info.delete_length; - check_time = info.check_time; - mean_rec_length=info.mean_reclength; + records= misam_info.records; + deleted= misam_info.deleted; + data_file_length= misam_info.data_file_length; + index_file_length= misam_info.index_file_length; + delete_length= misam_info.delete_length; + check_time= misam_info.check_time; + mean_rec_length= misam_info.mean_reclength; } if (flag & HA_STATUS_CONST) { - max_data_file_length=info.max_data_file_length; - max_index_file_length=info.max_index_file_length; - create_time = info.create_time; - sortkey = info.sortkey; - ref_length=info.reflength; - table->db_options_in_use = info.options; - block_size=myisam_block_size; - table->keys_in_use.set_prefix(table->keys); - table->keys_in_use.intersect(info.key_map); - table->keys_for_keyread= table->keys_in_use; - table->keys_for_keyread.subtract(table->read_only_keys); - table->db_record_offset=info.record_offset; - if (table->key_parts) + TABLE_SHARE *share= table->s; + max_data_file_length= misam_info.max_data_file_length; + max_index_file_length= misam_info.max_index_file_length; + create_time= misam_info.create_time; + sortkey= misam_info.sortkey; + ref_length= misam_info.reflength; + share->db_options_in_use= misam_info.options; + block_size= myisam_block_size; + share->keys_in_use.set_prefix(share->keys); + 
share->keys_in_use.intersect_extended(misam_info.key_map); + share->keys_for_keyread.intersect(share->keys_in_use); + share->db_record_offset= misam_info.record_offset; + if (share->key_parts) memcpy((char*) table->key_info[0].rec_per_key, - (char*) info.rec_per_key, - sizeof(table->key_info[0].rec_per_key)*table->key_parts); - raid_type=info.raid_type; - raid_chunks=info.raid_chunks; - raid_chunksize=info.raid_chunksize; + (char*) misam_info.rec_per_key, + sizeof(table->key_info[0].rec_per_key)*share->key_parts); + raid_type= misam_info.raid_type; + raid_chunks= misam_info.raid_chunks; + raid_chunksize= misam_info.raid_chunksize; /* Set data_file_name and index_file_name to point at the symlink value @@ -1583,21 +1676,21 @@ int ha_myisam::info(uint flag) */ data_file_name=index_file_name=0; fn_format(name_buff, file->filename, "", MI_NAME_DEXT, 2); - if (strcmp(name_buff, info.data_file_name)) - data_file_name=info.data_file_name; + if (strcmp(name_buff, misam_info.data_file_name)) + data_file_name= misam_info.data_file_name; strmov(fn_ext(name_buff),MI_NAME_IEXT); - if (strcmp(name_buff, info.index_file_name)) - index_file_name=info.index_file_name; + if (strcmp(name_buff, misam_info.index_file_name)) + index_file_name= misam_info.index_file_name; } if (flag & HA_STATUS_ERRKEY) { - errkey = info.errkey; - ha_store_ptr(dupp_ref, ref_length, info.dupp_key_pos); + errkey = misam_info.errkey; + my_store_ptr(dupp_ref, ref_length, misam_info.dupp_key_pos); } if (flag & HA_STATUS_TIME) - update_time = info.update_time; + update_time = misam_info.update_time; if (flag & HA_STATUS_AUTO) - auto_increment_value= info.auto_increment; + auto_increment_value= misam_info.auto_increment; return 0; } @@ -1633,7 +1726,7 @@ int ha_myisam::delete_table(const char *name) int ha_myisam::external_lock(THD *thd, int lock_type) { - return mi_lock_database(file, !table->tmp_table ? + return mi_lock_database(file, !table->s->tmp_table ? lock_type : ((lock_type == F_UNLCK) ? 
F_UNLCK : F_EXTRA_LCK)); } @@ -1667,35 +1760,39 @@ void ha_myisam::update_create_info(HA_CREATE_INFO *create_info) int ha_myisam::create(const char *name, register TABLE *table_arg, - HA_CREATE_INFO *info) + HA_CREATE_INFO *ha_create_info) { int error; - uint create_flags= 0, options= table_arg->db_options_in_use, records; + uint create_flags= 0, records; char buff[FN_REFLEN]; MI_KEYDEF *keydef; MI_COLUMNDEF *recinfo; MI_CREATE_INFO create_info; + TABLE_SHARE *share= table->s; + uint options= share->db_options_in_use; DBUG_ENTER("ha_myisam::create"); if ((error= table2myisam(table_arg, &keydef, &recinfo, &records))) DBUG_RETURN(error); /* purecov: inspected */ bzero((char*) &create_info, sizeof(create_info)); - create_info.max_rows= table_arg->max_rows; - create_info.reloc_rows= table_arg->min_rows; - create_info.with_auto_increment= table_arg->next_number_key_offset == 0; - create_info.auto_increment= (info->auto_increment_value ? - info->auto_increment_value -1 : + create_info.max_rows= share->max_rows; + create_info.reloc_rows= share->min_rows; + create_info.with_auto_increment= share->next_number_key_offset == 0; + create_info.auto_increment= (ha_create_info->auto_increment_value ? + ha_create_info->auto_increment_value -1 : (ulonglong) 0); - create_info.data_file_length= ((ulonglong) table_arg->max_rows * - table_arg->avg_row_length); - create_info.raid_type= info->raid_type; - create_info.raid_chunks= (info->raid_chunks ? info->raid_chunks : + create_info.data_file_length= ((ulonglong) share->max_rows * + share->avg_row_length); + create_info.raid_type= ha_create_info->raid_type; + create_info.raid_chunks= (ha_create_info->raid_chunks ? + ha_create_info->raid_chunks : RAID_DEFAULT_CHUNKS); - create_info.raid_chunksize= (info->raid_chunksize ? info->raid_chunksize : + create_info.raid_chunksize= (ha_create_info->raid_chunksize ? 
+ ha_create_info->raid_chunksize : RAID_DEFAULT_CHUNKSIZE); - create_info.data_file_name= info->data_file_name; - create_info.index_file_name= info->index_file_name; + create_info.data_file_name= ha_create_info->data_file_name; + create_info.index_file_name= ha_create_info->index_file_name; - if (info->options & HA_LEX_CREATE_TMP_TABLE) + if (ha_create_info->options & HA_LEX_CREATE_TMP_TABLE) create_flags|= HA_CREATE_TMP_TABLE; if (options & HA_OPTION_PACK_RECORD) create_flags|= HA_PACK_RECORD; @@ -1707,7 +1804,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, /* TODO: Check that the following fn_format is really needed */ error= mi_create(fn_format(buff, name, "", "", MY_UNPACK_FILENAME|MY_REPLACE_EXT), - table_arg->keys, keydef, + share->keys, keydef, records, recinfo, 0, (MI_UNIQUEDEF*) 0, &create_info, create_flags); @@ -1722,30 +1819,35 @@ int ha_myisam::rename_table(const char * from, const char * to) } -longlong ha_myisam::get_auto_increment() +ulonglong ha_myisam::get_auto_increment() { - if (!table->next_number_key_offset) + ulonglong nr; + int error; + byte key[MI_MAX_KEY_LENGTH]; + + if (!table->s->next_number_key_offset) { // Autoincrement at key-start ha_myisam::info(HA_STATUS_AUTO); return auto_increment_value; } /* it's safe to call the following if bulk_insert isn't on */ - mi_flush_bulk_insert(file, table->next_number_index); + mi_flush_bulk_insert(file, table->s->next_number_index); - longlong nr; - int error; - byte key[MI_MAX_KEY_LENGTH]; (void) extra(HA_EXTRA_KEYREAD); - key_copy(key,table,table->next_number_index, - table->next_number_key_offset); - error=mi_rkey(file,table->record[1],(int) table->next_number_index, - key,table->next_number_key_offset,HA_READ_PREFIX_LAST); + key_copy(key, table->record[0], + table->key_info + table->s->next_number_index, + table->s->next_number_key_offset); + error= mi_rkey(file,table->record[1],(int) table->s->next_number_index, + 
key,table->s->next_number_key_offset,HA_READ_PREFIX_LAST); if (error) - nr=1; + nr= 1; else - nr=(longlong) - table->next_number_field->val_int_offset(table->rec_buff_length)+1; + { + /* Get data from record[1] */ + nr= ((ulonglong) table->next_number_field-> + val_int_offset(table->s->rec_buff_length)+1); + } extra(HA_EXTRA_NO_KEYREAD); return nr; } @@ -1790,7 +1892,8 @@ int ha_myisam::ft_read(byte * buf) if (!ft_handler) return -1; - thread_safe_increment(ha_read_next_count,&LOCK_status); // why ? + thread_safe_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); // why ? error=ft_handler->please->read_next(ft_handler,(char*) buf); @@ -1800,6 +1903,6 @@ int ha_myisam::ft_read(byte * buf) uint ha_myisam::checksum() const { - return (uint)file->s->state.checksum; + return (uint)file->state->checksum; } diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h index 5bd1d263ad8..b186d9c7bb8 100644 --- a/sql/ha_myisam.h +++ b/sql/ha_myisam.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000,2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -43,14 +42,9 @@ class ha_myisam: public handler int repair(THD *thd, MI_CHECK ¶m, bool optimize); public: - ha_myisam(TABLE *table): handler(table), file(0), - int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | - HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | - HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME | - HA_CAN_INSERT_DELAYED), - can_enable_indexes(1) - {} + ha_myisam(TABLE *table_arg); ~ha_myisam() {} + handler *clone(MEM_ROOT *mem_root); const char *table_type() const { return "MyISAM"; } const char *index_type(uint key_number); const char **bas_ext() const; @@ -115,7 +109,7 @@ class ha_myisam: public handler int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); - longlong get_auto_increment(); + ulonglong get_auto_increment(); int rename_table(const char * from, const char * to); int delete_table(const char *name); int check(THD* thd, HA_CHECK_OPT* check_opt); diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index 53923add49a..1202a733a16 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -32,6 +31,41 @@ ** MyISAM MERGE tables *****************************************************************************/ +/* MyISAM MERGE handlerton */ + +handlerton myisammrg_hton= { + "MRG_MYISAM", + SHOW_OPTION_YES, + "Collection of identical MyISAM tables", + DB_TYPE_MRG_MYISAM, + NULL, + 0, /* slot */ + 0, /* savepoint size. */ + NULL, /* close_connection */ + NULL, /* savepoint */ + NULL, /* rollback to savepoint */ + NULL, /* release savepoint */ + NULL, /* commit */ + NULL, /* rollback */ + NULL, /* prepare */ + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + HTON_CAN_RECREATE +}; + + +ha_myisammrg::ha_myisammrg(TABLE *table_arg) + :handler(&myisammrg_hton, table_arg), file(0) +{} + +static const char *ha_myisammrg_exts[] = { + ".MRG", + NullS +}; extern int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out, MI_COLUMNDEF **recinfo_out, uint *records_out); extern int check_definition(MI_KEYDEF *t1_keyinfo, MI_COLUMNDEF *t1_recinfo, @@ -40,7 +74,10 @@ extern int check_definition(MI_KEYDEF *t1_keyinfo, MI_COLUMNDEF *t1_recinfo, uint t2_keys, uint t2_recs, bool strict); const char **ha_myisammrg::bas_ext() const -{ static const char *ext[]= { ".MRG", NullS }; return ext; } +{ + return ha_myisammrg_exts; +} + const char *ha_myisammrg::index_type(uint key_number) { @@ -60,7 +97,7 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked) MI_COLUMNDEF *recinfo; MYRG_TABLE *u_table; uint recs; - uint keys= table->keys; + uint keys= table->s->keys; int error; char name_buff[FN_REFLEN]; @@ -80,10 +117,10 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked) if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED)) myrg_extra(file,HA_EXTRA_WAIT_LOCK,0); - if 
(table->reclength != mean_rec_length && mean_rec_length) + if (table->s->reclength != mean_rec_length && mean_rec_length) { - DBUG_PRINT("error",("reclength: %d mean_rec_length: %lu", - table->reclength, mean_rec_length)); + DBUG_PRINT("error",("reclength: %lu mean_rec_length: %lu", + table->s->reclength, mean_rec_length)); error= HA_ERR_WRONG_MRG_TABLE_DEF; goto err; } @@ -110,7 +147,7 @@ int ha_myisammrg::open(const char *name, int mode, uint test_if_locked) my_free((gptr) recinfo, MYF(0)); #if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4 /* Merge table has more than 2G rows */ - if (table->crashed) + if (table->s->crashed) { error= HA_ERR_WRONG_MRG_TABLE_DEF; goto err; @@ -130,17 +167,25 @@ int ha_myisammrg::close(void) int ha_myisammrg::write_row(byte * buf) { - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); + + if (file->merge_insert_method == MERGE_INSERT_DISABLED || !file->tables) + return (HA_ERR_TABLE_READONLY); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) - update_auto_increment(); + { + int error; + if ((error= update_auto_increment())) + return error; + } return myrg_write(file,buf); } int ha_myisammrg::update_row(const byte * old_data, byte * new_data) { - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return myrg_update(file,old_data,new_data); @@ -148,14 +193,15 @@ int ha_myisammrg::update_row(const byte * old_data, byte * new_data) int ha_myisammrg::delete_row(const byte * buf) { - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); return myrg_delete(file,buf); } int ha_myisammrg::index_read(byte * buf, 
const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=myrg_rkey(file,buf,active_index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -164,7 +210,8 @@ int ha_myisammrg::index_read(byte * buf, const byte * key, int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=myrg_rkey(file,buf,index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -172,7 +219,8 @@ int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key, int ha_myisammrg::index_read_last(byte * buf, const byte * key, uint key_len) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=myrg_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST); table->status=error ? STATUS_NOT_FOUND: 0; @@ -181,7 +229,8 @@ int ha_myisammrg::index_read_last(byte * buf, const byte * key, uint key_len) int ha_myisammrg::index_next(byte * buf) { - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); int error=myrg_rnext(file,buf,active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -189,7 +238,8 @@ int ha_myisammrg::index_next(byte * buf) int ha_myisammrg::index_prev(byte * buf) { - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_prev_count, + &LOCK_status); int error=myrg_rprev(file,buf, active_index); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; @@ -197,7 +247,8 @@ int ha_myisammrg::index_prev(byte * buf) int ha_myisammrg::index_first(byte * buf) { - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_first_count, + &LOCK_status); int error=myrg_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -205,7 +256,8 @@ int ha_myisammrg::index_first(byte * buf) int ha_myisammrg::index_last(byte * buf) { - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_last_count, + &LOCK_status); int error=myrg_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -215,7 +267,8 @@ int ha_myisammrg::index_next_same(byte * buf, const byte *key __attribute__((unused)), uint length __attribute__((unused))) { - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); int error=myrg_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -228,7 +281,8 @@ int ha_myisammrg::rnd_init(bool scan) int ha_myisammrg::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=myrg_rrnd(file, buf, HA_OFFSET_ERROR); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -236,16 +290,17 @@ int ha_myisammrg::rnd_next(byte *buf) int ha_myisammrg::rnd_pos(byte * buf, byte *pos) { - statistic_increment(ha_read_rnd_count,&LOCK_status); - int error=myrg_rrnd(file, buf, ha_get_ptr(pos,ref_length)); + statistic_increment(table->in_use->status_var.ha_read_rnd_count, + &LOCK_status); + int error=myrg_rrnd(file, buf, my_get_ptr(pos,ref_length)); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; } void ha_myisammrg::position(const byte *record) { - ulonglong position= myrg_position(file); - ha_store_ptr(ref, ref_length, (my_off_t) position); + ulonglong row_position= myrg_position(file); + my_store_ptr(ref, ref_length, (my_off_t) row_position); } @@ -258,26 +313,46 @@ ha_rows ha_myisammrg::records_in_range(uint inx, key_range *min_key, int ha_myisammrg::info(uint flag) { - MYMERGE_INFO info; - (void) myrg_status(file,&info,flag); + MYMERGE_INFO mrg_info; + (void) myrg_status(file,&mrg_info,flag); /* The following fails if one has not compiled MySQL with -DBIG_TABLES and one has more than 2^32 rows in the merge tables. */ - records = (ha_rows) info.records; - deleted = (ha_rows) info.deleted; + records = (ha_rows) mrg_info.records; + deleted = (ha_rows) mrg_info.deleted; #if !defined(BIG_TABLES) || SIZEOF_OFF_T == 4 - if ((info.records >= (ulonglong) 1 << 32) || - (info.deleted >= (ulonglong) 1 << 32)) - table->crashed=1; + if ((mrg_info.records >= (ulonglong) 1 << 32) || + (mrg_info.deleted >= (ulonglong) 1 << 32)) + table->s->crashed= 1; #endif - data_file_length=info.data_file_length; - errkey = info.errkey; - table->keys_in_use.set_prefix(table->keys); - table->db_options_in_use = info.options; - table->is_view=1; - mean_rec_length=info.reclength; - block_size=0; + data_file_length=mrg_info.data_file_length; + errkey = mrg_info.errkey; + table->s->keys_in_use.set_prefix(table->s->keys); + table->s->db_options_in_use= mrg_info.options; + table->s->is_view= 1; + mean_rec_length= mrg_info.reclength; + + /* + The handler::block_size is used all over the code in index scan cost + calculations. It is used to get number of disk seeks required to + retrieve a number of index tuples. 
+ If the merge table has N underlying tables, then (assuming underlying + tables have equal size, the only "simple" approach we can use) + retrieving X index records from a merge table will require N times more + disk seeks compared to doing the same on a MyISAM table with equal + number of records. + In the edge case (file_tables > myisam_block_size) we'll get + block_size==0, and index calculation code will act as if we need one + disk seek to retrieve one index tuple. + + TODO: In 5.2 index scan cost calculation will be factored out into a + virtual function in class handler and we'll be able to remove this hack. + */ + block_size= 0; + if (file->tables) + block_size= myisam_block_size / file->tables; + update_time=0; #if SIZEOF_OFF_T > 4 ref_length=6; // Should be big enough @@ -286,7 +361,7 @@ int ha_myisammrg::info(uint flag) #endif if (flag & HA_STATUS_CONST) { - if (table->key_parts && info.rec_per_key) + if (table->s->key_parts && mrg_info.rec_per_key) { #ifdef HAVE_purify /* @@ -296,12 +371,12 @@ int ha_myisammrg::info(uint flag) with such a number, it'll be an error later anyway. 
*/ bzero((char*) table->key_info[0].rec_per_key, - sizeof(table->key_info[0].rec_per_key) * table->key_parts); + sizeof(table->key_info[0].rec_per_key) * table->s->key_parts); #endif memcpy((char*) table->key_info[0].rec_per_key, - (char*) info.rec_per_key, + (char*) mrg_info.rec_per_key, sizeof(table->key_info[0].rec_per_key) * - min(file->keys, table->key_parts)); + min(file->keys, table->s->key_parts)); } } return 0; @@ -403,14 +478,14 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info) if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST)))) goto err; split_file_name(open_table->table->filename, &db, &name); - if (!(ptr->real_name= thd->strmake(name.str, name.length))) + if (!(ptr->table_name= thd->strmake(name.str, name.length))) goto err; if (db.length && !(ptr->db= thd->strmake(db.str, db.length))) goto err; create_info->merge_list.elements++; (*create_info->merge_list.next) = (byte*) ptr; - create_info->merge_list.next= (byte**) &ptr->next; + create_info->merge_list.next= (byte**) &ptr->next_local; } *create_info->merge_list.next=0; } @@ -430,21 +505,22 @@ err: int ha_myisammrg::create(const char *name, register TABLE *form, HA_CREATE_INFO *create_info) { - char buff[FN_REFLEN],**table_names,**pos; + char buff[FN_REFLEN]; + const char **table_names, **pos; TABLE_LIST *tables= (TABLE_LIST*) create_info->merge_list.first; THD *thd= current_thd; uint dirlgt= dirname_length(name); DBUG_ENTER("ha_myisammrg::create"); - if (!(table_names= (char**) thd->alloc((create_info->merge_list.elements+1)* - sizeof(char*)))) + if (!(table_names= (const char**) + thd->alloc((create_info->merge_list.elements+1) * sizeof(char*)))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); - for (pos=table_names ; tables ; tables=tables->next) + for (pos= table_names; tables; tables= tables->next_local) { - char *table_name; + const char *table_name; TABLE **tbl= 0; if (create_info->options & HA_LEX_CREATE_TMP_TABLE) - tbl= find_temporary_table(thd, tables->db, tables->real_name); + 
tbl= find_temporary_table(thd, tables->db, tables->table_name); if (!tbl) { /* @@ -459,7 +535,7 @@ int ha_myisammrg::create(const char *name, register TABLE *form, an embedded server without changing the paths in the .MRG file. */ uint length= my_snprintf(buff, FN_REFLEN, "%s/%s/%s", mysql_data_home, - tables->db, tables->real_name); + tables->db, tables->table_name); /* If a MyISAM table is in the same directory as the MERGE table, we use the table name without a path. This means that the @@ -467,19 +543,18 @@ int ha_myisammrg::create(const char *name, register TABLE *form, as the MyISAM tables are from the same database as the MERGE table. */ if ((dirname_length(buff) == dirlgt) && ! memcmp(buff, name, dirlgt)) - table_name= tables->real_name; + table_name= tables->table_name; else if (! (table_name= thd->strmake(buff, length))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); } else - table_name=(*tbl)->path; - DBUG_PRINT("info",("MyISAM table_name: '%s'", table_name)); + table_name= (*tbl)->s->path; *pos++= table_name; } *pos=0; DBUG_RETURN(myrg_create(fn_format(buff,name,"","",2+4+16), - (const char **) table_names, + table_names, create_info->merge_insert_method, (my_bool) 0)); } @@ -493,14 +568,14 @@ void ha_myisammrg::append_create_info(String *packet) if (file->merge_insert_method != MERGE_INSERT_DISABLED) { - packet->append(" INSERT_METHOD=",15); + packet->append(STRING_WITH_LEN(" INSERT_METHOD=")); packet->append(get_type(&merge_insert_method,file->merge_insert_method-1)); } - packet->append(" UNION=(",8); + packet->append(STRING_WITH_LEN(" UNION=(")); MYRG_TABLE *open_table,*first; - current_db= table->table_cache_key; - db_length= strlen(current_db); + current_db= table->s->db; + db_length= (uint) strlen(current_db); for (first=open_table=file->open_tables ; open_table != file->end_table ; diff --git a/sql/ha_myisammrg.h b/sql/ha_myisammrg.h index 84af55b262e..16c734e2682 100644 --- a/sql/ha_myisammrg.h +++ b/sql/ha_myisammrg.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 
MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -28,7 +27,7 @@ class ha_myisammrg: public handler MYRG_INFO *file; public: - ha_myisammrg(TABLE *table): handler(table), file(0) {} + ha_myisammrg(TABLE *table_arg); ~ha_myisammrg() {} const char *table_type() const { return "MRG_MyISAM"; } const char **bas_ext() const; @@ -37,7 +36,7 @@ class ha_myisammrg: public handler { return (HA_REC_NOT_IN_SEQ | HA_AUTO_PART_KEY | HA_READ_RND_SAME | HA_NULL_IN_KEY | HA_CAN_INDEX_BLOBS | HA_FILE_BASED | - HA_ANY_INDEX_MAY_BE_UNIQUE); + HA_ANY_INDEX_MAY_BE_UNIQUE | HA_CAN_BIT_FIELD); } ulong index_flags(uint inx, uint part, bool all_parts) const { diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index d16e00f4e52..d2a242a6b01 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1,9 +1,8 @@ - /* Copyright (C) 2000-2003 MySQL AB +/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -12,7 +11,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* @@ -35,17 +34,45 @@ // options from from mysqld.cc extern my_bool opt_ndb_optimized_node_selection; extern const char *opt_ndbcluster_connectstring; +extern ulong opt_ndb_cache_check_time; // Default value for parallelism -static const int parallelism= 240; +static const int parallelism= 0; // Default value for max number of transactions // createable against NDB from this handler -static const int max_transactions= 256; +static const int max_transactions= 2; static const char *ha_ndb_ext=".ndb"; -#define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0 +static int ndbcluster_close_connection(THD *thd); +static int ndbcluster_commit(THD *thd, bool all); +static int ndbcluster_rollback(THD *thd, bool all); + +handlerton ndbcluster_hton = { + "ndbcluster", + SHOW_OPTION_YES, + "Clustered, fault-tolerant, memory-based tables", + DB_TYPE_NDBCLUSTER, + ndbcluster_init, + 0, /* slot */ + 0, /* savepoint size */ + ndbcluster_close_connection, + NULL, /* savepoint_set */ + NULL, /* savepoint_rollback */ + NULL, /* savepoint_release */ + ndbcluster_commit, + ndbcluster_rollback, + NULL, /* prepare */ + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + HTON_CAN_RECREATE +}; + #define NDB_AUTO_INCREMENT_RETRIES 10 #define NDB_INVALID_SCHEMA_OBJECT 241 @@ -53,10 +80,11 @@ static const char *ha_ndb_ext=".ndb"; #define ERR_PRINT(err) \ DBUG_PRINT("error", ("%d message: %s", err.code, err.message)) -#define ERR_RETURN(err) \ -{ \ - 
ERR_PRINT(err); \ - DBUG_RETURN(ndb_to_mysql_error(&err)); \ +#define ERR_RETURN(err) \ +{ \ + const NdbError& tmp= err; \ + ERR_PRINT(tmp); \ + DBUG_RETURN(ndb_to_mysql_error(&tmp)); \ } // Typedefs for long names @@ -83,17 +111,61 @@ static void free_share(NDB_SHARE *share); static int packfrm(const void *data, uint len, const void **pack_data, uint *pack_len); static int unpackfrm(const void **data, uint *len, - const void* pack_data); + const void* pack_data); static int ndb_get_table_statistics(ha_ndbcluster*, bool, Ndb*, const char *, - Uint64* rows, Uint64* commits); + struct Ndb_statistics *); +// Util thread variables +static pthread_t ndb_util_thread; +pthread_mutex_t LOCK_ndb_util_thread; +pthread_cond_t COND_ndb_util_thread; +pthread_handler_t ndb_util_thread_func(void *arg); +ulong ndb_cache_check_time; /* Dummy buffer to read zero pack_length fields which are mapped to 1 char */ -static byte dummy_buf[1]; +static uint32 dummy_buf; + +/* + Stats that can be retrieved from ndb +*/ + +struct Ndb_statistics { + Uint64 row_count; + Uint64 commit_count; + Uint64 row_size; + Uint64 fragment_memory; +}; + +/* Status variables shown with 'show status like 'Ndb%' */ + +static long ndb_cluster_node_id= 0; +static const char * ndb_connected_host= 0; +static long ndb_connected_port= 0; +static long ndb_number_of_replicas= 0; +static long ndb_number_of_data_nodes= 0; + +static int update_status_variables(Ndb_cluster_connection *c) +{ + ndb_cluster_node_id= c->node_id(); + ndb_connected_port= c->get_connected_port(); + ndb_connected_host= c->get_connected_host(); + ndb_number_of_replicas= 0; + ndb_number_of_data_nodes= c->no_db_nodes(); + return 0; +} + +struct show_var_st ndb_status_variables[]= { + {"cluster_node_id", (char*) &ndb_cluster_node_id, SHOW_LONG}, + {"config_from_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR}, + {"config_from_port", (char*) &ndb_connected_port, SHOW_LONG}, +// {"number_of_replicas", (char*) &ndb_number_of_replicas, SHOW_LONG}, + 
{"number_of_data_nodes",(char*) &ndb_number_of_data_nodes, SHOW_LONG}, + {NullS, NullS, SHOW_LONG} +}; /* Error handling functions @@ -114,7 +186,7 @@ static const err_code_mapping err_map[]= { 721, HA_ERR_TABLE_EXIST, 1 }, { 4244, HA_ERR_TABLE_EXIST, 1 }, - { 709, HA_ERR_NO_SUCH_TABLE, 1 }, + { 709, HA_ERR_NO_SUCH_TABLE, 0 }, { 266, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, { 274, HA_ERR_LOCK_WAIT_TIMEOUT, 1 }, @@ -129,6 +201,8 @@ static const err_code_mapping err_map[]= { 827, HA_ERR_RECORD_FILE_FULL, 1 }, { 832, HA_ERR_RECORD_FILE_FULL, 1 }, + { 284, HA_ERR_TABLE_DEF_CHANGED, 0 }, + { 0, 1, 0 }, { -1, -1, 1 } @@ -143,8 +217,8 @@ static int ndb_to_mysql_error(const NdbError *err) { // Push the NDB error message as warning push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - err->code, err->message, "NDB"); + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + err->code, err->message, "NDB"); } if (err_map[i].my_err == -1) return err->code; @@ -154,61 +228,73 @@ static int ndb_to_mysql_error(const NdbError *err) inline -int execute_no_commit(ha_ndbcluster *h, NdbConnection *trans) +int execute_no_commit(ha_ndbcluster *h, NdbTransaction *trans, + bool force_release) { - int m_batch_execute= 0; #ifdef NOT_USED + int m_batch_execute= 0; if (m_batch_execute) return 0; #endif - h->release_completed_operations(trans); - return trans->execute(NoCommit,AbortOnError,h->m_force_send); + h->release_completed_operations(trans, force_release); + return trans->execute(NdbTransaction::NoCommit, + NdbTransaction::AbortOnError, + h->m_force_send); } inline -int execute_commit(ha_ndbcluster *h, NdbConnection *trans) +int execute_commit(ha_ndbcluster *h, NdbTransaction *trans) { - int m_batch_execute= 0; #ifdef NOT_USED + int m_batch_execute= 0; if (m_batch_execute) return 0; #endif - return trans->execute(Commit,AbortOnError,h->m_force_send); + return trans->execute(NdbTransaction::Commit, + NdbTransaction::AbortOnError, + h->m_force_send); } inline -int 
execute_commit(THD *thd, NdbConnection *trans) +int execute_commit(THD *thd, NdbTransaction *trans) { - int m_batch_execute= 0; #ifdef NOT_USED + int m_batch_execute= 0; if (m_batch_execute) return 0; #endif - return trans->execute(Commit,AbortOnError,thd->variables.ndb_force_send); + return trans->execute(NdbTransaction::Commit, + NdbTransaction::AbortOnError, + thd->variables.ndb_force_send); } inline -int execute_no_commit_ie(ha_ndbcluster *h, NdbConnection *trans) +int execute_no_commit_ie(ha_ndbcluster *h, NdbTransaction *trans, + bool force_release) { - int m_batch_execute= 0; #ifdef NOT_USED + int m_batch_execute= 0; if (m_batch_execute) return 0; #endif - h->release_completed_operations(trans); - return trans->execute(NoCommit, AO_IgnoreError,h->m_force_send); + h->release_completed_operations(trans, force_release); + return trans->execute(NdbTransaction::NoCommit, + NdbTransaction::AO_IgnoreError, + h->m_force_send); } /* Place holder for ha_ndbcluster thread specific data */ - Thd_ndb::Thd_ndb() { ndb= new Ndb(g_ndb_cluster_connection, ""); lock_count= 0; count= 0; + all= NULL; + stmt= NULL; error= 0; + query_state&= NDB_QUERY_NORMAL; } Thd_ndb::~Thd_ndb() @@ -216,7 +302,8 @@ Thd_ndb::~Thd_ndb() if (ndb) { #ifndef DBUG_OFF - Ndb::Free_list_usage tmp; tmp.m_name= 0; + Ndb::Free_list_usage tmp; + tmp.m_name= 0; while (ndb->get_free_list_usage(&tmp)) { uint leaked= (uint) tmp.m_created - tmp.m_free; @@ -228,14 +315,23 @@ Thd_ndb::~Thd_ndb() } #endif delete ndb; + ndb= NULL; } - ndb= 0; + changed_tables.empty(); } inline +Thd_ndb * +get_thd_ndb(THD *thd) { return (Thd_ndb *) thd->ha_data[ndbcluster_hton.slot]; } + +inline +void +set_thd_ndb(THD *thd, Thd_ndb *thd_ndb) { thd->ha_data[ndbcluster_hton.slot]= thd_ndb; } + +inline Ndb *ha_ndbcluster::get_ndb() { - return ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb; + return get_thd_ndb(current_thd)->ndb; } /* @@ -251,7 +347,7 @@ struct Ndb_local_table_statistics { void ha_ndbcluster::set_rec_per_key() { 
DBUG_ENTER("ha_ndbcluster::get_status_const"); - for (uint i=0 ; i < table->keys ; i++) + for (uint i=0 ; i < table->s->keys ; i++) { table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= 1; } @@ -265,29 +361,31 @@ int ha_ndbcluster::records_update() DBUG_ENTER("ha_ndbcluster::records_update"); int result= 0; - struct Ndb_local_table_statistics *info= + struct Ndb_local_table_statistics *local_info= (struct Ndb_local_table_statistics *)m_table_info; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - ((const NDBTAB *)m_table)->getTableId(), - info->no_uncommitted_rows_count)); + ((const NDBTAB *)m_table)->getTableId(), + local_info->no_uncommitted_rows_count)); // if (info->records == ~(ha_rows)0) { Ndb *ndb= get_ndb(); - Uint64 rows; + struct Ndb_statistics stat; ndb->setDatabaseName(m_dbname); - result= ndb_get_table_statistics(this, true, ndb, m_tabname, &rows, 0); - if(result == 0) + result= ndb_get_table_statistics(this, true, ndb, m_tabname, &stat); + if (result == 0) { - info->records= rows; + mean_rec_length= stat.row_size; + data_file_length= stat.fragment_memory; + local_info->records= stat.row_count; } } { THD *thd= current_thd; - if (((Thd_ndb*)(thd->transaction.thd_ndb))->error) - info->no_uncommitted_rows_count= 0; + if (get_thd_ndb(thd)->error) + local_info->no_uncommitted_rows_count= 0; } if(result==0) - records= info->records+ info->no_uncommitted_rows_count; + records= local_info->records+ local_info->no_uncommitted_rows_count; DBUG_RETURN(result); } @@ -296,8 +394,7 @@ void ha_ndbcluster::no_uncommitted_rows_execute_failure() if (m_ha_not_exact_count) return; DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_execute_failure"); - THD *thd= current_thd; - ((Thd_ndb*)(thd->transaction.thd_ndb))->error= 1; + get_thd_ndb(current_thd)->error= 1; DBUG_VOID_RETURN; } @@ -306,17 +403,17 @@ void ha_ndbcluster::no_uncommitted_rows_init(THD *thd) if (m_ha_not_exact_count) return; DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_init"); - struct 
Ndb_local_table_statistics *info= + struct Ndb_local_table_statistics *local_info= (struct Ndb_local_table_statistics *)m_table_info; - Thd_ndb *thd_ndb= (Thd_ndb *)thd->transaction.thd_ndb; - if (info->last_count != thd_ndb->count) + Thd_ndb *thd_ndb= get_thd_ndb(thd); + if (local_info->last_count != thd_ndb->count) { - info->last_count = thd_ndb->count; - info->no_uncommitted_rows_count= 0; - info->records= ~(ha_rows)0; + local_info->last_count= thd_ndb->count; + local_info->no_uncommitted_rows_count= 0; + local_info->records= ~(ha_rows)0; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - ((const NDBTAB *)m_table)->getTableId(), - info->no_uncommitted_rows_count)); + ((const NDBTAB *)m_table)->getTableId(), + local_info->no_uncommitted_rows_count)); } DBUG_VOID_RETURN; } @@ -326,12 +423,12 @@ void ha_ndbcluster::no_uncommitted_rows_update(int c) if (m_ha_not_exact_count) return; DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_update"); - struct Ndb_local_table_statistics *info= + struct Ndb_local_table_statistics *local_info= (struct Ndb_local_table_statistics *)m_table_info; - info->no_uncommitted_rows_count+= c; + local_info->no_uncommitted_rows_count+= c; DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d", - ((const NDBTAB *)m_table)->getTableId(), - info->no_uncommitted_rows_count)); + ((const NDBTAB *)m_table)->getTableId(), + local_info->no_uncommitted_rows_count)); DBUG_VOID_RETURN; } @@ -340,16 +437,17 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd) if (m_ha_not_exact_count) return; DBUG_ENTER("ha_ndbcluster::no_uncommitted_rows_reset"); - ((Thd_ndb*)(thd->transaction.thd_ndb))->count++; - ((Thd_ndb*)(thd->transaction.thd_ndb))->error= 0; + Thd_ndb *thd_ndb= get_thd_ndb(thd); + thd_ndb->count++; + thd_ndb->error= 0; DBUG_VOID_RETURN; } /* Take care of the error that occured in NDB - + RETURN - 0 No error + 0 No error # The mapped error code */ @@ -375,42 +473,42 @@ void ha_ndbcluster::invalidate_dictionary_cache(bool global) } 
else dict->removeCachedTable(m_tabname); - table->version=0L; /* Free when thread is ready */ + table->s->version=0L; /* Free when thread is ready */ /* Invalidate indexes */ - for (uint i= 0; i < table->keys; i++) + for (uint i= 0; i < table->s->keys; i++) { NDBINDEX *index = (NDBINDEX *) m_index[i].index; NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index; NDB_INDEX_TYPE idx_type= m_index[i].type; - switch(idx_type) { - case(PRIMARY_KEY_ORDERED_INDEX): - case(ORDERED_INDEX): + switch (idx_type) { + case PRIMARY_KEY_ORDERED_INDEX: + case ORDERED_INDEX: if (global) dict->invalidateIndex(index->getName(), m_tabname); else dict->removeCachedIndex(index->getName(), m_tabname); - break; - case(UNIQUE_ORDERED_INDEX): + break; + case UNIQUE_ORDERED_INDEX: if (global) dict->invalidateIndex(index->getName(), m_tabname); else dict->removeCachedIndex(index->getName(), m_tabname); - case(UNIQUE_INDEX): + case UNIQUE_INDEX: if (global) dict->invalidateIndex(unique_index->getName(), m_tabname); else dict->removeCachedIndex(unique_index->getName(), m_tabname); break; - case(PRIMARY_KEY_INDEX): - case(UNDEFINED_INDEX): + case PRIMARY_KEY_INDEX: + case UNDEFINED_INDEX: break; } } DBUG_VOID_RETURN; } -int ha_ndbcluster::ndb_err(NdbConnection *trans) +int ha_ndbcluster::ndb_err(NdbTransaction *trans) { int res; NdbError err= trans->getNdbError(); @@ -420,6 +518,13 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans) switch (err.classification) { case NdbError::SchemaError: { + /* Close other open handlers not used by any thread */ + TABLE_LIST table_list; + bzero((char*) &table_list,sizeof(table_list)); + table_list.db= m_dbname; + table_list.alias= table_list.table_name= m_tabname; + close_cached_tables(current_thd, 0, &table_list); + invalidate_dictionary_cache(TRUE); if (err.code==284) @@ -437,16 +542,7 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans) if (err.code != 709) DBUG_RETURN(1); } - else - { - DBUG_PRINT("info", ("Table exist but must have changed")); - /* 
In 5.0, this should be replaced with a mapping to a mysql error */ - my_printf_error(ER_UNKNOWN_ERROR, - "Table definition has changed, "\ - "please retry transaction", - MYF(0)); - DBUG_RETURN(1); - } + DBUG_PRINT("info", ("Table exists but must have changed")); } break; } @@ -455,7 +551,7 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans) } res= ndb_to_mysql_error(&err); DBUG_PRINT("info", ("transformed ndbcluster error %d to mysql error %d", - err.code, res)); + err.code, res)); if (res == HA_ERR_FOUND_DUPP_KEY) { if (m_rows_to_insert == 1) @@ -465,7 +561,7 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans) violations here, so we need to return MAX_KEY for non-primary to signal that key is unknown */ - m_dupkey= err.code == 630 ? table->primary_key : MAX_KEY; + m_dupkey= err.code == 630 ? table->s->primary_key : MAX_KEY; } else { @@ -483,7 +579,7 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans) */ bool ha_ndbcluster::get_error_message(int error, - String *buf) + String *buf) { DBUG_ENTER("ha_ndbcluster::get_error_message"); DBUG_PRINT("enter", ("error: %d", error)); @@ -508,7 +604,6 @@ bool ha_ndbcluster::get_error_message(int error, static bool ndb_supported_type(enum_field_types type) { switch (type) { - case MYSQL_TYPE_DECIMAL: case MYSQL_TYPE_TINY: case MYSQL_TYPE_SHORT: case MYSQL_TYPE_LONG: @@ -516,6 +611,8 @@ static bool ndb_supported_type(enum_field_types type) case MYSQL_TYPE_LONGLONG: case MYSQL_TYPE_FLOAT: case MYSQL_TYPE_DOUBLE: + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: case MYSQL_TYPE_TIMESTAMP: case MYSQL_TYPE_DATETIME: case MYSQL_TYPE_DATE: @@ -524,15 +621,17 @@ static bool ndb_supported_type(enum_field_types type) case MYSQL_TYPE_YEAR: case MYSQL_TYPE_STRING: case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_VARCHAR: case MYSQL_TYPE_TINY_BLOB: case MYSQL_TYPE_BLOB: case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_ENUM: case MYSQL_TYPE_SET: + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_GEOMETRY: return TRUE; case 
MYSQL_TYPE_NULL: - case MYSQL_TYPE_GEOMETRY: break; } return FALSE; @@ -545,11 +644,11 @@ static bool ndb_supported_type(enum_field_types type) */ bool ha_ndbcluster::set_hidden_key(NdbOperation *ndb_op, - uint fieldnr, const byte *field_ptr) + uint fieldnr, const byte *field_ptr) { DBUG_ENTER("set_hidden_key"); DBUG_RETURN(ndb_op->equal(fieldnr, (char*)field_ptr, - NDB_HIDDEN_PRIMARY_KEY_LENGTH) != 0); + NDB_HIDDEN_PRIMARY_KEY_LENGTH) != 0); } @@ -592,21 +691,50 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, DBUG_ASSERT(ndb_supported_type(field->type())); { // ndb currently does not support size 0 - const byte *empty_field= ""; + uint32 empty_field; if (pack_len == 0) { - pack_len= 1; - field_ptr= empty_field; + pack_len= sizeof(empty_field); + field_ptr= (byte *)&empty_field; + if (field->is_null()) + empty_field= 0; + else + empty_field= 1; } if (! (field->flags & BLOB_FLAG)) { - if (field->is_null()) - // Set value to NULL - DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL, pack_len) != 0)); - // Common implementation for most field types - DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)field_ptr, pack_len) != 0); + if (field->type() != MYSQL_TYPE_BIT) + { + if (field->is_null()) + // Set value to NULL + DBUG_RETURN((ndb_op->setValue(fieldnr, + (char*)NULL, pack_len) != 0)); + // Common implementation for most field types + DBUG_RETURN(ndb_op->setValue(fieldnr, + (char*)field_ptr, pack_len) != 0); + } + else // if (field->type() == MYSQL_TYPE_BIT) + { + longlong bits= field->val_int(); + + // Round up bit field length to nearest word boundry + pack_len= ((pack_len + 3) >> 2) << 2; + DBUG_ASSERT(pack_len <= 8); + if (field->is_null()) + // Set value to NULL + DBUG_RETURN((ndb_op->setValue(fieldnr, (char*)NULL, pack_len) != 0)); + DBUG_PRINT("info", ("bit field")); + DBUG_DUMP("value", (char*)&bits, pack_len); +#ifdef WORDS_BIGENDIAN + if (pack_len < 5) + { + DBUG_RETURN(ndb_op->setValue(fieldnr, + ((char*)&bits)+4, pack_len) != 0); + } 
+#endif + DBUG_RETURN(ndb_op->setValue(fieldnr, (char*)&bits, pack_len) != 0); + } } - // Blob type NdbBlob *ndb_blob= ndb_op->getBlobHandle(fieldnr); if (ndb_blob != NULL) @@ -628,11 +756,11 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field, } DBUG_PRINT("value", ("set blob ptr=%p len=%u", - blob_ptr, blob_len)); + blob_ptr, blob_len)); DBUG_DUMP("value", (char*)blob_ptr, min(blob_len, 26)); if (set_blob_value) - *set_blob_value= TRUE; + *set_blob_value= TRUE; // No callback needed to write value DBUG_RETURN(ndb_blob->setValue(blob_ptr, blob_len) != 0); } @@ -661,10 +789,11 @@ int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg) if (ndb_blob->blobsNextBlob() != NULL) DBUG_RETURN(0); ha_ndbcluster *ha= (ha_ndbcluster *)arg; - DBUG_RETURN(ha->get_ndb_blobs_value(ndb_blob)); + DBUG_RETURN(ha->get_ndb_blobs_value(ndb_blob, ha->m_blobs_offset)); } -int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob) +int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob, + my_ptrdiff_t ptrdiff) { DBUG_ENTER("get_ndb_blobs_value"); @@ -673,7 +802,7 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob) for (int loop= 0; loop <= 1; loop++) { uint32 offset= 0; - for (uint i= 0; i < table->fields; i++) + for (uint i= 0; i < table->s->fields; i++) { Field *field= table->field[i]; NdbValue value= m_value[i]; @@ -692,12 +821,15 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob) { char *buf= m_blobs_buffer + offset; uint32 len= 0xffffffff; // Max uint32 - DBUG_PRINT("value", ("read blob ptr=%x len=%u", - (UintPtr)buf, (uint)blob_len)); + DBUG_PRINT("value", ("read blob ptr: 0x%lx len: %u", + (long)buf, (uint)blob_len)); if (ndb_blob->readData(buf, len) != 0) DBUG_RETURN(-1); DBUG_ASSERT(len == blob_len); + // Ugly hack assumes only ptr needs to be changed + field_blob->ptr+= ptrdiff; field_blob->set_ptr(len, buf); + field_blob->ptr-= ptrdiff; } offset+= blob_size; } @@ -736,14 +868,21 @@ int 
ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field, DBUG_ASSERT(ndb_supported_type(field->type())); DBUG_ASSERT(field->ptr != NULL); if (! (field->flags & BLOB_FLAG)) - { - byte *field_buf; - if (field->pack_length() != 0) - field_buf= buf + (field->ptr - table->record[0]); - else - field_buf= dummy_buf; - m_value[fieldnr].rec= ndb_op->getValue(fieldnr, - field_buf); + { + if (field->type() != MYSQL_TYPE_BIT) + { + byte *field_buf; + if (field->pack_length() != 0) + field_buf= buf + (field->ptr - table->record[0]); + else + field_buf= (byte *)&dummy_buf; + m_value[fieldnr].rec= ndb_op->getValue(fieldnr, + field_buf); + } + else // if (field->type() == MYSQL_TYPE_BIT) + { + m_value[fieldnr].rec= ndb_op->getValue(fieldnr); + } DBUG_RETURN(m_value[fieldnr].rec == NULL); } @@ -753,6 +892,7 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field, if (ndb_blob != NULL) { // Set callback + m_blobs_offset= buf - (byte*) table->record[0]; void *arg= (void *)this; DBUG_RETURN(ndb_blob->setActiveHook(g_get_ndb_blobs_value, arg) != 0); } @@ -770,14 +910,14 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field, */ bool ha_ndbcluster::uses_blob_value(bool all_fields) { - if (table->blob_fields == 0) + if (table->s->blob_fields == 0) return FALSE; if (all_fields) return TRUE; { - uint no_fields= table->fields; + uint no_fields= table->s->fields; int i; - THD *thd= table->in_use; + THD *thd= current_thd; // They always put blobs at the end.. 
for (i= no_fields - 1; i >= 0; i--) { @@ -812,7 +952,7 @@ int ha_ndbcluster::get_metadata(const char *path) DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path)); do { - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; uint length, pack_length; if (!(tab= dict->getTable(m_tabname))) @@ -830,7 +970,7 @@ int ha_ndbcluster::get_metadata(const char *path) */ error= 0; if (readfrm(path, &data, &length) || - packfrm(data, length, &pack_data, &pack_length)) + packfrm(data, length, &pack_data, &pack_length)) { my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR)); @@ -838,24 +978,24 @@ int ha_ndbcluster::get_metadata(const char *path) } if ((pack_length != tab->getFrmLength()) || - (memcmp(pack_data, tab->getFrmData(), pack_length))) + (memcmp(pack_data, tab->getFrmData(), pack_length))) { if (!invalidating_ndb_table) { - DBUG_PRINT("info", ("Invalidating table")); + DBUG_PRINT("info", ("Invalidating table")); invalidate_dictionary_cache(TRUE); - invalidating_ndb_table= TRUE; + invalidating_ndb_table= TRUE; } else { - DBUG_PRINT("error", - ("metadata, pack_length: %d getFrmLength: %d memcmp: %d", - pack_length, tab->getFrmLength(), - memcmp(pack_data, tab->getFrmData(), pack_length))); - DBUG_DUMP("pack_data", (char*)pack_data, pack_length); - DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength()); - error= 3; - invalidating_ndb_table= FALSE; + DBUG_PRINT("error", + ("metadata, pack_length: %d getFrmLength: %d memcmp: %d", + pack_length, tab->getFrmLength(), + memcmp(pack_data, tab->getFrmData(), pack_length))); + DBUG_DUMP("pack_data", (char*)pack_data, pack_length); + DBUG_DUMP("frm", (char*)tab->getFrmData(), tab->getFrmLength()); + error= 3; + invalidating_ndb_table= FALSE; } } else @@ -877,8 +1017,8 @@ int ha_ndbcluster::get_metadata(const char *path) } static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, - const NDBINDEX *index, - KEY *key_info) + const NDBINDEX *index, + 
KEY *key_info) { DBUG_ENTER("fix_unique_index_attr_order"); unsigned sz= index->getNoOfIndexColumns(); @@ -893,19 +1033,16 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, for (unsigned i= 0; key_part != end; key_part++, i++) { const char *field_name= key_part->field->field_name; - unsigned name_sz= strlen(field_name); - if (name_sz >= NDB_MAX_ATTR_NAME_SIZE) - name_sz= NDB_MAX_ATTR_NAME_SIZE-1; #ifndef DBUG_OFF data.unique_index_attrid_map[i]= 255; #endif for (unsigned j= 0; j < sz; j++) { const NDBCOL *c= index->getColumn(j); - if (strncmp(field_name, c->getName(), name_sz) == 0) + if (strcmp(field_name, c->getName()) == 0) { - data.unique_index_attrid_map[i]= j; - break; + data.unique_index_attrid_map[i]= j; + break; } } DBUG_ASSERT(data.unique_index_attrid_map[i] != 255); @@ -919,58 +1056,77 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase) { uint i; int error= 0; - const char *name, *index_name; + const char *index_name; char unique_index_name[FN_LEN]; + bool null_in_unique_index= false; static const char* unique_suffix= "$unique"; KEY* key_info= tab->key_info; - const char **key_name= tab->keynames.type_names; + const char **key_name= tab->s->keynames.type_names; NDBDICT *dict= ndb->getDictionary(); - DBUG_ENTER("build_index_list"); + DBUG_ENTER("ha_ndbcluster::build_index_list"); + m_has_unique_index= FALSE; // Save information about all known indexes - for (i= 0; i < tab->keys; i++, key_info++, key_name++) + for (i= 0; i < tab->s->keys; i++, key_info++, key_name++) { index_name= *key_name; NDB_INDEX_TYPE idx_type= get_index_type_from_table(i); m_index[i].type= idx_type; if (idx_type == UNIQUE_ORDERED_INDEX || idx_type == UNIQUE_INDEX) { + m_has_unique_index= TRUE; strxnmov(unique_index_name, FN_LEN, index_name, unique_suffix, NullS); DBUG_PRINT("info", ("Created unique index name \'%s\' for index %d", - unique_index_name, i)); + unique_index_name, i)); } // Create secondary indexes if in create phase if (phase == 
ILBP_CREATE) { DBUG_PRINT("info", ("Creating index %u: %s", i, index_name)); switch (idx_type){ - + case PRIMARY_KEY_INDEX: - // Do nothing, already created - break; + // Do nothing, already created + break; case PRIMARY_KEY_ORDERED_INDEX: - error= create_ordered_index(index_name, key_info); - break; + error= create_ordered_index(index_name, key_info); + break; case UNIQUE_ORDERED_INDEX: - if (!(error= create_ordered_index(index_name, key_info))) - error= create_unique_index(unique_index_name, key_info); - break; + if (!(error= create_ordered_index(index_name, key_info))) + error= create_unique_index(unique_index_name, key_info); + break; case UNIQUE_INDEX: - if (!(error= check_index_fields_not_null(i))) - error= create_unique_index(unique_index_name, key_info); - break; + if (check_index_fields_not_null(i)) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_NULL_COLUMN_IN_INDEX, + "Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan"); + null_in_unique_index= true; + } + error= create_unique_index(unique_index_name, key_info); + break; case ORDERED_INDEX: - error= create_ordered_index(index_name, key_info); - break; + if (key_info->algorithm == HA_KEY_ALG_HASH) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_UNSUPPORTED_EXTENSION, + ER(ER_UNSUPPORTED_EXTENSION), + "Ndb does not support non-unique " + "hash based indexes"); + error= HA_ERR_UNSUPPORTED; + break; + } + error= create_ordered_index(index_name, key_info); + break; default: - DBUG_ASSERT(FALSE); - break; + DBUG_ASSERT(FALSE); + break; } if (error) { - DBUG_PRINT("error", ("Failed to create index %u", i)); - drop_table(); - break; + DBUG_PRINT("error", ("Failed to create index %u", i)); + drop_table(); + break; } } // Add handles to index objects @@ -989,6 +1145,11 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase) m_index[i].unique_index= (void *) index; error= 
fix_unique_index_attr_order(m_index[i], index, key_info); } + if (idx_type == UNIQUE_INDEX && + phase != ILBP_CREATE && + check_index_fields_not_null(i)) + null_in_unique_index= true; + m_index[i].null_in_unique_index= null_in_unique_index; } DBUG_RETURN(error); @@ -1002,33 +1163,29 @@ int ha_ndbcluster::build_index_list(Ndb *ndb, TABLE *tab, enum ILBP phase) NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_table(uint inx) const { bool is_hash_index= (table->key_info[inx].algorithm == HA_KEY_ALG_HASH); - if (inx == table->primary_key) + if (inx == table->s->primary_key) return is_hash_index ? PRIMARY_KEY_INDEX : PRIMARY_KEY_ORDERED_INDEX; - else - return ((table->key_info[inx].flags & HA_NOSAME) ? - (is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) : - ORDERED_INDEX); + + return ((table->key_info[inx].flags & HA_NOSAME) ? + (is_hash_index ? UNIQUE_INDEX : UNIQUE_ORDERED_INDEX) : + ORDERED_INDEX); } -int ha_ndbcluster::check_index_fields_not_null(uint inx) +bool ha_ndbcluster::check_index_fields_not_null(uint inx) { KEY* key_info= table->key_info + inx; KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; - DBUG_ENTER("check_index_fields_not_null"); + DBUG_ENTER("ha_ndbcluster::check_index_fields_not_null"); for (; key_part != end; key_part++) { Field* field= key_part->field; if (field->maybe_null()) - { - my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX), - MYF(0),field->field_name); - DBUG_RETURN(ER_NULL_COLUMN_IN_INDEX); - } + DBUG_RETURN(true); } - DBUG_RETURN(0); + DBUG_RETURN(false); } void ha_ndbcluster::release_metadata() @@ -1064,7 +1221,7 @@ int ha_ndbcluster::get_ndb_lock_type(enum thr_lock_type type) DBUG_PRINT("info", ("Using exclusive lock")); DBUG_RETURN(NdbOperation::LM_Exclusive); } - else if (type == TL_READ_WITH_SHARED_LOCKS || + else if (type == TL_READ_WITH_SHARED_LOCKS || uses_blob_value(m_retrieve_all_fields)) { DBUG_PRINT("info", ("Using read lock")); @@ -1093,6 +1250,7 @@ 
static const ulong index_type_flags[]= */ // HA_KEYREAD_ONLY | HA_READ_NEXT | + HA_READ_PREV | HA_READ_RANGE | HA_READ_ORDER, @@ -1101,11 +1259,13 @@ static const ulong index_type_flags[]= /* UNIQUE_ORDERED_INDEX */ HA_READ_NEXT | + HA_READ_PREV | HA_READ_RANGE | HA_READ_ORDER, /* ORDERED_INDEX */ HA_READ_NEXT | + HA_READ_PREV | HA_READ_RANGE | HA_READ_ORDER }; @@ -1118,6 +1278,12 @@ inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const return m_index[idx_no].type; } +inline bool ha_ndbcluster::has_null_in_unique_index(uint idx_no) const +{ + DBUG_ASSERT(idx_no < MAX_KEY); + return m_index[idx_no].null_in_unique_index; +} + /* Get the flags for an index @@ -1129,16 +1295,35 @@ inline NDB_INDEX_TYPE ha_ndbcluster::get_index_type(uint idx_no) const inline ulong ha_ndbcluster::index_flags(uint idx_no, uint part, bool all_parts) const { - DBUG_ENTER("index_flags"); + DBUG_ENTER("ha_ndbcluster::index_flags"); DBUG_PRINT("info", ("idx_no: %d", idx_no)); DBUG_ASSERT(get_index_type_from_table(idx_no) < index_flags_size); - DBUG_RETURN(index_type_flags[get_index_type_from_table(idx_no)]); + DBUG_RETURN(index_type_flags[get_index_type_from_table(idx_no)] | + HA_KEY_SCAN_NOT_ROR); +} + +static void shrink_varchar(Field* field, const byte* & ptr, char* buf) +{ + if (field->type() == MYSQL_TYPE_VARCHAR && ptr != NULL) { + Field_varstring* f= (Field_varstring*)field; + if (f->length_bytes == 1) { + uint pack_len= field->pack_length(); + DBUG_ASSERT(1 <= pack_len && pack_len <= 256); + if (ptr[1] == 0) { + buf[0]= ptr[0]; + } else { + DBUG_ASSERT(FALSE); + buf[0]= 255; + } + memmove(buf + 1, ptr + 2, pack_len - 1); + ptr= buf; + } + } } - int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) { - KEY* key_info= table->key_info + table->primary_key; + KEY* key_info= table->key_info + table->s->primary_key; KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; DBUG_ENTER("set_primary_key"); @@ -1146,10 
+1331,13 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) for (; key_part != end; key_part++) { Field* field= key_part->field; + const byte* ptr= key; + char buf[256]; + shrink_varchar(field, ptr, buf); if (set_ndb_key(op, field, - key_part->fieldnr-1, key)) + key_part->fieldnr-1, ptr)) ERR_RETURN(op->getNdbError()); - key += key_part->length; + key += key_part->store_length; } DBUG_RETURN(0); } @@ -1157,7 +1345,7 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *record) { - KEY* key_info= table->key_info + table->primary_key; + KEY* key_info= table->key_info + table->s->primary_key; KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; DBUG_ENTER("set_primary_key_from_record"); @@ -1172,16 +1360,99 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *rec DBUG_RETURN(0); } +int ha_ndbcluster::set_index_key_from_record(NdbOperation *op, const byte *record, uint keyno) +{ + KEY* key_info= table->key_info + keyno; + KEY_PART_INFO* key_part= key_info->key_part; + KEY_PART_INFO* end= key_part+key_info->key_parts; + uint i; + DBUG_ENTER("set_index_key_from_record"); + + for (i= 0; key_part != end; key_part++, i++) + { + Field* field= key_part->field; + if (set_ndb_key(op, field, m_index[keyno].unique_index_attrid_map[i], + record+key_part->offset)) + ERR_RETURN(m_active_trans->getNdbError()); + } + DBUG_RETURN(0); +} + +int +ha_ndbcluster::set_index_key(NdbOperation *op, + const KEY *key_info, + const byte * key_ptr) +{ + DBUG_ENTER("ha_ndbcluster::set_index_key"); + uint i; + KEY_PART_INFO* key_part= key_info->key_part; + KEY_PART_INFO* end= key_part+key_info->key_parts; + + for (i= 0; key_part != end; key_part++, i++) + { + Field* field= key_part->field; + const byte* ptr= key_part->null_bit ? 
key_ptr + 1 : key_ptr; + char buf[256]; + shrink_varchar(field, ptr, buf); + if (set_ndb_key(op, field, m_index[active_index].unique_index_attrid_map[i], ptr)) + ERR_RETURN(m_active_trans->getNdbError()); + key_ptr+= key_part->store_length; + } + DBUG_RETURN(0); +} + +inline +int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) +{ + uint i; + THD *thd= current_thd; + + DBUG_ENTER("define_read_attrs"); + + // Define attributes to read + for (i= 0; i < table->s->fields; i++) + { + Field *field= table->field[i]; + if ((thd->query_id == field->query_id) || + ((field->flags & PRI_KEY_FLAG)) || + m_retrieve_all_fields) + { + if (get_ndb_value(op, field, i, buf)) + ERR_RETURN(op->getNdbError()); + } + else + { + m_value[i].ptr= NULL; + } + } + + if (table->s->primary_key == MAX_KEY) + { + DBUG_PRINT("info", ("Getting hidden key")); + // Scanning table with no primary key + int hidden_no= table->s->fields; +#ifndef DBUG_OFF + const NDBTAB *tab= (const NDBTAB *) m_table; + if (!tab->getColumn(hidden_no)) + DBUG_RETURN(1); +#endif + if (get_ndb_value(op, NULL, hidden_no, NULL)) + ERR_RETURN(op->getNdbError()); + } + DBUG_RETURN(0); +} + /* Read one record from NDB using primary key */ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf) { - uint no_fields= table->fields, i; + uint no_fields= table->s->fields; NdbConnection *trans= m_active_trans; NdbOperation *op; - THD *thd= current_thd; + + int res; DBUG_ENTER("pk_read"); DBUG_PRINT("enter", ("key_len: %u", key_len)); DBUG_DUMP("key", (char*)key, key_len); @@ -1191,45 +1462,29 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf) if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || op->readTuple(lm) != 0) ERR_RETURN(trans->getNdbError()); - - if (table->primary_key == MAX_KEY) + + if (table->s->primary_key == MAX_KEY) { // This table has no primary key, use "hidden" primary key DBUG_PRINT("info", ("Using hidden key")); DBUG_DUMP("key", (char*)key, 8); if 
(set_hidden_key(op, no_fields, key)) ERR_RETURN(trans->getNdbError()); - + // Read key at the same time, for future reference if (get_ndb_value(op, NULL, no_fields, NULL)) ERR_RETURN(trans->getNdbError()); } else { - int res; if ((res= set_primary_key(op, key))) return res; } - // Read all wanted non-key field(s) unless HA_EXTRA_RETRIEVE_ALL_COLS - for (i= 0; i < no_fields; i++) - { - Field *field= table->field[i]; - if ((thd->query_id == field->query_id) || - m_retrieve_all_fields || - (field->flags & PRI_KEY_FLAG) && m_retrieve_primary_key) - { - if (get_ndb_value(op, field, i, buf)) - ERR_RETURN(trans->getNdbError()); - } - else - { - // Attribute was not to be read - m_value[i].ptr= NULL; - } - } + if ((res= define_read_attrs(buf, op))) + DBUG_RETURN(res); - if (execute_no_commit_ie(this,trans) != 0) + if (execute_no_commit_ie(this,trans,false) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1241,15 +1496,14 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf) DBUG_RETURN(0); } - /* Read one complementing record from NDB using primary key from old_data */ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data) { - uint no_fields= table->fields, i; - NdbConnection *trans= m_active_trans; + uint no_fields= table->s->fields, i; + NdbTransaction *trans= m_active_trans; NdbOperation *op; THD *thd= current_thd; DBUG_ENTER("complemented_pk_read"); @@ -1263,24 +1517,21 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data) if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || op->readTuple(lm) != 0) ERR_RETURN(trans->getNdbError()); - int res; if ((res= set_primary_key_from_record(op, old_data))) ERR_RETURN(trans->getNdbError()); - // Read all unreferenced non-key field(s) for (i= 0; i < no_fields; i++) { Field *field= table->field[i]; - if (!(field->flags & PRI_KEY_FLAG) && - (thd->query_id != field->query_id)) + if (!((field->flags & PRI_KEY_FLAG) || + (thd->query_id 
== field->query_id))) { if (get_ndb_value(op, field, i, new_data)) - ERR_RETURN(trans->getNdbError()); + ERR_RETURN(trans->getNdbError()); } } - - if (execute_no_commit(this,trans) != 0) + if (execute_no_commit(this,trans,false) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1289,35 +1540,156 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data) // The value have now been fetched from NDB unpack_record(new_data); table->status= 0; + + /** + * restore m_value + */ + for (i= 0; i < no_fields; i++) + { + Field *field= table->field[i]; + if (!((field->flags & PRI_KEY_FLAG) || + (thd->query_id == field->query_id))) + { + m_value[i].ptr= NULL; + } + } + DBUG_RETURN(0); } /* - Peek to check if a particular row already exists + * Check that all operations between first and last all + * have gotten the errcode + * If checking for HA_ERR_KEY_NOT_FOUND then update m_dupkey + * for all succeeding operations + */ +bool ha_ndbcluster::check_all_operations_for_error(NdbTransaction *trans, + const NdbOperation *first, + const NdbOperation *last, + uint errcode) +{ + const NdbOperation *op= first; + DBUG_ENTER("ha_ndbcluster::check_all_operations_for_error"); + + while(op) + { + NdbError err= op->getNdbError(); + if (err.status != NdbError::Success) + { + if (ndb_to_mysql_error(&err) != (int) errcode) + DBUG_RETURN(false); + if (op == last) break; + op= trans->getNextCompletedOperation(op); + } + else + { + // We found a duplicate + if (op->getType() == NdbOperation::UniqueIndexAccess) + { + if (errcode == HA_ERR_KEY_NOT_FOUND) + { + NdbIndexOperation *iop= (NdbIndexOperation *) op; + const NDBINDEX *index= iop->getIndex(); + // Find the key_no of the index + for(uint i= 0; i<table->s->keys; i++) + { + if (m_index[i].unique_index == index) + { + m_dupkey= i; + break; + } + } + } + } + else + { + // Must have been primary key access + DBUG_ASSERT(op->getType() == NdbOperation::PrimaryKeyAccess); + if (errcode == 
HA_ERR_KEY_NOT_FOUND) + m_dupkey= table->s->primary_key; + } + DBUG_RETURN(false); + } + } + DBUG_RETURN(true); +} + +/* + * Peek to check if any rows already exist with conflicting + * primary key or unique index values */ -int ha_ndbcluster::peek_row(const byte *record) +int ha_ndbcluster::peek_indexed_rows(const byte *record, bool check_pk) { - NdbConnection *trans= m_active_trans; + NdbTransaction *trans= m_active_trans; NdbOperation *op; - THD *thd= current_thd; - DBUG_ENTER("peek_row"); - + const NdbOperation *first, *last; + uint i; + int res; + DBUG_ENTER("peek_indexed_rows"); + NdbOperation::LockMode lm= (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || - op->readTuple(lm) != 0) - ERR_RETURN(trans->getNdbError()); - - int res; - if ((res= set_primary_key_from_record(op, record))) - ERR_RETURN(trans->getNdbError()); - - if (execute_no_commit_ie(this,trans) != 0) + + first= NULL; + if (check_pk && table->s->primary_key != MAX_KEY) + { + /* + * Fetch any row with colliding primary key + */ + if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || + op->readTuple(lm) != 0) + ERR_RETURN(trans->getNdbError()); + + first= op; + if ((res= set_primary_key_from_record(op, record))) + ERR_RETURN(trans->getNdbError()); + } + /* + * Fetch any rows with colliding unique indexes + */ + KEY* key_info; + KEY_PART_INFO *key_part, *end; + for (i= 0, key_info= table->key_info; i < table->s->keys; i++, key_info++) + { + if (i != table->s->primary_key && + key_info->flags & HA_NOSAME) { - table->status= STATUS_NOT_FOUND; - DBUG_RETURN(ndb_err(trans)); - } + // A unique index is defined on table + NdbIndexOperation *iop; + NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index; + key_part= key_info->key_part; + end= key_part + key_info->key_parts; + if (!(iop= trans->getNdbIndexOperation(unique_index, + (const NDBTAB *) m_table)) || + iop->readTuple(lm) != 0) + ERR_RETURN(trans->getNdbError()); 
+ + if (!first) + first= iop; + if ((res= set_index_key_from_record(iop, record, i))) + ERR_RETURN(trans->getNdbError()); + } + } + last= trans->getLastDefinedOperation(); + if (first) + res= execute_no_commit_ie(this,trans,false); + else + { + // Table has no keys + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); + } + if (check_all_operations_for_error(trans, first, last, + HA_ERR_KEY_NOT_FOUND)) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(ndb_err(trans)); + } + else + { + DBUG_PRINT("info", ("m_dupkey %d", m_dupkey)); + } DBUG_RETURN(0); } @@ -1326,66 +1698,31 @@ int ha_ndbcluster::peek_row(const byte *record) */ int ha_ndbcluster::unique_index_read(const byte *key, - uint key_len, byte *buf) + uint key_len, byte *buf) { - NdbConnection *trans= m_active_trans; + int res; + NdbTransaction *trans= m_active_trans; NdbIndexOperation *op; - THD *thd= current_thd; - byte *key_ptr; - KEY* key_info; - KEY_PART_INFO *key_part, *end; - uint i; - DBUG_ENTER("unique_index_read"); + DBUG_ENTER("ha_ndbcluster::unique_index_read"); DBUG_PRINT("enter", ("key_len: %u, index: %u", key_len, active_index)); DBUG_DUMP("key", (char*)key, key_len); NdbOperation::LockMode lm= (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); if (!(op= trans->getNdbIndexOperation((NDBINDEX *) - m_index[active_index].unique_index, + m_index[active_index].unique_index, (const NDBTAB *) m_table)) || op->readTuple(lm) != 0) ERR_RETURN(trans->getNdbError()); // Set secondary index key(s) - key_ptr= (byte *) key; - key_info= table->key_info + active_index; - DBUG_ASSERT(key_info->key_length == key_len); - end= (key_part= key_info->key_part) + key_info->key_parts; - - for (i= 0; key_part != end; key_part++, i++) - { - if (set_ndb_key(op, key_part->field, - m_index[active_index].unique_index_attrid_map[i], - key_part->null_bit ? 
key_ptr + 1 : key_ptr)) - ERR_RETURN(trans->getNdbError()); - key_ptr+= key_part->store_length; - } - - // Get non-index attribute(s) - for (i= 0; i < table->fields; i++) - { - Field *field= table->field[i]; - if ((thd->query_id == field->query_id) || - (field->flags & PRI_KEY_FLAG)) // && m_retrieve_primary_key ?? - { - if (get_ndb_value(op, field, i, buf)) - ERR_RETURN(op->getNdbError()); - } - else - { - // Attribute was not to be read - m_value[i].ptr= NULL; - } - } - if (table->primary_key == MAX_KEY) - { - DBUG_PRINT("info", ("Getting hidden key")); - if (get_ndb_value(op, NULL, i, NULL)) - ERR_RETURN(op->getNdbError()); - } + if ((res= set_index_key(op, table->key_info + active_index, key))) + DBUG_RETURN(res); + + if ((res= define_read_attrs(buf, op))) + DBUG_RETURN(res); - if (execute_no_commit_ie(this,trans) != 0) + if (execute_no_commit_ie(this,trans,false) != 0) { table->status= STATUS_NOT_FOUND; DBUG_RETURN(ndb_err(trans)); @@ -1396,28 +1733,13 @@ int ha_ndbcluster::unique_index_read(const byte *key, DBUG_RETURN(0); } -/* - Get the next record of a started scan. Try to fetch - it locally from NdbApi cached records if possible, - otherwise ask NDB for more. - - NOTE - If this is a update/delete make sure to not contact - NDB before any pending ops have been sent to NDB. 
- -*/ - -inline int ha_ndbcluster::next_result(byte *buf) -{ - int check; - NdbConnection *trans= m_active_trans; - NdbResultSet *cursor= m_active_cursor; - DBUG_ENTER("next_result"); - - if (!cursor) - DBUG_RETURN(HA_ERR_END_OF_FILE); - - if (m_lock_tuple) +inline int ha_ndbcluster::fetch_next(NdbScanOperation* cursor) +{ + DBUG_ENTER("fetch_next"); + int local_check; + NdbTransaction *trans= m_active_trans; + + if (m_lock_tuple) { /* Lock level m_lock.type either TL_WRITE_ALLOW_WRITE @@ -1425,24 +1747,22 @@ inline int ha_ndbcluster::next_result(byte *buf) LOCK WITH SHARE MODE) and row was not explictly unlocked with unlock_row() call */ - NdbConnection *trans= m_active_trans; + NdbConnection *con_trans= m_active_trans; NdbOperation *op; // Lock row DBUG_PRINT("info", ("Keeping lock on scanned row")); - if (!(op= m_active_cursor->lockTuple())) + if (!(op= m_active_cursor->lockCurrentTuple())) { + /* purecov: begin inspected */ m_lock_tuple= false; - ERR_RETURN(trans->getNdbError()); + ERR_RETURN(con_trans->getNdbError()); + /* purecov: end */ } m_ops_pending++; } m_lock_tuple= false; - - /* - If this an update or delete, call nextResult with false - to process any records already cached in NdbApi - */ + bool contact_ndb= m_lock.type < TL_WRITE_ALLOW_WRITE && m_lock.type != TL_READ_WITH_SHARED_LOCKS; do { @@ -1452,19 +1772,14 @@ inline int ha_ndbcluster::next_result(byte *buf) */ if (m_ops_pending && m_blobs_pending) { - if (execute_no_commit(this,trans) != 0) - DBUG_RETURN(ndb_err(trans)); + if (execute_no_commit(this,trans,false) != 0) + DBUG_RETURN(ndb_err(trans)); m_ops_pending= 0; m_blobs_pending= FALSE; } - check= cursor->nextResult(contact_ndb, m_force_send); - if (check == 0) + + if ((local_check= cursor->nextResult(contact_ndb, m_force_send)) == 0) { - // One more record found - DBUG_PRINT("info", ("One more record found")); - - unpack_record(buf); - table->status= 0; /* Explicitly lock tuple if "select for update" or "select lock in share mode" @@ 
-1474,45 +1789,86 @@ inline int ha_ndbcluster::next_result(byte *buf) m_lock.type == TL_READ_WITH_SHARED_LOCKS); DBUG_RETURN(0); } - else if (check == 1 || check == 2) + else if (local_check == 1 || local_check == 2) { // 1: No more records // 2: No more cached records - + /* - Before fetching more rows and releasing lock(s), - all pending update or delete operations should - be sent to NDB + Before fetching more rows and releasing lock(s), + all pending update or delete operations should + be sent to NDB */ - DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); + DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending)); if (m_ops_pending) { - // if (current_thd->transaction.on) - if (m_transaction_on) - { - if (execute_no_commit(this,trans) != 0) - DBUG_RETURN(ndb_err(trans)); - } - else - { - if (execute_commit(this,trans) != 0) - DBUG_RETURN(ndb_err(trans)); - int res= trans->restart(); - DBUG_ASSERT(res == 0); - } - m_ops_pending= 0; + if (m_transaction_on) + { + if (execute_no_commit(this,trans,false) != 0) + DBUG_RETURN(-1); + } + else + { + if (execute_commit(this,trans) != 0) + DBUG_RETURN(-1); + if (trans->restart() != 0) + { + DBUG_ASSERT(0); + DBUG_RETURN(-1); + } + } + m_ops_pending= 0; } - - contact_ndb= (check == 2); + contact_ndb= (local_check == 2); } - } while (check == 2); - table->status= STATUS_NOT_FOUND; - if (check == -1) - DBUG_RETURN(ndb_err(trans)); + else + { + DBUG_RETURN(-1); + } + } while (local_check == 2); + + DBUG_RETURN(1); +} + +/* + Get the next record of a started scan. Try to fetch + it locally from NdbApi cached records if possible, + otherwise ask NDB for more. + + NOTE + If this is a update/delete make sure to not contact + NDB before any pending ops have been sent to NDB. 
+ +*/ - // No more records - DBUG_PRINT("info", ("No more records")); - DBUG_RETURN(HA_ERR_END_OF_FILE); +inline int ha_ndbcluster::next_result(byte *buf) +{ + int res; + DBUG_ENTER("next_result"); + + if (!m_active_cursor) + DBUG_RETURN(HA_ERR_END_OF_FILE); + + if ((res= fetch_next(m_active_cursor)) == 0) + { + DBUG_PRINT("info", ("One more record found")); + + unpack_record(buf); + table->status= 0; + DBUG_RETURN(0); + } + else if (res == 1) + { + // No more records + table->status= STATUS_NOT_FOUND; + + DBUG_PRINT("info", ("No more records")); + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + else + { + DBUG_RETURN(ndb_err(m_active_trans)); + } } /* @@ -1520,7 +1876,8 @@ inline int ha_ndbcluster::next_result(byte *buf) */ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, - const key_range *keys[2]) + const key_range *keys[2], + uint range_no) { const KEY *const key_info= table->key_info + active_index; const uint key_parts= key_info->key_parts; @@ -1553,7 +1910,9 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, { KEY_PART_INFO *key_part= &key_info->key_part[i]; Field *field= key_part->field; +#ifndef DBUG_OFF uint part_len= key_part->length; +#endif uint part_store_len= key_part->store_length; // Info about each key part struct part_st { @@ -1568,7 +1927,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, for (j= 0; j <= 1; j++) { - struct part_st &p = part[j]; + struct part_st &p= part[j]; p.key= NULL; p.bound_type= -1; if (tot_len < key_tot_len[j]) @@ -1587,6 +1946,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, case HA_READ_KEY_EXACT: p.bound_type= NdbIndexScanOperation::BoundEQ; break; + // ascending case HA_READ_KEY_OR_NEXT: p.bound_type= NdbIndexScanOperation::BoundLE; break; @@ -1596,6 +1956,19 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, else p.bound_type= NdbIndexScanOperation::BoundLT; break; + // descending + case HA_READ_PREFIX_LAST: // weird + p.bound_type= NdbIndexScanOperation::BoundEQ; + break; + 
case HA_READ_PREFIX_LAST_OR_PREV: // weird + p.bound_type= NdbIndexScanOperation::BoundGE; + break; + case HA_READ_BEFORE_KEY: + if (! p.part_last) + p.bound_type= NdbIndexScanOperation::BoundGE; + else + p.bound_type= NdbIndexScanOperation::BoundGT; + break; default: break; } @@ -1603,6 +1976,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, if (j == 1) { switch (p.key->flag) { + // ascending case HA_READ_BEFORE_KEY: if (! p.part_last) p.bound_type= NdbIndexScanOperation::BoundGE; @@ -1614,15 +1988,16 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, break; default: break; + // descending strangely sets no end key } } if (p.bound_type == -1) { DBUG_PRINT("error", ("key %d unknown flag %d", j, p.key->flag)); - DBUG_ASSERT(false); + DBUG_ASSERT(FALSE); // Stop setting bounds but continue with what we have - DBUG_RETURN(0); + DBUG_RETURN(op->end_of_bound(range_no)); } } } @@ -1647,7 +2022,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, for (j= 0; j <= 1; j++) { - struct part_st &p = part[j]; + struct part_st &p= part[j]; // Set bound if not done with this key if (p.key != NULL) { @@ -1657,79 +2032,37 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op, // Set bound if not cancelled via type -1 if (p.bound_type != -1) - { - if (op->setBound(i, p.bound_type, p.bound_ptr)) + { + const char* ptr= p.bound_ptr; + char buf[256]; + shrink_varchar(field, ptr, buf); + if (op->setBound(i, p.bound_type, ptr)) ERR_RETURN(op->getNdbError()); - } + } } } tot_len+= part_store_len; } - DBUG_RETURN(0); + DBUG_RETURN(op->end_of_bound(range_no)); } -inline -int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) -{ - uint i; - THD *thd= current_thd; - NdbConnection *trans= m_active_trans; - - DBUG_ENTER("define_read_attrs"); - - // Define attributes to read - for (i= 0; i < table->fields; i++) - { - Field *field= table->field[i]; - if ((thd->query_id == field->query_id) || - (field->flags & PRI_KEY_FLAG) || - m_retrieve_all_fields) - 
{ - if (get_ndb_value(op, field, i, buf)) - ERR_RETURN(op->getNdbError()); - } - else - { - m_value[i].ptr= NULL; - } - } - - if (table->primary_key == MAX_KEY) - { - DBUG_PRINT("info", ("Getting hidden key")); - // Scanning table with no primary key - int hidden_no= table->fields; -#ifndef DBUG_OFF - const NDBTAB *tab= (const NDBTAB *) m_table; - if (!tab->getColumn(hidden_no)) - DBUG_RETURN(1); -#endif - if (get_ndb_value(op, NULL, hidden_no, NULL)) - ERR_RETURN(op->getNdbError()); - } - - if (execute_no_commit(this,trans) != 0) - DBUG_RETURN(ndb_err(trans)); - DBUG_PRINT("exit", ("Scan started successfully")); - DBUG_RETURN(next_result(buf)); -} - /* Start ordered index scan in NDB */ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, - const key_range *end_key, - bool sorted, byte* buf) + const key_range *end_key, + bool sorted, bool descending, byte* buf) { + int res; bool restart; - NdbConnection *trans= m_active_trans; - NdbResultSet *cursor; + NdbTransaction *trans= m_active_trans; NdbIndexScanOperation *op; - DBUG_ENTER("ordered_index_scan"); - DBUG_PRINT("enter", ("index: %u, sorted: %d", active_index, sorted)); + DBUG_ENTER("ha_ndbcluster::ordered_index_scan"); + DBUG_PRINT("enter", ("index: %u, sorted: %d, descending: %d", + active_index, sorted, descending)); DBUG_PRINT("enter", ("Starting new ordered scan on %s", m_tabname)); // Check that sorted seems to be initialised @@ -1737,131 +2070,83 @@ int ha_ndbcluster::ordered_index_scan(const key_range *start_key, if (m_active_cursor == 0) { - restart= false; + restart= FALSE; NdbOperation::LockMode lm= (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); bool need_pk = (lm == NdbOperation::LM_Read); if (!(op= trans->getNdbIndexScanOperation((NDBINDEX *) - m_index[active_index].index, - (const NDBTAB *) m_table)) || - !(cursor= op->readTuples(lm, 0, parallelism, sorted, need_pk))) + m_index[active_index].index, + (const NDBTAB *) m_table)) || + op->readTuples(lm, 0, parallelism, sorted, 
descending, false, need_pk)) ERR_RETURN(trans->getNdbError()); - m_active_cursor= cursor; + m_active_cursor= op; } else { - restart= true; - op= (NdbIndexScanOperation*)m_active_cursor->getOperation(); + restart= TRUE; + op= (NdbIndexScanOperation*)m_active_cursor; DBUG_ASSERT(op->getSorted() == sorted); DBUG_ASSERT(op->getLockMode() == - (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); - if(op->reset_bounds(m_force_send)) + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); + if (op->reset_bounds(m_force_send)) DBUG_RETURN(ndb_err(m_active_trans)); } - + { const key_range *keys[2]= { start_key, end_key }; - int ret= set_bounds(op, keys); - if (ret) - DBUG_RETURN(ret); + res= set_bounds(op, keys); + if (res) + DBUG_RETURN(res); } - if (!restart) - { - DBUG_RETURN(define_read_attrs(buf, op)); - } - else + if (!restart && generate_scan_filter(m_cond_stack, op)) + DBUG_RETURN(ndb_err(trans)); + + if (!restart && (res= define_read_attrs(buf, op))) { - if (execute_no_commit(this,trans) != 0) - DBUG_RETURN(ndb_err(trans)); - - DBUG_RETURN(next_result(buf)); + DBUG_RETURN(res); } -} -/* - Start a filtered scan in NDB. - - NOTE - This function is here as an example of how to start a - filtered scan. It should be possible to replace full_table_scan - with this function and make a best effort attempt - at filtering out the irrelevant data by converting the "items" - into interpreted instructions. - This would speed up table scans where there is a limiting WHERE clause - that doesn't match any index in the table. 
+ if (execute_no_commit(this,trans,false) != 0) + DBUG_RETURN(ndb_err(trans)); + + DBUG_RETURN(next_result(buf)); +} +/* + Unique index scan in NDB (full table scan with scan filter) */ -int ha_ndbcluster::filtered_scan(const byte *key, uint key_len, - byte *buf, - enum ha_rkey_function find_flag) -{ - NdbConnection *trans= m_active_trans; - NdbResultSet *cursor; +int ha_ndbcluster::unique_index_scan(const KEY* key_info, + const byte *key, + uint key_len, + byte *buf) +{ + int res; NdbScanOperation *op; + NdbTransaction *trans= m_active_trans; - DBUG_ENTER("filtered_scan"); - DBUG_PRINT("enter", ("key_len: %u, index: %u", - key_len, active_index)); - DBUG_DUMP("key", (char*)key, key_len); - DBUG_PRINT("info", ("Starting a new filtered scan on %s", - m_tabname)); + DBUG_ENTER("unique_index_scan"); + DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname)); NdbOperation::LockMode lm= (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); - if (!(op= trans->getNdbScanOperation((const NDBTAB *) m_table)) || - !(cursor= op->readTuples(lm, 0, parallelism))) + bool need_pk = (lm == NdbOperation::LM_Read); + if (!(op=trans->getNdbScanOperation((const NDBTAB *) m_table)) || + op->readTuples(lm, + (need_pk)?NdbScanOperation::SF_KeyInfo:0, + parallelism)) ERR_RETURN(trans->getNdbError()); - m_active_cursor= cursor; - - { - // Start scan filter - NdbScanFilter sf(op); - sf.begin(); - - // Set filter using the supplied key data - byte *key_ptr= (byte *) key; - uint tot_len= 0; - KEY* key_info= table->key_info + active_index; - for (uint k= 0; k < key_info->key_parts; k++) - { - KEY_PART_INFO* key_part= key_info->key_part+k; - Field* field= key_part->field; - uint ndb_fieldnr= key_part->fieldnr-1; - DBUG_PRINT("key_part", ("fieldnr: %d", ndb_fieldnr)); - //const NDBCOL *col= ((const NDBTAB *) m_table)->getColumn(ndb_fieldnr); - uint32 field_len= field->pack_length(); - DBUG_DUMP("key", (char*)key, field_len); - - DBUG_PRINT("info", ("Column %s, type: %d, len: %d", - 
field->field_name, field->real_type(), field_len)); - - // Define scan filter - if (field->real_type() == MYSQL_TYPE_STRING) - sf.eq(ndb_fieldnr, key_ptr, field_len); - else - { - if (field_len == 8) - sf.eq(ndb_fieldnr, (Uint64)*key_ptr); - else if (field_len <= 4) - sf.eq(ndb_fieldnr, (Uint32)*key_ptr); - else - DBUG_RETURN(1); - } - - key_ptr += field_len; - tot_len += field_len; - - if (tot_len >= key_len) - break; - } - // End scan filter - sf.end(); - } - - DBUG_RETURN(define_read_attrs(buf, op)); -} + m_active_cursor= op; + if (generate_scan_filter_from_key(op, key_info, key, key_len, buf)) + DBUG_RETURN(ndb_err(trans)); + if ((res= define_read_attrs(buf, op))) + DBUG_RETURN(res); + if (execute_no_commit(this,trans,false) != 0) + DBUG_RETURN(ndb_err(trans)); + DBUG_PRINT("exit", ("Scan started successfully")); + DBUG_RETURN(next_result(buf)); +} /* Start full table scan in NDB @@ -1869,10 +2154,9 @@ int ha_ndbcluster::filtered_scan(const byte *key, uint key_len, int ha_ndbcluster::full_table_scan(byte *buf) { - uint i; - NdbResultSet *cursor; + int res; NdbScanOperation *op; - NdbConnection *trans= m_active_trans; + NdbTransaction *trans= m_active_trans; DBUG_ENTER("full_table_scan"); DBUG_PRINT("enter", ("Starting new scan on %s", m_tabname)); @@ -1881,10 +2165,20 @@ int ha_ndbcluster::full_table_scan(byte *buf) (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); bool need_pk = (lm == NdbOperation::LM_Read); if (!(op=trans->getNdbScanOperation((const NDBTAB *) m_table)) || - !(cursor= op->readTuples(lm, 0, parallelism, need_pk))) + op->readTuples(lm, + (need_pk)?NdbScanOperation::SF_KeyInfo:0, + parallelism)) ERR_RETURN(trans->getNdbError()); - m_active_cursor= cursor; - DBUG_RETURN(define_read_attrs(buf, op)); + m_active_cursor= op; + if (generate_scan_filter(m_cond_stack, op)) + DBUG_RETURN(ndb_err(trans)); + if ((res= define_read_attrs(buf, op))) + DBUG_RETURN(res); + + if (execute_no_commit(this,trans,false) != 0) + DBUG_RETURN(ndb_err(trans)); + 
DBUG_PRINT("exit", ("Scan started successfully")); + DBUG_RETURN(next_result(buf)); } /* @@ -1894,33 +2188,54 @@ int ha_ndbcluster::write_row(byte *record) { bool has_auto_increment; uint i; - NdbConnection *trans= m_active_trans; + NdbTransaction *trans= m_active_trans; NdbOperation *op; int res; + THD *thd= table->in_use; DBUG_ENTER("write_row"); - if(m_ignore_dup_key && table->primary_key != MAX_KEY) + has_auto_increment= (table->next_number_field && record == table->record[0]); + if (table->s->primary_key != MAX_KEY) + { + /* + * Increase any auto_incremented primary key + */ + if (has_auto_increment) + { + int error; + + m_skip_auto_increment= FALSE; + if ((error= update_auto_increment())) + DBUG_RETURN(error); + /* Ensure that handler is always called for auto_increment values */ + thd->next_insert_id= 0; + m_skip_auto_increment= !auto_increment_column_changed; + } + } + + /* + * If IGNORE the ignore constraint violations on primary and unique keys + */ + if (!m_use_write && m_ignore_dup_key) { /* compare if expression with that in start_bulk_insert() start_bulk_insert will set parameters to ensure that each write_row is committed individually */ - int peek_res= peek_row(record); + int peek_res= peek_indexed_rows(record, true); if (!peek_res) { - m_dupkey= table->primary_key; DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); } if (peek_res != HA_ERR_KEY_NOT_FOUND) DBUG_RETURN(peek_res); } - - statistic_increment(ha_write_count,&LOCK_status); + + statistic_increment(thd->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); - has_auto_increment= (table->next_number_field && record == table->record[0]); if (!(op= trans->getNdbOperation((const NDBTAB *) m_table))) ERR_RETURN(trans->getNdbError()); @@ -1929,50 +2244,44 @@ int ha_ndbcluster::write_row(byte *record) if (res != 0) ERR_RETURN(trans->getNdbError()); - if (table->primary_key == MAX_KEY) + if (table->s->primary_key == MAX_KEY) { 
// Table has hidden primary key Ndb *ndb= get_ndb(); - Uint64 auto_value= NDB_FAILED_AUTO_INCREMENT; + int ret; + Uint64 auto_value; uint retries= NDB_AUTO_INCREMENT_RETRIES; do { - auto_value= ndb->getAutoIncrementValue((const NDBTAB *) m_table); - } while (auto_value == NDB_FAILED_AUTO_INCREMENT && + ret= ndb->getAutoIncrementValue((const NDBTAB *) m_table, auto_value, 1); + } while (ret == -1 && --retries && ndb->getNdbError().status == NdbError::TemporaryError); - if (auto_value == NDB_FAILED_AUTO_INCREMENT) + if (ret == -1) ERR_RETURN(ndb->getNdbError()); - if (set_hidden_key(op, table->fields, (const byte*)&auto_value)) + if (set_hidden_key(op, table->s->fields, (const byte*)&auto_value)) ERR_RETURN(op->getNdbError()); } else { - int res; - - if (has_auto_increment) - { - m_skip_auto_increment= FALSE; - update_auto_increment(); - m_skip_auto_increment= !auto_increment_column_changed; - } - if ((res= set_primary_key_from_record(op, record))) return res; } // Set non-key attribute(s) bool set_blob_value= FALSE; - for (i= 0; i < table->fields; i++) + for (i= 0; i < table->s->fields; i++) { Field *field= table->field[i]; if (!(field->flags & PRI_KEY_FLAG) && - set_ndb_value(op, field, i, &set_blob_value)) + set_ndb_value(op, field, i, &set_blob_value)) { m_skip_auto_increment= TRUE; ERR_RETURN(op->getNdbError()); } } + m_rows_changed++; + /* Execute write operation NOTE When doing inserts with many values in @@ -1988,45 +2297,49 @@ int ha_ndbcluster::write_row(byte *record) m_primary_key_update || set_blob_value) { - THD *thd= current_thd; // Send rows to NDB DBUG_PRINT("info", ("Sending inserts to NDB, "\ - "rows_inserted:%d, bulk_insert_rows: %d", - (int)m_rows_inserted, (int)m_bulk_insert_rows)); + "rows_inserted:%d, bulk_insert_rows: %d", + (int)m_rows_inserted, (int)m_bulk_insert_rows)); m_bulk_insert_not_flushed= FALSE; - // if (thd->transaction.on) if (m_transaction_on) { - if (execute_no_commit(this,trans) != 0) + if (execute_no_commit(this,trans,false) 
!= 0) { - m_skip_auto_increment= TRUE; - no_uncommitted_rows_execute_failure(); - DBUG_RETURN(ndb_err(trans)); + m_skip_auto_increment= TRUE; + no_uncommitted_rows_execute_failure(); + DBUG_RETURN(ndb_err(trans)); } } else { if (execute_commit(this,trans) != 0) { - m_skip_auto_increment= TRUE; - no_uncommitted_rows_execute_failure(); - DBUG_RETURN(ndb_err(trans)); + m_skip_auto_increment= TRUE; + no_uncommitted_rows_execute_failure(); + DBUG_RETURN(ndb_err(trans)); + } + if (trans->restart() != 0) + { + DBUG_ASSERT(0); + DBUG_RETURN(-1); } - int res= trans->restart(); - DBUG_ASSERT(res == 0); } } if ((has_auto_increment) && (m_skip_auto_increment)) { Ndb *ndb= get_ndb(); Uint64 next_val= (Uint64) table->next_number_field->val_int() + 1; +#ifndef DBUG_OFF + char buff[22]; DBUG_PRINT("info", - ("Trying to set next auto increment value to %lu", - (ulong) next_val)); - if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE)) - DBUG_PRINT("info", - ("Setting next auto increment value to %u", next_val)); + ("Trying to set next auto increment value to %s", + llstr(next_val, buff))); +#endif + if (ndb->setAutoIncrementValue((const NDBTAB *) m_table, next_val, TRUE) + == -1) + ERR_RETURN(ndb->getNdbError()); } m_skip_auto_increment= TRUE; @@ -2037,7 +2350,7 @@ int ha_ndbcluster::write_row(byte *record) /* Compare if a key in a row has changed */ int ha_ndbcluster::key_cmp(uint keynr, const byte * old_row, - const byte * new_row) + const byte * new_row) { KEY_PART_INFO *key_part=table->key_info[keynr].key_part; KEY_PART_INFO *end=key_part+table->key_info[keynr].key_parts; @@ -2047,22 +2360,22 @@ int ha_ndbcluster::key_cmp(uint keynr, const byte * old_row, if (key_part->null_bit) { if ((old_row[key_part->null_offset] & key_part->null_bit) != - (new_row[key_part->null_offset] & key_part->null_bit)) - return 1; + (new_row[key_part->null_offset] & key_part->null_bit)) + return 1; } - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH)) + if 
(key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) { if (key_part->field->cmp_binary((char*) (old_row + key_part->offset), - (char*) (new_row + key_part->offset), - (ulong) key_part->length)) - return 1; + (char*) (new_row + key_part->offset), + (ulong) key_part->length)) + return 1; } else { if (memcmp(old_row+key_part->offset, new_row+key_part->offset, - key_part->length)) - return 1; + key_part->length)) + return 1; } } return 0; @@ -2075,13 +2388,31 @@ int ha_ndbcluster::key_cmp(uint keynr, const byte * old_row, int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) { THD *thd= current_thd; - NdbConnection *trans= m_active_trans; - NdbResultSet* cursor= m_active_cursor; + NdbTransaction *trans= m_active_trans; + NdbScanOperation* cursor= m_active_cursor; NdbOperation *op; uint i; + bool pk_update= (table->s->primary_key != MAX_KEY && + key_cmp(table->s->primary_key, old_data, new_data)); DBUG_ENTER("update_row"); - statistic_increment(ha_update_count,&LOCK_status); + /* + * If IGNORE the ignore constraint violations on primary and unique keys, + * but check that it is not part of INSERT ... 
ON DUPLICATE KEY UPDATE + */ + if (m_ignore_dup_key && thd->lex->sql_command == SQLCOM_UPDATE) + { + int peek_res= peek_indexed_rows(new_data, pk_update); + + if (!peek_res) + { + DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); + } + if (peek_res != HA_ERR_KEY_NOT_FOUND) + DBUG_RETURN(peek_res); + } + + statistic_increment(thd->status_var.ha_update_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) { table->timestamp_field->set_time(); @@ -2090,8 +2421,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) } /* Check for update of primary key for special handling */ - if ((table->primary_key != MAX_KEY) && - (key_cmp(table->primary_key, old_data, new_data))) + if (pk_update) { int read_res, insert_res, delete_res, undo_res; @@ -2148,7 +2478,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) the active record in cursor */ DBUG_PRINT("info", ("Calling updateTuple on cursor")); - if (!(op= cursor->updateTuple())) + if (!(op= cursor->updateCurrentTuple())) ERR_RETURN(trans->getNdbError()); m_lock_tuple= false; m_ops_pending++; @@ -2158,10 +2488,10 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) else { if (!(op= trans->getNdbOperation((const NDBTAB *) m_table)) || - op->updateTuple() != 0) + op->updateTuple() != 0) ERR_RETURN(trans->getNdbError()); - if (table->primary_key == MAX_KEY) + if (table->s->primary_key == MAX_KEY) { // This table has no primary key, use "hidden" primary key DBUG_PRINT("info", ("Using hidden key")); @@ -2170,29 +2500,31 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) // read into m_ref DBUG_DUMP("key", m_ref, NDB_HIDDEN_PRIMARY_KEY_LENGTH); - if (set_hidden_key(op, table->fields, m_ref)) - ERR_RETURN(op->getNdbError()); + if (set_hidden_key(op, table->s->fields, m_ref)) + ERR_RETURN(op->getNdbError()); } else { int res; if ((res= set_primary_key_from_record(op, old_data))) - DBUG_RETURN(res); + DBUG_RETURN(res); } } + m_rows_changed++; + 
// Set non-key attribute(s) - for (i= 0; i < table->fields; i++) + for (i= 0; i < table->s->fields; i++) { Field *field= table->field[i]; if (((thd->query_id == field->query_id) || m_retrieve_all_fields) && (!(field->flags & PRI_KEY_FLAG)) && - set_ndb_value(op, field, i)) + set_ndb_value(op, field, i)) ERR_RETURN(op->getNdbError()); } // Execute update operation - if (!cursor && execute_no_commit(this,trans) != 0) { + if (!cursor && execute_no_commit(this,trans,false) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -2207,12 +2539,14 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) int ha_ndbcluster::delete_row(const byte *record) { - NdbConnection *trans= m_active_trans; - NdbResultSet* cursor= m_active_cursor; + THD *thd= current_thd; + NdbTransaction *trans= m_active_trans; + NdbScanOperation* cursor= m_active_cursor; NdbOperation *op; DBUG_ENTER("delete_row"); - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(thd->status_var.ha_delete_count,&LOCK_status); + m_rows_changed++; if (cursor) { @@ -2224,7 +2558,7 @@ int ha_ndbcluster::delete_row(const byte *record) the active record in cursor */ DBUG_PRINT("info", ("Calling deleteTuple on cursor")); - if (cursor->deleteTuple() != 0) + if (cursor->deleteCurrentTuple() != 0) ERR_RETURN(trans->getNdbError()); m_lock_tuple= false; m_ops_pending++; @@ -2239,18 +2573,18 @@ int ha_ndbcluster::delete_row(const byte *record) { if (!(op=trans->getNdbOperation((const NDBTAB *) m_table)) || - op->deleteTuple() != 0) + op->deleteTuple() != 0) ERR_RETURN(trans->getNdbError()); no_uncommitted_rows_update(-1); - if (table->primary_key == MAX_KEY) + if (table->s->primary_key == MAX_KEY) { // This table has no primary key, use "hidden" primary key DBUG_PRINT("info", ("Using hidden key")); - if (set_hidden_key(op, table->fields, m_ref)) - ERR_RETURN(op->getNdbError()); + if (set_hidden_key(op, table->s->fields, m_ref)) + ERR_RETURN(op->getNdbError()); } 
else { @@ -2259,9 +2593,9 @@ int ha_ndbcluster::delete_row(const byte *record) return res; } } - + // Execute delete operation - if (execute_no_commit(this,trans) != 0) { + if (execute_no_commit(this,trans,false) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } @@ -2273,7 +2607,7 @@ int ha_ndbcluster::delete_row(const byte *record) SYNOPSIS unpack_record() - buf Buffer to store read row + buf Buffer to store read row NOTE The data for each row is read directly into the @@ -2288,10 +2622,12 @@ void ha_ndbcluster::unpack_record(byte* buf) Field **field, **end; NdbValue *value= m_value; DBUG_ENTER("unpack_record"); + + end= table->field + table->s->fields; // Set null flag(s) - bzero(buf, table->null_bytes); - for (field= table->field, end= field+table->fields; + bzero(buf, table->s->null_bytes); + for (field= table->field; field < end; field++, value++) { @@ -2301,34 +2637,57 @@ void ha_ndbcluster::unpack_record(byte* buf) { if ((*value).rec->isNULL()) (*field)->set_null(row_offset); + else if ((*field)->type() == MYSQL_TYPE_BIT) + { + uint pack_len= (*field)->pack_length(); + if (pack_len < 5) + { + DBUG_PRINT("info", ("bit field H'%.8X", + (*value).rec->u_32_value())); + ((Field_bit *) *field)->store((longlong) + (*value).rec->u_32_value(), + FALSE); + } + else + { + DBUG_PRINT("info", ("bit field H'%.8X%.8X", + *(Uint32 *)(*value).rec->aRef(), + *((Uint32 *)(*value).rec->aRef()+1))); + ((Field_bit *) *field)->store((longlong) + (*value).rec->u_64_value(), TRUE); + } + } } else { NdbBlob* ndb_blob= (*value).blob; bool isNull= TRUE; - int ret= ndb_blob->getNull(isNull); +#ifndef DBUG_OFF + int ret= +#endif + ndb_blob->getNull(isNull); DBUG_ASSERT(ret == 0); if (isNull) - (*field)->set_null(row_offset); + (*field)->set_null(row_offset); } } } - + #ifndef DBUG_OFF // Read and print all values that was fetched - if (table->primary_key == MAX_KEY) + if (table->s->primary_key == MAX_KEY) { // Table with hidden primary key - int hidden_no= 
table->fields; + int hidden_no= table->s->fields; char buff[22]; const NDBTAB *tab= (const NDBTAB *) m_table; const NDBCOL *hidden_col= tab->getColumn(hidden_no); - NdbRecAttr* rec= m_value[hidden_no].rec; + const NdbRecAttr* rec= m_value[hidden_no].rec; DBUG_ASSERT(rec); DBUG_PRINT("hidden", ("%d: %s \"%s\"", hidden_no, - hidden_col->getName(), + hidden_col->getName(), llstr(rec->u_64_value(), buff))); - } + } print_results(); #endif DBUG_VOID_RETURN; @@ -2340,167 +2699,57 @@ void ha_ndbcluster::unpack_record(byte* buf) void ha_ndbcluster::print_results() { - const NDBTAB *tab= (const NDBTAB*) m_table; DBUG_ENTER("print_results"); #ifndef DBUG_OFF if (!_db_on_) DBUG_VOID_RETURN; - - for (uint f=0; f<table->fields;f++) + + char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH]; + String type(buf_type, sizeof(buf_type), &my_charset_bin); + String val(buf_val, sizeof(buf_val), &my_charset_bin); + for (uint f= 0; f < table->s->fields; f++) { + /* Use DBUG_PRINT since DBUG_FILE cannot be filtered out */ + char buf[2000]; Field *field; - const NDBCOL *col; + void* ptr; NdbValue value; + buf[0]= 0; + field= table->field[f]; if (!(value= m_value[f]).ptr) { - fprintf(DBUG_FILE, "Field %d was not read\n", f); - continue; + strmov(buf, "not read"); + goto print_value; } - field= table->field[f]; - DBUG_DUMP("field->ptr", (char*)field->ptr, field->pack_length()); - col= tab->getColumn(f); - fprintf(DBUG_FILE, "%d: %s\t", f, col->getName()); - NdbBlob *ndb_blob= NULL; + ptr= field->ptr; + if (! 
(field->flags & BLOB_FLAG)) { if (value.rec->isNULL()) { - fprintf(DBUG_FILE, "NULL\n"); - continue; + strmov(buf, "NULL"); + goto print_value; } + type.length(0); + val.length(0); + field->sql_type(type); + field->val_str(&val); + my_snprintf(buf, sizeof(buf), "%s %s", type.c_ptr(), val.c_ptr()); } else { - ndb_blob= value.blob; + NdbBlob *ndb_blob= value.blob; bool isNull= TRUE; ndb_blob->getNull(isNull); - if (isNull) { - fprintf(DBUG_FILE, "NULL\n"); - continue; - } + if (isNull) + strmov(buf, "NULL"); } - switch (col->getType()) { - case NdbDictionary::Column::Tinyint: { - char value= *field->ptr; - fprintf(DBUG_FILE, "Tinyint\t%d", value); - break; - } - case NdbDictionary::Column::Tinyunsigned: { - unsigned char value= *field->ptr; - fprintf(DBUG_FILE, "Tinyunsigned\t%u", value); - break; - } - case NdbDictionary::Column::Smallint: { - short value= *field->ptr; - fprintf(DBUG_FILE, "Smallint\t%d", value); - break; - } - case NdbDictionary::Column::Smallunsigned: { - unsigned short value= *field->ptr; - fprintf(DBUG_FILE, "Smallunsigned\t%u", value); - break; - } - case NdbDictionary::Column::Mediumint: { - byte value[3]; - memcpy(value, field->ptr, 3); - fprintf(DBUG_FILE, "Mediumint\t%d,%d,%d", value[0], value[1], value[2]); - break; - } - case NdbDictionary::Column::Mediumunsigned: { - byte value[3]; - memcpy(value, field->ptr, 3); - fprintf(DBUG_FILE, "Mediumunsigned\t%u,%u,%u", value[0], value[1], value[2]); - break; - } - case NdbDictionary::Column::Int: { - fprintf(DBUG_FILE, "Int\t%lld", field->val_int()); - break; - } - case NdbDictionary::Column::Unsigned: { - Uint32 value= (Uint32) *field->ptr; - fprintf(DBUG_FILE, "Unsigned\t%u", value); - break; - } - case NdbDictionary::Column::Bigint: { - Int64 value= (Int64) *field->ptr; - fprintf(DBUG_FILE, "Bigint\t%lld", value); - break; - } - case NdbDictionary::Column::Bigunsigned: { - Uint64 value= (Uint64) *field->ptr; - fprintf(DBUG_FILE, "Bigunsigned\t%llu", value); - break; - } - case 
NdbDictionary::Column::Float: { - float value= (float) *field->ptr; - fprintf(DBUG_FILE, "Float\t%f", value); - break; - } - case NdbDictionary::Column::Double: { - double value= (double) *field->ptr; - fprintf(DBUG_FILE, "Double\t%f", value); - break; - } - case NdbDictionary::Column::Olddecimal: { - char *value= field->ptr; - fprintf(DBUG_FILE, "Olddecimal\t'%-*s'", field->pack_length(), value); - break; - } - case NdbDictionary::Column::Olddecimalunsigned: { - char *value= field->ptr; - fprintf(DBUG_FILE, "Olddecimalunsigned\t'%-*s'", field->pack_length(), value); - break; - } - case NdbDictionary::Column::Char:{ - const char *value= (char *) field->ptr; - fprintf(DBUG_FILE, "Char\t'%.*s'", field->pack_length(), value); - break; - } - case NdbDictionary::Column::Varchar: - case NdbDictionary::Column::Binary: - case NdbDictionary::Column::Varbinary: { - const char *value= (char *) field->ptr; - fprintf(DBUG_FILE, "Var\t'%.*s'", field->pack_length(), value); - break; - } - case NdbDictionary::Column::Datetime: { - Uint64 value= (Uint64) *field->ptr; - fprintf(DBUG_FILE, "Datetime\t%llu", value); - break; - } - case NdbDictionary::Column::Date: { - Uint64 value= (Uint64) *field->ptr; - fprintf(DBUG_FILE, "Date\t%llu", value); - break; - } - case NdbDictionary::Column::Time: { - Uint64 value= (Uint64) *field->ptr; - fprintf(DBUG_FILE, "Time\t%llu", value); - break; - } - case NdbDictionary::Column::Blob: { - Uint64 len= 0; - ndb_blob->getLength(len); - fprintf(DBUG_FILE, "Blob\t[len=%u]", (unsigned)len); - break; - } - case NdbDictionary::Column::Text: { - Uint64 len= 0; - ndb_blob->getLength(len); - fprintf(DBUG_FILE, "Text\t[len=%u]", (unsigned)len); - break; - } - case NdbDictionary::Column::Undefined: - default: - fprintf(DBUG_FILE, "Unknown type: %d", col->getType()); - break; - } - fprintf(DBUG_FILE, "\n"); - +print_value: + DBUG_PRINT("value", ("%u,%s: %s", f, field->field_name, buf)); } #endif DBUG_VOID_RETURN; @@ -2509,9 +2758,9 @@ void 
ha_ndbcluster::print_results() int ha_ndbcluster::index_init(uint index) { - DBUG_ENTER("index_init"); + DBUG_ENTER("ha_ndbcluster::index_init"); DBUG_PRINT("enter", ("index: %u", index)); - /* + /* Locks are are explicitly released in scan unless m_lock.type == TL_READ_HIGH_PRIORITY and no sub-sequent call to unlock_row() @@ -2523,7 +2772,7 @@ int ha_ndbcluster::index_init(uint index) int ha_ndbcluster::index_end() { - DBUG_ENTER("index_end"); + DBUG_ENTER("ha_ndbcluster::index_end"); DBUG_RETURN(close_scan()); } @@ -2535,14 +2784,14 @@ int check_null_in_key(const KEY* key_info, const byte *key, uint key_len) { KEY_PART_INFO *curr_part, *end_part; - const byte* end_ptr = key + key_len; + const byte* end_ptr= key + key_len; curr_part= key_info->key_part; end_part= curr_part + key_info->key_parts; for (; curr_part != end_part && key < end_ptr; curr_part++) { - if(curr_part->null_bit && *key) + if (curr_part->null_bit && *key) return 1; key += curr_part->store_length; @@ -2551,23 +2800,23 @@ check_null_in_key(const KEY* key_info, const byte *key, uint key_len) } int ha_ndbcluster::index_read(byte *buf, - const byte *key, uint key_len, - enum ha_rkey_function find_flag) + const byte *key, uint key_len, + enum ha_rkey_function find_flag) { - DBUG_ENTER("index_read"); + DBUG_ENTER("ha_ndbcluster::index_read"); DBUG_PRINT("enter", ("active_index: %u, key_len: %u, find_flag: %d", active_index, key_len, find_flag)); int error; - ndb_index_type type = get_index_type(active_index); - const KEY* key_info = table->key_info+active_index; + ndb_index_type type= get_index_type(active_index); + const KEY* key_info= table->key_info+active_index; switch (type){ case PRIMARY_KEY_ORDERED_INDEX: case PRIMARY_KEY_INDEX: if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len) { - if(m_active_cursor && (error= close_scan())) - DBUG_RETURN(error); + if (m_active_cursor && (error= close_scan())) + DBUG_RETURN(error); DBUG_RETURN(pk_read(key, key_len, buf)); } else if (type == 
PRIMARY_KEY_INDEX) @@ -2578,15 +2827,15 @@ int ha_ndbcluster::index_read(byte *buf, case UNIQUE_ORDERED_INDEX: case UNIQUE_INDEX: if (find_flag == HA_READ_KEY_EXACT && key_info->key_length == key_len && - !check_null_in_key(key_info, key, key_len)) + !check_null_in_key(key_info, key, key_len)) { - if(m_active_cursor && (error= close_scan())) - DBUG_RETURN(error); + if (m_active_cursor && (error= close_scan())) + DBUG_RETURN(error); DBUG_RETURN(unique_index_read(key, key_len, buf)); } else if (type == UNIQUE_INDEX) { - DBUG_RETURN(1); + DBUG_RETURN(unique_index_scan(key_info, key, key_len, buf)); } break; case ORDERED_INDEX: @@ -2599,20 +2848,31 @@ int ha_ndbcluster::index_read(byte *buf, } key_range start_key; - start_key.key = key; - start_key.length = key_len; - start_key.flag = find_flag; - error= ordered_index_scan(&start_key, 0, TRUE, buf); + start_key.key= key; + start_key.length= key_len; + start_key.flag= find_flag; + bool descending= FALSE; + switch (find_flag) { + case HA_READ_KEY_OR_PREV: + case HA_READ_BEFORE_KEY: + case HA_READ_PREFIX_LAST: + case HA_READ_PREFIX_LAST_OR_PREV: + descending= TRUE; + break; + default: + break; + } + error= ordered_index_scan(&start_key, 0, TRUE, descending, buf); DBUG_RETURN(error == HA_ERR_END_OF_FILE ? 
HA_ERR_KEY_NOT_FOUND : error); } int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, - const byte *key, uint key_len, - enum ha_rkey_function find_flag) + const byte *key, uint key_len, + enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); - DBUG_ENTER("index_read_idx"); + statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); + DBUG_ENTER("ha_ndbcluster::index_read_idx"); DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len)); index_init(index_no); DBUG_RETURN(index_read(buf, key, key_len, find_flag)); @@ -2621,72 +2881,69 @@ int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, int ha_ndbcluster::index_next(byte *buf) { - DBUG_ENTER("index_next"); - - int error= 1; - statistic_increment(ha_read_next_count,&LOCK_status); + DBUG_ENTER("ha_ndbcluster::index_next"); + statistic_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); DBUG_RETURN(next_result(buf)); } int ha_ndbcluster::index_prev(byte *buf) { - DBUG_ENTER("index_prev"); - statistic_increment(ha_read_prev_count,&LOCK_status); - DBUG_RETURN(1); + DBUG_ENTER("ha_ndbcluster::index_prev"); + statistic_increment(current_thd->status_var.ha_read_prev_count, + &LOCK_status); + DBUG_RETURN(next_result(buf)); } int ha_ndbcluster::index_first(byte *buf) { - DBUG_ENTER("index_first"); - statistic_increment(ha_read_first_count,&LOCK_status); + DBUG_ENTER("ha_ndbcluster::index_first"); + statistic_increment(current_thd->status_var.ha_read_first_count, + &LOCK_status); // Start the ordered index scan and fetch the first row // Only HA_READ_ORDER indexes get called by index_first - DBUG_RETURN(ordered_index_scan(0, 0, TRUE, buf)); + DBUG_RETURN(ordered_index_scan(0, 0, TRUE, FALSE, buf)); } int ha_ndbcluster::index_last(byte *buf) { - DBUG_ENTER("index_last"); - statistic_increment(ha_read_last_count,&LOCK_status); - int res; - if((res= ordered_index_scan(0, 0, TRUE, buf)) == 0){ - NdbResultSet *cursor= 
m_active_cursor; - while((res= cursor->nextResult(TRUE, m_force_send)) == 0); - if(res == 1){ - unpack_record(buf); - table->status= 0; - DBUG_RETURN(0); - } - } - DBUG_RETURN(res); + DBUG_ENTER("ha_ndbcluster::index_last"); + statistic_increment(current_thd->status_var.ha_read_last_count,&LOCK_status); + DBUG_RETURN(ordered_index_scan(0, 0, TRUE, TRUE, buf)); } +int ha_ndbcluster::index_read_last(byte * buf, const byte * key, uint key_len) +{ + DBUG_ENTER("ha_ndbcluster::index_read_last"); + DBUG_RETURN(index_read(buf, key, key_len, HA_READ_PREFIX_LAST)); +} inline int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted, - byte* buf) + const key_range *end_key, + bool eq_r, bool sorted, + byte* buf) { - KEY* key_info; + ndb_index_type type= get_index_type(active_index); +KEY* key_info; int error= 1; DBUG_ENTER("ha_ndbcluster::read_range_first_to_buf"); - DBUG_PRINT("info", ("eq_range: %d, sorted: %d", eq_range, sorted)); + DBUG_PRINT("info", ("eq_r: %d, sorted: %d", eq_r, sorted)); - switch (get_index_type(active_index)){ + switch (type){ case PRIMARY_KEY_ORDERED_INDEX: case PRIMARY_KEY_INDEX: key_info= table->key_info + active_index; if (start_key && - start_key->length == key_info->key_length && - start_key->flag == HA_READ_KEY_EXACT) + start_key->length == key_info->key_length && + start_key->flag == HA_READ_KEY_EXACT) { - if(m_active_cursor && (error= close_scan())) - DBUG_RETURN(error); + if (m_active_cursor && (error= close_scan())) + DBUG_RETURN(error); error= pk_read(start_key->key, start_key->length, buf); DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? 
HA_ERR_END_OF_FILE : error); } @@ -2695,37 +2952,42 @@ int ha_ndbcluster::read_range_first_to_buf(const key_range *start_key, case UNIQUE_INDEX: key_info= table->key_info + active_index; if (start_key && start_key->length == key_info->key_length && - start_key->flag == HA_READ_KEY_EXACT && - !check_null_in_key(key_info, start_key->key, start_key->length)) + start_key->flag == HA_READ_KEY_EXACT && + !check_null_in_key(key_info, start_key->key, start_key->length)) { - if(m_active_cursor && (error= close_scan())) - DBUG_RETURN(error); + if (m_active_cursor && (error= close_scan())) + DBUG_RETURN(error); error= unique_index_read(start_key->key, start_key->length, buf); DBUG_RETURN(error == HA_ERR_KEY_NOT_FOUND ? HA_ERR_END_OF_FILE : error); } + else if (type == UNIQUE_INDEX) + DBUG_RETURN(unique_index_scan(key_info, + start_key->key, + start_key->length, + buf)); break; default: break; } // Start the ordered index scan and fetch the first row - error= ordered_index_scan(start_key, end_key, sorted, buf); + error= ordered_index_scan(start_key, end_key, sorted, FALSE, buf); DBUG_RETURN(error); } int ha_ndbcluster::read_range_first(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted) + const key_range *end_key, + bool eq_r, bool sorted) { byte* buf= table->record[0]; DBUG_ENTER("ha_ndbcluster::read_range_first"); DBUG_RETURN(read_range_first_to_buf(start_key, - end_key, - eq_range, - sorted, - buf)); + end_key, + eq_r, + sorted, + buf)); } int ha_ndbcluster::read_range_next() @@ -2737,7 +2999,7 @@ int ha_ndbcluster::read_range_next() int ha_ndbcluster::rnd_init(bool scan) { - NdbResultSet *cursor= m_active_cursor; + NdbScanOperation *cursor= m_active_cursor; DBUG_ENTER("rnd_init"); DBUG_PRINT("enter", ("scan: %d", scan)); // Check if scan is to be restarted @@ -2745,22 +3007,27 @@ int ha_ndbcluster::rnd_init(bool scan) { if (!scan) DBUG_RETURN(1); - int res= cursor->restart(m_force_send); - DBUG_ASSERT(res == 0); + if 
(cursor->restart(m_force_send) != 0) + { + DBUG_ASSERT(0); + DBUG_RETURN(-1); + } } - index_init(table->primary_key); + index_init(table->s->primary_key); DBUG_RETURN(0); } int ha_ndbcluster::close_scan() { - NdbResultSet *cursor= m_active_cursor; - NdbConnection *trans= m_active_trans; + NdbTransaction *trans= m_active_trans; DBUG_ENTER("close_scan"); - if (!cursor) + m_multi_cursor= 0; + if (!m_active_cursor && !m_multi_cursor) DBUG_RETURN(1); + NdbScanOperation *cursor= m_active_cursor ? m_active_cursor : m_multi_cursor; + if (m_lock_tuple) { /* @@ -2773,7 +3040,7 @@ int ha_ndbcluster::close_scan() // Lock row DBUG_PRINT("info", ("Keeping lock on scanned row")); - if (!(op= m_active_cursor->lockTuple())) + if (!(op= cursor->lockCurrentTuple())) { m_lock_tuple= false; ERR_RETURN(trans->getNdbError()); @@ -2787,16 +3054,16 @@ int ha_ndbcluster::close_scan() Take over any pending transactions to the deleteing/updating transaction before closing the scan */ - DBUG_PRINT("info", ("ops_pending: %d", m_ops_pending)); - if (execute_no_commit(this,trans) != 0) { + DBUG_PRINT("info", ("ops_pending: %ld", (long) m_ops_pending)); + if (execute_no_commit(this,trans,false) != 0) { no_uncommitted_rows_execute_failure(); DBUG_RETURN(ndb_err(trans)); } m_ops_pending= 0; } - cursor->close(m_force_send); - m_active_cursor= NULL; + cursor->close(m_force_send, TRUE); + m_active_cursor= m_multi_cursor= NULL; DBUG_RETURN(0); } @@ -2810,7 +3077,8 @@ int ha_ndbcluster::rnd_end() int ha_ndbcluster::rnd_next(byte *buf) { DBUG_ENTER("rnd_next"); - statistic_increment(ha_read_rnd_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); if (!m_active_cursor) DBUG_RETURN(full_table_scan(buf)); @@ -2828,7 +3096,8 @@ int ha_ndbcluster::rnd_next(byte *buf) int ha_ndbcluster::rnd_pos(byte *buf, byte *pos) { DBUG_ENTER("rnd_pos"); - statistic_increment(ha_read_rnd_count,&LOCK_status); + 
statistic_increment(current_thd->status_var.ha_read_rnd_count, + &LOCK_status); // The primary key for the record is stored in pos // Perform a pk_read using primary key "index" DBUG_RETURN(pk_read(pos, ref_length, buf)); @@ -2849,9 +3118,9 @@ void ha_ndbcluster::position(const byte *record) byte *buff; DBUG_ENTER("position"); - if (table->primary_key != MAX_KEY) + if (table->s->primary_key != MAX_KEY) { - key_info= table->key_info + table->primary_key; + key_info= table->key_info + table->s->primary_key; key_part= key_info->key_part; end= key_part + key_info->key_parts; buff= ref; @@ -2867,22 +3136,46 @@ void ha_ndbcluster::position(const byte *record) } *buff++= 0; } - memcpy(buff, record + key_part->offset, key_part->length); - buff += key_part->length; + + size_t len = key_part->length; + const byte * ptr = record + key_part->offset; + Field *field = key_part->field; + if (field->type() == MYSQL_TYPE_VARCHAR) + { + if (((Field_varstring*)field)->length_bytes == 1) + { + /** + * Keys always use 2 bytes length + */ + buff[0] = ptr[0]; + buff[1] = 0; + memcpy(buff+2, ptr + 1, len); + } + else + { + memcpy(buff, ptr, len + 2); + } + len += 2; + } + else + { + memcpy(buff, ptr, len); + } + buff += len; } } else { // No primary key, get hidden key DBUG_PRINT("info", ("Getting hidden key")); - int hidden_no= table->fields; - NdbRecAttr* rec= m_value[hidden_no].rec; +#ifndef DBUG_OFF + int hidden_no= table->s->fields; const NDBTAB *tab= (const NDBTAB *) m_table; const NDBCOL *hidden_col= tab->getColumn(hidden_no); DBUG_ASSERT(hidden_col->getPrimaryKey() && hidden_col->getAutoIncrement() && - rec != NULL && ref_length == NDB_HIDDEN_PRIMARY_KEY_LENGTH); +#endif memcpy(ref, m_ref, ref_length); } @@ -2909,7 +3202,7 @@ int ha_ndbcluster::info(uint flag) if (m_table_info) { if (m_ha_not_exact_count) - records= 100; + records= 100; else result= records_update(); } @@ -2918,11 +3211,21 @@ int ha_ndbcluster::info(uint flag) if ((my_errno= check_ndb_connection())) 
DBUG_RETURN(my_errno); Ndb *ndb= get_ndb(); - Uint64 rows= 100; + struct Ndb_statistics stat; ndb->setDatabaseName(m_dbname); - if (current_thd->variables.ndb_use_exact_count) - result= ndb_get_table_statistics(this, true, ndb, m_tabname, &rows, 0); - records= rows; + if (current_thd->variables.ndb_use_exact_count && + (result= ndb_get_table_statistics(this, true, ndb, m_tabname, &stat)) + == 0) + { + mean_rec_length= stat.row_size; + data_file_length= stat.fragment_memory; + records= stat.row_count; + } + else + { + mean_rec_length= 0; + records= 100; + } } } if (flag & HA_STATUS_CONST) @@ -2938,12 +3241,21 @@ int ha_ndbcluster::info(uint flag) if (flag & HA_STATUS_AUTO) { DBUG_PRINT("info", ("HA_STATUS_AUTO")); - if (m_table) + if (m_table && table->found_next_number_field) { Ndb *ndb= get_ndb(); - auto_increment_value= - ndb->readAutoIncrementValue((const NDBTAB *) m_table); + Uint64 auto_increment_value64; + if (ndb->readAutoIncrementValue((const NDBTAB *) m_table, + auto_increment_value64) == -1) + { + const NdbError err= ndb->getNdbError(); + sql_print_error("Error %lu in readAutoIncrementValue(): %s", + (ulong) err.code, err.message); + auto_increment_value= ~(Uint64)0; + } + else + auto_increment_value= (ulonglong)auto_increment_value64; } } @@ -2966,6 +3278,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) break; case HA_EXTRA_RESET: /* Reset database to after open */ DBUG_PRINT("info", ("HA_EXTRA_RESET")); + DBUG_PRINT("info", ("Clearing condition stack")); + cond_clear(); break; case HA_EXTRA_CACHE: /* Cash record in HA_rrnd() */ DBUG_PRINT("info", ("HA_EXTRA_CACHE")); @@ -3035,25 +3349,16 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) break; case HA_EXTRA_IGNORE_DUP_KEY: /* Dup keys don't rollback everything*/ DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY")); - if (current_thd->lex->sql_command == SQLCOM_REPLACE) - { - DBUG_PRINT("info", ("Turning ON use of write instead of insert")); - m_use_write= TRUE; - } else - { - 
DBUG_PRINT("info", ("Ignoring duplicate key")); - m_ignore_dup_key= TRUE; - } + DBUG_PRINT("info", ("Ignoring duplicate key")); + m_ignore_dup_key= TRUE; break; case HA_EXTRA_NO_IGNORE_DUP_KEY: DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY")); - DBUG_PRINT("info", ("Turning OFF use of write instead of insert")); - m_use_write= FALSE; m_ignore_dup_key= FALSE; break; case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those - where field->query_id is the same as - the current query id */ + where field->query_id is the same as + the current query id */ DBUG_PRINT("info", ("HA_EXTRA_RETRIEVE_ALL_COLS")); m_retrieve_all_fields= TRUE; break; @@ -3075,8 +3380,22 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) break; case HA_EXTRA_CHANGE_KEY_TO_DUP: DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_DUP")); + case HA_EXTRA_KEYREAD_PRESERVE_FIELDS: + DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_PRESERVE_FIELDS")); + break; + case HA_EXTRA_WRITE_CAN_REPLACE: + DBUG_PRINT("info", ("HA_EXTRA_WRITE_CAN_REPLACE")); + if (!m_has_unique_index) + { + DBUG_PRINT("info", ("Turning ON use of write instead of insert")); + m_use_write= TRUE; + } + break; + case HA_EXTRA_WRITE_CANNOT_REPLACE: + DBUG_PRINT("info", ("HA_EXTRA_WRITE_CANNOT_REPLACE")); + DBUG_PRINT("info", ("Turning OFF use of write instead of insert")); + m_use_write= FALSE; break; - } DBUG_RETURN(0); @@ -3101,11 +3420,11 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows) DBUG_PRINT("enter", ("rows: %d", (int)rows)); m_rows_inserted= (ha_rows) 0; - if (m_ignore_dup_key && table->primary_key != MAX_KEY) + if (!m_use_write && m_ignore_dup_key) { /* compare if expression with that in write_row - we have a situation where peek_row() will be called + we have a situation where peek_indexed_rows() will be called so we cannot batch */ DBUG_PRINT("info", ("Batching turned off as duplicate key is " @@ -3150,7 +3469,7 @@ int ha_ndbcluster::end_bulk_insert() // Check if last inserts need to be flushed if 
(m_bulk_insert_not_flushed) { - NdbConnection *trans= m_active_trans; + NdbTransaction *trans= m_active_trans; // Send rows to NDB DBUG_PRINT("info", ("Sending inserts to NDB, "\ "rows_inserted:%d, bulk_insert_rows: %d", @@ -3158,7 +3477,7 @@ int ha_ndbcluster::end_bulk_insert() m_bulk_insert_not_flushed= FALSE; if (m_transaction_on) { - if (execute_no_commit(this, trans) != 0) + if (execute_no_commit(this, trans,false) != 0) { no_uncommitted_rows_execute_failure(); my_errno= error= ndb_err(trans); @@ -3173,7 +3492,7 @@ int ha_ndbcluster::end_bulk_insert() } else { - int res= trans->restart(); + IF_DBUG(int res=) trans->restart(); DBUG_ASSERT(res == 0); } } @@ -3192,19 +3511,16 @@ int ha_ndbcluster::extra_opt(enum ha_extra_function operation, ulong cache_size) DBUG_RETURN(extra(operation)); } +static const char *ha_ndbcluster_exts[] = { + ha_ndb_ext, + NullS +}; -int ha_ndbcluster::reset() +const char** ha_ndbcluster::bas_ext() const { - DBUG_ENTER("reset"); - // Reset what? - DBUG_RETURN(1); + return ha_ndbcluster_exts; } -static const char *ha_ndb_bas_exts[]= { ha_ndb_ext, NullS }; -const char **ha_ndbcluster::bas_ext() const -{ return ha_ndb_bas_exts; } - - /* How many seeks it will take to read through the table This is to be comparable to the number returned by records_in_range so @@ -3216,7 +3532,7 @@ double ha_ndbcluster::scan_time() DBUG_ENTER("ha_ndbcluster::scan_time()"); double res= rows2double(records*1000); DBUG_PRINT("exit", ("table: %s value: %f", - m_tabname, res)); + m_tabname, res)); DBUG_RETURN(res); } @@ -3282,9 +3598,9 @@ THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, As MySQL will execute an external lock for every new table it uses we can use this to start the transactions. If we are in auto_commit mode we just need to start a transaction - for the statement, this will be stored in transaction.stmt. + for the statement, this will be stored in thd_ndb.stmt. 
If not, we have to start a master transaction if there doesn't exist - one from before, this will be stored in transaction.all + one from before, this will be stored in thd_ndb.all When a table lock is held one transaction will be started which holds the table lock and for each statement a hupp transaction will be started @@ -3297,45 +3613,57 @@ THR_LOCK_DATA **ha_ndbcluster::store_lock(THD *thd, int ha_ndbcluster::external_lock(THD *thd, int lock_type) { int error=0; - NdbConnection* trans= NULL; + NdbTransaction* trans= NULL; DBUG_ENTER("external_lock"); /* Check that this handler instance has a connection set up to the Ndb object of thd */ - if (check_ndb_connection()) + if (check_ndb_connection(thd)) DBUG_RETURN(1); - - Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; + + Thd_ndb *thd_ndb= get_thd_ndb(thd); Ndb *ndb= thd_ndb->ndb; - DBUG_PRINT("enter", ("transaction.thd_ndb->lock_count: %d", - thd_ndb->lock_count)); + DBUG_PRINT("enter", ("thd: 0x%lx thd_ndb: 0x%lx thd_ndb->lock_count: %d", + (long) thd, (long) thd_ndb, thd_ndb->lock_count)); if (lock_type != F_UNLCK) { DBUG_PRINT("info", ("lock_type != F_UNLCK")); + if (thd->lex->sql_command == SQLCOM_LOAD) + { + m_transaction_on= FALSE; + /* Would be simpler if has_transactions() didn't always say "yes" */ + thd->options|= OPTION_STATUS_NO_TRANS_UPDATE; + thd->no_trans_update= TRUE; + } + else if (!thd->transaction.on) + m_transaction_on= FALSE; + else + m_transaction_on= thd->variables.ndb_use_transactions; if (!thd_ndb->lock_count++) { PRINT_OPTION_FLAGS(thd); - if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) { // Autocommit transaction - DBUG_ASSERT(!thd->transaction.stmt.ndb_tid); + DBUG_ASSERT(!thd_ndb->stmt); DBUG_PRINT("trans",("Starting transaction stmt")); trans= ndb->startTransaction(); if (trans == NULL) ERR_RETURN(ndb->getNdbError()); - no_uncommitted_rows_reset(thd); - thd->transaction.stmt.ndb_tid= trans; + no_uncommitted_rows_reset(thd); + thd_ndb->stmt= trans; + 
thd_ndb->query_state&= NDB_QUERY_NORMAL; + trans_register_ha(thd, FALSE, &ndbcluster_hton); } else { - if (!thd->transaction.all.ndb_tid) - { + if (!thd_ndb->all) + { // Not autocommit transaction // A "master" transaction ha not been started yet DBUG_PRINT("trans",("starting transaction, all")); @@ -3343,7 +3671,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) trans= ndb->startTransaction(); if (trans == NULL) ERR_RETURN(ndb->getNdbError()); - no_uncommitted_rows_reset(thd); + no_uncommitted_rows_reset(thd); + thd_ndb->all= trans; + thd_ndb->query_state&= NDB_QUERY_NORMAL; + trans_register_ha(thd, TRUE, &ndbcluster_hton); /* If this is the start of a LOCK TABLE, a table look @@ -3352,12 +3683,11 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) Check if it should be read or write lock */ if (thd->options & (OPTION_TABLE_LOCK)) - { + { //lockThisTable(); DBUG_PRINT("info", ("Locking the table..." )); } - thd->transaction.all.ndb_tid= trans; } } } @@ -3378,26 +3708,20 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_ha_not_exact_count= !thd->variables.ndb_use_exact_count; m_autoincrement_prefetch= (ha_rows) thd->variables.ndb_autoincrement_prefetch_sz; - if (!thd->transaction.on) - m_transaction_on= FALSE; - else - m_transaction_on= thd->variables.ndb_use_transactions; - // m_use_local_query_cache= thd->variables.ndb_use_local_query_cache; - m_active_trans= thd->transaction.all.ndb_tid ? - (NdbConnection*)thd->transaction.all.ndb_tid: - (NdbConnection*)thd->transaction.stmt.ndb_tid; + m_active_trans= thd_ndb->all ? 
thd_ndb->all : thd_ndb->stmt; DBUG_ASSERT(m_active_trans); // Start of transaction + m_rows_changed= 0; m_retrieve_all_fields= FALSE; m_retrieve_primary_key= FALSE; - m_ops_pending= 0; + m_ops_pending= 0; { NDBDICT *dict= ndb->getDictionary(); const NDBTAB *tab; void *tab_info; if (!(tab= dict->getTable(m_tabname, &tab_info))) - ERR_RETURN(dict->getNdbError()); + ERR_RETURN(dict->getNdbError()); DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion())); // Check if thread has stale local cache @@ -3427,9 +3751,9 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) if ((my_errno= build_index_list(ndb, table, ILBP_OPEN))) DBUG_RETURN(my_errno); - const void *data, *pack_data; + const void *data= NULL, *pack_data= NULL; uint length, pack_length; - if (readfrm(table->path, &data, &length) || + if (readfrm(table->s->path, &data, &length) || packfrm(data, length, &pack_data, &pack_length) || pack_length != tab->getFrmLength() || memcmp(pack_data, tab->getFrmData(), pack_length)) @@ -3445,16 +3769,34 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) m_table_info= tab_info; } no_uncommitted_rows_init(thd); - } - else + } + else { DBUG_PRINT("info", ("lock_type == F_UNLCK")); + + if (ndb_cache_check_time && m_rows_changed) + { + DBUG_PRINT("info", ("Rows has changed and util thread is running")); + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) + { + DBUG_PRINT("info", ("Add share to list of tables to be invalidated")); + /* NOTE push_back allocates memory using transactions mem_root! 
*/ + thd_ndb->changed_tables.push_back(m_share, &thd->transaction.mem_root); + } + + pthread_mutex_lock(&m_share->mutex); + DBUG_PRINT("info", ("Invalidating commit_count")); + m_share->commit_count= 0; + m_share->commit_count_lock++; + pthread_mutex_unlock(&m_share->mutex); + } + if (!--thd_ndb->lock_count) { DBUG_PRINT("trans", ("Last external_lock")); PRINT_OPTION_FLAGS(thd); - if (thd->transaction.stmt.ndb_tid) + if (thd_ndb->stmt) { /* Unlock is done without a transaction commit / rollback. @@ -3463,10 +3805,11 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) */ DBUG_PRINT("trans",("ending non-updating transaction")); ndb->closeTransaction(m_active_trans); - thd->transaction.stmt.ndb_tid= 0; + thd_ndb->stmt= NULL; } } m_table_info= NULL; + /* This is the place to make sure this handler instance no longer are connected to the active transaction. @@ -3480,6 +3823,10 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) DBUG_PRINT("warning", ("m_active_cursor != NULL")); m_active_cursor= NULL; + if (m_multi_cursor) + DBUG_PRINT("warning", ("m_multi_cursor != NULL")); + m_multi_cursor= NULL; + if (m_blobs_pending) DBUG_PRINT("warning", ("blobs_pending != 0")); m_blobs_pending= 0; @@ -3515,27 +3862,26 @@ void ha_ndbcluster::unlock_row() since ndb does not currently does not support table locking */ -int ha_ndbcluster::start_stmt(THD *thd) +int ha_ndbcluster::start_stmt(THD *thd, thr_lock_type lock_type) { int error=0; DBUG_ENTER("start_stmt"); PRINT_OPTION_FLAGS(thd); - NdbConnection *trans= - (thd->transaction.stmt.ndb_tid) - ? 
(NdbConnection *)(thd->transaction.stmt.ndb_tid) - : (NdbConnection *)(thd->transaction.all.ndb_tid); + Thd_ndb *thd_ndb= get_thd_ndb(thd); + NdbTransaction *trans= (thd_ndb->stmt)?thd_ndb->stmt:thd_ndb->all; if (!trans){ - Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; + Ndb *ndb= thd_ndb->ndb; DBUG_PRINT("trans",("Starting transaction stmt")); trans= ndb->startTransaction(); if (trans == NULL) ERR_RETURN(ndb->getNdbError()); no_uncommitted_rows_reset(thd); - thd->transaction.stmt.ndb_tid= trans; + thd_ndb->stmt= trans; + thd_ndb->query_state&= NDB_QUERY_NORMAL; + trans_register_ha(thd, FALSE, &ndbcluster_hton); } m_active_trans= trans; - // Start of statement m_retrieve_all_fields= FALSE; m_retrieve_primary_key= FALSE; @@ -3546,18 +3892,19 @@ int ha_ndbcluster::start_stmt(THD *thd) /* - Commit a transaction started in NDB + Commit a transaction started in NDB */ -int ndbcluster_commit(THD *thd, void *ndb_transaction) +int ndbcluster_commit(THD *thd, bool all) { int res= 0; - Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; - NdbConnection *trans= (NdbConnection*)ndb_transaction; + Thd_ndb *thd_ndb= get_thd_ndb(thd); + Ndb *ndb= thd_ndb->ndb; + NdbTransaction *trans= all ? thd_ndb->all : thd_ndb->stmt; DBUG_ENTER("ndbcluster_commit"); DBUG_PRINT("transaction",("%s", - trans == thd->transaction.stmt.ndb_tid ? + trans == thd_ndb->stmt ? 
"stmt" : "all")); DBUG_ASSERT(ndb && trans); @@ -3565,12 +3912,32 @@ int ndbcluster_commit(THD *thd, void *ndb_transaction) { const NdbError err= trans->getNdbError(); const NdbOperation *error_op= trans->getNdbErrorOperation(); - ERR_PRINT(err); + ERR_PRINT(err); res= ndb_to_mysql_error(&err); - if (res != -1) + if (res != -1) ndbcluster_print_error(res, error_op); } ndb->closeTransaction(trans); + + if (all) + thd_ndb->all= NULL; + else + thd_ndb->stmt= NULL; + + /* Clear commit_count for tables changed by transaction */ + NDB_SHARE* share; + List_iterator_fast<NDB_SHARE> it(thd_ndb->changed_tables); + while ((share= it++)) + { + pthread_mutex_lock(&share->mutex); + DBUG_PRINT("info", ("Invalidate commit_count for %s, share->commit_count: %lu", + share->table_name, (ulong) share->commit_count)); + share->commit_count= 0; + share->commit_count_lock++; + pthread_mutex_unlock(&share->mutex); + } + thd_ndb->changed_tables.empty(); + DBUG_RETURN(res); } @@ -3579,19 +3946,20 @@ int ndbcluster_commit(THD *thd, void *ndb_transaction) Rollback a transaction started in NDB */ -int ndbcluster_rollback(THD *thd, void *ndb_transaction) +int ndbcluster_rollback(THD *thd, bool all) { int res= 0; - Ndb *ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; - NdbConnection *trans= (NdbConnection*)ndb_transaction; + Thd_ndb *thd_ndb= get_thd_ndb(thd); + Ndb *ndb= thd_ndb->ndb; + NdbTransaction *trans= all ? thd_ndb->all : thd_ndb->stmt; DBUG_ENTER("ndbcluster_rollback"); DBUG_PRINT("transaction",("%s", - trans == thd->transaction.stmt.ndb_tid ? + trans == thd_ndb->stmt ? 
"stmt" : "all")); DBUG_ASSERT(ndb && trans); - if (trans->execute(Rollback) != 0) + if (trans->execute(NdbTransaction::Rollback) != 0) { const NdbError err= trans->getNdbError(); const NdbOperation *error_op= trans->getNdbErrorOperation(); @@ -3601,7 +3969,16 @@ int ndbcluster_rollback(THD *thd, void *ndb_transaction) ndbcluster_print_error(res, error_op); } ndb->closeTransaction(trans); - DBUG_RETURN(0); + + if (all) + thd_ndb->all= NULL; + else + thd_ndb->stmt= NULL; + + /* Clear list of tables changed by transaction */ + thd_ndb->changed_tables.empty(); + + DBUG_RETURN(res); } @@ -3609,6 +3986,9 @@ int ndbcluster_rollback(THD *thd, void *ndb_transaction) Define NDB column based on Field. Returns 0 or mysql error code. Not member of ha_ndbcluster because NDBCOL cannot be declared. + + MySQL text types with character set "binary" are mapped to true + NDB binary types without a character set. This may change. */ static int create_ndb_column(NDBCOL &col, @@ -3616,12 +3996,7 @@ static int create_ndb_column(NDBCOL &col, HA_CREATE_INFO *info) { // Set name - { - char truncated_field_name[NDB_MAX_ATTR_NAME_SIZE]; - strnmov(truncated_field_name,field->field_name,sizeof(truncated_field_name)); - truncated_field_name[sizeof(truncated_field_name)-1]= '\0'; - col.setName(truncated_field_name); - } + col.setName(field->field_name); // Get char set CHARSET_INFO *cs= field->charset(); // Set type and sizes @@ -3691,6 +4066,24 @@ static int create_ndb_column(NDBCOL &col, col.setLength(1); } break; + case MYSQL_TYPE_NEWDECIMAL: + { + Field_new_decimal *f= (Field_new_decimal*)field; + uint precision= f->precision; + uint scale= f->decimals(); + if (field->flags & UNSIGNED_FLAG) + { + col.setType(NDBCOL::Decimalunsigned); + } + else + { + col.setType(NDBCOL::Decimal); + } + col.setPrecision(precision); + col.setScale(scale); + col.setLength(1); + } + break; // Date types case MYSQL_TYPE_DATETIME: col.setType(NDBCOL::Datetime); @@ -3718,30 +4111,56 @@ static int 
create_ndb_column(NDBCOL &col, break; // Char types case MYSQL_TYPE_STRING: - if (field->flags & BINARY_FLAG) + if (field->pack_length() == 0) + { + col.setType(NDBCOL::Bit); + col.setLength(1); + } + else if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) + { col.setType(NDBCOL::Binary); - else { - col.setType(NDBCOL::Char); - col.setCharset(cs); + col.setLength(field->pack_length()); } - if (field->pack_length() == 0) - col.setLength(1); // currently ndb does not support size 0 else + { + col.setType(NDBCOL::Char); + col.setCharset(cs); col.setLength(field->pack_length()); + } break; - case MYSQL_TYPE_VAR_STRING: - if (field->flags & BINARY_FLAG) - col.setType(NDBCOL::Varbinary); - else { - col.setType(NDBCOL::Varchar); - col.setCharset(cs); + case MYSQL_TYPE_VAR_STRING: // ? + case MYSQL_TYPE_VARCHAR: + { + Field_varstring* f= (Field_varstring*)field; + if (f->length_bytes == 1) + { + if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) + col.setType(NDBCOL::Varbinary); + else { + col.setType(NDBCOL::Varchar); + col.setCharset(cs); + } + } + else if (f->length_bytes == 2) + { + if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) + col.setType(NDBCOL::Longvarbinary); + else { + col.setType(NDBCOL::Longvarchar); + col.setCharset(cs); + } + } + else + { + return HA_ERR_UNSUPPORTED; + } + col.setLength(field->field_length); } - col.setLength(field->pack_length()); break; // Blob types (all come in as MYSQL_TYPE_BLOB) mysql_type_tiny_blob: case MYSQL_TYPE_TINY_BLOB: - if (field->flags & BINARY_FLAG) + if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) col.setType(NDBCOL::Blob); else { col.setType(NDBCOL::Text); @@ -3752,31 +4171,42 @@ static int create_ndb_column(NDBCOL &col, col.setPartSize(0); col.setStripeSize(0); break; - mysql_type_blob: + //mysql_type_blob: + case MYSQL_TYPE_GEOMETRY: case MYSQL_TYPE_BLOB: - if (field->flags & BINARY_FLAG) + if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) col.setType(NDBCOL::Blob); else { 
col.setType(NDBCOL::Text); col.setCharset(cs); } - // Use "<=" even if "<" is the exact condition - if (field->max_length() <= (1 << 8)) - goto mysql_type_tiny_blob; - else if (field->max_length() <= (1 << 16)) { - col.setInlineSize(256); - col.setPartSize(2000); - col.setStripeSize(16); + Field_blob *field_blob= (Field_blob *)field; + /* + * max_data_length is 2^8-1, 2^16-1, 2^24-1 for tiny, blob, medium. + * Tinyblob gets no blob parts. The other cases are just a crude + * way to control part size and striping. + * + * In mysql blob(256) is promoted to blob(65535) so it does not + * in fact fit "inline" in NDB. + */ + if (field_blob->max_data_length() < (1 << 8)) + goto mysql_type_tiny_blob; + else if (field_blob->max_data_length() < (1 << 16)) + { + col.setInlineSize(256); + col.setPartSize(2000); + col.setStripeSize(16); + } + else if (field_blob->max_data_length() < (1 << 24)) + goto mysql_type_medium_blob; + else + goto mysql_type_long_blob; } - else if (field->max_length() <= (1 << 24)) - goto mysql_type_medium_blob; - else - goto mysql_type_long_blob; break; mysql_type_medium_blob: case MYSQL_TYPE_MEDIUM_BLOB: - if (field->flags & BINARY_FLAG) + if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) col.setType(NDBCOL::Blob); else { col.setType(NDBCOL::Text); @@ -3788,7 +4218,7 @@ static int create_ndb_column(NDBCOL &col, break; mysql_type_long_blob: case MYSQL_TYPE_LONG_BLOB: - if (field->flags & BINARY_FLAG) + if ((field->flags & BINARY_FLAG) && cs == &my_charset_bin) col.setType(NDBCOL::Blob); else { col.setType(NDBCOL::Text); @@ -3807,8 +4237,17 @@ static int create_ndb_column(NDBCOL &col, col.setType(NDBCOL::Char); col.setLength(field->pack_length()); break; + case MYSQL_TYPE_BIT: + { + int no_of_bits= field->field_length; + col.setType(NDBCOL::Bit); + if (!no_of_bits) + col.setLength(1); + else + col.setLength(no_of_bits); + break; + } case MYSQL_TYPE_NULL: - case MYSQL_TYPE_GEOMETRY: goto mysql_type_unsupported; mysql_type_unsupported: default: 
@@ -3820,10 +4259,13 @@ static int create_ndb_column(NDBCOL &col, // Set autoincrement if (field->flags & AUTO_INCREMENT_FLAG) { +#ifndef DBUG_OFF + char buff[22]; +#endif col.setAutoIncrement(TRUE); ulonglong value= info->auto_increment_value ? info->auto_increment_value : (ulonglong) 1; - DBUG_PRINT("info", ("Autoincrement key, initial: %llu", value)); + DBUG_PRINT("info", ("Autoincrement key, initial: %s", llstr(value, buff))); col.setAutoIncrementInitialValue(value); } else @@ -3837,7 +4279,11 @@ static int create_ndb_column(NDBCOL &col, static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) { - if (form->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */ + ha_rows max_rows= form->s->max_rows; + ha_rows min_rows= form->s->min_rows; + if (max_rows < min_rows) + max_rows= min_rows; + if (max_rows == (ha_rows)0) /* default setting, don't set fragmentation */ return; /** * get the number of fragments right @@ -3855,12 +4301,16 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) acc_row_size+= 4 + /*safety margin*/ 4; #endif ulonglong acc_fragment_size= 512*1024*1024; - ulonglong max_rows= form->max_rows; + /* + * if not --with-big-tables then max_rows is ulong + * the warning in this case is misleading though + */ + ulonglong big_max_rows = (ulonglong)max_rows; #if MYSQL_VERSION_ID >= 50100 - no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; + no_fragments= (big_max_rows*acc_row_size)/acc_fragment_size+1; #else - no_fragments= ((max_rows*acc_row_size)/acc_fragment_size+1 - +1/*correct rounding*/)/2; + no_fragments= ((big_max_rows*acc_row_size)/acc_fragment_size+1 + +1/*correct rounding*/)/2; #endif } { @@ -3870,8 +4320,8 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) { ftype= NDBTAB::FragAllLarge; if (no_fragments > 4*no_nodes) - push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, - "Ndb might have problems storing the max amount of 
rows specified"); + push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "Ndb might have problems storing the max amount of rows specified"); } else if (no_fragments > no_nodes) ftype= NDBTAB::FragAllMedium; @@ -3879,21 +4329,22 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) ftype= NDBTAB::FragAllSmall; tab.setFragmentType(ftype); } + tab.setMaxRows(max_rows); + tab.setMinRows(min_rows); } int ha_ndbcluster::create(const char *name, - TABLE *form, - HA_CREATE_INFO *info) + TABLE *form, + HA_CREATE_INFO *create_info) { NDBTAB tab; NDBCOL col; uint pack_length, length, i, pk_length= 0; - const void *data, *pack_data; - const char **key_names= form->keynames.type_names; + const void *data= NULL, *pack_data= NULL; char name2[FN_HEADLEN]; - bool create_from_engine= (info->table_options & HA_OPTION_CREATE_FROM_ENGINE); - - DBUG_ENTER("create"); + bool create_from_engine= (create_info->table_options & HA_OPTION_CREATE_FROM_ENGINE); + + DBUG_ENTER("ha_ndbcluster::create"); DBUG_PRINT("enter", ("name: %s", name)); fn_format(name2, name, "", "",2); // Remove the .frm extension set_dbname(name2); @@ -3918,34 +4369,37 @@ int ha_ndbcluster::create(const char *name, DBUG_PRINT("table", ("name: %s", m_tabname)); tab.setName(m_tabname); - tab.setLogging(!(info->options & HA_LEX_CREATE_TMP_TABLE)); + tab.setLogging(!(create_info->options & HA_LEX_CREATE_TMP_TABLE)); // Save frm data for this table if (readfrm(name, &data, &length)) DBUG_RETURN(1); if (packfrm(data, length, &pack_data, &pack_length)) + { + my_free((char*)data, MYF(0)); DBUG_RETURN(2); - - DBUG_PRINT("info", ("setFrm data=%x, len=%d", pack_data, pack_length)); + } + + DBUG_PRINT("info", ("setFrm data: 0x%lx len: %d", (long) pack_data, pack_length)); tab.setFrm(pack_data, pack_length); my_free((char*)data, MYF(0)); my_free((char*)pack_data, MYF(0)); - for (i= 0; i < form->fields; i++) + for (i= 0; i < form->s->fields; i++) { Field *field= form->field[i]; 
DBUG_PRINT("info", ("name: %s, type: %u, pack_length: %d", field->field_name, field->real_type(), - field->pack_length())); - if ((my_errno= create_ndb_column(col, field, info))) + field->pack_length())); + if ((my_errno= create_ndb_column(col, field, create_info))) DBUG_RETURN(my_errno); tab.addColumn(col); - if(col.getPrimaryKey()) + if (col.getPrimaryKey()) pk_length += (field->pack_length() + 3) / 4; } // No primary key, create shadow key as 64 bit, auto increment - if (form->primary_key == MAX_KEY) + if (form->s->primary_key == MAX_KEY) { DBUG_PRINT("info", ("Generating shadow key")); col.setName("$PK"); @@ -3959,7 +4413,7 @@ int ha_ndbcluster::create(const char *name, } // Make sure that blob tables don't have to big part size - for (i= 0; i < form->fields; i++) + for (i= 0; i < form->s->fields; i++) { /** * The extra +7 concists @@ -3967,17 +4421,18 @@ int ha_ndbcluster::create(const char *name, * 5 - from extra words added by tup/dict?? */ switch (form->field[i]->real_type()) { + case MYSQL_TYPE_GEOMETRY: case MYSQL_TYPE_BLOB: case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_LONG_BLOB: { - NdbDictionary::Column * col = tab.getColumn(i); - int size = pk_length + (col->getPartSize()+3)/4 + 7; - if(size > NDB_MAX_TUPLE_SIZE_IN_WORDS && - (pk_length+7) < NDB_MAX_TUPLE_SIZE_IN_WORDS) + NdbDictionary::Column * column= tab.getColumn(i); + int size= pk_length + (column->getPartSize()+3)/4 + 7; + if (size > NDB_MAX_TUPLE_SIZE_IN_WORDS && + (pk_length+7) < NDB_MAX_TUPLE_SIZE_IN_WORDS) { - size = NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7; - col->setPartSize(4*size); + size= NDB_MAX_TUPLE_SIZE_IN_WORDS - pk_length - 7; + column->setPartSize(4*size); } /** * If size > NDB_MAX and pk_length+7 >= NDB_MAX @@ -4019,17 +4474,17 @@ int ha_ndbcluster::create(const char *name, int ha_ndbcluster::create_ordered_index(const char *name, - KEY *key_info) + KEY *key_info) { - DBUG_ENTER("create_ordered_index"); + DBUG_ENTER("ha_ndbcluster::create_ordered_index"); 
DBUG_RETURN(create_index(name, key_info, FALSE)); } int ha_ndbcluster::create_unique_index(const char *name, - KEY *key_info) + KEY *key_info) { - DBUG_ENTER("create_unique_index"); + DBUG_ENTER("ha_ndbcluster::create_unique_index"); DBUG_RETURN(create_index(name, key_info, TRUE)); } @@ -4039,15 +4494,15 @@ int ha_ndbcluster::create_unique_index(const char *name, */ int ha_ndbcluster::create_index(const char *name, - KEY *key_info, - bool unique) + KEY *key_info, + bool unique) { Ndb *ndb= get_ndb(); NdbDictionary::Dictionary *dict= ndb->getDictionary(); KEY_PART_INFO *key_part= key_info->key_part; KEY_PART_INFO *end= key_part + key_info->key_parts; - DBUG_ENTER("create_index"); + DBUG_ENTER("ha_ndbcluster::create_index"); DBUG_PRINT("enter", ("name: %s ", name)); NdbDictionary::Index ndb_index(name); @@ -4065,12 +4520,7 @@ int ha_ndbcluster::create_index(const char *name, { Field *field= key_part->field; DBUG_PRINT("info", ("attr: %s", field->field_name)); - { - char truncated_field_name[NDB_MAX_ATTR_NAME_SIZE]; - strnmov(truncated_field_name,field->field_name,sizeof(truncated_field_name)); - truncated_field_name[sizeof(truncated_field_name)-1]= '\0'; - ndb_index.addColumnName(truncated_field_name); - } + ndb_index.addColumnName(field->field_name); } if (dict->createIndex(ndb_index)) @@ -4104,7 +4554,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) if (check_ndb_connection()) DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION); - + Ndb *ndb= get_ndb(); dict= ndb->getDictionary(); if (!(orig_tab= dict->getTable(m_tabname))) @@ -4173,7 +4623,6 @@ int ha_ndbcluster::alter_table_name(const char *to) Ndb *ndb= get_ndb(); NDBDICT *dict= ndb->getDictionary(); const NDBTAB *orig_tab= (const NDBTAB *) m_table; - int ret; DBUG_ENTER("alter_table_name_table"); NdbDictionary::Table new_tab= *orig_tab; @@ -4189,26 +4638,30 @@ int ha_ndbcluster::alter_table_name(const char *to) /* - Delete a table from NDB Cluster + Delete table from NDB Cluster + */ int 
ha_ndbcluster::delete_table(const char *name) { - DBUG_ENTER("delete_table"); + DBUG_ENTER("ha_ndbcluster::delete_table"); DBUG_PRINT("enter", ("name: %s", name)); set_dbname(name); set_tabname(name); - + if (check_ndb_connection()) DBUG_RETURN(HA_ERR_NO_CONNECTION); - // Remove .ndb file + + /* Call ancestor function to delete .ndb file */ handler::delete_table(name); + + /* Drop the table from NDB */ DBUG_RETURN(drop_table()); } /* - Drop a table in NDB Cluster + Drop table in NDB Cluster */ int ha_ndbcluster::drop_table() @@ -4216,10 +4669,11 @@ int ha_ndbcluster::drop_table() THD *thd= current_thd; Ndb *ndb= get_ndb(); NdbDictionary::Dictionary *dict= ndb->getDictionary(); - + DBUG_ENTER("drop_table"); DBUG_PRINT("enter", ("Deleting %s", m_tabname)); + release_metadata(); while (dict->dropTable(m_tabname)) { const NdbError err= dict->getNdbError(); @@ -4232,45 +4686,42 @@ int ha_ndbcluster::drop_table() default: break; } - if (err.code != 709) // 709: No such table existed - ERR_RETURN(dict->getNdbError()); - break; + ERR_RETURN(dict->getNdbError()); } - release_metadata(); DBUG_RETURN(0); } -longlong ha_ndbcluster::get_auto_increment() +ulonglong ha_ndbcluster::get_auto_increment() { + int cache_size; + Uint64 auto_value; DBUG_ENTER("get_auto_increment"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); Ndb *ndb= get_ndb(); - + if (m_rows_inserted > m_rows_to_insert) { /* We guessed too low */ m_rows_to_insert+= m_autoincrement_prefetch; } - int cache_size= - (int) - (m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ? - m_rows_to_insert - m_rows_inserted - : (m_rows_to_insert > m_autoincrement_prefetch) ? - m_rows_to_insert - : m_autoincrement_prefetch; - Uint64 auto_value= NDB_FAILED_AUTO_INCREMENT; + cache_size= + (int) ((m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ? + m_rows_to_insert - m_rows_inserted : + ((m_rows_to_insert > m_autoincrement_prefetch) ? 
+ m_rows_to_insert : m_autoincrement_prefetch)); + int ret; uint retries= NDB_AUTO_INCREMENT_RETRIES; do { - auto_value= - (m_skip_auto_increment) ? - ndb->readAutoIncrementValue((const NDBTAB *) m_table) - : ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size); - } while (auto_value == NDB_FAILED_AUTO_INCREMENT && + ret= + m_skip_auto_increment ? + ndb->readAutoIncrementValue((const NDBTAB *) m_table, auto_value) : + ndb->getAutoIncrementValue((const NDBTAB *) m_table, auto_value, cache_size); + } while (ret == -1 && --retries && ndb->getNdbError().status == NdbError::TemporaryError); - if (auto_value == NDB_FAILED_AUTO_INCREMENT) + if (ret == -1) { const NdbError err= ndb->getNdbError(); sql_print_error("Error %lu in ::get_auto_increment(): %s", @@ -4286,29 +4737,36 @@ longlong ha_ndbcluster::get_auto_increment() */ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): - handler(table_arg), + handler(&ndbcluster_hton, table_arg), m_active_trans(NULL), m_active_cursor(NULL), m_table(NULL), m_table_version(-1), m_table_info(NULL), m_table_flags(HA_REC_NOT_IN_SEQ | - HA_NULL_IN_KEY | - HA_AUTO_PART_KEY | - HA_NO_PREFIX_CHAR_KEYS), + HA_NULL_IN_KEY | + HA_AUTO_PART_KEY | + HA_NO_PREFIX_CHAR_KEYS | + HA_NEED_READ_RANGE_BUFFER | + HA_CAN_GEOMETRY | + HA_CAN_BIT_FIELD | + HA_PARTIAL_COLUMN_READ), m_share(0), m_use_write(FALSE), m_ignore_dup_key(FALSE), + m_has_unique_index(FALSE), m_primary_key_update(FALSE), m_retrieve_all_fields(FALSE), m_retrieve_primary_key(FALSE), m_rows_to_insert((ha_rows) 1), m_rows_inserted((ha_rows) 0), m_bulk_insert_rows((ha_rows) 1024), + m_rows_changed((ha_rows) 0), m_bulk_insert_not_flushed(FALSE), m_ops_pending(0), m_skip_auto_increment(TRUE), m_blobs_pending(0), + m_blobs_offset(0), m_blobs_buffer(0), m_blobs_buffer_size(0), m_dupkey((uint) -1), @@ -4316,10 +4774,11 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_force_send(TRUE), m_autoincrement_prefetch((ha_rows) 32), m_transaction_on(TRUE), - m_use_local_query_cache(FALSE) 
-{ + m_cond_stack(NULL), + m_multi_cursor(NULL) +{ int i; - + DBUG_ENTER("ha_ndbcluster"); m_tabname[0]= '\0'; @@ -4362,10 +4821,15 @@ ha_ndbcluster::~ha_ndbcluster() } DBUG_ASSERT(m_active_trans == NULL); + // Discard the condition stack + DBUG_PRINT("info", ("Clearing condition stack")); + cond_clear(); + DBUG_VOID_RETURN; } + /* Open a table for further use - fetch metadata for this table from NDB @@ -4383,9 +4847,9 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) // Setup ref_length to make room for the whole // primary key to be written in the ref variable - if (table->primary_key != MAX_KEY) + if (table->s->primary_key != MAX_KEY) { - key= table->key_info+table->primary_key; + key= table->key_info+table->s->primary_key; ref_length= key->key_length; DBUG_PRINT("info", (" ref_length: %d", ref_length)); } @@ -4407,9 +4871,9 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) { Ndb *ndb= get_ndb(); ndb->setDatabaseName(m_dbname); - Uint64 rows= 0; - res= ndb_get_table_statistics(NULL, false, ndb, m_tabname, &rows, 0); - records= rows; + struct Ndb_statistics stat; + res= ndb_get_table_statistics(NULL, false, ndb, m_tabname, &stat); + records= stat.row_count; if(!res) res= info(HA_STATUS_CONST); } @@ -4474,23 +4938,20 @@ void ha_ndbcluster::release_thd_ndb(Thd_ndb* thd_ndb) Ndb* check_ndb_in_thd(THD* thd) { - DBUG_ENTER("check_ndb_in_thd"); - Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; - + Thd_ndb *thd_ndb= get_thd_ndb(thd); if (!thd_ndb) { if (!(thd_ndb= ha_ndbcluster::seize_thd_ndb())) - DBUG_RETURN(NULL); - thd->transaction.thd_ndb= thd_ndb; + return NULL; + set_thd_ndb(thd, thd_ndb); } - DBUG_RETURN(thd_ndb->ndb); + return thd_ndb->ndb; } -int ha_ndbcluster::check_ndb_connection() +int ha_ndbcluster::check_ndb_connection(THD* thd) { - THD* thd= current_thd; Ndb *ndb; DBUG_ENTER("check_ndb_connection"); @@ -4501,16 +4962,16 @@ int ha_ndbcluster::check_ndb_connection() } -void 
ndbcluster_close_connection(THD *thd) +int ndbcluster_close_connection(THD *thd) { - Thd_ndb *thd_ndb= (Thd_ndb*)thd->transaction.thd_ndb; + Thd_ndb *thd_ndb= get_thd_ndb(thd); DBUG_ENTER("ndbcluster_close_connection"); if (thd_ndb) { ha_ndbcluster::release_thd_ndb(thd_ndb); - thd->transaction.thd_ndb= NULL; + set_thd_ndb(thd, NULL); // not strictly required but does not hurt either } - DBUG_VOID_RETURN; + DBUG_RETURN(0); } @@ -4519,7 +4980,7 @@ void ndbcluster_close_connection(THD *thd) */ int ndbcluster_discover(THD* thd, const char *db, const char *name, - const void** frmblob, uint* frmlen) + const void** frmblob, uint* frmlen) { uint len; const void* data; @@ -4565,33 +5026,31 @@ int ndbcluster_discover(THD* thd, const char *db, const char *name, /* Check if a table exists in NDB - + */ int ndbcluster_table_exists_in_engine(THD* thd, const char *db, const char *name) { - uint len; - const void* data; const NDBTAB* tab; Ndb* ndb; DBUG_ENTER("ndbcluster_table_exists_in_engine"); - DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); + DBUG_PRINT("enter", ("db: %s, name: %s", db, name)); if (!(ndb= check_ndb_in_thd(thd))) - DBUG_RETURN(HA_ERR_NO_CONNECTION); + DBUG_RETURN(HA_ERR_NO_CONNECTION); ndb->setDatabaseName(db); NDBDICT* dict= ndb->getDictionary(); dict->set_local_table_data_size(sizeof(Ndb_local_table_statistics)); dict->invalidateTable(name); if (!(tab= dict->getTable(name))) - { + { const NdbError err= dict->getNdbError(); if (err.code == 709) DBUG_RETURN(0); ERR_RETURN(err); } - + DBUG_PRINT("info", ("Found table %s", tab->getName())); DBUG_RETURN(1); } @@ -4599,7 +5058,7 @@ int ndbcluster_table_exists_in_engine(THD* thd, const char *db, const char *name extern "C" byte* tables_get_key(const char *entry, uint *length, - my_bool not_used __attribute__((unused))) + my_bool not_used __attribute__((unused))) { *length= strlen(entry); return (byte*) entry; @@ -4673,7 +5132,7 @@ int ndbcluster_drop_database(const char *path) int ndbcluster_find_files(THD 
*thd,const char *db,const char *path, - const char *wild, bool dir, List<char> *files) + const char *wild, bool dir, List<char> *files) { DBUG_ENTER("ndbcluster_find_files"); DBUG_PRINT("enter", ("db: %s", db)); @@ -4693,18 +5152,18 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, // List tables in NDB NDBDICT *dict= ndb->getDictionary(); if (dict->listObjects(list, - NdbDictionary::Object::UserTable) != 0) + NdbDictionary::Object::UserTable) != 0) ERR_RETURN(dict->getNdbError()); if (hash_init(&ndb_tables, system_charset_info,list.count,0,0, - (hash_get_key)tables_get_key,0,0)) + (hash_get_key)tables_get_key,0,0)) { DBUG_PRINT("error", ("Failed to init HASH ndb_tables")); DBUG_RETURN(-1); } if (hash_init(&ok_tables, system_charset_info,32,0,0, - (hash_get_key)tables_get_key,0,0)) + (hash_get_key)tables_get_key,0,0)) { DBUG_PRINT("error", ("Failed to init HASH ok_tables")); hash_free(&ndb_tables); @@ -4725,11 +5184,11 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, { if (lower_case_table_names) { - if (wild_case_compare(files_charset_info, t.name, wild)) - continue; + if (wild_case_compare(files_charset_info, t.name, wild)) + continue; } else if (wild_compare(t.name,wild,0)) - continue; + continue; } DBUG_PRINT("info", ("Inserting %s into ndb_tables hash", t.name)); my_hash_insert(&ndb_tables, (byte*)thd->strdup(t.name)); @@ -4750,7 +5209,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, // Check for .ndb file with this name (void)strxnmov(name, FN_REFLEN, - mysql_data_home,"/",db,"/",file_name,ha_ndb_ext,NullS); + mysql_data_home,"/",db,"/",file_name,ha_ndb_ext,NullS); DBUG_PRINT("info", ("Check access for %s", name)); if (access(name, F_OK)) { @@ -4810,17 +5269,20 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, // Delete old files List_iterator_fast<char> it3(delete_list); while ((file_name=it3++)) - { - DBUG_PRINT("info", ("Remove table %s/%s",db, file_name )); + { + 
DBUG_PRINT("info", ("Remove table %s/%s", db, file_name)); // Delete the table and all related files TABLE_LIST table_list; bzero((char*) &table_list,sizeof(table_list)); table_list.db= (char*) db; - table_list.alias=table_list.real_name=(char*)file_name; - (void)mysql_rm_table_part2(thd, &table_list, - /* if_exists */ TRUE, - /* drop_temporary */ FALSE, - /* dont_log_query*/ TRUE); + table_list.alias= table_list.table_name= (char*)file_name; + (void)mysql_rm_table_part2(thd, &table_list, + /* if_exists */ FALSE, + /* drop_temporary */ FALSE, + /* drop_view */ FALSE, + /* dont_log_query*/ TRUE); + /* Clear error message that is returned when table is deleted */ + thd->clear_error(); } } @@ -4828,7 +5290,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, List_iterator_fast<char> it2(create_list); while ((file_name=it2++)) { - DBUG_PRINT("info", ("Table %s need discovery", name)); + DBUG_PRINT("info", ("Table %s need discovery", file_name)); if (ha_create_table_from_engine(thd, db, file_name) == 0) files->push_back(thd->strdup(file_name)); } @@ -4847,10 +5309,21 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, a NDB Cluster table handler */ +/* Call back after cluster connect */ +static int connect_callback() +{ + update_status_variables(g_ndb_cluster_connection); + return 0; +} + bool ndbcluster_init() { int res; DBUG_ENTER("ndbcluster_init"); + + if (have_ndbcluster != SHOW_OPTION_YES) + goto ndbcluster_init_error; + // Set connectstring if specified if (opt_ndbcluster_connectstring != 0) DBUG_PRINT("connectstring", ("%s", opt_ndbcluster_connectstring)); @@ -4858,15 +5331,23 @@ bool ndbcluster_init() new Ndb_cluster_connection(opt_ndbcluster_connectstring)) == 0) { DBUG_PRINT("error",("Ndb_cluster_connection(%s)", - opt_ndbcluster_connectstring)); + opt_ndbcluster_connectstring)); goto ndbcluster_init_error; } - + { + char buf[128]; + my_snprintf(buf, sizeof(buf), "mysqld --server-id=%lu", server_id); + 
g_ndb_cluster_connection->set_name(buf); + } g_ndb_cluster_connection->set_optimized_node_selection (opt_ndb_optimized_node_selection); // Create a Ndb object to open the connection to NDB - g_ndb= new Ndb(g_ndb_cluster_connection, "sys"); + if ( (g_ndb= new Ndb(g_ndb_cluster_connection, "sys")) == 0 ) + { + DBUG_PRINT("error", ("failed to create global ndb object")); + goto ndbcluster_init_error; + } g_ndb->getDictionary()->set_local_table_data_size(sizeof(Ndb_local_table_statistics)); if (g_ndb->init() != 0) { @@ -4876,22 +5357,29 @@ bool ndbcluster_init() if ((res= g_ndb_cluster_connection->connect(0,0,0)) == 0) { + connect_callback(); DBUG_PRINT("info",("NDBCLUSTER storage engine at %s on port %d", - g_ndb_cluster_connection->get_connected_host(), - g_ndb_cluster_connection->get_connected_port())); + g_ndb_cluster_connection->get_connected_host(), + g_ndb_cluster_connection->get_connected_port())); g_ndb_cluster_connection->wait_until_ready(10,3); } - else if(res == 1) + else if (res == 1) { - if (g_ndb_cluster_connection->start_connect_thread()) { + if (g_ndb_cluster_connection->start_connect_thread(connect_callback)) + { DBUG_PRINT("error", ("g_ndb_cluster_connection->start_connect_thread()")); goto ndbcluster_init_error; } +#ifndef DBUG_OFF { char buf[1024]; - DBUG_PRINT("info",("NDBCLUSTER storage engine not started, will connect using %s", - g_ndb_cluster_connection->get_connectstring(buf,sizeof(buf)))); + DBUG_PRINT("info", + ("NDBCLUSTER storage engine not started, " + "will connect using %s", + g_ndb_cluster_connection-> + get_connectstring(buf,sizeof(buf)))); } +#endif } else { @@ -4903,12 +5391,34 @@ bool ndbcluster_init() (void) hash_init(&ndbcluster_open_tables,system_charset_info,32,0,0, (hash_get_key) ndbcluster_get_key,0,0); pthread_mutex_init(&ndbcluster_mutex,MY_MUTEX_INIT_FAST); + pthread_mutex_init(&LOCK_ndb_util_thread, MY_MUTEX_INIT_FAST); + pthread_cond_init(&COND_ndb_util_thread, NULL); + + ndb_cache_check_time = opt_ndb_cache_check_time; 
+ // Create utility thread + pthread_t tmp; + if (pthread_create(&tmp, &connection_attrib, ndb_util_thread_func, 0)) + { + DBUG_PRINT("error", ("Could not create ndb utility thread")); + hash_free(&ndbcluster_open_tables); + pthread_mutex_destroy(&ndbcluster_mutex); + pthread_mutex_destroy(&LOCK_ndb_util_thread); + pthread_cond_destroy(&COND_ndb_util_thread); + goto ndbcluster_init_error; + } + ndbcluster_inited= 1; DBUG_RETURN(FALSE); - ndbcluster_init_error: - ndbcluster_end(); +ndbcluster_init_error: + if (g_ndb) + delete g_ndb; + g_ndb= NULL; + if (g_ndb_cluster_connection) + delete g_ndb_cluster_connection; + g_ndb_cluster_connection= NULL; + have_ndbcluster= SHOW_OPTION_DISABLED; // If we couldn't use handler DBUG_RETURN(TRUE); } @@ -4916,16 +5426,27 @@ bool ndbcluster_init() /* End use of the NDB Cluster table handler - free all global variables allocated by - ndcluster_init() + ndbcluster_init() */ bool ndbcluster_end() { DBUG_ENTER("ndbcluster_end"); - if(g_ndb) + + if (!ndbcluster_inited) + DBUG_RETURN(0); + + // Kill ndb utility thread + (void) pthread_mutex_lock(&LOCK_ndb_util_thread); + DBUG_PRINT("exit",("killing ndb util thread: %lx", ndb_util_thread)); + (void) pthread_cond_signal(&COND_ndb_util_thread); + (void) pthread_mutex_unlock(&LOCK_ndb_util_thread); + + if (g_ndb) { #ifndef DBUG_OFF - Ndb::Free_list_usage tmp; tmp.m_name= 0; + Ndb::Free_list_usage tmp; + tmp.m_name= 0; while (g_ndb->get_free_list_usage(&tmp)) { uint leaked= (uint) tmp.m_created - tmp.m_free; @@ -4937,15 +5458,15 @@ bool ndbcluster_end() } #endif delete g_ndb; + g_ndb= NULL; } - g_ndb= NULL; - if (g_ndb_cluster_connection) - delete g_ndb_cluster_connection; + delete g_ndb_cluster_connection; g_ndb_cluster_connection= NULL; - if (!ndbcluster_inited) - DBUG_RETURN(0); + hash_free(&ndbcluster_open_tables); pthread_mutex_destroy(&ndbcluster_mutex); + pthread_mutex_destroy(&LOCK_ndb_util_thread); + pthread_cond_destroy(&COND_ndb_util_thread); ndbcluster_inited= 0; DBUG_RETURN(0); 
} @@ -4961,7 +5482,7 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op) DBUG_ENTER("ndbcluster_print_error"); TABLE tab; const char *tab_name= (error_op) ? error_op->getTableName() : ""; - tab.table_name= (char *) tab_name; + tab.alias= (char *) tab_name; ha_ndbcluster error_handler(&tab); tab.file= &error_handler; error_handler.print_error(error, MYF(0)); @@ -5101,6 +5622,10 @@ uint ha_ndbcluster::max_supported_key_length() const { return NDB_MAX_KEY_SIZE; } +uint ha_ndbcluster::max_supported_key_part_length() const +{ + return NDB_MAX_KEY_SIZE; +} bool ha_ndbcluster::low_byte_first() const { #ifdef WORDS_BIGENDIAN @@ -5126,16 +5651,226 @@ const char* ha_ndbcluster::index_type(uint key_number) return "HASH"; } } + uint8 ha_ndbcluster::table_cache_type() { - if (m_use_local_query_cache) - return HA_CACHE_TBL_TRANSACT; + DBUG_ENTER("ha_ndbcluster::table_cache_type=HA_CACHE_TBL_ASKTRANSACT"); + DBUG_RETURN(HA_CACHE_TBL_ASKTRANSACT); +} + + +uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname, + Uint64 *commit_count) +{ + DBUG_ENTER("ndb_get_commitcount"); + + char name[FN_REFLEN]; + NDB_SHARE *share; + (void)strxnmov(name, FN_REFLEN, "./",dbname,"/",tabname,NullS); + DBUG_PRINT("enter", ("name: %s", name)); + pthread_mutex_lock(&ndbcluster_mutex); + if (!(share=(NDB_SHARE*) hash_search(&ndbcluster_open_tables, + (byte*) name, + strlen(name)))) + { + pthread_mutex_unlock(&ndbcluster_mutex); + DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", + name)); + DBUG_RETURN(1); + } + share->use_count++; + pthread_mutex_unlock(&ndbcluster_mutex); + + pthread_mutex_lock(&share->mutex); + if (ndb_cache_check_time > 0) + { + if (share->commit_count != 0) + { + *commit_count= share->commit_count; +#ifndef DBUG_OFF + char buff[22]; +#endif + DBUG_PRINT("info", ("Getting commit_count: %s from share", + llstr(share->commit_count, buff))); + pthread_mutex_unlock(&share->mutex); + free_share(share); + DBUG_RETURN(0); + } + } + 
DBUG_PRINT("info", ("Get commit_count from NDB")); + Ndb *ndb; + if (!(ndb= check_ndb_in_thd(thd))) + DBUG_RETURN(1); + ndb->setDatabaseName(dbname); + uint lock= share->commit_count_lock; + pthread_mutex_unlock(&share->mutex); + + struct Ndb_statistics stat; + if (ndb_get_table_statistics(NULL, false, ndb, tabname, &stat)) + { + free_share(share); + DBUG_RETURN(1); + } + + pthread_mutex_lock(&share->mutex); + if (share->commit_count_lock == lock) + { +#ifndef DBUG_OFF + char buff[22]; +#endif + DBUG_PRINT("info", ("Setting commit_count to %s", + llstr(stat.commit_count, buff))); + share->commit_count= stat.commit_count; + *commit_count= stat.commit_count; + } else - return HA_CACHE_TBL_NOCACHE; + { + DBUG_PRINT("info", ("Discarding commit_count, comit_count_lock changed")); + *commit_count= 0; + } + pthread_mutex_unlock(&share->mutex); + free_share(share); + DBUG_RETURN(0); +} + + +/* + Check if a cached query can be used. + This is done by comparing the supplied engine_data to commit_count of + the table. + The commit_count is either retrieved from the share for the table, where + it has been cached by the util thread. If the util thread is not started, + NDB has to be contacetd to retrieve the commit_count, this will introduce + a small delay while waiting for NDB to answer. + + + SYNOPSIS + ndbcluster_cache_retrieval_allowed + thd thread handle + full_name concatenation of database name, + the null character '\0', and the table + name + full_name_len length of the full name, + i.e. len(dbname) + len(tablename) + 1 + + engine_data parameter retrieved when query was first inserted into + the cache. If the value of engine_data is changed, + all queries for this table should be invalidated. 
+ + RETURN VALUE + TRUE Yes, use the query from cache + FALSE No, don't use the cached query, and if engine_data + has changed, all queries for this table should be invalidated + +*/ + +static my_bool +ndbcluster_cache_retrieval_allowed(THD *thd, + char *full_name, uint full_name_len, + ulonglong *engine_data) +{ + Uint64 commit_count; + bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); + char *dbname= full_name; + char *tabname= dbname+strlen(dbname)+1; +#ifndef DBUG_OFF + char buff[22], buff2[22]; +#endif + DBUG_ENTER("ndbcluster_cache_retrieval_allowed"); + DBUG_PRINT("enter", ("dbname: %s, tabname: %s, is_autocommit: %d", + dbname, tabname, is_autocommit)); + + if (!is_autocommit) + { + DBUG_PRINT("exit", ("No, don't use cache in transaction")); + DBUG_RETURN(FALSE); + } + + if (ndb_get_commitcount(thd, dbname, tabname, &commit_count)) + { + *engine_data= 0; /* invalidate */ + DBUG_PRINT("exit", ("No, could not retrieve commit_count")); + DBUG_RETURN(FALSE); + } + DBUG_PRINT("info", ("*engine_data: %s, commit_count: %s", + llstr(*engine_data, buff), llstr(commit_count, buff2))); + if (commit_count == 0) + { + *engine_data= 0; /* invalidate */ + DBUG_PRINT("exit", ("No, local commit has been performed")); + DBUG_RETURN(FALSE); + } + else if (*engine_data != commit_count) + { + *engine_data= commit_count; /* invalidate */ + DBUG_PRINT("exit", ("No, commit_count has changed")); + DBUG_RETURN(FALSE); + } + + DBUG_PRINT("exit", ("OK to use cache, engine_data: %s", + llstr(*engine_data, buff))); + DBUG_RETURN(TRUE); } + +/** + Register a table for use in the query cache. Fetch the commit_count + for the table and return it in engine_data, this will later be used + to check if the table has changed, before the cached query is reused. 
+ + SYNOPSIS + ha_ndbcluster::can_query_cache_table + thd thread handle + full_name concatenation of database name, + the null character '\0', and the table + name + full_name_len length of the full name, + i.e. len(dbname) + len(tablename) + 1 + qc_engine_callback function to be called before using cache on this table + engine_data out, commit_count for this table + + RETURN VALUE + TRUE Yes, it's ok to cahce this query + FALSE No, don't cach the query + +*/ + +my_bool +ha_ndbcluster::register_query_cache_table(THD *thd, + char *full_name, uint full_name_len, + qc_engine_callback *engine_callback, + ulonglong *engine_data) +{ + Uint64 commit_count; +#ifndef DBUG_OFF + char buff[22]; +#endif + bool is_autocommit= !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); + DBUG_ENTER("ha_ndbcluster::register_query_cache_table"); + DBUG_PRINT("enter",("dbname: %s, tabname: %s, is_autocommit: %d", + m_dbname, m_tabname, is_autocommit)); + + if (!is_autocommit) + { + DBUG_PRINT("exit", ("Can't register table during transaction")) + DBUG_RETURN(FALSE); + } + + if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count)) + { + *engine_data= 0; + DBUG_PRINT("exit", ("Error, could not get commitcount")) + DBUG_RETURN(FALSE); + } + *engine_data= commit_count; + *engine_callback= ndbcluster_cache_retrieval_allowed; + DBUG_PRINT("exit", ("commit_count: %s", llstr(commit_count, buff))); + DBUG_RETURN(commit_count > 0); +} + + /* - Handling the shared NDB_SHARE structure that is needed to + Handling the shared NDB_SHARE structure that is needed to provide table locking. It's also used for sharing data with other NDB handlers in the same MySQL Server. 
There is currently not much @@ -5143,7 +5878,7 @@ uint8 ha_ndbcluster::table_cache_type() */ static byte* ndbcluster_get_key(NDB_SHARE *share,uint *length, - my_bool not_used __attribute__((unused))) + my_bool not_used __attribute__((unused))) { *length=share->table_name_length; return (byte*) share->table_name; @@ -5172,9 +5907,22 @@ static NDB_SHARE* get_share(const char *table_name) } thr_lock_init(&share->lock); pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); + share->commit_count= 0; + share->commit_count_lock= 0; + } + else + { + DBUG_PRINT("error", ("Failed to alloc share")); + pthread_mutex_unlock(&ndbcluster_mutex); + return 0; } } share->use_count++; + + DBUG_PRINT("share", + ("table_name: %s length: %d use_count: %d commit_count: %lu", + share->table_name, share->table_name_length, share->use_count, + (ulong) share->commit_count)); pthread_mutex_unlock(&ndbcluster_mutex); return share; } @@ -5185,7 +5933,7 @@ static void free_share(NDB_SHARE *share) pthread_mutex_lock(&ndbcluster_mutex); if (!--share->use_count) { - hash_delete(&ndbcluster_open_tables, (byte*) share); + hash_delete(&ndbcluster_open_tables, (byte*) share); thr_lock_delete(&share->lock); pthread_mutex_destroy(&share->mutex); my_free((gptr) share, MYF(0)); @@ -5214,21 +5962,21 @@ struct frm_blob_struct static int packfrm(const void *data, uint len, - const void **pack_data, uint *pack_len) + const void **pack_data, uint *pack_len) { int error; ulong org_len, comp_len; uint blob_len; frm_blob_struct* blob; DBUG_ENTER("packfrm"); - DBUG_PRINT("enter", ("data: %x, len: %d", data, len)); + DBUG_PRINT("enter", ("data: 0x%lx, len: %d", (long) data, len)); error= 1; org_len= len; if (my_compress((byte*)data, &org_len, &comp_len)) goto err; - DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len)); + DBUG_PRINT("info", ("org_len: %lu comp_len: %lu", org_len, comp_len)); DBUG_DUMP("compressed", (char*)data, org_len); error= 2; @@ -5248,7 +5996,7 @@ static int packfrm(const void 
*data, uint len, *pack_len= blob_len; error= 0; - DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len)); + DBUG_PRINT("exit", ("pack_data: 0x%lx, pack_len: %d", (long) *pack_data, *pack_len)); err: DBUG_RETURN(error); @@ -5256,20 +6004,20 @@ err: static int unpackfrm(const void **unpack_data, uint *unpack_len, - const void *pack_data) + const void *pack_data) { const frm_blob_struct *blob= (frm_blob_struct*)pack_data; byte *data; ulong complen, orglen, ver; DBUG_ENTER("unpackfrm"); - DBUG_PRINT("enter", ("pack_data: %x", pack_data)); + DBUG_PRINT("enter", ("pack_data: 0x%lx", (long) pack_data)); - complen= uint4korr((char*)&blob->head.complen); - orglen= uint4korr((char*)&blob->head.orglen); - ver= uint4korr((char*)&blob->head.ver); + complen= uint4korr((char*)&blob->head.complen); + orglen= uint4korr((char*)&blob->head.orglen); + ver= uint4korr((char*)&blob->head.ver); - DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d", - ver,complen,orglen)); + DBUG_PRINT("blob",("ver: %lu complen: %lu orglen: %lu", + ver,complen,orglen)); DBUG_DUMP("blob->data", (char*) blob->data, complen); if (ver != 1) @@ -5287,7 +6035,7 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len, *unpack_data= data; *unpack_len= complen; - DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len)); + DBUG_PRINT("exit", ("frmdata: 0x%lx, len: %d", (long) *unpack_data, *unpack_len)); DBUG_RETURN(0); } @@ -5295,24 +6043,30 @@ static int unpackfrm(const void **unpack_data, uint *unpack_len, static int ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, - const char * table, - Uint64* row_count, Uint64* commit_count) + const char* table, + struct Ndb_statistics * ndbstat) { - DBUG_ENTER("ndb_get_table_statistics"); - DBUG_PRINT("enter", ("table: %s", table)); - NdbConnection* pTrans; + NdbTransaction* pTrans; NdbError error; - int reterr= 0; int retries= 10; + int reterr= 0; int retry_sleep= 30 * 1000; /* 30 milliseconds */ 
+#ifndef DBUG_OFF + char buff[22], buff2[22], buff3[22], buff4[22]; +#endif + DBUG_ENTER("ndb_get_table_statistics"); + DBUG_PRINT("enter", ("table: %s", table)); do { - Uint64 rows, commits; + Uint64 rows, commits, mem; + Uint32 size; + Uint32 count= 0; Uint64 sum_rows= 0; Uint64 sum_commits= 0; + Uint64 sum_row_size= 0; + Uint64 sum_mem= 0; NdbScanOperation*pOp; - NdbResultSet *rs; int check; if ((pTrans= ndb->startTransaction()) == NULL) @@ -5327,7 +6081,7 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, goto retry; } - if ((rs= pOp->readTuples(NdbOperation::LM_CommittedRead)) == 0) + if (pOp->readTuples(NdbOperation::LM_CommittedRead)) { error= pOp->getNdbError(); goto retry; @@ -5341,17 +6095,25 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows); pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits); + pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&size); + pOp->getValue(NdbDictionary::Column::FRAGMENT_MEMORY, (char*)&mem); - if (pTrans->execute(NoCommit, AbortOnError, TRUE) == -1) + if (pTrans->execute(NdbTransaction::NoCommit, + NdbTransaction::AbortOnError, + TRUE) == -1) { error= pTrans->getNdbError(); goto retry; } - while((check= rs->nextResult(TRUE, TRUE)) == 0) + while ((check= pOp->nextResult(TRUE, TRUE)) == 0) { sum_rows+= rows; sum_commits+= commits; + if (sum_row_size < size) + sum_row_size= size; + sum_mem+= mem; + count++; } if (check == -1) @@ -5360,16 +6122,24 @@ ndb_get_table_statistics(ha_ndbcluster* file, bool report_error, Ndb* ndb, goto retry; } - rs->close(TRUE); + pOp->close(TRUE); ndb->closeTransaction(pTrans); - if(row_count) - * row_count= sum_rows; - if(commit_count) - * commit_count= sum_commits; - DBUG_PRINT("exit", ("records: %u commits: %u", sum_rows, sum_commits)); - DBUG_RETURN(0); + ndbstat->row_count= sum_rows; + ndbstat->commit_count= sum_commits; + ndbstat->row_size= sum_row_size; + 
ndbstat->fragment_memory= sum_mem; + + DBUG_PRINT("exit", ("records: %s commits: %s " + "row_size: %s mem: %s count: %u", + llstr(sum_rows, buff), + llstr(sum_commits, buff2), + llstr(sum_row_size, buff3), + llstr(sum_mem, buff4), + count)); + + DBUG_RETURN(0); retry: if(report_error) { @@ -5384,6 +6154,9 @@ retry: reterr= ndb_to_mysql_error(&tmp); } } + else + reterr= error.code; + if (pTrans) { ndb->closeTransaction(pTrans); @@ -5416,7 +6189,7 @@ int ha_ndbcluster::write_ndb_file() DBUG_PRINT("enter", ("db: %s, name: %s", m_dbname, m_tabname)); (void)strxnmov(path, FN_REFLEN, - mysql_data_home,"/",m_dbname,"/",m_tabname,ha_ndb_ext,NullS); + mysql_data_home,"/",m_dbname,"/",m_tabname,ha_ndb_ext,NullS); if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0) { @@ -5428,7 +6201,8 @@ int ha_ndbcluster::write_ndb_file() } void -ha_ndbcluster::release_completed_operations(NdbConnection *trans) +ha_ndbcluster::release_completed_operations(NdbTransaction *trans, + bool force_release) { if (trans->hasBlobOperation()) { @@ -5437,20 +6211,2009 @@ ha_ndbcluster::release_completed_operations(NdbConnection *trans) */ return; } + if (!force_release) + { + if (get_thd_ndb(current_thd)->query_state & NDB_QUERY_MULTI_READ_RANGE) + { + /* We are batching reads and have not consumed all fetched + rows yet, releasing operation records is unsafe + */ + return; + } + } trans->releaseCompletedOperations(); } +bool +ha_ndbcluster::null_value_index_search(KEY_MULTI_RANGE *ranges, + KEY_MULTI_RANGE *end_range, + HANDLER_BUFFER *buffer) +{ + DBUG_ENTER("null_value_index_search"); + KEY* key_info= table->key_info + active_index; + KEY_MULTI_RANGE *range= ranges; + ulong reclength= table->s->reclength; + byte *curr= (byte*)buffer->buffer; + byte *end_of_buffer= (byte*)buffer->buffer_end; + + for (; range<end_range && curr+reclength <= end_of_buffer; + range++) + { + const byte *key= range->start_key.key; + uint key_len= range->start_key.length; + if 
(check_null_in_key(key_info, key, key_len)) + DBUG_RETURN(true); + curr += reclength; + } + DBUG_RETURN(false); +} + +int +ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, + KEY_MULTI_RANGE *ranges, + uint range_count, + bool sorted, + HANDLER_BUFFER *buffer) +{ + int res; + KEY* key_info= table->key_info + active_index; + NDB_INDEX_TYPE cur_index_type= get_index_type(active_index); + ulong reclength= table->s->reclength; + NdbOperation* op; + Thd_ndb *thd_ndb= get_thd_ndb(current_thd); + DBUG_ENTER("ha_ndbcluster::read_multi_range_first"); + + /** + * blobs and unique hash index with NULL can't be batched currently + */ + if (uses_blob_value(m_retrieve_all_fields) || + (cur_index_type == UNIQUE_INDEX && + has_null_in_unique_index(active_index) && + null_value_index_search(ranges, ranges+range_count, buffer))) + { + m_disable_multi_read= TRUE; + DBUG_RETURN(handler::read_multi_range_first(found_range_p, + ranges, + range_count, + sorted, + buffer)); + } + thd_ndb->query_state|= NDB_QUERY_MULTI_READ_RANGE; + m_disable_multi_read= FALSE; + + /** + * Copy arguments into member variables + */ + m_multi_ranges= ranges; + multi_range_curr= ranges; + multi_range_end= ranges+range_count; + multi_range_sorted= sorted; + multi_range_buffer= buffer; + + /** + * read multi range will read ranges as follows (if not ordered) + * + * input read order + * ====== ========== + * pk-op 1 pk-op 1 + * pk-op 2 pk-op 2 + * range 3 range (3,5) NOTE result rows will be intermixed + * pk-op 4 pk-op 4 + * range 5 + * pk-op 6 pk-ok 6 + */ + + /** + * Variables for loop + */ + byte *curr= (byte*)buffer->buffer; + byte *end_of_buffer= (byte*)buffer->buffer_end; + NdbOperation::LockMode lm= + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type); + bool need_pk = (lm == NdbOperation::LM_Read); + const NDBTAB *tab= (const NDBTAB *) m_table; + const NDBINDEX *unique_idx= (NDBINDEX *) m_index[active_index].unique_index; + const NDBINDEX *idx= (NDBINDEX *) 
m_index[active_index].index; + const NdbOperation* lastOp= m_active_trans->getLastDefinedOperation(); + NdbIndexScanOperation* scanOp= 0; + for (; multi_range_curr<multi_range_end && curr+reclength <= end_of_buffer; + multi_range_curr++) + { + switch (cur_index_type) { + case PRIMARY_KEY_ORDERED_INDEX: + if (!(multi_range_curr->start_key.length == key_info->key_length && + multi_range_curr->start_key.flag == HA_READ_KEY_EXACT)) + goto range; + /* fall through */ + case PRIMARY_KEY_INDEX: + multi_range_curr->range_flag |= UNIQUE_RANGE; + if ((op= m_active_trans->getNdbOperation(tab)) && + !op->readTuple(lm) && + !set_primary_key(op, multi_range_curr->start_key.key) && + !define_read_attrs(curr, op) && + (op->setAbortOption(AO_IgnoreError), TRUE)) + curr += reclength; + else + ERR_RETURN(op ? op->getNdbError() : m_active_trans->getNdbError()); + break; + case UNIQUE_ORDERED_INDEX: + if (!(multi_range_curr->start_key.length == key_info->key_length && + multi_range_curr->start_key.flag == HA_READ_KEY_EXACT && + !check_null_in_key(key_info, multi_range_curr->start_key.key, + multi_range_curr->start_key.length))) + goto range; + /* fall through */ + case UNIQUE_INDEX: + multi_range_curr->range_flag |= UNIQUE_RANGE; + if ((op= m_active_trans->getNdbIndexOperation(unique_idx, tab)) && + !op->readTuple(lm) && + !set_index_key(op, key_info, multi_range_curr->start_key.key) && + !define_read_attrs(curr, op) && + (op->setAbortOption(AO_IgnoreError), TRUE)) + curr += reclength; + else + ERR_RETURN(op ? 
op->getNdbError() : m_active_trans->getNdbError()); + break; + case ORDERED_INDEX: + { + range: + multi_range_curr->range_flag &= ~(uint)UNIQUE_RANGE; + if (scanOp == 0) + { + if (m_multi_cursor) + { + scanOp= m_multi_cursor; + DBUG_ASSERT(scanOp->getSorted() == sorted); + DBUG_ASSERT(scanOp->getLockMode() == + (NdbOperation::LockMode)get_ndb_lock_type(m_lock.type)); + if (scanOp->reset_bounds(m_force_send)) + DBUG_RETURN(ndb_err(m_active_trans)); + + end_of_buffer -= reclength; + } + else if ((scanOp= m_active_trans->getNdbIndexScanOperation(idx, tab)) + &&!scanOp->readTuples(lm, 0, parallelism, sorted, + FALSE, TRUE, need_pk, TRUE) + &&!generate_scan_filter(m_cond_stack, scanOp) + &&!define_read_attrs(end_of_buffer-reclength, scanOp)) + { + m_multi_cursor= scanOp; + m_multi_range_cursor_result_ptr= end_of_buffer-reclength; + } + else + { + ERR_RETURN(scanOp ? scanOp->getNdbError() : + m_active_trans->getNdbError()); + } + } + + const key_range *keys[2]= { &multi_range_curr->start_key, + &multi_range_curr->end_key }; + if ((res= set_bounds(scanOp, keys, multi_range_curr-ranges))) + DBUG_RETURN(res); + break; + } + case UNDEFINED_INDEX: + DBUG_ASSERT(FALSE); + DBUG_RETURN(1); + break; + } + } + + if (multi_range_curr != multi_range_end) + { + /** + * Mark that we're using entire buffer (even if might not) as + * we haven't read all ranges for some reason + * This as we don't want mysqld to reuse the buffer when we read + * the remaining ranges + */ + buffer->end_of_used_area= (byte*)buffer->buffer_end; + } + else + { + buffer->end_of_used_area= curr; + } + + /** + * Set first operation in multi range + */ + m_current_multi_operation= + lastOp ? 
lastOp->next() : m_active_trans->getFirstDefinedOperation(); + if (!(res= execute_no_commit_ie(this, m_active_trans, true))) + { + m_multi_range_defined= multi_range_curr; + multi_range_curr= ranges; + m_multi_range_result_ptr= (byte*)buffer->buffer; + DBUG_RETURN(read_multi_range_next(found_range_p)); + } + ERR_RETURN(m_active_trans->getNdbError()); +} + +#if 0 +#define DBUG_MULTI_RANGE(x) printf("read_multi_range_next: case %d\n", x); +#else +#define DBUG_MULTI_RANGE(x) +#endif + +int +ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p) +{ + DBUG_ENTER("ha_ndbcluster::read_multi_range_next"); + if (m_disable_multi_read) + { + DBUG_RETURN(handler::read_multi_range_next(multi_range_found_p)); + } + + int res; + int range_no; + ulong reclength= table->s->reclength; + const NdbOperation* op= m_current_multi_operation; + for (;multi_range_curr < m_multi_range_defined; multi_range_curr++) + { + if (multi_range_curr->range_flag & UNIQUE_RANGE) + { + if (op->getNdbError().code == 0) + goto found_next; + + op= m_active_trans->getNextCompletedOperation(op); + m_multi_range_result_ptr += reclength; + continue; + } + else if (m_multi_cursor && !multi_range_sorted) + { + DBUG_MULTI_RANGE(1); + if ((res= fetch_next(m_multi_cursor)) == 0) + { + DBUG_MULTI_RANGE(2); + range_no= m_multi_cursor->get_range_no(); + goto found; + } + else + { + goto close_scan; + } + } + else if (m_multi_cursor && multi_range_sorted) + { + if (m_active_cursor && (res= fetch_next(m_multi_cursor))) + { + DBUG_MULTI_RANGE(3); + goto close_scan; + } + + range_no= m_multi_cursor->get_range_no(); + uint current_range_no= multi_range_curr - m_multi_ranges; + if ((uint) range_no == current_range_no) + { + DBUG_MULTI_RANGE(4); + // return current row + goto found; + } + else if (range_no > (int)current_range_no) + { + DBUG_MULTI_RANGE(5); + // wait with current row + m_active_cursor= 0; + continue; + } + else + { + DBUG_MULTI_RANGE(6); + // First fetch from cursor + 
DBUG_ASSERT(range_no == -1); + if ((res= m_multi_cursor->nextResult(true))) + { + goto close_scan; + } + multi_range_curr--; // Will be increased in for-loop + continue; + } + } + else /** m_multi_cursor == 0 */ + { + DBUG_MULTI_RANGE(7); + /** + * Corresponds to range 5 in example in read_multi_range_first + */ + (void)1; + continue; + } + + DBUG_ASSERT(FALSE); // Should only get here via goto's +close_scan: + if (res == 1) + { + m_multi_cursor->close(FALSE, TRUE); + m_active_cursor= m_multi_cursor= 0; + DBUG_MULTI_RANGE(8); + continue; + } + else + { + DBUG_RETURN(ndb_err(m_active_trans)); + } + } + + if (multi_range_curr == multi_range_end) + { + Thd_ndb *thd_ndb= get_thd_ndb(current_thd); + thd_ndb->query_state&= NDB_QUERY_NORMAL; + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + + /** + * Read remaining ranges + */ + DBUG_RETURN(read_multi_range_first(multi_range_found_p, + multi_range_curr, + multi_range_end - multi_range_curr, + multi_range_sorted, + multi_range_buffer)); + +found: + /** + * Found a record belonging to a scan + */ + m_active_cursor= m_multi_cursor; + * multi_range_found_p= m_multi_ranges + range_no; + memcpy(table->record[0], m_multi_range_cursor_result_ptr, reclength); + setup_recattr(m_active_cursor->getFirstRecAttr()); + unpack_record(table->record[0]); + table->status= 0; + DBUG_RETURN(0); + +found_next: + /** + * Found a record belonging to a pk/index op, + * copy result and move to next to prepare for next call + */ + * multi_range_found_p= multi_range_curr; + memcpy(table->record[0], m_multi_range_result_ptr, reclength); + setup_recattr(op->getFirstRecAttr()); + unpack_record(table->record[0]); + table->status= 0; + + multi_range_curr++; + m_current_multi_operation= m_active_trans->getNextCompletedOperation(op); + m_multi_range_result_ptr += reclength; + DBUG_RETURN(0); +} + +int +ha_ndbcluster::setup_recattr(const NdbRecAttr* curr) +{ + DBUG_ENTER("setup_recattr"); + + Field **field, **end; + NdbValue *value= m_value; + + end= table->field + 
table->s->fields; + + for (field= table->field; field < end; field++, value++) + { + if ((* value).ptr) + { + DBUG_ASSERT(curr != 0); + NdbValue* val= m_value + curr->getColumn()->getColumnNo(); + DBUG_ASSERT(val->ptr); + val->rec= curr; + curr= curr->next(); + } + } + + DBUG_RETURN(0); +} + +char* +ha_ndbcluster::update_table_comment( + /* out: table comment + additional */ + const char* comment)/* in: table comment defined by user */ +{ + uint length= strlen(comment); + if (length > 64000 - 3) + { + return((char*)comment); /* string too long */ + } + + Ndb* ndb; + if (!(ndb= get_ndb())) + { + return((char*)comment); + } + + ndb->setDatabaseName(m_dbname); + NDBDICT* dict= ndb->getDictionary(); + const NDBTAB* tab; + if (!(tab= dict->getTable(m_tabname))) + { + return((char*)comment); + } + + char *str; + const char *fmt="%s%snumber_of_replicas: %d"; + const unsigned fmt_len_plus_extra= length + strlen(fmt); + if ((str= my_malloc(fmt_len_plus_extra, MYF(0))) == NULL) + { + return (char*)comment; + } + + my_snprintf(str,fmt_len_plus_extra,fmt,comment, + length > 0 ? 
" ":"", + tab->getReplicaCount()); + return str; +} + + +// Utility thread main loop +pthread_handler_t ndb_util_thread_func(void *arg __attribute__((unused))) +{ + THD *thd; /* needs to be first for thread_stack */ + Ndb* ndb; + struct timespec abstime; + + my_thread_init(); + DBUG_ENTER("ndb_util_thread"); + DBUG_PRINT("enter", ("ndb_cache_check_time: %lu", ndb_cache_check_time)); + + thd= new THD; /* note that contructor of THD uses DBUG_ */ + THD_CHECK_SENTRY(thd); + ndb= new Ndb(g_ndb_cluster_connection, ""); + + pthread_detach_this_thread(); + ndb_util_thread= pthread_self(); + + thd->thread_stack= (char*)&thd; /* remember where our stack is */ + if (thd->store_globals() || (ndb->init() != 0)) + { + thd->cleanup(); + delete thd; + delete ndb; + DBUG_RETURN(NULL); + } + + List<NDB_SHARE> util_open_tables; + set_timespec(abstime, 0); + for (;;) + { + + if (abort_loop) + break; /* Shutting down server */ + + pthread_mutex_lock(&LOCK_ndb_util_thread); + pthread_cond_timedwait(&COND_ndb_util_thread, + &LOCK_ndb_util_thread, + &abstime); + pthread_mutex_unlock(&LOCK_ndb_util_thread); + + DBUG_PRINT("ndb_util_thread", ("Started, ndb_cache_check_time: %lu", + ndb_cache_check_time)); + + if (abort_loop) + break; /* Shutting down server */ + + if (ndb_cache_check_time == 0) + { + /* Wake up in 1 second to check if value has changed */ + set_timespec(abstime, 1); + continue; + } + + /* Lock mutex and fill list with pointers to all open tables */ + NDB_SHARE *share; + pthread_mutex_lock(&ndbcluster_mutex); + for (uint i= 0; i < ndbcluster_open_tables.records; i++) + { + share= (NDB_SHARE *)hash_element(&ndbcluster_open_tables, i); + share->use_count++; /* Make sure the table can't be closed */ + DBUG_PRINT("ndb_util_thread", + ("Found open table[%d]: %s, use_count: %d", + i, share->table_name, share->use_count)); + + /* Store pointer to table */ + util_open_tables.push_back(share); + } + pthread_mutex_unlock(&ndbcluster_mutex); + + /* Iterate through the open files list 
*/ + List_iterator_fast<NDB_SHARE> it(util_open_tables); + while ((share= it++)) + { + /* Split tab- and dbname */ + char buf[FN_REFLEN]; + char *tabname, *db; + uint length= dirname_length(share->table_name); + tabname= share->table_name+length; + memcpy(buf, share->table_name, length-1); + buf[length-1]= 0; + db= buf+dirname_length(buf); + DBUG_PRINT("ndb_util_thread", + ("Fetching commit count for: %s", + share->table_name)); + + /* Contact NDB to get commit count for table */ + ndb->setDatabaseName(db); + struct Ndb_statistics stat; + + uint lock; + pthread_mutex_lock(&share->mutex); + lock= share->commit_count_lock; + pthread_mutex_unlock(&share->mutex); + + if (ndb_get_table_statistics(NULL, false, ndb, tabname, &stat) == 0) + { +#ifndef DBUG_OFF + char buff[22], buff2[22]; +#endif + DBUG_PRINT("ndb_util_thread", + ("Table: %s commit_count: %s rows: %s", + share->table_name, + llstr(stat.commit_count, buff), + llstr(stat.row_count, buff2))); + } + else + { + DBUG_PRINT("ndb_util_thread", + ("Error: Could not get commit count for table %s", + share->table_name)); + stat.commit_count= 0; + } + + pthread_mutex_lock(&share->mutex); + if (share->commit_count_lock == lock) + share->commit_count= stat.commit_count; + pthread_mutex_unlock(&share->mutex); + + /* Decrease the use count and possibly free share */ + free_share(share); + } + + /* Clear the list of open tables */ + util_open_tables.empty(); + + /* Calculate new time to wake up */ + int secs= 0; + int msecs= ndb_cache_check_time; + + struct timeval tick_time; + gettimeofday(&tick_time, 0); + abstime.tv_sec= tick_time.tv_sec; + abstime.tv_nsec= tick_time.tv_usec * 1000; + + if (msecs >= 1000){ + secs= msecs / 1000; + msecs= msecs % 1000; + } + + abstime.tv_sec+= secs; + abstime.tv_nsec+= msecs * 1000000; + if (abstime.tv_nsec >= 1000000000) { + abstime.tv_sec+= 1; + abstime.tv_nsec-= 1000000000; + } + } + + thd->cleanup(); + delete thd; + delete ndb; + DBUG_PRINT("exit", ("ndb_util_thread")); + 
my_thread_end(); + pthread_exit(0); + DBUG_RETURN(NULL); +} + +/* + Condition pushdown +*/ +/* + Push a condition to ndbcluster storage engine for evaluation + during table and index scans. The conditions will be stored on a stack + for possibly storing several conditions. The stack can be popped + by calling cond_pop, handler::extra(HA_EXTRA_RESET) (handler::reset()) + will clear the stack. + The current implementation supports arbitrary AND/OR nested conditions + with comparisons between columns and constants (including constant + expressions and function calls) and the following comparison operators: + =, !=, >, >=, <, <=, "is null", and "is not null". + + RETURN + NULL The condition was supported and will be evaluated for each + row found during the scan + cond The condition was not supported and all rows will be returned from + the scan for evaluation (and thus not saved on stack) +*/ +const +COND* +ha_ndbcluster::cond_push(const COND *cond) +{ + DBUG_ENTER("cond_push"); + Ndb_cond_stack *ndb_cond = new Ndb_cond_stack(); + DBUG_EXECUTE("where",print_where((COND *)cond, m_tabname);); + if (m_cond_stack) + ndb_cond->next= m_cond_stack; + else + ndb_cond->next= NULL; + m_cond_stack= ndb_cond; + + if (serialize_cond(cond, ndb_cond)) + { + DBUG_RETURN(NULL); + } + else + { + cond_pop(); + } + DBUG_RETURN(cond); +} + +/* + Pop the top condition from the condition stack of the handler instance. +*/ +void +ha_ndbcluster::cond_pop() +{ + Ndb_cond_stack *ndb_cond_stack= m_cond_stack; + if (ndb_cond_stack) + { + m_cond_stack= ndb_cond_stack->next; + delete ndb_cond_stack; + } +} + +/* + Clear the condition stack +*/ +void +ha_ndbcluster::cond_clear() +{ + DBUG_ENTER("cond_clear"); + while (m_cond_stack) + cond_pop(); + + DBUG_VOID_RETURN; +} + +/* + Serialize the item tree into a linked list represented by Ndb_cond + for fast generation of NbdScanFilter. Adds information such as + position of fields that is not directly available in the Item tree. 
+ Also checks if condition is supported. +*/ +void ndb_serialize_cond(const Item *item, void *arg) +{ + Ndb_cond_traverse_context *context= (Ndb_cond_traverse_context *) arg; + DBUG_ENTER("ndb_serialize_cond"); + + // Check if we are skipping arguments to a function to be evaluated + if (context->skip) + { + DBUG_PRINT("info", ("Skiping argument %d", context->skip)); + context->skip--; + switch (item->type()) { + case Item::FUNC_ITEM: + { + Item_func *func_item= (Item_func *) item; + context->skip+= func_item->argument_count(); + break; + } + case Item::INT_ITEM: + case Item::REAL_ITEM: + case Item::STRING_ITEM: + case Item::VARBIN_ITEM: + case Item::DECIMAL_ITEM: + break; + default: + context->supported= FALSE; + break; + } + + DBUG_VOID_RETURN; + } + + if (context->supported) + { + Ndb_rewrite_context *rewrite_context2= context->rewrite_stack; + const Item_func *rewrite_func_item; + // Check if we are rewriting some unsupported function call + if (rewrite_context2 && + (rewrite_func_item= rewrite_context2->func_item) && + rewrite_context2->count++ == 0) + { + switch (rewrite_func_item->functype()) { + case Item_func::BETWEEN: + /* + Rewrite + <field>|<const> BETWEEN <const1>|<field1> AND <const2>|<field2> + to <field>|<const> > <const1>|<field1> AND + <field>|<const> < <const2>|<field2> + or actually in prefix format + BEGIN(AND) GT(<field>|<const>, <const1>|<field1>), + LT(<field>|<const>, <const2>|<field2>), END() + */ + case Item_func::IN_FUNC: + { + /* + Rewrite <field>|<const> IN(<const1>|<field1>, <const2>|<field2>,..) + to <field>|<const> = <const1>|<field1> OR + <field> = <const2>|<field2> ... + or actually in prefix format + BEGIN(OR) EQ(<field>|<const>, <const1><field1>), + EQ(<field>|<const>, <const2>|<field2>), ... 
END() + Each part of the disjunction is added for each call + to ndb_serialize_cond and end of rewrite statement + is wrapped in end of ndb_serialize_cond + */ + if (context->expecting(item->type())) + { + // This is the <field>|<const> item, save it in the rewrite context + rewrite_context2->left_hand_item= item; + if (item->type() == Item::FUNC_ITEM) + { + Item_func *func_item= (Item_func *) item; + if (func_item->functype() == Item_func::UNKNOWN_FUNC && + func_item->const_item()) + { + // Skip any arguments since we will evaluate function instead + DBUG_PRINT("info", ("Skip until end of arguments marker")); + context->skip= func_item->argument_count(); + } + else + { + DBUG_PRINT("info", ("Found unsupported functional expression in BETWEEN|IN")); + context->supported= FALSE; + DBUG_VOID_RETURN; + + } + } + } + else + { + // Non-supported BETWEEN|IN expression + DBUG_PRINT("info", ("Found unexpected item of type %u in BETWEEN|IN", + item->type())); + context->supported= FALSE; + DBUG_VOID_RETURN; + } + break; + } + default: + context->supported= FALSE; + break; + } + DBUG_VOID_RETURN; + } + else + { + Ndb_cond_stack *ndb_stack= context->stack_ptr; + Ndb_cond *prev_cond= context->cond_ptr; + Ndb_cond *curr_cond= context->cond_ptr= new Ndb_cond(); + if (!ndb_stack->ndb_cond) + ndb_stack->ndb_cond= curr_cond; + curr_cond->prev= prev_cond; + if (prev_cond) prev_cond->next= curr_cond; + // Check if we are rewriting some unsupported function call + if (context->rewrite_stack) + { + Ndb_rewrite_context *rewrite_context= context->rewrite_stack; + const Item_func *func_item= rewrite_context->func_item; + switch (func_item->functype()) { + case Item_func::BETWEEN: + { + /* + Rewrite + <field>|<const> BETWEEN <const1>|<field1> AND <const2>|<field2> + to <field>|<const> > <const1>|<field1> AND + <field>|<const> < <const2>|<field2> + or actually in prefix format + BEGIN(AND) GT(<field>|<const>, <const1>|<field1>), + LT(<field>|<const>, <const2>|<field2>), END() + */ + if 
(rewrite_context->count == 2) + { + // Lower limit of BETWEEN + DBUG_PRINT("info", ("GE_FUNC")); + curr_cond->ndb_item= new Ndb_item(Item_func::GE_FUNC, 2); + } + else if (rewrite_context->count == 3) + { + // Upper limit of BETWEEN + DBUG_PRINT("info", ("LE_FUNC")); + curr_cond->ndb_item= new Ndb_item(Item_func::LE_FUNC, 2); + } + else + { + // Illegal BETWEEN expression + DBUG_PRINT("info", ("Illegal BETWEEN expression")); + context->supported= FALSE; + DBUG_VOID_RETURN; + } + break; + } + case Item_func::IN_FUNC: + { + /* + Rewrite <field>|<const> IN(<const1>|<field1>, <const2>|<field2>,..) + to <field>|<const> = <const1>|<field1> OR + <field> = <const2>|<field2> ... + or actually in prefix format + BEGIN(OR) EQ(<field>|<const>, <const1><field1>), + EQ(<field>|<const>, <const2>|<field2>), ... END() + Each part of the disjunction is added for each call + to ndb_serialize_cond and end of rewrite statement + is wrapped in end of ndb_serialize_cond + */ + DBUG_PRINT("info", ("EQ_FUNC")); + curr_cond->ndb_item= new Ndb_item(Item_func::EQ_FUNC, 2); + break; + } + default: + context->supported= FALSE; + } + // Handle left hand <field>|<const> + context->rewrite_stack= NULL; // Disable rewrite mode + context->expect_only(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + context->expect(Item::INT_ITEM); + context->expect(Item::STRING_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FUNC_ITEM); + ndb_serialize_cond(rewrite_context->left_hand_item, arg); + context->skip= 0; // Any FUNC_ITEM expression has already been parsed + context->rewrite_stack= rewrite_context; // Enable rewrite mode + if (!context->supported) + DBUG_VOID_RETURN; + + prev_cond= context->cond_ptr; + curr_cond= context->cond_ptr= new Ndb_cond(); + prev_cond->next= curr_cond; + } + + // Check for end of AND/OR expression + if 
(!item) + { + // End marker for condition group + DBUG_PRINT("info", ("End of condition group")); + curr_cond->ndb_item= new Ndb_item(NDB_END_COND); + } + else + { + switch (item->type()) { + case Item::FIELD_ITEM: + { + Item_field *field_item= (Item_field *) item; + Field *field= field_item->field; + enum_field_types type= field->type(); + /* + Check that the field is part of the table of the handler + instance and that we expect a field with of this result type. + */ + if (context->table == field->table) + { + const NDBTAB *tab= (const NDBTAB *) context->ndb_table; + DBUG_PRINT("info", ("FIELD_ITEM")); + DBUG_PRINT("info", ("table %s", tab->getName())); + DBUG_PRINT("info", ("column %s", field->field_name)); + DBUG_PRINT("info", ("type %d", field->type())); + DBUG_PRINT("info", ("result type %d", field->result_type())); + + // Check that we are expecting a field and with the correct + // result type + if (context->expecting(Item::FIELD_ITEM) && + context->expecting_field_type(field->type()) && + (context->expecting_field_result(field->result_type()) || + // Date and year can be written as string or int + ((type == MYSQL_TYPE_TIME || + type == MYSQL_TYPE_DATE || + type == MYSQL_TYPE_YEAR || + type == MYSQL_TYPE_DATETIME) + ? (context->expecting_field_result(STRING_RESULT) || + context->expecting_field_result(INT_RESULT)) + : true)) && + // Bit fields no yet supported in scan filter + type != MYSQL_TYPE_BIT && + // No BLOB support in scan filter + type != MYSQL_TYPE_TINY_BLOB && + type != MYSQL_TYPE_MEDIUM_BLOB && + type != MYSQL_TYPE_LONG_BLOB && + type != MYSQL_TYPE_BLOB) + { + const NDBCOL *col= tab->getColumn(field->field_name); + DBUG_ASSERT(col); + curr_cond->ndb_item= new Ndb_item(field, col->getColumnNo()); + context->dont_expect(Item::FIELD_ITEM); + context->expect_no_field_result(); + if (! 
context->expecting_nothing()) + { + // We have not seen second argument yet + if (type == MYSQL_TYPE_TIME || + type == MYSQL_TYPE_DATE || + type == MYSQL_TYPE_YEAR || + type == MYSQL_TYPE_DATETIME) + { + context->expect_only(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + } + else + switch (field->result_type()) { + case STRING_RESULT: + // Expect char string or binary string + context->expect_only(Item::STRING_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect_collation(field_item->collation.collation); + break; + case REAL_RESULT: + context->expect_only(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::INT_ITEM); + break; + case INT_RESULT: + context->expect_only(Item::INT_ITEM); + context->expect(Item::VARBIN_ITEM); + break; + case DECIMAL_RESULT: + context->expect_only(Item::DECIMAL_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::INT_ITEM); + break; + default: + break; + } + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + // Check that field and string constant collations are the same + if ((field->result_type() == STRING_RESULT) && + !context->expecting_collation(item->collation.collation) + && type != MYSQL_TYPE_TIME + && type != MYSQL_TYPE_DATE + && type != MYSQL_TYPE_YEAR + && type != MYSQL_TYPE_DATETIME) + { + DBUG_PRINT("info", ("Found non-matching collation %s", + item->collation.collation->name)); + context->supported= FALSE; + } + } + break; + } + else + { + DBUG_PRINT("info", ("Was not expecting field of type %u(%u)", + field->result_type(), type)); + context->supported= FALSE; + } + } + else + { + DBUG_PRINT("info", ("Was not expecting field from table %s(%s)", + context->table->s->table_name, + field->table->s->table_name)); + context->supported= FALSE; + } + break; + } + case Item::FUNC_ITEM: + { + Item_func *func_item= (Item_func *) item; + // Check that we expect a function or functional 
expression here + if (context->expecting(Item::FUNC_ITEM) || + func_item->functype() == Item_func::UNKNOWN_FUNC) + context->expect_nothing(); + else + { + // Did not expect function here + context->supported= FALSE; + break; + } + + switch (func_item->functype()) { + case Item_func::EQ_FUNC: + { + DBUG_PRINT("info", ("EQ_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case Item_func::NE_FUNC: + { + DBUG_PRINT("info", ("NE_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case Item_func::LT_FUNC: + { + DBUG_PRINT("info", ("LT_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case Item_func::LE_FUNC: + { + DBUG_PRINT("info", 
("LE_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case Item_func::GE_FUNC: + { + DBUG_PRINT("info", ("GE_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case Item_func::GT_FUNC: + { + DBUG_PRINT("info", ("GT_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::REAL_ITEM); + context->expect(Item::DECIMAL_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case Item_func::LIKE_FUNC: + { + DBUG_PRINT("info", ("LIKE_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::STRING_ITEM); + context->expect(Item::FIELD_ITEM); + context->expect_only_field_type(MYSQL_TYPE_STRING); + context->expect_field_type(MYSQL_TYPE_VAR_STRING); + context->expect_field_type(MYSQL_TYPE_VARCHAR); 
+ context->expect_field_result(STRING_RESULT); + context->expect(Item::FUNC_ITEM); + break; + } + case Item_func::ISNULL_FUNC: + { + DBUG_PRINT("info", ("ISNULL_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case Item_func::ISNOTNULL_FUNC: + { + DBUG_PRINT("info", ("ISNOTNULL_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::FIELD_ITEM); + context->expect_field_result(STRING_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(INT_RESULT); + context->expect_field_result(DECIMAL_RESULT); + break; + } + case Item_func::NOT_FUNC: + { + DBUG_PRINT("info", ("NOT_FUNC")); + curr_cond->ndb_item= new Ndb_item(func_item->functype(), + func_item); + context->expect(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + break; + } + case Item_func::BETWEEN: + { + DBUG_PRINT("info", ("BETWEEN, rewriting using AND")); + Item_func_between *between_func= (Item_func_between *) func_item; + Ndb_rewrite_context *rewrite_context= + new Ndb_rewrite_context(func_item); + rewrite_context->next= context->rewrite_stack; + context->rewrite_stack= rewrite_context; + if (between_func->negated) + { + DBUG_PRINT("info", ("NOT_FUNC")); + curr_cond->ndb_item= new Ndb_item(Item_func::NOT_FUNC, 1); + prev_cond= curr_cond; + curr_cond= context->cond_ptr= new Ndb_cond(); + curr_cond->prev= prev_cond; + prev_cond->next= curr_cond; + } + DBUG_PRINT("info", ("COND_AND_FUNC")); + curr_cond->ndb_item= + new Ndb_item(Item_func::COND_AND_FUNC, + func_item->argument_count() - 1); + context->expect_only(Item::FIELD_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::STRING_ITEM); + context->expect(Item::VARBIN_ITEM); + 
context->expect(Item::FUNC_ITEM); + break; + } + case Item_func::IN_FUNC: + { + DBUG_PRINT("info", ("IN_FUNC, rewriting using OR")); + Item_func_in *in_func= (Item_func_in *) func_item; + Ndb_rewrite_context *rewrite_context= + new Ndb_rewrite_context(func_item); + rewrite_context->next= context->rewrite_stack; + context->rewrite_stack= rewrite_context; + if (in_func->negated) + { + DBUG_PRINT("info", ("NOT_FUNC")); + curr_cond->ndb_item= new Ndb_item(Item_func::NOT_FUNC, 1); + prev_cond= curr_cond; + curr_cond= context->cond_ptr= new Ndb_cond(); + curr_cond->prev= prev_cond; + prev_cond->next= curr_cond; + } + DBUG_PRINT("info", ("COND_OR_FUNC")); + curr_cond->ndb_item= new Ndb_item(Item_func::COND_OR_FUNC, + func_item->argument_count() - 1); + context->expect_only(Item::FIELD_ITEM); + context->expect(Item::INT_ITEM); + context->expect(Item::STRING_ITEM); + context->expect(Item::VARBIN_ITEM); + context->expect(Item::FUNC_ITEM); + break; + } + case Item_func::UNKNOWN_FUNC: + { + DBUG_PRINT("info", ("UNKNOWN_FUNC %s", + func_item->const_item()?"const":"")); + DBUG_PRINT("info", ("result type %d", func_item->result_type())); + if (func_item->const_item()) + { + switch (func_item->result_type()) { + case STRING_RESULT: + { + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::STRING_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (! 
context->expecting_no_field_result()) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(STRING_RESULT); + context->expect_collation(func_item->collation.collation); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + // Check that string result have correct collation + if (!context->expecting_collation(item->collation.collation)) + { + DBUG_PRINT("info", ("Found non-matching collation %s", + item->collation.collation->name)); + context->supported= FALSE; + } + } + // Skip any arguments since we will evaluate function instead + DBUG_PRINT("info", ("Skip until end of arguments marker")); + context->skip= func_item->argument_count(); + break; + } + case REAL_RESULT: + { + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::REAL_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (! context->expecting_no_field_result()) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(REAL_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + + // Skip any arguments since we will evaluate function instead + DBUG_PRINT("info", ("Skip until end of arguments marker")); + context->skip= func_item->argument_count(); + break; + } + case INT_RESULT: + { + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::INT_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (! 
context->expecting_no_field_result()) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(INT_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + + // Skip any arguments since we will evaluate function instead + DBUG_PRINT("info", ("Skip until end of arguments marker")); + context->skip= func_item->argument_count(); + break; + } + case DECIMAL_RESULT: + { + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::DECIMAL_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (! context->expecting_no_field_result()) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(DECIMAL_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + // Skip any arguments since we will evaluate function instead + DBUG_PRINT("info", ("Skip until end of arguments marker")); + context->skip= func_item->argument_count(); + break; + } + default: + break; + } + } + else + // Function does not return constant expression + context->supported= FALSE; + break; + } + default: + { + DBUG_PRINT("info", ("Found func_item of type %d", + func_item->functype())); + context->supported= FALSE; + } + } + break; + } + case Item::STRING_ITEM: + DBUG_PRINT("info", ("STRING_ITEM")); + if (context->expecting(Item::STRING_ITEM)) + { +#ifndef DBUG_OFF + char buff[256]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + str.length(0); + Item_string *string_item= (Item_string *) item; + DBUG_PRINT("info", ("value \"%s\"", + string_item->val_str(&str)->ptr())); +#endif + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::STRING_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (! 
context->expecting_no_field_result()) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(STRING_RESULT); + context->expect_collation(item->collation.collation); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + // Check that we are comparing with a field with same collation + if (!context->expecting_collation(item->collation.collation)) + { + DBUG_PRINT("info", ("Found non-matching collation %s", + item->collation.collation->name)); + context->supported= FALSE; + } + } + } + else + context->supported= FALSE; + break; + case Item::INT_ITEM: + DBUG_PRINT("info", ("INT_ITEM")); + if (context->expecting(Item::INT_ITEM)) + { + DBUG_PRINT("info", ("value %ld", + (long) ((Item_int*) item)->value)); + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::INT_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (! context->expecting_no_field_result()) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(INT_RESULT); + context->expect_field_result(REAL_RESULT); + context->expect_field_result(DECIMAL_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + } + else + context->supported= FALSE; + break; + case Item::REAL_ITEM: + DBUG_PRINT("info", ("REAL_ITEM")); + if (context->expecting(Item::REAL_ITEM)) + { + DBUG_PRINT("info", ("value %f", ((Item_float *) item)->value)); + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::REAL_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (! 
context->expecting_no_field_result()) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(REAL_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + } + else + context->supported= FALSE; + break; + case Item::VARBIN_ITEM: + DBUG_PRINT("info", ("VARBIN_ITEM")); + if (context->expecting(Item::VARBIN_ITEM)) + { + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::VARBIN_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (! context->expecting_no_field_result()) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(STRING_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + } + else + context->supported= FALSE; + break; + case Item::DECIMAL_ITEM: + DBUG_PRINT("info", ("DECIMAL_ITEM")); + if (context->expecting(Item::DECIMAL_ITEM)) + { + DBUG_PRINT("info", ("value %f", + ((Item_decimal*) item)->val_real())); + NDB_ITEM_QUALIFICATION q; + q.value_type= Item::DECIMAL_ITEM; + curr_cond->ndb_item= new Ndb_item(NDB_VALUE, q, item); + if (! 
context->expecting_no_field_result()) + { + // We have not seen the field argument yet + context->expect_only(Item::FIELD_ITEM); + context->expect_only_field_result(REAL_RESULT); + context->expect_field_result(DECIMAL_RESULT); + } + else + { + // Expect another logical expression + context->expect_only(Item::FUNC_ITEM); + context->expect(Item::COND_ITEM); + } + } + else + context->supported= FALSE; + break; + case Item::COND_ITEM: + { + Item_cond *cond_item= (Item_cond *) item; + + if (context->expecting(Item::COND_ITEM)) + { + switch (cond_item->functype()) { + case Item_func::COND_AND_FUNC: + DBUG_PRINT("info", ("COND_AND_FUNC")); + curr_cond->ndb_item= new Ndb_item(cond_item->functype(), + cond_item); + break; + case Item_func::COND_OR_FUNC: + DBUG_PRINT("info", ("COND_OR_FUNC")); + curr_cond->ndb_item= new Ndb_item(cond_item->functype(), + cond_item); + break; + default: + DBUG_PRINT("info", ("COND_ITEM %d", cond_item->functype())); + context->supported= FALSE; + break; + } + } + else + { + /* Did not expect condition */ + context->supported= FALSE; + } + break; + } + default: + { + DBUG_PRINT("info", ("Found item of type %d", item->type())); + context->supported= FALSE; + } + } + } + if (context->supported && context->rewrite_stack) + { + Ndb_rewrite_context *rewrite_context= context->rewrite_stack; + if (rewrite_context->count == + rewrite_context->func_item->argument_count()) + { + // Rewrite is done, wrap an END() at the en + DBUG_PRINT("info", ("End of condition group")); + prev_cond= curr_cond; + curr_cond= context->cond_ptr= new Ndb_cond(); + curr_cond->prev= prev_cond; + prev_cond->next= curr_cond; + curr_cond->ndb_item= new Ndb_item(NDB_END_COND); + // Pop rewrite stack + context->rewrite_stack= rewrite_context->next; + rewrite_context->next= NULL; + delete(rewrite_context); + } + } + } + } + + DBUG_VOID_RETURN; +} + +bool +ha_ndbcluster::serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond) +{ + DBUG_ENTER("serialize_cond"); + Item *item= (Item 
*) cond; + Ndb_cond_traverse_context context(table, (void *)m_table, ndb_cond); + // Expect a logical expression + context.expect(Item::FUNC_ITEM); + context.expect(Item::COND_ITEM); + item->traverse_cond(&ndb_serialize_cond, (void *) &context, Item::PREFIX); + DBUG_PRINT("info", ("The pushed condition is %ssupported", (context.supported)?"":"not ")); + + DBUG_RETURN(context.supported); +} + +int +ha_ndbcluster::build_scan_filter_predicate(Ndb_cond * &cond, + NdbScanFilter *filter, + bool negated) +{ + DBUG_ENTER("build_scan_filter_predicate"); + switch (cond->ndb_item->type) { + case NDB_FUNCTION: + { + if (!cond->next) + break; + Ndb_item *a= cond->next->ndb_item; + Ndb_item *b, *field, *value= NULL; + LINT_INIT(field); + + switch (cond->ndb_item->argument_count()) { + case 1: + field= + (a->type == NDB_FIELD)? a : NULL; + break; + case 2: + if (!cond->next->next) + break; + b= cond->next->next->ndb_item; + value= + (a->type == NDB_VALUE)? a + : (b->type == NDB_VALUE)? b + : NULL; + field= + (a->type == NDB_FIELD)? a + : (b->type == NDB_FIELD)? b + : NULL; + break; + default: + break; + } + switch ((negated) ? 
+ Ndb_item::negate(cond->ndb_item->qualification.function_type) + : cond->ndb_item->qualification.function_type) { + case NDB_EQ_FUNC: + { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + DBUG_PRINT("info", ("Generating EQ filter")); + if (filter->cmp(NdbScanFilter::COND_EQ, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case NDB_NE_FUNC: + { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + DBUG_PRINT("info", ("Generating NE filter")); + if (filter->cmp(NdbScanFilter::COND_NE, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case NDB_LT_FUNC: + { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + if (a == field) + { + DBUG_PRINT("info", ("Generating LT filter")); + if (filter->cmp(NdbScanFilter::COND_LT, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Generating GT filter")); + if (filter->cmp(NdbScanFilter::COND_GT, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case NDB_LE_FUNC: + { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + if (a == field) + { + DBUG_PRINT("info", ("Generating LE filter")); + if (filter->cmp(NdbScanFilter::COND_LE, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Generating GE filter")); + if (filter->cmp(NdbScanFilter::COND_GE, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + 
DBUG_RETURN(1); + } + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case NDB_GE_FUNC: + { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + if (a == field) + { + DBUG_PRINT("info", ("Generating GE filter")); + if (filter->cmp(NdbScanFilter::COND_GE, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Generating LE filter")); + if (filter->cmp(NdbScanFilter::COND_LE, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case NDB_GT_FUNC: + { + if (!value || !field) break; + // Save value in right format for the field type + value->save_in_field(field); + if (a == field) + { + DBUG_PRINT("info", ("Generating GT filter")); + if (filter->cmp(NdbScanFilter::COND_GT, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Generating LT filter")); + if (filter->cmp(NdbScanFilter::COND_LT, + field->get_field_no(), + field->get_val(), + field->pack_length()) == -1) + DBUG_RETURN(1); + } + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case NDB_LIKE_FUNC: + { + if (!value || !field) break; + if ((value->qualification.value_type != Item::STRING_ITEM) && + (value->qualification.value_type != Item::VARBIN_ITEM)) + break; + // Save value in right format for the field type + value->save_in_field(field); + DBUG_PRINT("info", ("Generating LIKE filter: like(%d,%s,%d)", + field->get_field_no(), value->get_val(), + value->pack_length())); + if (filter->cmp(NdbScanFilter::COND_LIKE, + field->get_field_no(), + value->get_val(), + value->pack_length()) == -1) + DBUG_RETURN(1); + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case NDB_NOTLIKE_FUNC: + { + if (!value || !field) break; + if ((value->qualification.value_type != Item::STRING_ITEM) && + 
(value->qualification.value_type != Item::VARBIN_ITEM)) + break; + // Save value in right format for the field type + value->save_in_field(field); + DBUG_PRINT("info", ("Generating NOTLIKE filter: notlike(%d,%s,%d)", + field->get_field_no(), value->get_val(), + value->pack_length())); + if (filter->cmp(NdbScanFilter::COND_NOT_LIKE, + field->get_field_no(), + value->get_val(), + value->pack_length()) == -1) + DBUG_RETURN(1); + cond= cond->next->next->next; + DBUG_RETURN(0); + } + case NDB_ISNULL_FUNC: + if (!field) + break; + DBUG_PRINT("info", ("Generating ISNULL filter")); + if (filter->isnull(field->get_field_no()) == -1) + DBUG_RETURN(1); + cond= cond->next->next; + DBUG_RETURN(0); + case NDB_ISNOTNULL_FUNC: + { + if (!field) + break; + DBUG_PRINT("info", ("Generating ISNOTNULL filter")); + if (filter->isnotnull(field->get_field_no()) == -1) + DBUG_RETURN(1); + cond= cond->next->next; + DBUG_RETURN(0); + } + default: + break; + } + break; + } + default: + break; + } + DBUG_PRINT("info", ("Found illegal condition")); + DBUG_RETURN(1); +} + + +int +ha_ndbcluster::build_scan_filter_group(Ndb_cond* &cond, NdbScanFilter *filter) +{ + uint level=0; + bool negated= FALSE; + DBUG_ENTER("build_scan_filter_group"); + + do + { + if (!cond) + DBUG_RETURN(1); + switch (cond->ndb_item->type) { + case NDB_FUNCTION: + { + switch (cond->ndb_item->qualification.function_type) { + case NDB_COND_AND_FUNC: + { + level++; + DBUG_PRINT("info", ("Generating %s group %u", (negated)?"NAND":"AND", + level)); + if ((negated) ? filter->begin(NdbScanFilter::NAND) + : filter->begin(NdbScanFilter::AND) == -1) + DBUG_RETURN(1); + negated= FALSE; + cond= cond->next; + break; + } + case NDB_COND_OR_FUNC: + { + level++; + DBUG_PRINT("info", ("Generating %s group %u", (negated)?"NOR":"OR", + level)); + if ((negated) ? 
filter->begin(NdbScanFilter::NOR) + : filter->begin(NdbScanFilter::OR) == -1) + DBUG_RETURN(1); + negated= FALSE; + cond= cond->next; + break; + } + case NDB_NOT_FUNC: + { + DBUG_PRINT("info", ("Generating negated query")); + cond= cond->next; + negated= TRUE; + break; + } + default: + if (build_scan_filter_predicate(cond, filter, negated)) + DBUG_RETURN(1); + negated= FALSE; + break; + } + break; + } + case NDB_END_COND: + DBUG_PRINT("info", ("End of group %u", level)); + level--; + if (cond) cond= cond->next; + if (filter->end() == -1) + DBUG_RETURN(1); + if (!negated) + break; + // else fall through (NOT END is an illegal condition) + default: + { + DBUG_PRINT("info", ("Illegal scan filter")); + } + } + } while (level > 0 || negated); + + DBUG_RETURN(0); +} + + +int +ha_ndbcluster::build_scan_filter(Ndb_cond * &cond, NdbScanFilter *filter) +{ + bool simple_cond= TRUE; + DBUG_ENTER("build_scan_filter"); + + switch (cond->ndb_item->type) { + case NDB_FUNCTION: + switch (cond->ndb_item->qualification.function_type) { + case NDB_COND_AND_FUNC: + case NDB_COND_OR_FUNC: + simple_cond= FALSE; + break; + default: + break; + } + break; + default: + break; + } + if (simple_cond && filter->begin() == -1) + DBUG_RETURN(1); + if (build_scan_filter_group(cond, filter)) + DBUG_RETURN(1); + if (simple_cond && filter->end() == -1) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + +int +ha_ndbcluster::generate_scan_filter(Ndb_cond_stack *ndb_cond_stack, + NdbScanOperation *op) +{ + DBUG_ENTER("generate_scan_filter"); + + if (ndb_cond_stack) + { + NdbScanFilter filter(op); + + DBUG_RETURN(generate_scan_filter_from_cond(ndb_cond_stack, filter)); + } + else + { + DBUG_PRINT("info", ("Empty stack")); + } + + DBUG_RETURN(0); +} + + +int +ha_ndbcluster::generate_scan_filter_from_cond(Ndb_cond_stack *ndb_cond_stack, + NdbScanFilter& filter) +{ + bool multiple_cond= FALSE; + DBUG_ENTER("generate_scan_filter_from_cond"); + + // Wrap an AND group around multiple conditions + if 
(ndb_cond_stack->next) + { + multiple_cond= TRUE; + if (filter.begin() == -1) + DBUG_RETURN(1); + } + for (Ndb_cond_stack *stack= ndb_cond_stack; + (stack); + stack= stack->next) + { + Ndb_cond *cond= stack->ndb_cond; + + if (build_scan_filter(cond, &filter)) + { + DBUG_PRINT("info", ("build_scan_filter failed")); + DBUG_RETURN(1); + } + } + if (multiple_cond && filter.end() == -1) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + + +int ha_ndbcluster::generate_scan_filter_from_key(NdbScanOperation *op, + const KEY* key_info, + const byte *key, + uint key_len, + byte *buf) +{ + KEY_PART_INFO* key_part= key_info->key_part; + KEY_PART_INFO* end= key_part+key_info->key_parts; + NdbScanFilter filter(op); + int res; + DBUG_ENTER("generate_scan_filter_from_key"); + + filter.begin(NdbScanFilter::AND); + for (; key_part != end; key_part++) + { + Field* field= key_part->field; + uint32 pack_len= field->pack_length(); + const byte* ptr= key; + DBUG_PRINT("info", ("Filtering value for %s", field->field_name)); + DBUG_DUMP("key", (char*)ptr, pack_len); + if (key_part->null_bit) + { + DBUG_PRINT("info", ("Generating ISNULL filter")); + if (filter.isnull(key_part->fieldnr-1) == -1) + DBUG_RETURN(1); + } + else + { + DBUG_PRINT("info", ("Generating EQ filter")); + if (filter.cmp(NdbScanFilter::COND_EQ, + key_part->fieldnr-1, + ptr, + pack_len) == -1) + DBUG_RETURN(1); + } + key += key_part->store_length; + } + // Add any pushed condition + if (m_cond_stack && + (res= generate_scan_filter_from_cond(m_cond_stack, filter))) + DBUG_RETURN(res); + + if (filter.end() == -1) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + int ndbcluster_show_status(THD* thd) { Protocol *protocol= thd->protocol; - DBUG_ENTER("ndbcluster_show_status"); if (have_ndbcluster != SHOW_OPTION_YES) { my_message(ER_NOT_SUPPORTED_YET, - "Cannot call SHOW NDBCLUSTER STATUS because skip-ndbcluster is defined", + "Cannot call SHOW NDBCLUSTER STATUS because skip-ndbcluster is " + "defined", MYF(0)); DBUG_RETURN(TRUE); } @@ 
-5461,14 +8224,15 @@ ndbcluster_show_status(THD* thd) field_list.push_back(new Item_return_int("free", 10,MYSQL_TYPE_LONG)); field_list.push_back(new Item_return_int("sizeof", 10,MYSQL_TYPE_LONG)); - if (protocol->send_fields(&field_list, 1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(TRUE); - if (thd->transaction.thd_ndb && - ((Thd_ndb*)thd->transaction.thd_ndb)->ndb) + if (get_thd_ndb(thd) && get_thd_ndb(thd)->ndb) { - Ndb* ndb= ((Thd_ndb*)thd->transaction.thd_ndb)->ndb; - Ndb::Free_list_usage tmp; tmp.m_name= 0; + Ndb* ndb= (get_thd_ndb(thd))->ndb; + Ndb::Free_list_usage tmp; + tmp.m_name= 0; while (ndb->get_free_list_usage(&tmp)) { protocol->prepare_for_resend(); diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index e3dbf5e26d0..274dc53e547 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -31,15 +30,16 @@ class Ndb; // Forward declaration class NdbOperation; // Forward declaration -class NdbConnection; // Forward declaration +class NdbTransaction; // Forward declaration class NdbRecAttr; // Forward declaration -class NdbResultSet; // Forward declaration class NdbScanOperation; +class NdbScanFilter; class NdbIndexScanOperation; class NdbBlob; // connectstring to cluster if given by mysqld extern const char *ndbcluster_connectstring; +extern ulong ndb_cache_check_time; typedef enum ndb_index_type { UNDEFINED_INDEX = 0, @@ -55,6 +55,7 @@ typedef struct ndb_index_data { void *index; void *unique_index; unsigned char *unique_index_attrid_map; + bool null_in_unique_index; } NDB_INDEX_DATA; typedef struct st_ndbcluster_share { @@ -62,20 +63,443 @@ typedef struct st_ndbcluster_share { pthread_mutex_t mutex; char *table_name; uint table_name_length,use_count; + uint commit_count_lock; + ulonglong commit_count; } NDB_SHARE; +typedef enum ndb_item_type { + NDB_VALUE = 0, // Qualified more with Item::Type + NDB_FIELD = 1, // Qualified from table definition + NDB_FUNCTION = 2,// Qualified from Item_func::Functype + NDB_END_COND = 3 // End marker for condition group +} NDB_ITEM_TYPE; + +typedef enum ndb_func_type { + NDB_EQ_FUNC = 0, + NDB_NE_FUNC = 1, + NDB_LT_FUNC = 2, + NDB_LE_FUNC = 3, + NDB_GT_FUNC = 4, + NDB_GE_FUNC = 5, + NDB_ISNULL_FUNC = 6, + NDB_ISNOTNULL_FUNC = 7, + NDB_LIKE_FUNC = 8, + NDB_NOTLIKE_FUNC = 9, + NDB_NOT_FUNC = 10, + NDB_UNKNOWN_FUNC = 11, + NDB_COND_AND_FUNC = 12, + NDB_COND_OR_FUNC = 13, + NDB_UNSUPPORTED_FUNC = 14 +} NDB_FUNC_TYPE; + +typedef union ndb_item_qualification { + Item::Type value_type; + enum_field_types field_type; // Instead of Item::FIELD_ITEM + NDB_FUNC_TYPE function_type; // Instead of Item::FUNC_ITEM +} NDB_ITEM_QUALIFICATION; + +typedef struct ndb_item_field_value { + Field* field; + int 
column_no; +} NDB_ITEM_FIELD_VALUE; + +typedef union ndb_item_value { + const Item *item; + NDB_ITEM_FIELD_VALUE *field_value; + uint arg_count; +} NDB_ITEM_VALUE; + +struct negated_function_mapping +{ + NDB_FUNC_TYPE pos_fun; + NDB_FUNC_TYPE neg_fun; +}; + +/* + Define what functions can be negated in condition pushdown. + Note, these HAVE to be in the same order as in definition enum +*/ +static const negated_function_mapping neg_map[]= +{ + {NDB_EQ_FUNC, NDB_NE_FUNC}, + {NDB_NE_FUNC, NDB_EQ_FUNC}, + {NDB_LT_FUNC, NDB_GE_FUNC}, + {NDB_LE_FUNC, NDB_GT_FUNC}, + {NDB_GT_FUNC, NDB_LE_FUNC}, + {NDB_GE_FUNC, NDB_LT_FUNC}, + {NDB_ISNULL_FUNC, NDB_ISNOTNULL_FUNC}, + {NDB_ISNOTNULL_FUNC, NDB_ISNULL_FUNC}, + {NDB_LIKE_FUNC, NDB_NOTLIKE_FUNC}, + {NDB_NOTLIKE_FUNC, NDB_LIKE_FUNC}, + {NDB_NOT_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_UNKNOWN_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_COND_AND_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_COND_OR_FUNC, NDB_UNSUPPORTED_FUNC}, + {NDB_UNSUPPORTED_FUNC, NDB_UNSUPPORTED_FUNC} +}; + +/* + This class is the construction element for serialization of Item tree + in condition pushdown. + An instance of Ndb_Item represents a constant, table field reference, + unary or binary comparison predicate, and start/end of AND/OR. + Instances of Ndb_Item are stored in a linked list implemented by Ndb_cond + class. + The order of elements produced by Ndb_cond::next corresponds to + breadth-first traversal of the Item (i.e. expression) tree in prefix order. + AND and OR have arbitrary arity, so the end of AND/OR group is marked with + Ndb_item with type == NDB_END_COND. + NOT items represent negated conditions and generate NAND/NOR groups. 
+*/ +class Ndb_item { + public: + Ndb_item(NDB_ITEM_TYPE item_type) : type(item_type) {}; + Ndb_item(NDB_ITEM_TYPE item_type, + NDB_ITEM_QUALIFICATION item_qualification, + const Item *item_value) + : type(item_type), qualification(item_qualification) + { + switch(item_type) { + case(NDB_VALUE): + value.item= item_value; + break; + case(NDB_FIELD): { + NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE(); + Item_field *field_item= (Item_field *) item_value; + field_value->field= field_item->field; + field_value->column_no= -1; // Will be fetched at scan filter generation + value.field_value= field_value; + break; + } + case(NDB_FUNCTION): + value.item= item_value; + value.arg_count= ((Item_func *) item_value)->argument_count(); + break; + case(NDB_END_COND): + break; + } + }; + Ndb_item(Field *field, int column_no) : type(NDB_FIELD) + { + NDB_ITEM_FIELD_VALUE *field_value= new NDB_ITEM_FIELD_VALUE(); + qualification.field_type= field->type(); + field_value->field= field; + field_value->column_no= column_no; + value.field_value= field_value; + }; + Ndb_item(Item_func::Functype func_type, const Item *item_value) + : type(NDB_FUNCTION) + { + qualification.function_type= item_func_to_ndb_func(func_type); + value.item= item_value; + value.arg_count= ((Item_func *) item_value)->argument_count(); + }; + Ndb_item(Item_func::Functype func_type, uint no_args) + : type(NDB_FUNCTION) + { + qualification.function_type= item_func_to_ndb_func(func_type); + value.arg_count= no_args; + }; + ~Ndb_item() + { + if (type == NDB_FIELD) + { + delete value.field_value; + value.field_value= NULL; + } + }; + + uint32 pack_length() + { + switch(type) { + case(NDB_VALUE): + if(qualification.value_type == Item::STRING_ITEM) + return value.item->str_value.length(); + break; + case(NDB_FIELD): + return value.field_value->field->pack_length(); + default: + break; + } + + return 0; + }; + + Field * get_field() { return value.field_value->field; }; + + int get_field_no() { return 
value.field_value->column_no; }; + + int argument_count() + { + return value.arg_count; + }; + + const char* get_val() + { + switch(type) { + case(NDB_VALUE): + if(qualification.value_type == Item::STRING_ITEM) + return value.item->str_value.ptr(); + break; + case(NDB_FIELD): + return value.field_value->field->ptr; + default: + break; + } + + return NULL; + }; + + void save_in_field(Ndb_item *field_item) + { + Field *field = field_item->value.field_value->field; + const Item *item= value.item; + + if (item && field) + ((Item *)item)->save_in_field(field, false); + }; + + static NDB_FUNC_TYPE item_func_to_ndb_func(Item_func::Functype fun) + { + switch (fun) { + case (Item_func::EQ_FUNC): { return NDB_EQ_FUNC; } + case (Item_func::NE_FUNC): { return NDB_NE_FUNC; } + case (Item_func::LT_FUNC): { return NDB_LT_FUNC; } + case (Item_func::LE_FUNC): { return NDB_LE_FUNC; } + case (Item_func::GT_FUNC): { return NDB_GT_FUNC; } + case (Item_func::GE_FUNC): { return NDB_GE_FUNC; } + case (Item_func::ISNULL_FUNC): { return NDB_ISNULL_FUNC; } + case (Item_func::ISNOTNULL_FUNC): { return NDB_ISNOTNULL_FUNC; } + case (Item_func::LIKE_FUNC): { return NDB_LIKE_FUNC; } + case (Item_func::NOT_FUNC): { return NDB_NOT_FUNC; } + case (Item_func::UNKNOWN_FUNC): { return NDB_UNKNOWN_FUNC; } + case (Item_func::COND_AND_FUNC): { return NDB_COND_AND_FUNC; } + case (Item_func::COND_OR_FUNC): { return NDB_COND_OR_FUNC; } + default: { return NDB_UNSUPPORTED_FUNC; } + } + }; + + static NDB_FUNC_TYPE negate(NDB_FUNC_TYPE fun) + { + uint i= (uint) fun; + DBUG_ASSERT(fun == neg_map[i].pos_fun); + return neg_map[i].neg_fun; + }; + + NDB_ITEM_TYPE type; + NDB_ITEM_QUALIFICATION qualification; + private: + NDB_ITEM_VALUE value; +}; + +/* + This class implements a linked list used for storing a + serialization of the Item tree for condition pushdown. 
+ */ +class Ndb_cond +{ + public: + Ndb_cond() : ndb_item(NULL), next(NULL), prev(NULL) {}; + ~Ndb_cond() + { + if (ndb_item) delete ndb_item; + ndb_item= NULL; + if (next) delete next; + next= prev= NULL; + }; + Ndb_item *ndb_item; + Ndb_cond *next; + Ndb_cond *prev; +}; + +/* + This class implements a stack for storing several conditions + for pushdown (represented as serialized Item trees using Ndb_cond). + The current implementation only pushes one condition, but is + prepared for handling several (C1 AND C2 ...) if the logic for + pushing conditions is extended in sql_select. +*/ +class Ndb_cond_stack +{ + public: + Ndb_cond_stack() : ndb_cond(NULL), next(NULL) {}; + ~Ndb_cond_stack() + { + if (ndb_cond) delete ndb_cond; + ndb_cond= NULL; + if (next) delete next; + next= NULL; + }; + Ndb_cond *ndb_cond; + Ndb_cond_stack *next; +}; + +class Ndb_rewrite_context +{ +public: + Ndb_rewrite_context(Item_func *func) + : func_item(func), left_hand_item(NULL), count(0) {}; + ~Ndb_rewrite_context() + { + if (next) delete next; + } + const Item_func *func_item; + const Item *left_hand_item; + uint count; + Ndb_rewrite_context *next; +}; + +/* + This class is used for storing the context when traversing + the Item tree. It stores a reference to the table the condition + is defined on, the serialized representation being generated, + if the condition found is supported, and information what is + expected next in the tree inorder for the condition to be supported. 
+*/ +class Ndb_cond_traverse_context +{ + public: + Ndb_cond_traverse_context(TABLE *tab, void* ndb_tab, Ndb_cond_stack* stack) + : table(tab), ndb_table(ndb_tab), + supported(TRUE), stack_ptr(stack), cond_ptr(NULL), + skip(0), collation(NULL), rewrite_stack(NULL) + { + // Allocate type checking bitmaps + bitmap_init(&expect_mask, 0, 512, FALSE); + bitmap_init(&expect_field_type_mask, 0, 512, FALSE); + bitmap_init(&expect_field_result_mask, 0, 512, FALSE); + + if (stack) + cond_ptr= stack->ndb_cond; + }; + ~Ndb_cond_traverse_context() + { + bitmap_free(&expect_mask); + bitmap_free(&expect_field_type_mask); + bitmap_free(&expect_field_result_mask); + if (rewrite_stack) delete rewrite_stack; + } + void expect(Item::Type type) + { + bitmap_set_bit(&expect_mask, (uint) type); + if (type == Item::FIELD_ITEM) expect_all_field_types(); + }; + void dont_expect(Item::Type type) + { + bitmap_clear_bit(&expect_mask, (uint) type); + }; + bool expecting(Item::Type type) + { + return bitmap_is_set(&expect_mask, (uint) type); + }; + void expect_nothing() + { + bitmap_clear_all(&expect_mask); + }; + bool expecting_nothing() + { + return bitmap_is_clear_all(&expect_mask); + } + void expect_only(Item::Type type) + { + expect_nothing(); + expect(type); + }; + + void expect_field_type(enum_field_types type) + { + bitmap_set_bit(&expect_field_type_mask, (uint) type); + }; + void expect_all_field_types() + { + bitmap_set_all(&expect_field_type_mask); + }; + bool expecting_field_type(enum_field_types type) + { + return bitmap_is_set(&expect_field_type_mask, (uint) type); + }; + void expect_no_field_type() + { + bitmap_clear_all(&expect_field_type_mask); + }; + bool expecting_no_field_type() + { + return bitmap_is_clear_all(&expect_field_type_mask); + } + void expect_only_field_type(enum_field_types result) + { + expect_no_field_type(); + expect_field_type(result); + }; + + void expect_field_result(Item_result result) + { + bitmap_set_bit(&expect_field_result_mask, (uint) result); + }; + 
bool expecting_field_result(Item_result result) + { + return bitmap_is_set(&expect_field_result_mask, (uint) result); + }; + void expect_no_field_result() + { + bitmap_clear_all(&expect_field_result_mask); + }; + bool expecting_no_field_result() + { + return bitmap_is_clear_all(&expect_field_result_mask); + } + void expect_only_field_result(Item_result result) + { + expect_no_field_result(); + expect_field_result(result); + }; + void expect_collation(CHARSET_INFO* col) + { + collation= col; + }; + bool expecting_collation(CHARSET_INFO* col) + { + bool matching= (!collation) ? true : (collation == col); + collation= NULL; + + return matching; + }; + + TABLE* table; + void* ndb_table; + bool supported; + Ndb_cond_stack* stack_ptr; + Ndb_cond* cond_ptr; + MY_BITMAP expect_mask; + MY_BITMAP expect_field_type_mask; + MY_BITMAP expect_field_result_mask; + uint skip; + CHARSET_INFO* collation; + Ndb_rewrite_context *rewrite_stack; +}; + +typedef enum ndb_query_state_bits { + NDB_QUERY_NORMAL = 0, + NDB_QUERY_MULTI_READ_RANGE = 1 +} NDB_QUERY_STATE_BITS; + /* Place holder for ha_ndbcluster thread specific data */ -class Thd_ndb { +class Thd_ndb +{ public: Thd_ndb(); ~Thd_ndb(); Ndb *ndb; ulong count; uint lock_count; + NdbTransaction *all; + NdbTransaction *stmt; int error; + List<NDB_SHARE> changed_tables; + uint query_state; }; class ha_ndbcluster: public handler @@ -93,35 +517,45 @@ class ha_ndbcluster: public handler int index_init(uint index); int index_end(); int index_read(byte *buf, const byte *key, uint key_len, - enum ha_rkey_function find_flag); + enum ha_rkey_function find_flag); int index_read_idx(byte *buf, uint index, const byte *key, uint key_len, - enum ha_rkey_function find_flag); + enum ha_rkey_function find_flag); int index_next(byte *buf); int index_prev(byte *buf); int index_first(byte *buf); int index_last(byte *buf); + int index_read_last(byte * buf, const byte * key, uint key_len); int rnd_init(bool scan); int rnd_end(); int rnd_next(byte *buf); 
int rnd_pos(byte *buf, byte *pos); void position(const byte *record); int read_range_first(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted); + const key_range *end_key, + bool eq_range, bool sorted); int read_range_first_to_buf(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted, - byte* buf); + const key_range *end_key, + bool eq_range, bool sorted, + byte* buf); int read_range_next(); + /** + * Multi range stuff + */ + int read_multi_range_first(KEY_MULTI_RANGE **found_range_p, + KEY_MULTI_RANGE*ranges, uint range_count, + bool sorted, HANDLER_BUFFER *buffer); + int read_multi_range_next(KEY_MULTI_RANGE **found_range_p); + bool null_value_index_search(KEY_MULTI_RANGE *ranges, + KEY_MULTI_RANGE *end_range, + HANDLER_BUFFER *buffer); bool get_error_message(int error, String *buf); int info(uint); int extra(enum ha_extra_function operation); int extra_opt(enum ha_extra_function operation, ulong cache_size); - int reset(); int external_lock(THD *thd, int lock_type); void unlock_row(); - int start_stmt(THD *thd); + int start_stmt(THD *thd, thr_lock_type lock_type); const char * table_type() const; const char ** bas_ext() const; ulong table_flags(void) const; @@ -130,13 +564,14 @@ class ha_ndbcluster: public handler uint max_supported_keys() const; uint max_supported_key_parts() const; uint max_supported_key_length() const; + uint max_supported_key_part_length() const; int rename_table(const char *from, const char *to); int delete_table(const char *name); int create(const char *name, TABLE *form, HA_CREATE_INFO *info); THR_LOCK_DATA **store_lock(THD *thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type); + THR_LOCK_DATA **to, + enum thr_lock_type lock_type); bool low_byte_first() const; bool has_transactions(); @@ -149,17 +584,61 @@ class ha_ndbcluster: public handler static Thd_ndb* seize_thd_ndb(); static void release_thd_ndb(Thd_ndb* thd_ndb); - uint8 table_cache_type(); - static void 
set_dbname(const char *pathname, char *dbname); - static void set_tabname(const char *pathname, char *tabname); +static void set_dbname(const char *pathname, char *dbname); +static void set_tabname(const char *pathname, char *tabname); + + /* + Condition pushdown + */ + + /* + Push condition down to the table handler. + SYNOPSIS + cond_push() + cond Condition to be pushed. The condition tree must not be + modified by the by the caller. + RETURN + The 'remainder' condition that caller must use to filter out records. + NULL means the handler will not return rows that do not match the + passed condition. + NOTES + The pushed conditions form a stack (from which one can remove the + last pushed condition using cond_pop). + The table handler filters out rows using (pushed_cond1 AND pushed_cond2 + AND ... AND pushed_condN) + or less restrictive condition, depending on handler's capabilities. + + handler->extra(HA_EXTRA_RESET) call empties the condition stack. + Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the + condition stack. + The current implementation supports arbitrary AND/OR nested conditions + with comparisons between columns and constants (including constant + expressions and function calls) and the following comparison operators: + =, !=, >, >=, <, <=, like, "not like", "is null", and "is not null". + Negated conditions are supported by NOT which generate NAND/NOR groups. + */ + const COND *cond_push(const COND *cond); + /* + Pop the top condition from the condition stack of the handler instance. 
+ SYNOPSIS + cond_pop() + Pops the top if condition stack, if stack is not empty + */ + void cond_pop(); + + uint8 table_cache_type(); /* * Internal to ha_ndbcluster, used by C functions */ - int ndb_err(NdbConnection*); + int ndb_err(NdbTransaction*); - private: + my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback *engine_callback, + ulonglong *engine_data); +private: int alter_table_name(const char *to); int drop_table(); int create_index(const char *name, KEY *key_info, bool unique); @@ -172,22 +651,33 @@ class ha_ndbcluster: public handler void release_metadata(); NDB_INDEX_TYPE get_index_type(uint idx_no) const; NDB_INDEX_TYPE get_index_type_from_table(uint index_no) const; - int check_index_fields_not_null(uint index_no); + bool has_null_in_unique_index(uint idx_no) const; + bool check_index_fields_not_null(uint index_no); int pk_read(const byte *key, uint key_len, byte *buf); int complemented_pk_read(const byte *old_data, byte *new_data); - int peek_row(const byte *record); + bool check_all_operations_for_error(NdbTransaction *trans, + const NdbOperation *first, + const NdbOperation *last, + uint errcode); + int peek_indexed_rows(const byte *record, bool check_pk); int unique_index_read(const byte *key, uint key_len, - byte *buf); + byte *buf); int ordered_index_scan(const key_range *start_key, - const key_range *end_key, - bool sorted, byte* buf); + const key_range *end_key, + bool sorted, bool descending, byte* buf); + int unique_index_scan(const KEY* key_info, + const byte *key, + uint key_len, + byte *buf); + int full_table_scan(byte * buf); + int fetch_next(NdbScanOperation* op); int next_result(byte *buf); int define_read_attrs(byte* buf, NdbOperation* op); int filtered_scan(const byte *key, uint key_len, - byte *buf, - enum ha_rkey_function find_flag); + byte *buf, + enum ha_rkey_function find_flag); int close_scan(); void unpack_record(byte *buf); int get_ndb_lock_type(enum thr_lock_type type); @@ 
-196,30 +686,67 @@ class ha_ndbcluster: public handler void set_tabname(const char *pathname); bool set_hidden_key(NdbOperation*, - uint fieldnr, const byte* field_ptr); + uint fieldnr, const byte* field_ptr); int set_ndb_key(NdbOperation*, Field *field, - uint fieldnr, const byte* field_ptr); + uint fieldnr, const byte* field_ptr); int set_ndb_value(NdbOperation*, Field *field, uint fieldnr, bool *set_blob_value= 0); int get_ndb_value(NdbOperation*, Field *field, uint fieldnr, byte*); friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg); - int get_ndb_blobs_value(NdbBlob *last_ndb_blob); + int get_ndb_blobs_value(NdbBlob *last_ndb_blob, my_ptrdiff_t ptrdiff); int set_primary_key(NdbOperation *op, const byte *key); int set_primary_key_from_record(NdbOperation *op, const byte *record); - int set_bounds(NdbIndexScanOperation *ndb_op, const key_range *keys[2]); + int set_index_key_from_record(NdbOperation *op, const byte *record, + uint keyno); + int set_bounds(NdbIndexScanOperation*, const key_range *keys[2], uint= 0); int key_cmp(uint keynr, const byte * old_row, const byte * new_row); + int set_index_key(NdbOperation *, const KEY *key_info, const byte *key_ptr); void print_results(); - longlong get_auto_increment(); + ulonglong get_auto_increment(); void invalidate_dictionary_cache(bool global); - bool uses_blob_value(bool all_fields); + +bool uses_blob_value(bool all_fields); + + char *update_table_comment(const char * comment); int write_ndb_file(); - private: - int check_ndb_connection(); + int check_ndb_connection(THD* thd= current_thd); + + void set_rec_per_key(); + int records_update(); + void no_uncommitted_rows_execute_failure(); + void no_uncommitted_rows_update(int); + void no_uncommitted_rows_init(THD *); + void no_uncommitted_rows_reset(THD *); - NdbConnection *m_active_trans; - NdbResultSet *m_active_cursor; + /* + Condition pushdown + */ + void cond_clear(); + bool serialize_cond(const COND *cond, Ndb_cond_stack *ndb_cond); + int 
build_scan_filter_predicate(Ndb_cond* &cond, + NdbScanFilter* filter, + bool negated= false); + int build_scan_filter_group(Ndb_cond* &cond, + NdbScanFilter* filter); + int build_scan_filter(Ndb_cond* &cond, NdbScanFilter* filter); + int generate_scan_filter(Ndb_cond_stack* cond_stack, + NdbScanOperation* op); + int generate_scan_filter_from_cond(Ndb_cond_stack* cond_stack, + NdbScanFilter& filter); + int generate_scan_filter_from_key(NdbScanOperation* op, + const KEY* key_info, + const byte *key, + uint key_len, + byte *buf); + + friend int execute_commit(ha_ndbcluster*, NdbTransaction*); + friend int execute_no_commit(ha_ndbcluster*, NdbTransaction*, bool); + friend int execute_no_commit_ie(ha_ndbcluster*, NdbTransaction*, bool); + + NdbTransaction *m_active_trans; + NdbScanOperation *m_active_cursor; void *m_table; int m_table_version; void *m_table_info; @@ -232,21 +759,24 @@ class ha_ndbcluster: public handler NDB_SHARE *m_share; NDB_INDEX_DATA m_index[MAX_KEY]; // NdbRecAttr has no reference to blob - typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue; + typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue; NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE]; byte m_ref[NDB_HIDDEN_PRIMARY_KEY_LENGTH]; bool m_use_write; bool m_ignore_dup_key; + bool m_has_unique_index; bool m_primary_key_update; bool m_retrieve_all_fields; bool m_retrieve_primary_key; ha_rows m_rows_to_insert; ha_rows m_rows_inserted; ha_rows m_bulk_insert_rows; + ha_rows m_rows_changed; bool m_bulk_insert_not_flushed; ha_rows m_ops_pending; bool m_skip_auto_increment; bool m_blobs_pending; + my_ptrdiff_t m_blobs_offset; // memory for blobs in one tuple char *m_blobs_buffer; uint32 m_blobs_buffer_size; @@ -256,35 +786,29 @@ class ha_ndbcluster: public handler bool m_force_send; ha_rows m_autoincrement_prefetch; bool m_transaction_on; - bool m_use_local_query_cache; - + void release_completed_operations(NdbTransaction*, bool); + + Ndb_cond_stack 
*m_cond_stack; + bool m_disable_multi_read; + byte *m_multi_range_result_ptr; + KEY_MULTI_RANGE *m_multi_ranges; + KEY_MULTI_RANGE *m_multi_range_defined; + const NdbOperation *m_current_multi_operation; + NdbIndexScanOperation *m_multi_cursor; + byte *m_multi_range_cursor_result_ptr; + int setup_recattr(const NdbRecAttr*); Ndb *get_ndb(); - void set_rec_per_key(); - int records_update(); - void no_uncommitted_rows_execute_failure(); - void no_uncommitted_rows_update(int); - void no_uncommitted_rows_init(THD *); - void no_uncommitted_rows_reset(THD *); - - void release_completed_operations(NdbConnection*); - - friend int execute_no_commit(ha_ndbcluster*, NdbConnection*); - friend int execute_commit(ha_ndbcluster*, NdbConnection*); - friend int execute_no_commit_ie(ha_ndbcluster*, NdbConnection*); }; +extern struct show_var_st ndb_status_variables[]; + bool ndbcluster_init(void); bool ndbcluster_end(void); -int ndbcluster_commit(THD *thd, void* ndb_transaction); -int ndbcluster_rollback(THD *thd, void* ndb_transaction); - -void ndbcluster_close_connection(THD *thd); - int ndbcluster_discover(THD* thd, const char* dbname, const char* name, - const void** frmblob, uint* frmlen); + const void** frmblob, uint* frmlen); int ndbcluster_find_files(THD *thd,const char *db,const char *path, - const char *wild, bool dir, List<char> *files); + const char *wild, bool dir, List<char> *files); int ndbcluster_table_exists_in_engine(THD* thd, const char *db, const char *name); int ndbcluster_drop_database(const char* path); diff --git a/sql/handler.cc b/sql/handler.cc index 82fff72f0da..6cba079e736 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your 
option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -25,87 +24,143 @@ #include "ha_heap.h" #include "ha_myisam.h" #include "ha_myisammrg.h" -#ifdef HAVE_ISAM -#include "ha_isam.h" -#include "ha_isammrg.h" -#endif + + +/* + We have dummy hanldertons in case the handler has not been compiled + in. This will be removed in 5.1. +*/ #ifdef HAVE_BERKELEY_DB #include "ha_berkeley.h" +extern handlerton berkeley_hton; +#else +handlerton berkeley_hton = { "BerkeleyDB", SHOW_OPTION_NO, + "Supports transactions and page-level locking", DB_TYPE_BERKELEY_DB, NULL, + 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, HTON_NO_FLAGS }; #endif #ifdef HAVE_BLACKHOLE_DB #include "ha_blackhole.h" +extern handlerton blackhole_hton; +#else +handlerton blackhole_hton = { "BLACKHOLE", SHOW_OPTION_NO, + "/dev/null storage engine (anything you write to it disappears)", + DB_TYPE_BLACKHOLE_DB, NULL, 0, 0, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + HTON_NO_FLAGS }; #endif #ifdef HAVE_EXAMPLE_DB #include "examples/ha_example.h" +extern handlerton example_hton; +#else +handlerton example_hton = { "EXAMPLE", SHOW_OPTION_NO, + "Example storage engine", + DB_TYPE_EXAMPLE_DB, NULL, 0, 0, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + HTON_NO_FLAGS }; #endif -#ifdef HAVE_ARCHIVE_DB -#include "examples/ha_archive.h" +#if defined(HAVE_ARCHIVE_DB) +#include "ha_archive.h" +extern handlerton archive_hton; +#else +handlerton archive_hton = { "ARCHIVE", SHOW_OPTION_NO, + "Archive storage engine", DB_TYPE_ARCHIVE_DB, NULL, 0, 0, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + HTON_NO_FLAGS }; #endif #ifdef HAVE_CSV_DB #include "examples/ha_tina.h" +extern handlerton tina_hton; +#else +handlerton tina_hton = 
{ "CSV", SHOW_OPTION_NO, "CSV storage engine", + DB_TYPE_CSV_DB, NULL, 0, 0, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + HTON_NO_FLAGS }; #endif #ifdef HAVE_INNOBASE_DB #include "ha_innodb.h" +extern handlerton innobase_hton; +#else +handlerton innobase_hton = { "InnoDB", SHOW_OPTION_NO, + "Supports transactions, row-level locking, and foreign keys", + DB_TYPE_INNODB, NULL, 0, 0, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + HTON_NO_FLAGS }; #endif #ifdef HAVE_NDBCLUSTER_DB #include "ha_ndbcluster.h" +extern handlerton ndbcluster_hton; +#else +handlerton ndbcluster_hton = { "ndbcluster", SHOW_OPTION_NO, + "Clustered, fault-tolerant, memory-based tables", + DB_TYPE_NDBCLUSTER, NULL, 0, 0, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + HTON_NO_FLAGS }; +#endif +#ifdef HAVE_FEDERATED_DB +#include "ha_federated.h" +extern handlerton federated_hton; +#else +handlerton federated_hton = { "FEDERATED", SHOW_OPTION_NO, + "Federated MySQL storage engine", DB_TYPE_FEDERATED_DB, NULL, 0, 0, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + HTON_NO_FLAGS }; #endif #include <myisampack.h> #include <errno.h> - /* static functions defined in this file */ - -static int NEAR_F delete_file(const char *name,const char *ext,int extflag); - -ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count, - ha_read_key_count, ha_read_next_count, ha_read_prev_count, - ha_read_first_count, ha_read_last_count, - ha_commit_count, ha_rollback_count, - ha_read_rnd_count, ha_read_rnd_next_count, ha_discover_count; - -static SHOW_COMP_OPTION have_yes= SHOW_OPTION_YES; - -struct show_table_type_st sys_table_types[]= -{ - {"MyISAM", &have_yes, - "Default engine as of MySQL 3.23 with great performance", DB_TYPE_MYISAM}, - {"HEAP", &have_yes, - "Alias for MEMORY", DB_TYPE_HEAP}, - {"MEMORY", &have_yes, - "Hash based, stored in memory, useful for 
temporary tables", DB_TYPE_HEAP}, - {"MERGE", &have_merge_db, - "Collection of identical MyISAM tables", DB_TYPE_MRG_MYISAM}, - {"MRG_MYISAM",&have_merge_db, - "Alias for MERGE", DB_TYPE_MRG_MYISAM}, - {"ISAM", &have_isam, - "Obsolete storage engine, now replaced by MyISAM", DB_TYPE_ISAM}, - {"MRG_ISAM", &have_isam, - "Obsolete storage engine, now replaced by MERGE", DB_TYPE_MRG_ISAM}, - {"InnoDB", &have_innodb, - "Supports transactions, row-level locking, and foreign keys", DB_TYPE_INNODB}, - {"INNOBASE", &have_innodb, - "Alias for INNODB", DB_TYPE_INNODB}, - {"BDB", &have_berkeley_db, - "Supports transactions and page-level locking", DB_TYPE_BERKELEY_DB}, - {"BERKELEYDB",&have_berkeley_db, - "Alias for BDB", DB_TYPE_BERKELEY_DB}, - {"NDBCLUSTER", &have_ndbcluster, - "Clustered, fault-tolerant, memory-based tables", DB_TYPE_NDBCLUSTER}, - {"NDB", &have_ndbcluster, - "Alias for NDBCLUSTER", DB_TYPE_NDBCLUSTER}, - {"EXAMPLE",&have_example_db, - "Example storage engine", DB_TYPE_EXAMPLE_DB}, - {"ARCHIVE",&have_archive_db, - "Archive storage engine", DB_TYPE_ARCHIVE_DB}, - {"CSV",&have_csv_db, - "CSV storage engine", DB_TYPE_CSV_DB}, - {"BLACKHOLE",&have_blackhole_db, - "Storage engine designed to act as null storage", DB_TYPE_BLACKHOLE_DB}, - {NullS, NULL, NullS, DB_TYPE_UNKNOWN} +extern handlerton myisam_hton; +extern handlerton myisammrg_hton; +extern handlerton heap_hton; +extern handlerton binlog_hton; + +/* + Obsolete +*/ +handlerton isam_hton = { "ISAM", SHOW_OPTION_NO, "Obsolete storage engine", + DB_TYPE_ISAM, NULL, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, HTON_NO_FLAGS }; + +/* number of entries in handlertons[] */ +ulong total_ha; +/* number of storage engines (from handlertons[]) that support 2pc */ +ulong total_ha_2pc; +/* size of savepoint storage area (see ha_init) */ +ulong savepoint_alloc_size; + +/* + This array is used for processing compiled in engines. 
+*/ +handlerton *sys_table_types[]= +{ + &myisam_hton, + &heap_hton, + &innobase_hton, + &berkeley_hton, + &blackhole_hton, + &example_hton, + &archive_hton, + &tina_hton, + &ndbcluster_hton, + &federated_hton, + &myisammrg_hton, + &binlog_hton, + &isam_hton, + NULL +}; + +struct show_table_alias_st sys_table_aliases[]= +{ + {"INNOBASE", "InnoDB"}, + {"NDB", "NDBCLUSTER"}, + {"BDB", "BERKELEYDB"}, + {"HEAP", "MEMORY"}, + {"MERGE", "MRG_MYISAM"}, + {NullS, NullS} }; const char *ha_row_type[] = { - "", "FIXED", "DYNAMIC", "COMPRESSED","?","?","?" + "", "FIXED", "DYNAMIC", "COMPRESSED", "REDUNDANT", "COMPACT", "?","?","?" }; const char *tx_isolation_names[] = @@ -119,53 +174,99 @@ uint known_extensions_id= 0; enum db_type ha_resolve_by_name(const char *name, uint namelen) { - THD *thd=current_thd; - if (thd && !my_strcasecmp(&my_charset_latin1, name, "DEFAULT")) { + THD *thd= current_thd; + show_table_alias_st *table_alias; + handlerton **types; + + if (thd && !my_strnncoll(&my_charset_latin1, + (const uchar *)name, namelen, + (const uchar *)"DEFAULT", 7)) return (enum db_type) thd->variables.table_type; + +retest: + for (types= sys_table_types; *types; types++) + { + if (!my_strnncoll(&my_charset_latin1, + (const uchar *)name, namelen, + (const uchar *)(*types)->name, strlen((*types)->name))) + return (enum db_type) (*types)->db_type; } - - show_table_type_st *types; - for (types= sys_table_types; types->type; types++) + + /* + We check for the historical aliases. 
+ */ + for (table_alias= sys_table_aliases; table_alias->type; table_alias++) { - if (!my_strcasecmp(&my_charset_latin1, name, types->type)) - return (enum db_type) types->db_type; + if (!my_strnncoll(&my_charset_latin1, + (const uchar *)name, namelen, + (const uchar *)table_alias->alias, + strlen(table_alias->alias))) + { + name= table_alias->type; + namelen= strlen(name); + goto retest; + } } + return DB_TYPE_UNKNOWN; } + const char *ha_get_storage_engine(enum db_type db_type) { - show_table_type_st *types; - for (types= sys_table_types; types->type; types++) + handlerton **types; + for (types= sys_table_types; *types; types++) { - if (db_type == types->db_type) - return types->type; + if (db_type == (*types)->db_type) + return (*types)->name; } - - return "none"; + return "*NONE*"; +} + + +bool ha_check_storage_engine_flag(enum db_type db_type, uint32 flag) +{ + handlerton **types; + for (types= sys_table_types; *types; types++) + { + if (db_type == (*types)->db_type) + return test((*types)->flags & flag); + } + return FALSE; // No matching engine } my_bool ha_storage_engine_is_enabled(enum db_type database_type) { - show_table_type_st *types; - for (types= sys_table_types; types->type; types++) + handlerton **types; + for (types= sys_table_types; *types; types++) { - if ((database_type == types->db_type) && - (*types->value == SHOW_OPTION_YES)) + if ((database_type == (*types)->db_type) && + ((*types)->state == SHOW_OPTION_YES)) return TRUE; } return FALSE; } - /* Use other database handler if databasehandler is not incompiled */ +/* Use other database handler if databasehandler is not compiled in */ -enum db_type ha_checktype(enum db_type database_type) +enum db_type ha_checktype(THD *thd, enum db_type database_type, + bool no_substitute, bool report_error) { if (ha_storage_engine_is_enabled(database_type)) return database_type; + if (no_substitute) + { + if (report_error) + { + const char *engine_name= ha_get_storage_engine(database_type); + 
my_error(ER_FEATURE_DISABLED,MYF(0),engine_name,engine_name); + } + return DB_TYPE_UNKNOWN; + } + switch (database_type) { #ifndef NO_HASH case DB_TYPE_HASH: @@ -176,136 +277,221 @@ enum db_type ha_checktype(enum db_type database_type) default: break; } - - return - DB_TYPE_UNKNOWN != (enum db_type) current_thd->variables.table_type ? - (enum db_type) current_thd->variables.table_type : - DB_TYPE_UNKNOWN != (enum db_type) global_system_variables.table_type ? - (enum db_type) global_system_variables.table_type : - DB_TYPE_MYISAM; + + return ((enum db_type) thd->variables.table_type != DB_TYPE_UNKNOWN ? + (enum db_type) thd->variables.table_type : + ((enum db_type) global_system_variables.table_type != + DB_TYPE_UNKNOWN ? + (enum db_type) global_system_variables.table_type : DB_TYPE_MYISAM) + ); } /* ha_checktype */ -handler *get_new_handler(TABLE *table, enum db_type db_type) +handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type) { switch (db_type) { #ifndef NO_HASH case DB_TYPE_HASH: - return new ha_hash(table); + return new (alloc) ha_hash(table); #endif -#ifdef HAVE_ISAM - case DB_TYPE_MRG_ISAM: - return new ha_isammrg(table); - case DB_TYPE_ISAM: - return new ha_isam(table); -#else + case DB_TYPE_MRG_MYISAM: case DB_TYPE_MRG_ISAM: - return new ha_myisammrg(table); -#endif + if (have_merge_db == SHOW_OPTION_YES) + return new (alloc) ha_myisammrg(table); + return NULL; #ifdef HAVE_BERKELEY_DB case DB_TYPE_BERKELEY_DB: - return new ha_berkeley(table); + if (have_berkeley_db == SHOW_OPTION_YES) + return new (alloc) ha_berkeley(table); + return NULL; #endif #ifdef HAVE_INNOBASE_DB case DB_TYPE_INNODB: - return new ha_innobase(table); + if (have_innodb == SHOW_OPTION_YES) + return new (alloc) ha_innobase(table); + return NULL; #endif #ifdef HAVE_EXAMPLE_DB case DB_TYPE_EXAMPLE_DB: - return new ha_example(table); + if (have_example_db == SHOW_OPTION_YES) + return new (alloc) ha_example(table); + return NULL; #endif -#ifdef HAVE_ARCHIVE_DB +#if 
defined(HAVE_ARCHIVE_DB) case DB_TYPE_ARCHIVE_DB: - return new ha_archive(table); + if (have_archive_db == SHOW_OPTION_YES) + return new (alloc) ha_archive(table); + return NULL; #endif #ifdef HAVE_BLACKHOLE_DB case DB_TYPE_BLACKHOLE_DB: - return new ha_blackhole(table); + if (have_blackhole_db == SHOW_OPTION_YES) + return new (alloc) ha_blackhole(table); + return NULL; +#endif +#ifdef HAVE_FEDERATED_DB + case DB_TYPE_FEDERATED_DB: + if (have_federated_db == SHOW_OPTION_YES) + return new (alloc) ha_federated(table); + return NULL; #endif #ifdef HAVE_CSV_DB case DB_TYPE_CSV_DB: - return new ha_tina(table); + if (have_csv_db == SHOW_OPTION_YES) + return new (alloc) ha_tina(table); + return NULL; #endif #ifdef HAVE_NDBCLUSTER_DB case DB_TYPE_NDBCLUSTER: - return new ha_ndbcluster(table); + if (have_ndbcluster == SHOW_OPTION_YES) + return new (alloc) ha_ndbcluster(table); + return NULL; #endif case DB_TYPE_HEAP: - return new ha_heap(table); + return new (alloc) ha_heap(table); default: // should never happen { enum db_type def=(enum db_type) current_thd->variables.table_type; /* Try first with 'default table type' */ if (db_type != def) - return get_new_handler(table, def); + return get_new_handler(table, alloc, def); } /* Fall back to MyISAM */ case DB_TYPE_MYISAM: - return new ha_myisam(table); - case DB_TYPE_MRG_MYISAM: - return new ha_myisammrg(table); + return new (alloc) ha_myisam(table); } } -bool ha_caching_allowed(THD* thd, char* table_key, - uint key_length, uint8 cache_type) +/* + Register handler error messages for use with my_error(). + + SYNOPSIS + ha_init_errors() + + RETURN + 0 OK + != 0 Error +*/ + +static int ha_init_errors(void) { -#ifdef HAVE_INNOBASE_DB - if (cache_type == HA_CACHE_TBL_ASKTRANSACT) - return innobase_query_caching_of_table_permitted(thd, table_key, key_length); -#endif - return 1; +#define SETMSG(nr, msg) errmsgs[(nr) - HA_ERR_FIRST]= (msg) + const char **errmsgs; + + /* Allocate a pointer array for the error message strings. 
*/ + /* Zerofill it to avoid uninitialized gaps. */ + if (! (errmsgs= (const char**) my_malloc(HA_ERR_ERRORS * sizeof(char*), + MYF(MY_WME | MY_ZEROFILL)))) + return 1; + + /* Set the dedicated error messages. */ + SETMSG(HA_ERR_KEY_NOT_FOUND, ER(ER_KEY_NOT_FOUND)); + SETMSG(HA_ERR_FOUND_DUPP_KEY, ER(ER_DUP_KEY)); + SETMSG(HA_ERR_RECORD_CHANGED, "Update wich is recoverable"); + SETMSG(HA_ERR_WRONG_INDEX, "Wrong index given to function"); + SETMSG(HA_ERR_CRASHED, ER(ER_NOT_KEYFILE)); + SETMSG(HA_ERR_WRONG_IN_RECORD, ER(ER_CRASHED_ON_USAGE)); + SETMSG(HA_ERR_OUT_OF_MEM, "Table handler out of memory"); + SETMSG(HA_ERR_NOT_A_TABLE, "Incorrect file format '%.64s'"); + SETMSG(HA_ERR_WRONG_COMMAND, "Command not supported"); + SETMSG(HA_ERR_OLD_FILE, ER(ER_OLD_KEYFILE)); + SETMSG(HA_ERR_NO_ACTIVE_RECORD, "No record read in update"); + SETMSG(HA_ERR_RECORD_DELETED, "Intern record deleted"); + SETMSG(HA_ERR_RECORD_FILE_FULL, ER(ER_RECORD_FILE_FULL)); + SETMSG(HA_ERR_INDEX_FILE_FULL, "No more room in index file '%.64s'"); + SETMSG(HA_ERR_END_OF_FILE, "End in next/prev/first/last"); + SETMSG(HA_ERR_UNSUPPORTED, ER(ER_ILLEGAL_HA)); + SETMSG(HA_ERR_TO_BIG_ROW, "Too big row"); + SETMSG(HA_WRONG_CREATE_OPTION, "Wrong create option"); + SETMSG(HA_ERR_FOUND_DUPP_UNIQUE, ER(ER_DUP_UNIQUE)); + SETMSG(HA_ERR_UNKNOWN_CHARSET, "Can't open charset"); + SETMSG(HA_ERR_WRONG_MRG_TABLE_DEF, ER(ER_WRONG_MRG_TABLE)); + SETMSG(HA_ERR_CRASHED_ON_REPAIR, ER(ER_CRASHED_ON_REPAIR)); + SETMSG(HA_ERR_CRASHED_ON_USAGE, ER(ER_CRASHED_ON_USAGE)); + SETMSG(HA_ERR_LOCK_WAIT_TIMEOUT, ER(ER_LOCK_WAIT_TIMEOUT)); + SETMSG(HA_ERR_LOCK_TABLE_FULL, ER(ER_LOCK_TABLE_FULL)); + SETMSG(HA_ERR_READ_ONLY_TRANSACTION, ER(ER_READ_ONLY_TRANSACTION)); + SETMSG(HA_ERR_LOCK_DEADLOCK, ER(ER_LOCK_DEADLOCK)); + SETMSG(HA_ERR_CANNOT_ADD_FOREIGN, ER(ER_CANNOT_ADD_FOREIGN)); + SETMSG(HA_ERR_NO_REFERENCED_ROW, ER(ER_NO_REFERENCED_ROW_2)); + SETMSG(HA_ERR_ROW_IS_REFERENCED, ER(ER_ROW_IS_REFERENCED_2)); + SETMSG(HA_ERR_NO_SAVEPOINT, 
"No savepoint with that name"); + SETMSG(HA_ERR_NON_UNIQUE_BLOCK_SIZE, "Non unique key block size"); + SETMSG(HA_ERR_NO_SUCH_TABLE, "No such table: '%.64s'"); + SETMSG(HA_ERR_TABLE_EXIST, ER(ER_TABLE_EXISTS_ERROR)); + SETMSG(HA_ERR_NO_CONNECTION, "Could not connect to storage engine"); + SETMSG(HA_ERR_TABLE_DEF_CHANGED, ER(ER_TABLE_DEF_CHANGED)); + SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, ER(ER_TABLE_NEEDS_UPGRADE)); + SETMSG(HA_ERR_TABLE_READONLY, ER(ER_OPEN_AS_READONLY)); + SETMSG(HA_ERR_AUTOINC_READ_FAILED, ER(ER_AUTOINC_READ_FAILED)); + SETMSG(HA_ERR_AUTOINC_ERANGE, ER(ER_WARN_DATA_OUT_OF_RANGE)); + + /* Register the error messages for use with my_error(). */ + return my_error_register(errmsgs, HA_ERR_FIRST, HA_ERR_LAST); +} + + +/* + Unregister handler error messages. + + SYNOPSIS + ha_finish_errors() + + RETURN + 0 OK + != 0 Error +*/ + +static int ha_finish_errors(void) +{ + const char **errmsgs; + + /* Allocate a pointer array for the error message strings. */ + if (! (errmsgs= my_error_unregister(HA_ERR_FIRST, HA_ERR_LAST))) + return 1; + my_free((gptr) errmsgs, MYF(0)); + return 0; +} + + +static inline void ha_was_inited_ok(handlerton **ht) +{ + uint tmp= (*ht)->savepoint_offset; + (*ht)->savepoint_offset= savepoint_alloc_size; + savepoint_alloc_size+= tmp; + (*ht)->slot= total_ha++; + if ((*ht)->prepare) + total_ha_2pc++; } int ha_init() { int error= 0; -#ifdef HAVE_BERKELEY_DB - if (have_berkeley_db == SHOW_OPTION_YES) - { - if (berkeley_init()) - { - have_berkeley_db= SHOW_OPTION_DISABLED; // If we couldn't use handler - error= 1; - } - else - opt_using_transactions=1; - } -#endif -#ifdef HAVE_INNOBASE_DB - if (have_innodb == SHOW_OPTION_YES) - { - if (innobase_init()) - { - have_innodb= SHOW_OPTION_DISABLED; // If we couldn't use handler - error= 1; - } - else - opt_using_transactions=1; - } -#endif -#ifdef HAVE_NDBCLUSTER_DB - if (have_ndbcluster == SHOW_OPTION_YES) + handlerton **types; + total_ha= savepoint_alloc_size= 0; + + if (ha_init_errors()) + 
return 1; + + /* + We now initialize everything here. + */ + for (types= sys_table_types; *types; types++) { - if (ndbcluster_init()) - { - have_ndbcluster= SHOW_OPTION_DISABLED; - error= 1; - } + if (!(*types)->init || !(*types)->init()) + ha_was_inited_ok(types); else - opt_using_transactions=1; + (*types)->state= SHOW_OPTION_DISABLED; } -#endif -#ifdef HAVE_ARCHIVE_DB - if (have_archive_db == SHOW_OPTION_YES) - { - if (archive_db_init()) - { - have_archive_db= SHOW_OPTION_DISABLED; - error= 1; - } - } -#endif + + DBUG_ASSERT(total_ha < MAX_HA); + /* + Check if there is a transaction-capable storage engine besides the + binary log (which is considered a transaction-capable storage engine in + counting total_ha) + */ + opt_using_transactions= total_ha>(ulong)opt_bin_log; + savepoint_alloc_size+= sizeof(SAVEPOINT); return error; } @@ -337,7 +523,11 @@ int ha_panic(enum ha_panic_function flag) if (have_ndbcluster == SHOW_OPTION_YES) error|=ndbcluster_end(); #endif -#ifdef HAVE_ARCHIVE_DB +#ifdef HAVE_FEDERATED_DB + if (have_federated_db == SHOW_OPTION_YES) + error|= federated_db_end(); +#endif +#if defined(HAVE_ARCHIVE_DB) if (have_archive_db == SHOW_OPTION_YES) error|= archive_db_end(); #endif @@ -345,6 +535,8 @@ int ha_panic(enum ha_panic_function flag) if (have_csv_db == SHOW_OPTION_YES) error|= tina_end(); #endif + if (ha_finish_errors()) + error= 1; return error; } /* ha_panic */ @@ -360,16 +552,290 @@ void ha_drop_database(char* path) #endif } +/* don't bother to rollback here, it's done already */ void ha_close_connection(THD* thd) { -#ifdef HAVE_INNOBASE_DB - if (have_innodb == SHOW_OPTION_YES) - innobase_close_connection(thd); -#endif -#ifdef HAVE_NDBCLUSTER_DB - if (have_ndbcluster == SHOW_OPTION_YES) - ndbcluster_close_connection(thd); + handlerton **types; + for (types= sys_table_types; *types; types++) + if (thd->ha_data[(*types)->slot]) + (*types)->close_connection(thd); +} + +/* ======================================================================== 
+ ======================= TRANSACTIONS ===================================*/ + +/* + Register a storage engine for a transaction + + DESCRIPTION + Every storage engine MUST call this function when it starts + a transaction or a statement (that is it must be called both for the + "beginning of transaction" and "beginning of statement"). + Only storage engines registered for the transaction/statement + will know when to commit/rollback it. + + NOTE + trans_register_ha is idempotent - storage engine may register many + times per transaction. + +*/ +void trans_register_ha(THD *thd, bool all, handlerton *ht_arg) +{ + THD_TRANS *trans; + handlerton **ht; + DBUG_ENTER("trans_register_ha"); + DBUG_PRINT("enter",("%s", all ? "all" : "stmt")); + + if (all) + { + trans= &thd->transaction.all; + thd->server_status|= SERVER_STATUS_IN_TRANS; + } + else + trans= &thd->transaction.stmt; + + for (ht=trans->ht; *ht; ht++) + if (*ht == ht_arg) + DBUG_VOID_RETURN; /* already registered, return */ + + trans->ht[trans->nht++]=ht_arg; + DBUG_ASSERT(*ht == ht_arg); + trans->no_2pc|=(ht_arg->prepare==0); + if (thd->transaction.xid_state.xid.is_null()) + thd->transaction.xid_state.xid.set(thd->query_id); + DBUG_VOID_RETURN; +} + +/* + RETURN + 0 - ok + 1 - error, transaction was rolled back +*/ +int ha_prepare(THD *thd) +{ + int error=0, all=1; + THD_TRANS *trans=all ? 
&thd->transaction.all : &thd->transaction.stmt; + handlerton **ht=trans->ht; + DBUG_ENTER("ha_prepare"); +#ifdef USING_TRANSACTIONS + if (trans->nht) + { + for (; *ht; ht++) + { + int err; + statistic_increment(thd->status_var.ha_prepare_count,&LOCK_status); + if ((*ht)->prepare) + { + if ((err= (*(*ht)->prepare)(thd, all))) + { + my_error(ER_ERROR_DURING_COMMIT, MYF(0), err); + ha_rollback_trans(thd, all); + error=1; + break; + } + } + else + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), (*ht)->name); + } + } + } +#endif /* USING_TRANSACTIONS */ + DBUG_RETURN(error); +} + +/* + RETURN + 0 - ok + 1 - transaction was rolled back + 2 - error during commit, data may be inconsistent +*/ +int ha_commit_trans(THD *thd, bool all) +{ + int error= 0, cookie= 0; + THD_TRANS *trans= all ? &thd->transaction.all : &thd->transaction.stmt; + bool is_real_trans= all || thd->transaction.all.nht == 0; + handlerton **ht= trans->ht; + my_xid xid= thd->transaction.xid_state.xid.get_my_xid(); + DBUG_ENTER("ha_commit_trans"); + + if (thd->in_sub_stmt) + { + /* + Since we don't support nested statement transactions in 5.0, + we can't commit or rollback stmt transactions while we are inside + stored functions or triggers. So we simply do nothing now. + TODO: This should be fixed in later ( >= 5.1) releases. + */ + if (!all) + DBUG_RETURN(0); + /* + We assume that all statements which commit or rollback main transaction + are prohibited inside of stored functions or triggers. So they should + bail out with error even before ha_commit_trans() call. To be 100% safe + let us throw error in non-debug builds. 
+ */ + DBUG_ASSERT(0); + my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0)); + DBUG_RETURN(2); + } +#ifdef USING_TRANSACTIONS + if (trans->nht) + { + if (is_real_trans && wait_if_global_read_lock(thd, 0, 0)) + { + ha_rollback_trans(thd, all); + DBUG_RETURN(1); + } + DBUG_EXECUTE_IF("crash_commit_before", abort();); + + /* Close all cursors that can not survive COMMIT */ + if (is_real_trans) /* not a statement commit */ + thd->stmt_map.close_transient_cursors(); + + if (!trans->no_2pc && trans->nht > 1) + { + for (; *ht && !error; ht++) + { + int err; + if ((err= (*(*ht)->prepare)(thd, all))) + { + my_error(ER_ERROR_DURING_COMMIT, MYF(0), err); + error= 1; + } + statistic_increment(thd->status_var.ha_prepare_count,&LOCK_status); + } + DBUG_EXECUTE_IF("crash_commit_after_prepare", abort();); + if (error || (is_real_trans && xid && + (error= !(cookie= tc_log->log_xid(thd, xid))))) + { + ha_rollback_trans(thd, all); + error= 1; + goto end; + } + DBUG_EXECUTE_IF("crash_commit_after_log", abort();); + } + error=ha_commit_one_phase(thd, all) ? (cookie ? 2 : 1) : 0; + DBUG_EXECUTE_IF("crash_commit_before_unlog", abort();); + if (cookie) + tc_log->unlog(cookie, xid); + DBUG_EXECUTE_IF("crash_commit_after", abort();); +end: + if (is_real_trans) + start_waiting_global_read_lock(thd); + } +#endif /* USING_TRANSACTIONS */ + DBUG_RETURN(error); +} + +/* + NOTE - this function does not care about global read lock. + A caller should. +*/ +int ha_commit_one_phase(THD *thd, bool all) +{ + int error=0; + THD_TRANS *trans=all ? 
&thd->transaction.all : &thd->transaction.stmt; + bool is_real_trans=all || thd->transaction.all.nht == 0; + handlerton **ht=trans->ht; + DBUG_ENTER("ha_commit_one_phase"); +#ifdef USING_TRANSACTIONS + if (trans->nht) + { + for (ht=trans->ht; *ht; ht++) + { + int err; + if ((err= (*(*ht)->commit)(thd, all))) + { + my_error(ER_ERROR_DURING_COMMIT, MYF(0), err); + error=1; + } + statistic_increment(thd->status_var.ha_commit_count,&LOCK_status); + *ht= 0; + } + trans->nht=0; + trans->no_2pc=0; + if (is_real_trans) + thd->transaction.xid_state.xid.null(); + if (all) + { +#ifdef HAVE_QUERY_CACHE + if (thd->transaction.changed_tables) + query_cache.invalidate(thd->transaction.changed_tables); #endif + thd->variables.tx_isolation=thd->session_tx_isolation; + thd->transaction.cleanup(); + } + } +#endif /* USING_TRANSACTIONS */ + DBUG_RETURN(error); +} + + +int ha_rollback_trans(THD *thd, bool all) +{ + int error=0; + THD_TRANS *trans=all ? &thd->transaction.all : &thd->transaction.stmt; + bool is_real_trans=all || thd->transaction.all.nht == 0; + DBUG_ENTER("ha_rollback_trans"); + if (thd->in_sub_stmt) + { + /* + If we are inside stored function or trigger we should not commit or + rollback current statement transaction. See comment in ha_commit_trans() + call for more information. 
+ */ + if (!all) + DBUG_RETURN(0); + DBUG_ASSERT(0); + my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0)); + DBUG_RETURN(1); + } +#ifdef USING_TRANSACTIONS + if (trans->nht) + { + /* Close all cursors that can not survive ROLLBACK */ + if (is_real_trans) /* not a statement commit */ + thd->stmt_map.close_transient_cursors(); + + for (handlerton **ht=trans->ht; *ht; ht++) + { + int err; + if ((err= (*(*ht)->rollback)(thd, all))) + { // cannot happen + my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err); + error=1; + } + statistic_increment(thd->status_var.ha_rollback_count,&LOCK_status); + *ht= 0; + } + trans->nht=0; + trans->no_2pc=0; + if (is_real_trans) + thd->transaction.xid_state.xid.null(); + if (all) + { + thd->variables.tx_isolation=thd->session_tx_isolation; + thd->transaction.cleanup(); + } + } +#endif /* USING_TRANSACTIONS */ + /* + If a non-transactional table was updated, warn; don't warn if this is a + slave thread (because when a slave thread executes a ROLLBACK, it has + been read from the binary log, so it's 100% sure and normal to produce + error ER_WARNING_NOT_COMPLETE_ROLLBACK. If we sent the warning to the + slave SQL thread, it would not stop the thread but just be printed in + the error log; but we don't want users to wonder why they have this + message in the error log, so we don't send it. + */ + if (is_real_trans && (thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && + !thd->slave_thread) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARNING_NOT_COMPLETE_ROLLBACK, + ER(ER_WARNING_NOT_COMPLETE_ROLLBACK)); + DBUG_RETURN(error); } /* @@ -385,7 +851,7 @@ int ha_autocommit_or_rollback(THD *thd, int error) { DBUG_ENTER("ha_autocommit_or_rollback"); #ifdef USING_TRANSACTIONS - if (opt_using_transactions) + if (thd->transaction.stmt.nht) { if (!error) { @@ -401,83 +867,252 @@ int ha_autocommit_or_rollback(THD *thd, int error) DBUG_RETURN(error); } -/* - This function is called when MySQL writes the log segment of a - transaction to the binlog. 
It is called when the LOCK_log mutex is - reserved. Here we communicate to transactional table handlers what - binlog position corresponds to the current transaction. The handler - can store it and in recovery print to the user, so that the user - knows from what position in the binlog to start possible - roll-forward, for example, if the crashed server was a slave in - replication. This function also calls the commit of the table - handler, because the order of transactions in the log of the table - handler must be the same as in the binlog. - NOTE that to eliminate the bottleneck of the group commit, we do not - flush the handler log files here, but only later in a call of - ha_commit_complete(). - arguments: - thd: the thread handle of the current connection - log_file_name: latest binlog file name - end_offset: the offset in the binlog file up to which we wrote - return value: 0 if success, 1 if error -*/ +int ha_commit_or_rollback_by_xid(XID *xid, bool commit) +{ + handlerton **types; + int res= 1; -int ha_report_binlog_offset_and_commit(THD *thd, - char *log_file_name, - my_off_t end_offset) + for (types= sys_table_types; *types; types++) + { + if ((*types)->state == SHOW_OPTION_YES && (*types)->recover) + { + if ((*(commit ? (*types)->commit_by_xid : + (*types)->rollback_by_xid))(xid)) + res= 0; + } + } + return res; +} + + +#ifndef DBUG_OFF +/* this does not need to be multi-byte safe or anything */ +static char* xid_to_str(char *buf, XID *xid) { - int error= 0; -#ifdef HAVE_INNOBASE_DB - THD_TRANS *trans; - trans = &thd->transaction.all; - if (trans->innodb_active_trans) + int i; + char *s=buf; + *s++='\''; + for (i=0; i < xid->gtrid_length+xid->bqual_length; i++) { - /* - If we updated some InnoDB tables (innodb_active_trans is true), the - binlog coords will be reported into InnoDB during the InnoDB commit - (innobase_report_binlog_offset_and_commit). But if we updated only - non-InnoDB tables, we need an explicit call to report it. 
- */ - if ((error=innobase_report_binlog_offset_and_commit(thd, - trans->innobase_tid, - log_file_name, - end_offset))) + uchar c=(uchar)xid->data[i]; + /* is_next_dig is set if next character is a number */ + bool is_next_dig= FALSE; + if (i < XIDDATASIZE) { - my_error(ER_ERROR_DURING_COMMIT, MYF(0), error); - error=1; + char ch= xid->data[i+1]; + is_next_dig= (ch >= '0' && ch <='9'); + } + if (i == xid->gtrid_length) + { + *s++='\''; + if (xid->bqual_length) + { + *s++='.'; + *s++='\''; + } + } + if (c < 32 || c > 126) + { + *s++='\\'; + /* + If next character is a number, write current character with + 3 octal numbers to ensure that the next number is not seen + as part of the octal number + */ + if (c > 077 || is_next_dig) + *s++=_dig_vec_lower[c >> 6]; + if (c > 007 || is_next_dig) + *s++=_dig_vec_lower[(c >> 3) & 7]; + *s++=_dig_vec_lower[c & 7]; + } + else + { + if (c == '\'' || c == '\\') + *s++='\\'; + *s++=c; } } - else if (opt_innodb_safe_binlog) // Don't report if not useful - innobase_store_binlog_offset_and_flush_log(log_file_name, end_offset); -#endif - return error; + *s++='\''; + *s=0; + return buf; } +#endif /* - Flushes the handler log files (if my.cnf settings do not free us from it) - after we have called ha_report_binlog_offset_and_commit(). To eliminate - the bottleneck from the group commit, this should be called when - LOCK_log has been released in log.cc. + recover() step of xa - arguments: - thd: the thread handle of the current connection - return value: always 0 -*/ + NOTE + there are three modes of operation: + + - automatic recover after a crash + in this case commit_list != 0, tc_heuristic_recover==0 + all xids from commit_list are committed, others are rolled back -int ha_commit_complete(THD *thd) + - manual (heuristic) recover + in this case commit_list==0, tc_heuristic_recover != 0 + DBA has explicitly specified that all prepared transactions should + be committed (or rolled back). 
+ + - no recovery (MySQL did not detect a crash) + in this case commit_list==0, tc_heuristic_recover == 0 + there should be no prepared transactions in this case. +*/ +int ha_recover(HASH *commit_list) { -#ifdef HAVE_INNOBASE_DB - THD_TRANS *trans; - trans = &thd->transaction.all; - if (trans->innobase_tid) - { - innobase_commit_complete(trans->innobase_tid); + int len, got, found_foreign_xids=0, found_my_xids=0; + handlerton **types; + XID *list=0; + bool dry_run=(commit_list==0 && tc_heuristic_recover==0); + DBUG_ENTER("ha_recover"); + + /* commit_list and tc_heuristic_recover cannot be set both */ + DBUG_ASSERT(commit_list==0 || tc_heuristic_recover==0); + /* if either is set, total_ha_2pc must be set too */ + DBUG_ASSERT(dry_run || total_ha_2pc>(ulong)opt_bin_log); + + if (total_ha_2pc <= (ulong)opt_bin_log) + DBUG_RETURN(0); + + if (commit_list) + sql_print_information("Starting crash recovery..."); - trans->innodb_active_trans=0; +#ifndef WILL_BE_DELETED_LATER + /* + for now, only InnoDB supports 2pc. 
It means we can always safely + rollback all pending transactions, without risking inconsistent data + */ + DBUG_ASSERT(total_ha_2pc == (ulong) opt_bin_log+1); // only InnoDB and binlog + tc_heuristic_recover= TC_HEURISTIC_RECOVER_ROLLBACK; // forcing ROLLBACK + dry_run=FALSE; +#endif + + for (len= MAX_XID_LIST_SIZE ; list==0 && len > MIN_XID_LIST_SIZE; len/=2) + { + list=(XID *)my_malloc(len*sizeof(XID), MYF(0)); } + if (!list) + { + sql_print_error(ER(ER_OUTOFMEMORY), len*sizeof(XID)); + DBUG_RETURN(1); + } + + for (types= sys_table_types; *types; types++) + { + if ((*types)->state != SHOW_OPTION_YES || !(*types)->recover) + continue; + while ((got=(*(*types)->recover)(list, len)) > 0 ) + { + sql_print_information("Found %d prepared transaction(s) in %s", + got, (*types)->name); + for (int i=0; i < got; i ++) + { + my_xid x=list[i].get_my_xid(); + if (!x) // not "mine" - that is generated by external TM + { +#ifndef DBUG_OFF + char buf[XIDDATASIZE*4+6]; // see xid_to_str + sql_print_information("ignore xid %s", xid_to_str(buf, list+i)); #endif - return 0; + xid_cache_insert(list+i, XA_PREPARED); + found_foreign_xids++; + continue; + } + if (dry_run) + { + found_my_xids++; + continue; + } + // recovery mode + if (commit_list ? 
+ hash_search(commit_list, (byte *)&x, sizeof(x)) != 0 : + tc_heuristic_recover == TC_HEURISTIC_RECOVER_COMMIT) + { +#ifndef DBUG_OFF + char buf[XIDDATASIZE*4+6]; // see xid_to_str + sql_print_information("commit xid %s", xid_to_str(buf, list+i)); +#endif + (*(*types)->commit_by_xid)(list+i); + } + else + { +#ifndef DBUG_OFF + char buf[XIDDATASIZE*4+6]; // see xid_to_str + sql_print_information("rollback xid %s", xid_to_str(buf, list+i)); +#endif + (*(*types)->rollback_by_xid)(list+i); + } + } + if (got < len) + break; + } + } + my_free((gptr)list, MYF(0)); + if (found_foreign_xids) + sql_print_warning("Found %d prepared XA transactions", found_foreign_xids); + if (dry_run && found_my_xids) + { + sql_print_error("Found %d prepared transactions! It means that mysqld was " + "not shut down properly last time and critical recovery " + "information (last binlog or %s file) was manually deleted " + "after a crash. You have to start mysqld with " + "--tc-heuristic-recover switch to commit or rollback " + "pending transactions.", + found_my_xids, opt_tc_log_file); + DBUG_RETURN(1); + } + if (commit_list) + sql_print_information("Crash recovery finished."); + DBUG_RETURN(0); +} + +/* + return the list of XID's to a client, the same way SHOW commands do + + NOTE + I didn't find in XA specs that an RM cannot return the same XID twice, + so mysql_xa_recover does not filter XID's to ensure uniqueness. + It can be easily fixed later, if necessary. 
+*/ +bool mysql_xa_recover(THD *thd) +{ + List<Item> field_list; + Protocol *protocol= thd->protocol; + int i=0; + XID_STATE *xs; + DBUG_ENTER("mysql_xa_recover"); + + field_list.push_back(new Item_int("formatID", 0, MY_INT32_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_int("gtrid_length", 0, MY_INT32_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_int("bqual_length", 0, MY_INT32_NUM_DECIMAL_DIGITS)); + field_list.push_back(new Item_empty_string("data",XIDDATASIZE)); + + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(1); + + pthread_mutex_lock(&LOCK_xid_cache); + while ((xs= (XID_STATE*)hash_element(&xid_cache, i++))) + { + if (xs->xa_state==XA_PREPARED) + { + protocol->prepare_for_resend(); + protocol->store_longlong((longlong)xs->xid.formatID, FALSE); + protocol->store_longlong((longlong)xs->xid.gtrid_length, FALSE); + protocol->store_longlong((longlong)xs->xid.bqual_length, FALSE); + protocol->store(xs->xid.data, xs->xid.gtrid_length+xs->xid.bqual_length, + &my_charset_bin); + if (protocol->write()) + { + pthread_mutex_unlock(&LOCK_xid_cache); + DBUG_RETURN(1); + } + } + } + + pthread_mutex_unlock(&LOCK_xid_cache); + send_eof(thd); + DBUG_RETURN(0); } /* @@ -500,313 +1135,127 @@ int ha_commit_complete(THD *thd) int ha_release_temporary_latches(THD *thd) { #ifdef HAVE_INNOBASE_DB - THD_TRANS *trans; - trans = &thd->transaction.all; - if (trans->innobase_tid) - innobase_release_temporary_latches(trans->innobase_tid); + if (opt_innodb) + innobase_release_temporary_latches(thd); #endif return 0; } -int ha_commit_trans(THD *thd, THD_TRANS* trans) -{ - int error=0; - DBUG_ENTER("ha_commit_trans"); -#ifdef USING_TRANSACTIONS - if (opt_using_transactions) - { - bool transaction_commited= 0; - bool operation_done= 0, need_start_waiters= 0; - /* If transaction has done some updates to tables */ - if (trans == &thd->transaction.all && mysql_bin_log.is_open() && - my_b_tell(&thd->transaction.trans_log)) - 
{ - if ((error= wait_if_global_read_lock(thd, 0, 0))) - { - /* - Note that ROLLBACK [TO SAVEPOINT] does not have this test; it's - because ROLLBACK never updates data, so needn't wait on the lock. - */ - my_error(ER_ERROR_DURING_COMMIT, MYF(0), error); - error= 1; - } - else - need_start_waiters= 1; - if (mysql_bin_log.is_open()) - { - mysql_bin_log.write(thd, &thd->transaction.trans_log, 1); - statistic_increment(binlog_cache_use, &LOCK_status); - if (thd->transaction.trans_log.disk_writes != 0) - { - /* - We have to do this after addition of trans_log to main binlog since - this operation can cause flushing of end of trans_log to disk. - */ - statistic_increment(binlog_cache_disk_use, &LOCK_status); - thd->transaction.trans_log.disk_writes= 0; - } - reinit_io_cache(&thd->transaction.trans_log, - WRITE_CACHE, (my_off_t) 0, 0, 1); - thd->transaction.trans_log.end_of_file= max_binlog_cache_size; - } - } -#ifdef HAVE_NDBCLUSTER_DB - if (trans->ndb_tid) - { - if ((error=ndbcluster_commit(thd,trans->ndb_tid))) - { - if (error == -1) - my_error(ER_ERROR_DURING_COMMIT, MYF(0)); - error=1; - } - if (trans == &thd->transaction.all) - operation_done= transaction_commited= 1; - trans->ndb_tid=0; - } -#endif -#ifdef HAVE_BERKELEY_DB - if (trans->bdb_tid) - { - if ((error=berkeley_commit(thd,trans->bdb_tid))) - { - my_error(ER_ERROR_DURING_COMMIT, MYF(0), error); - error=1; - } - else - if (!(thd->options & OPTION_BEGIN)) - transaction_commited= 1; - trans->bdb_tid=0; - } -#endif +/* + Export statistics for different engines. Currently we use it only for + InnoDB. 
+*/ + +int ha_update_statistics() +{ #ifdef HAVE_INNOBASE_DB - if (trans->innobase_tid) - { - if ((error=innobase_commit(thd,trans->innobase_tid))) - { - my_error(ER_ERROR_DURING_COMMIT, MYF(0), error); - error=1; - } - trans->innodb_active_trans=0; - if (trans == &thd->transaction.all) - operation_done= transaction_commited= 1; - } + if (opt_innodb) + innodb_export_status(); #endif -#ifdef HAVE_QUERY_CACHE - if (transaction_commited && thd->transaction.changed_tables) - query_cache.invalidate(thd->transaction.changed_tables); -#endif /*HAVE_QUERY_CACHE*/ - if (error && trans == &thd->transaction.all && mysql_bin_log.is_open()) - sql_print_error("Got error during commit; Binlog is not up to date!"); - thd->variables.tx_isolation=thd->session_tx_isolation; - if (operation_done) - { - statistic_increment(ha_commit_count,&LOCK_status); - thd->transaction.cleanup(); - } - if (need_start_waiters) - start_waiting_global_read_lock(thd); - } -#endif // using transactions - DBUG_RETURN(error); + return 0; } - -int ha_rollback_trans(THD *thd, THD_TRANS *trans) +int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv) { int error=0; - DBUG_ENTER("ha_rollback_trans"); -#ifdef USING_TRANSACTIONS - if (opt_using_transactions) + THD_TRANS *trans= (thd->in_sub_stmt ? &thd->transaction.stmt : + &thd->transaction.all); + handlerton **ht=trans->ht, **end_ht; + DBUG_ENTER("ha_rollback_to_savepoint"); + + trans->nht=sv->nht; + trans->no_2pc=0; + end_ht=ht+sv->nht; + /* + rolling back to savepoint in all storage engines that were part of the + transaction when the savepoint was set + */ + for (; ht < end_ht; ht++) { - bool operation_done=0; - /* - As rollback can be 30 times slower than insert in InnoDB, and user may - not know there's rollback (if it's because of a dupl row), better warn. 
- */ - const char *save_proc_info= thd->proc_info; - thd->proc_info= "Rolling back"; -#ifdef HAVE_NDBCLUSTER_DB - if (trans->ndb_tid) - { - if ((error=ndbcluster_rollback(thd, trans->ndb_tid))) - { - if (error == -1) - my_error(ER_ERROR_DURING_ROLLBACK, MYF(0)); - error=1; - } - trans->ndb_tid = 0; - operation_done=1; - } -#endif -#ifdef HAVE_BERKELEY_DB - if (trans->bdb_tid) - { - if ((error=berkeley_rollback(thd, trans->bdb_tid))) - { - my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), error); - error=1; - } - trans->bdb_tid=0; - operation_done=1; - } -#endif -#ifdef HAVE_INNOBASE_DB - if (thd->transaction.all.innodb_active_trans) - { - if ((error=innobase_rollback(thd, trans->innobase_tid))) - { - my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), error); - error=1; - } - trans->innodb_active_trans=0; - operation_done=1; + int err; + DBUG_ASSERT((*ht)->savepoint_set != 0); + if ((err= (*(*ht)->savepoint_rollback)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) + { // cannot happen + my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err); + error=1; } -#endif - if ((trans == &thd->transaction.all) && mysql_bin_log.is_open()) - { - /* - Update the binary log with a BEGIN/ROLLBACK block if we have - cached some queries and we updated some non-transactional - table. Such cases should be rare (updating a - non-transactional table inside a transaction...). Count disk - writes to trans_log in any case. - */ - if (my_b_tell(&thd->transaction.trans_log)) - { - if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE)) - mysql_bin_log.write(thd, &thd->transaction.trans_log, 0); - statistic_increment(binlog_cache_use, &LOCK_status); - if (thd->transaction.trans_log.disk_writes != 0) - { - /* - We have to do this after addition of trans_log to main binlog since - this operation can cause flushing of end of trans_log to disk. 
- */ - statistic_increment(binlog_cache_disk_use, &LOCK_status); - thd->transaction.trans_log.disk_writes= 0; - } - } - /* Flushed or not, empty the binlog cache */ - reinit_io_cache(&thd->transaction.trans_log, - WRITE_CACHE, (my_off_t) 0, 0, 1); - thd->transaction.trans_log.end_of_file= max_binlog_cache_size; - if (operation_done) - thd->transaction.cleanup(); + statistic_increment(thd->status_var.ha_savepoint_rollback_count,&LOCK_status); + trans->no_2pc|=(*ht)->prepare == 0; + } + /* + rolling back the transaction in all storage engines that were not part of + the transaction when the savepoint was set + */ + for (; *ht ; ht++) + { + int err; + if ((err= (*(*ht)->rollback)(thd, !thd->in_sub_stmt))) + { // cannot happen + my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), err); + error=1; } - thd->variables.tx_isolation=thd->session_tx_isolation; - if (operation_done) - statistic_increment(ha_rollback_count,&LOCK_status); - thd->proc_info= save_proc_info; + statistic_increment(thd->status_var.ha_rollback_count,&LOCK_status); + *ht=0; // keep it conveniently zero-filled } -#endif /* USING_TRANSACTIONS */ DBUG_RETURN(error); } - /* - Rolls the current transaction back to a savepoint. - Return value: 0 if success, 1 if there was not a savepoint of the given - name. - NOTE: how do we handle this (unlikely but legal) case: - [transaction] + [update to non-trans table] + [rollback to savepoint] ? - The problem occurs when a savepoint is before the update to the - non-transactional table. Then when there's a rollback to the savepoint, if we - simply truncate the binlog cache, we lose the part of the binlog cache where - the update is. If we want to not lose it, we need to write the SAVEPOINT - command and the ROLLBACK TO SAVEPOINT command to the binlog cache. The latter - is easy: it's just write at the end of the binlog cache, but the former - should be *inserted* to the place where the user called SAVEPOINT. 
The - solution is that when the user calls SAVEPOINT, we write it to the binlog - cache (so no need to later insert it). As transactions are never intermixed - in the binary log (i.e. they are serialized), we won't have conflicts with - savepoint names when using mysqlbinlog or in the slave SQL thread. - Then when ROLLBACK TO SAVEPOINT is called, if we updated some - non-transactional table, we don't truncate the binlog cache but instead write - ROLLBACK TO SAVEPOINT to it; otherwise we truncate the binlog cache (which - will chop the SAVEPOINT command from the binlog cache, which is good as in - that case there is no need to have it in the binlog). + note, that according to the sql standard (ISO/IEC 9075-2:2003) + section "4.33.4 SQL-statements and transaction states", + SAVEPOINT is *not* transaction-initiating SQL-statement */ -int ha_rollback_to_savepoint(THD *thd, char *savepoint_name) +int ha_savepoint(THD *thd, SAVEPOINT *sv) { - my_off_t binlog_cache_pos=0; - bool operation_done=0; int error=0; - DBUG_ENTER("ha_rollback_to_savepoint"); + THD_TRANS *trans= (thd->in_sub_stmt ? &thd->transaction.stmt : + &thd->transaction.all); + handlerton **ht=trans->ht; + DBUG_ENTER("ha_savepoint"); #ifdef USING_TRANSACTIONS - if (opt_using_transactions) + for (; *ht; ht++) { -#ifdef HAVE_INNOBASE_DB - /* - Retrieve the trans_log binlog cache position corresponding to the - savepoint, and if the rollback is successful inside InnoDB reset the write - position in the binlog cache to what it was at the savepoint. - */ - if ((error=innobase_rollback_to_savepoint(thd, savepoint_name, - &binlog_cache_pos))) + int err; + if (! (*ht)->savepoint_set) { - my_error(ER_ERROR_DURING_ROLLBACK, MYF(0), error); + my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "SAVEPOINT"); error=1; + break; } - else if (mysql_bin_log.is_open()) - { - /* - Write ROLLBACK TO SAVEPOINT to the binlog cache if we have updated some - non-transactional table. 
Otherwise, truncate the binlog cache starting - from the SAVEPOINT command. - */ - if (unlikely((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && - my_b_tell(&thd->transaction.trans_log))) - { - Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE, FALSE); - if (mysql_bin_log.write(&qinfo)) - error= 1; - } - else - reinit_io_cache(&thd->transaction.trans_log, WRITE_CACHE, - binlog_cache_pos, 0, 0); + if ((err= (*(*ht)->savepoint_set)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) + { // cannot happen + my_error(ER_GET_ERRNO, MYF(0), err); + error=1; } - operation_done=1; -#endif - if (operation_done) - statistic_increment(ha_rollback_count,&LOCK_status); + statistic_increment(thd->status_var.ha_savepoint_count,&LOCK_status); } + sv->nht=trans->nht; #endif /* USING_TRANSACTIONS */ - DBUG_RETURN(error); } - -/* -Sets a transaction savepoint. -Return value: always 0, that is, succeeds always -*/ - -int ha_savepoint(THD *thd, char *savepoint_name) +int ha_release_savepoint(THD *thd, SAVEPOINT *sv) { int error=0; - DBUG_ENTER("ha_savepoint"); -#ifdef USING_TRANSACTIONS - if (opt_using_transactions) + THD_TRANS *trans= (thd->in_sub_stmt ? 
&thd->transaction.stmt : + &thd->transaction.all); + handlerton **ht=trans->ht, **end_ht; + DBUG_ENTER("ha_release_savepoint"); + + end_ht=ht+sv->nht; + for (; ht < end_ht; ht++) { - /* Write it to the binary log (see comments of ha_rollback_to_savepoint) */ - if (mysql_bin_log.is_open()) - { -#ifdef HAVE_INNOBASE_DB - innobase_savepoint(thd,savepoint_name, - my_b_tell(&thd->transaction.trans_log)); -#endif - Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE, FALSE); - if (mysql_bin_log.write(&qinfo)) - error= 1; + int err; + if (!(*ht)->savepoint_release) + continue; + if ((err= (*(*ht)->savepoint_release)(thd, (byte *)(sv+1)+(*ht)->savepoint_offset))) + { // cannot happen + my_error(ER_GET_ERRNO, MYF(0), err); + error=1; } -#ifdef HAVE_INNOBASE_DB - else - innobase_savepoint(thd,savepoint_name,0); -#endif } -#endif /* USING_TRANSACTIONS */ DBUG_RETURN(error); } @@ -850,12 +1299,25 @@ bool ha_flush_logs() The .frm file will be deleted only if we return 0 or ENOENT */ -int ha_delete_table(enum db_type table_type, const char *path) +int ha_delete_table(THD *thd, enum db_type table_type, const char *path, + const char *alias, bool generate_warning) { + handler *file; char tmp_path[FN_REFLEN]; - handler *file=get_new_handler((TABLE*) 0, table_type); - if (!file) - return ENOENT; + int error; + TABLE dummy_table; + TABLE_SHARE dummy_share; + DBUG_ENTER("ha_delete_table"); + + bzero((char*) &dummy_table, sizeof(dummy_table)); + bzero((char*) &dummy_share, sizeof(dummy_share)); + dummy_table.s= &dummy_share; + + /* DB_TYPE_UNKNOWN is used in ALTER TABLE when renaming only .frm files */ + if (table_type == DB_TYPE_UNKNOWN || + ! 
(file=get_new_handler(&dummy_table, thd->mem_root, table_type))) + DBUG_RETURN(ENOENT); + if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED)) { /* Ensure that table handler get path in lower case */ @@ -863,69 +1325,59 @@ int ha_delete_table(enum db_type table_type, const char *path) my_casedn_str(files_charset_info, tmp_path); path= tmp_path; } - int error=file->delete_table(path); - delete file; - return error; -} - - -void ha_store_ptr(byte *buff, uint pack_length, my_off_t pos) -{ - switch (pack_length) { -#if SIZEOF_OFF_T > 4 - case 8: mi_int8store(buff,pos); break; - case 7: mi_int7store(buff,pos); break; - case 6: mi_int6store(buff,pos); break; - case 5: mi_int5store(buff,pos); break; -#endif - case 4: mi_int4store(buff,pos); break; - case 3: mi_int3store(buff,pos); break; - case 2: mi_int2store(buff,(uint) pos); break; - case 1: buff[0]= (uchar) pos; break; - } - return; -} - -my_off_t ha_get_ptr(byte *ptr, uint pack_length) -{ - my_off_t pos; - switch (pack_length) { -#if SIZEOF_OFF_T > 4 - case 8: - pos= (my_off_t) mi_uint8korr(ptr); - break; - case 7: - pos= (my_off_t) mi_uint7korr(ptr); - break; - case 6: - pos= (my_off_t) mi_uint6korr(ptr); - break; - case 5: - pos= (my_off_t) mi_uint5korr(ptr); - break; -#endif - case 4: - pos= (my_off_t) mi_uint4korr(ptr); - break; - case 3: - pos= (my_off_t) mi_uint3korr(ptr); - break; - case 2: - pos= (my_off_t) mi_uint2korr(ptr); - break; - case 1: - pos= (my_off_t) mi_uint2korr(ptr); - break; - default: - pos=0; // Impossible - break; + if ((error= file->delete_table(path)) && generate_warning) + { + /* + Because file->print_error() use my_error() to generate the error message + we must store the error state in thd, reset it and restore it to + be able to get hold of the error message. + (We should in the future either rewrite handler::print_error() or make + a nice method of this. 
+ */ + bool query_error= thd->query_error; + sp_rcontext *spcont= thd->spcont; + SELECT_LEX *current_select= thd->lex->current_select; + char buff[sizeof(thd->net.last_error)]; + char new_error[sizeof(thd->net.last_error)]; + int last_errno= thd->net.last_errno; + + strmake(buff, thd->net.last_error, sizeof(buff)-1); + thd->query_error= 0; + thd->spcont= 0; + thd->lex->current_select= 0; + thd->net.last_error[0]= 0; + + /* Fill up strucutures that print_error may need */ + dummy_table.s->path= path; + dummy_table.alias= alias; + + file->print_error(error, 0); + strmake(new_error, thd->net.last_error, sizeof(buff)-1); + + /* restore thd */ + thd->query_error= query_error; + thd->spcont= spcont; + thd->lex->current_select= current_select; + thd->net.last_errno= last_errno; + strmake(thd->net.last_error, buff, sizeof(buff)-1); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error, new_error); } - return pos; + delete file; + DBUG_RETURN(error); } /**************************************************************************** ** General handler functions ****************************************************************************/ +handler *handler::clone(MEM_ROOT *mem_root) +{ + handler *new_handler= get_new_handler(table, mem_root, table->s->db_type); + if (new_handler && !new_handler->ha_open(table->s->path, table->db_stat, + HA_OPEN_IGNORE_IF_LOCKED)) + return new_handler; + return NULL; +} + /* Open database-handler. 
Try O_RDONLY if can't open as O_RDWR */ /* Don't wait for locks if not HA_OPEN_WAIT_IF_LOCKED is set */ @@ -935,8 +1387,8 @@ int handler::ha_open(const char *name, int mode, int test_if_locked) int error; DBUG_ENTER("handler::ha_open"); DBUG_PRINT("enter",("name: %s db_type: %d db_stat: %d mode: %d lock_test: %d", - name, table->db_type, table->db_stat, mode, - test_if_locked)); + name, table->s->db_type, table->db_stat, mode, + test_if_locked)); if ((error=open(name,mode,test_if_locked))) { @@ -954,15 +1406,13 @@ int handler::ha_open(const char *name, int mode, int test_if_locked) } else { - if (table->db_options_in_use & HA_OPTION_READ_ONLY_DATA) + if (table->s->db_options_in_use & HA_OPTION_READ_ONLY_DATA) table->db_stat|=HA_READ_ONLY; (void) extra(HA_EXTRA_NO_READCHECK); // Not needed in SQL - if (!alloc_root_inited(&table->mem_root)) // If temporary table - ref=(byte*) sql_alloc(ALIGN_SIZE(ref_length)*2); - else - ref=(byte*) alloc_root(&table->mem_root, ALIGN_SIZE(ref_length)*2); - if (!ref) + DBUG_ASSERT(alloc_root_inited(&table->mem_root)); + + if (!(ref= (byte*) alloc_root(&table->mem_root, ALIGN_SIZE(ref_length)*2))) { close(); error=HA_ERR_OUT_OF_MEM; @@ -984,7 +1434,7 @@ int handler::read_first_row(byte * buf, uint primary_key) register int error; DBUG_ENTER("handler::read_first_row"); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count,&LOCK_status); /* If there is very few deleted rows in the table, find the first row by @@ -1008,71 +1458,278 @@ int handler::read_first_row(byte * buf, uint primary_key) DBUG_RETURN(error); } +/* + Generate the next auto-increment number based on increment and offset + + In most cases increment= offset= 1, in which case we get: + 1,2,3,4,5,... + If increment=10 and offset=5 and previous number is 1, we get: + 1,5,15,25,35,... 
+*/ + +inline ulonglong +next_insert_id(ulonglong nr,struct system_variables *variables) +{ + nr= (((nr+ variables->auto_increment_increment - + variables->auto_increment_offset)) / + (ulonglong) variables->auto_increment_increment); + return (nr* (ulonglong) variables->auto_increment_increment + + variables->auto_increment_offset); +} + + +void handler::adjust_next_insert_id_after_explicit_value(ulonglong nr) +{ + /* + If we have set THD::next_insert_id previously and plan to insert an + explicitely-specified value larger than this, we need to increase + THD::next_insert_id to be greater than the explicit value. + */ + THD *thd= table->in_use; + if (thd->clear_next_insert_id && (nr >= thd->next_insert_id)) + { + if (thd->variables.auto_increment_increment != 1) + nr= next_insert_id(nr, &thd->variables); + else + nr++; + thd->next_insert_id= nr; + DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr)); + } +} + + +/* + Computes the largest number X: + - smaller than or equal to "nr" + - of the form: auto_increment_offset + N * auto_increment_increment + where N>=0. + + SYNOPSIS + prev_insert_id + nr Number to "round down" + variables variables struct containing auto_increment_increment and + auto_increment_offset + + RETURN + The number X if it exists, "nr" otherwise. +*/ + +inline ulonglong +prev_insert_id(ulonglong nr, struct system_variables *variables) +{ + if (unlikely(nr < variables->auto_increment_offset)) + { + /* + There's nothing good we can do here. That is a pathological case, where + the offset is larger than the column's max possible value, i.e. not even + the first sequence value may be inserted. User will receive warning. 
+ */ + DBUG_PRINT("info",("auto_increment: nr: %lu cannot honour " + "auto_increment_offset: %lu", + (ulong) nr, variables->auto_increment_offset)); + return nr; + } + if (variables->auto_increment_increment == 1) + return nr; // optimization of the formula below + nr= (((nr - variables->auto_increment_offset)) / + (ulonglong) variables->auto_increment_increment); + return (nr * (ulonglong) variables->auto_increment_increment + + variables->auto_increment_offset); +} + /* - Updates field with field_type NEXT_NUMBER according to following: - if field = 0 change field to the next free key in database. + Update the auto_increment field if necessary + + SYNOPSIS + update_auto_increment() + + RETURN + 0 ok + HA_ERR_AUTOINC_READ_FAILED + get_auto_increment() was called and returned ~(ulonglong) 0 + HA_ERR_AUTOINC_ERANGE + storing value in field caused strict mode failure. + + + IMPLEMENTATION + + Updates columns with type NEXT_NUMBER if: + + - If column value is set to NULL (in which case + auto_increment_field_not_null is 0) + - If column is set to 0 and (sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) is not + set. In the future we will only set NEXT_NUMBER fields if one sets them + to NULL (or they are not included in the insert list). + + + There are two different cases when the above is true: + + - thd->next_insert_id == 0 (This is the normal case) + In this case we set the set the column for the first row to the value + next_insert_id(get_auto_increment(column))) which is normally + max-used-column-value +1. + + We call get_auto_increment() only for the first row in a multi-row + statement. For the following rows we generate new numbers based on the + last used number. + + - thd->next_insert_id != 0. This happens when we have read a statement + from the binary log or when one has used SET LAST_INSERT_ID=#. + + In this case we will set the column to the value of next_insert_id. 
+ The next row will be given the id + next_insert_id(next_insert_id) + + The idea is that generated auto_increment values are predictable and + independent of the column values in the table. This is needed to be + able to replicate into a table that already has rows with a higher + auto-increment value than the one that is inserted. + + After we have already generated an auto-increment number and the user + inserts a column with a higher value than the last used one, we will + start counting from the inserted value. + + thd->next_insert_id is cleared after it's been used for a statement. */ -void handler::update_auto_increment() +int handler::update_auto_increment() { - longlong nr; - THD *thd; + ulonglong nr; + THD *thd= table->in_use; + struct system_variables *variables= &thd->variables; DBUG_ENTER("handler::update_auto_increment"); - if (table->next_number_field->val_int() != 0 || + + /* + We must save the previous value to be able to restore it if the + row was not inserted + */ + thd->prev_insert_id= thd->next_insert_id; + + if ((nr= table->next_number_field->val_int()) != 0 || table->auto_increment_field_not_null && - current_thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) + thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) { - table->auto_increment_field_not_null= FALSE; + /* Mark that we didn't generate a new value **/ auto_increment_column_changed=0; - DBUG_VOID_RETURN; + adjust_next_insert_id_after_explicit_value(nr); + DBUG_RETURN(0); } - table->auto_increment_field_not_null= FALSE; - thd=current_thd; - if ((nr=thd->next_insert_id)) - thd->next_insert_id=0; // Clear after use - else - nr=get_auto_increment(); - if (!table->next_number_field->store(nr)) + if (!(nr= thd->next_insert_id)) + { + if ((nr= get_auto_increment()) == ~(ulonglong) 0) + DBUG_RETURN(HA_ERR_AUTOINC_READ_FAILED); // Mark failure + + if (variables->auto_increment_increment != 1) + nr= next_insert_id(nr-1, variables); + /* + Update next row based on the found value. 
This way we don't have to + call the handler for every generated auto-increment value on a + multi-row statement + */ + thd->next_insert_id= nr; + } + + DBUG_PRINT("info",("auto_increment: %lu", (ulong) nr)); + + /* Mark that we should clear next_insert_id before next stmt */ + thd->clear_next_insert_id= 1; + + if (likely(!table->next_number_field->store((longlong) nr, TRUE))) thd->insert_id((ulonglong) nr); else - thd->insert_id(table->next_number_field->val_int()); + if (thd->killed != THD::KILL_BAD_DATA) /* did we fail strict mode? */ + { + /* + overflow of the field; we'll use the max value, however we try to + decrease it to honour auto_increment_* variables: + */ + nr= prev_insert_id(table->next_number_field->val_int(), variables); + thd->insert_id(nr); + if (unlikely(table->next_number_field->store((longlong) nr, TRUE))) + thd->insert_id(nr= table->next_number_field->val_int()); + } + else + DBUG_RETURN(HA_ERR_AUTOINC_ERANGE); + + /* + We can't set next_insert_id if the auto-increment key is not the + first key part, as there is no guarantee that the first parts will be in + sequence + */ + if (!table->s->next_number_key_offset) + { + /* + Set next insert id to point to next auto-increment value to be able to + handle multi-row statements + This works even if auto_increment_increment > 1 + */ + thd->next_insert_id= next_insert_id(nr, variables); + } + else + thd->next_insert_id= 0; + + /* Mark that we generated a new value */ auto_increment_column_changed=1; - DBUG_VOID_RETURN; + DBUG_RETURN(0); } +/* + restore_auto_increment + + In case of error on write, we restore the last used next_insert_id value + because the previous value was not used. 
+*/ -longlong handler::get_auto_increment() +void handler::restore_auto_increment() { - longlong nr; + THD *thd= table->in_use; + if (thd->next_insert_id) + thd->next_insert_id= thd->prev_insert_id; +} + + +ulonglong handler::get_auto_increment() +{ + ulonglong nr; int error; (void) extra(HA_EXTRA_KEYREAD); - index_init(table->next_number_index); - if (!table->next_number_key_offset) + index_init(table->s->next_number_index); + if (!table->s->next_number_key_offset) { // Autoincrement at key-start error=index_last(table->record[1]); } else { byte key[MAX_KEY_LENGTH]; - key_copy(key,table,table->next_number_index, - table->next_number_key_offset); - error=index_read(table->record[1], key, table->next_number_key_offset, - HA_READ_PREFIX_LAST); + key_copy(key, table->record[0], + table->key_info + table->s->next_number_index, + table->s->next_number_key_offset); + error= index_read(table->record[1], key, table->s->next_number_key_offset, + HA_READ_PREFIX_LAST); } if (error) nr=1; else - nr=(longlong) table->next_number_field-> - val_int_offset(table->rec_buff_length)+1; + nr= ((ulonglong) table->next_number_field-> + val_int_offset(table->s->rec_buff_length)+1); index_end(); (void) extra(HA_EXTRA_NO_KEYREAD); return nr; } - /* Print error that we got from handler function */ + +/* + Print error that we got from handler function + + NOTE + In case of delete table it's only safe to use the following parts of + the 'table' structure: + table->s->path + table->alias +*/ void handler::print_error(int error, myf errflag) { @@ -1111,7 +1768,7 @@ void handler::print_error(int error, myf errflag) { /* Key is unknown */ str.copy("", 0, system_charset_info); - key_nr= -1; + key_nr= (uint) -1; } else { @@ -1120,17 +1777,17 @@ void handler::print_error(int error, myf errflag) if (str.length() >= max_length) { str.length(max_length-4); - str.append("..."); + str.append(STRING_WITH_LEN("...")); } } - my_error(ER_DUP_ENTRY,MYF(0),str.c_ptr(),key_nr+1); + my_error(ER_DUP_ENTRY, 
MYF(0), str.c_ptr(), key_nr+1); DBUG_VOID_RETURN; } textno=ER_DUP_KEY; break; } case HA_ERR_NULL_IN_SPATIAL: - textno= ER_UNKNOWN_ERROR; + my_error(ER_CANT_CREATE_GEOMETRY_OBJECT, MYF(0)); DBUG_VOID_RETURN; case HA_ERR_FOUND_DUPP_UNIQUE: textno=ER_DUP_UNIQUE; @@ -1141,15 +1798,21 @@ void handler::print_error(int error, myf errflag) case HA_ERR_CRASHED: textno=ER_NOT_KEYFILE; break; + case HA_ERR_WRONG_IN_RECORD: + textno= ER_CRASHED_ON_USAGE; + break; case HA_ERR_CRASHED_ON_USAGE: textno=ER_CRASHED_ON_USAGE; break; + case HA_ERR_NOT_A_TABLE: + textno= error; + break; case HA_ERR_CRASHED_ON_REPAIR: textno=ER_CRASHED_ON_REPAIR; break; case HA_ERR_OUT_OF_MEM: - my_error(ER_OUT_OF_RESOURCES,errflag); - DBUG_VOID_RETURN; + textno=ER_OUT_OF_RESOURCES; + break; case HA_ERR_WRONG_COMMAND: textno=ER_ILLEGAL_HA; break; @@ -1160,6 +1823,7 @@ void handler::print_error(int error, myf errflag) textno=ER_UNSUPPORTED_EXTENSION; break; case HA_ERR_RECORD_FILE_FULL: + case HA_ERR_INDEX_FILE_FULL: textno=ER_RECORD_FILE_FULL; break; case HA_ERR_LOCK_WAIT_TIMEOUT: @@ -1178,10 +1842,21 @@ void handler::print_error(int error, myf errflag) textno=ER_CANNOT_ADD_FOREIGN; break; case HA_ERR_ROW_IS_REFERENCED: - textno=ER_ROW_IS_REFERENCED; - break; + { + String str; + get_error_message(error, &str); + my_error(ER_ROW_IS_REFERENCED_2, MYF(0), str.c_ptr_safe()); + DBUG_VOID_RETURN; + } case HA_ERR_NO_REFERENCED_ROW: - textno=ER_NO_REFERENCED_ROW; + { + String str; + get_error_message(error, &str); + my_error(ER_NO_REFERENCED_ROW_2, MYF(0), str.c_ptr_safe()); + DBUG_VOID_RETURN; + } + case HA_ERR_TABLE_DEF_CHANGED: + textno=ER_TABLE_DEF_CHANGED; break; case HA_ERR_NO_SUCH_TABLE: { @@ -1192,12 +1867,24 @@ void handler::print_error(int error, myf errflag) */ char *db; char buff[FN_REFLEN]; - uint length=dirname_part(buff,table->path); + uint length= dirname_part(buff,table->s->path); buff[length-1]=0; db=buff+dirname_length(buff); - my_error(ER_NO_SUCH_TABLE,MYF(0),db,table->table_name); + 
my_error(ER_NO_SUCH_TABLE, MYF(0), db, table->alias); break; } + case HA_ERR_TABLE_NEEDS_UPGRADE: + textno=ER_TABLE_NEEDS_UPGRADE; + break; + case HA_ERR_TABLE_READONLY: + textno= ER_OPEN_AS_READONLY; + break; + case HA_ERR_AUTOINC_READ_FAILED: + textno= ER_AUTOINC_READ_FAILED; + break; + case HA_ERR_AUTOINC_ERANGE: + textno= ER_WARN_DATA_OUT_OF_RANGE; + break; default: { /* The error was "unknown" to this function. @@ -1209,27 +1896,27 @@ void handler::print_error(int error, myf errflag) { const char* engine= table_type(); if (temporary) - my_error(ER_GET_TEMPORARY_ERRMSG,MYF(0),error,str.ptr(),engine); + my_error(ER_GET_TEMPORARY_ERRMSG, MYF(0), error, str.ptr(), engine); else - my_error(ER_GET_ERRMSG,MYF(0),error,str.ptr(),engine); + my_error(ER_GET_ERRMSG, MYF(0), error, str.ptr(), engine); } - else + else my_error(ER_GET_ERRNO,errflag,error); DBUG_VOID_RETURN; } } - my_error(textno,errflag,table->table_name,error); + my_error(textno, errflag, table->alias, error); DBUG_VOID_RETURN; } -/* +/* Return an error message specific to this handler - + SYNOPSIS error error code previously returned by handler buf Pointer to String where to add error message - + Returns true if this is a temporary error */ @@ -1239,6 +1926,107 @@ bool handler::get_error_message(int error, String* buf) } +int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt) +{ + KEY *keyinfo, *keyend; + KEY_PART_INFO *keypart, *keypartend; + + if (!table->s->mysql_version) + { + /* check for blob-in-key error */ + keyinfo= table->key_info; + keyend= table->key_info + table->s->keys; + for (; keyinfo < keyend; keyinfo++) + { + keypart= keyinfo->key_part; + keypartend= keypart + keyinfo->key_parts; + for (; keypart < keypartend; keypart++) + { + if (!keypart->fieldnr) + continue; + Field *field= table->field[keypart->fieldnr-1]; + if (field->type() == FIELD_TYPE_BLOB) + { + if (check_opt->sql_flags & TT_FOR_UPGRADE) + check_opt->flags= T_MEDIUM; + return HA_ADMIN_NEEDS_CHECK; + } + } + } + } + return 
check_for_upgrade(check_opt); +} + + +int handler::check_old_types() +{ + Field** field; + + if (!table->s->mysql_version) + { + /* check for bad DECIMAL field */ + for (field= table->field; (*field); field++) + { + if ((*field)->type() == FIELD_TYPE_NEWDECIMAL) + { + return HA_ADMIN_NEEDS_ALTER; + } + if ((*field)->type() == MYSQL_TYPE_VAR_STRING) + { + return HA_ADMIN_NEEDS_ALTER; + } + } + } + return 0; +} + + +static bool update_frm_version(TABLE *table, bool needs_lock) +{ + char path[FN_REFLEN]; + File file; + int result= 1; + DBUG_ENTER("update_frm_version"); + + if (table->s->mysql_version != MYSQL_VERSION_ID) + DBUG_RETURN(0); + + strxnmov(path, sizeof(path)-1, mysql_data_home, "/", table->s->db, "/", + table->s->table_name, reg_ext, NullS); + if (!unpack_filename(path, path)) + DBUG_RETURN(1); + + if (needs_lock) + pthread_mutex_lock(&LOCK_open); + + if ((file= my_open(path, O_RDWR|O_BINARY, MYF(MY_WME))) >= 0) + { + uchar version[4]; + char *key= table->s->table_cache_key; + uint key_length= table->s->key_length; + TABLE *entry; + HASH_SEARCH_STATE state; + + int4store(version, MYSQL_VERSION_ID); + + if ((result= my_pwrite(file,(byte*) version,4,51L,MYF_RW))) + goto err; + + for (entry=(TABLE*) hash_first(&open_cache,(byte*) key,key_length, &state); + entry; + entry= (TABLE*) hash_next(&open_cache,(byte*) key,key_length, &state)) + entry->s->mysql_version= MYSQL_VERSION_ID; + } +err: + if (file >= 0) + VOID(my_close(file,MYF(MY_WME))); + if (needs_lock) + pthread_mutex_unlock(&LOCK_open); + DBUG_RETURN(result); +} + + + /* Return key if error because of duplicated keys */ uint handler::get_dup_key(int error) @@ -1252,16 +2040,40 @@ uint handler::get_dup_key(int error) } +/* + Delete all files with extension from bas_ext() + + SYNOPSIS + delete_table() + name Base name of table + + NOTES + We assume that the handler may return more extensions than + was actually used for the file. 
+ + RETURN + 0 If we successfully deleted at least one file from base_ext and + didn't get any other errors than ENOENT + # Error +*/ + int handler::delete_table(const char *name) { - int error=0; + int error= 0; + int enoent_or_zero= ENOENT; // Error if no file was deleted + char buff[FN_REFLEN]; + for (const char **ext=bas_ext(); *ext ; ext++) { - if (delete_file(name,*ext,2)) + fn_format(buff, name, "", *ext, 2 | 4); + if (my_delete_with_symlink(buff, MYF(0))) { - if ((error=errno) != ENOENT) + if ((error= my_errno) != ENOENT) break; } + else + enoent_or_zero= 0; // No error for ENOENT + error= enoent_or_zero; } return error; } @@ -1282,8 +2094,64 @@ int handler::rename_table(const char * from, const char * to) return error; } + +/* + Performs checks upon the table. + + SYNOPSIS + check() + thd thread doing CHECK TABLE operation + check_opt options from the parser + + NOTES + + RETURN + HA_ADMIN_OK Successful upgrade + HA_ADMIN_NEEDS_UPGRADE Table has structures requiring upgrade + HA_ADMIN_NEEDS_ALTER Table has structures requiring ALTER TABLE + HA_ADMIN_NOT_IMPLEMENTED +*/ + +int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt) +{ + int error; + + if ((table->s->mysql_version >= MYSQL_VERSION_ID) && + (check_opt->sql_flags & TT_FOR_UPGRADE)) + return 0; + + if (table->s->mysql_version < MYSQL_VERSION_ID) + { + if ((error= check_old_types())) + return error; + error= ha_check_for_upgrade(check_opt); + if (error && (error != HA_ADMIN_NEEDS_CHECK)) + return error; + if (!error && (check_opt->sql_flags & TT_FOR_UPGRADE)) + return 0; + } + if ((error= check(thd, check_opt))) + return error; + return update_frm_version(table, 0); +} + + +int handler::ha_repair(THD* thd, HA_CHECK_OPT* check_opt) +{ + int result; + if ((result= repair(thd, check_opt))) + return result; + return update_frm_version(table, 0); +} + + /* - Tell the handler to turn on or off transaction in the handler + Tell the storage engine that it is allowed to "disable transaction" in the + 
handler. It is a hint that ACID is not required - it is used in NDB for + ALTER TABLE, for example, when data are copied to temporary table. + A storage engine may treat this hint any way it likes. NDB for example + starts to commit every now and then automatically. + This hint can be safely ignored. */ int ha_enable_transaction(THD *thd, bool on) @@ -1292,6 +2160,17 @@ int ha_enable_transaction(THD *thd, bool on) DBUG_ENTER("ha_enable_transaction"); thd->transaction.on= on; + if (on) + { + /* + Now all storage engines should have transaction handling enabled. + But some may have it enabled all the time - "disabling" transactions + is an optimization hint that storage engine is free to ignore. + So, let's commit an open transaction (if any) now. + */ + if (!(error= ha_commit_stmt(thd))) + error= end_trans(thd, COMMIT); + } DBUG_RETURN(error); } @@ -1327,7 +2206,7 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info, char name_buff[FN_REFLEN]; DBUG_ENTER("ha_create_table"); - if (openfrm(name,"",0,(uint) READ_ALL, 0, &table)) + if (openfrm(current_thd, name,"",0,(uint) READ_ALL, 0, &table)) DBUG_RETURN(1); if (update_create_info) { @@ -1345,7 +2224,7 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info, error=table.file->create(name,&table,create_info); VOID(closefrm(&table)); if (error) - my_error(ER_CANT_CREATE_TABLE,MYF(ME_BELL+ME_WAITTANG),name,error); + my_error(ER_CANT_CREATE_TABLE, MYF(ME_BELL+ME_WAITTANG), name,error); DBUG_RETURN(error != 0); } @@ -1392,7 +2271,7 @@ int ha_create_table_from_engine(THD* thd, if (error) DBUG_RETURN(2); - if (openfrm(path,"",0,(uint) READ_ALL, 0, &table)) + if (openfrm(thd, path,"",0,(uint) READ_ALL, 0, &table)) DBUG_RETURN(3); update_create_info_from_table(&create_info, &table); @@ -1410,13 +2289,6 @@ int ha_create_table_from_engine(THD* thd, DBUG_RETURN(error != 0); } -static int NEAR_F delete_file(const char *name,const char *ext,int extflag) -{ - char buff[FN_REFLEN]; - 
VOID(fn_format(buff,name,"",ext,extflag | 4)); - return(my_delete_with_symlink(buff,MYF(MY_WME))); -} - void st_ha_check_opt::init() { flags= sql_flags= 0; @@ -1535,14 +2407,14 @@ int ha_discover(THD *thd, const char *db, const char *name, error= ndbcluster_discover(thd, db, name, frmblob, frmlen); #endif if (!error) - statistic_increment(ha_discover_count,&LOCK_status); + statistic_increment(thd->status_var.ha_discover_count,&LOCK_status); DBUG_RETURN(error); } /* - Call this function in order to give the handler the possiblity - to ask engine if there are any new tables that should be written to disk + Call this function in order to give the handler the possiblity + to ask engine if there are any new tables that should be written to disk or any dropped tables that need to be removed from disk */ @@ -1552,7 +2424,7 @@ ha_find_files(THD *thd,const char *db,const char *path, { int error= 0; DBUG_ENTER("ha_find_files"); - DBUG_PRINT("enter", ("db: %s, path: %s, wild: %s, dir: %d", + DBUG_PRINT("enter", ("db: %s, path: %s, wild: %s, dir: %d", db, path, wild, dir)); #ifdef HAVE_NDBCLUSTER_DB if (have_ndbcluster == SHOW_OPTION_YES) @@ -1586,6 +2458,131 @@ int ha_table_exists_in_engine(THD* thd, const char* db, const char* name) /* + Read the first row of a multi-range set. + + SYNOPSIS + read_multi_range_first() + found_range_p Returns a pointer to the element in 'ranges' that + corresponds to the returned row. + ranges An array of KEY_MULTI_RANGE range descriptions. + range_count Number of ranges in 'ranges'. + sorted If result should be sorted per key. + buffer A HANDLER_BUFFER for internal handler usage. + + NOTES + Record is read into table->record[0]. + *found_range_p returns a valid value only if read_multi_range_first() + returns 0. + Sorting is done within each range. If you want an overall sort, enter + 'ranges' with sorted ranges. 
+ + RETURN + 0 OK, found a row + HA_ERR_END_OF_FILE No rows in range + # Error code +*/ + +int handler::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, + KEY_MULTI_RANGE *ranges, uint range_count, + bool sorted, HANDLER_BUFFER *buffer) +{ + int result= HA_ERR_END_OF_FILE; + DBUG_ENTER("handler::read_multi_range_first"); + multi_range_sorted= sorted; + multi_range_buffer= buffer; + + for (multi_range_curr= ranges, multi_range_end= ranges + range_count; + multi_range_curr < multi_range_end; + multi_range_curr++) + { + result= read_range_first(multi_range_curr->start_key.length ? + &multi_range_curr->start_key : 0, + multi_range_curr->end_key.length ? + &multi_range_curr->end_key : 0, + test(multi_range_curr->range_flag & EQ_RANGE), + multi_range_sorted); + if (result != HA_ERR_END_OF_FILE) + break; + } + + *found_range_p= multi_range_curr; + DBUG_PRINT("exit",("result %d", result)); + DBUG_RETURN(result); +} + + +/* + Read the next row of a multi-range set. + + SYNOPSIS + read_multi_range_next() + found_range_p Returns a pointer to the element in 'ranges' that + corresponds to the returned row. + + NOTES + Record is read into table->record[0]. + *found_range_p returns a valid value only if read_multi_range_next() + returns 0. + + RETURN + 0 OK, found a row + HA_ERR_END_OF_FILE No (more) rows in range + # Error code +*/ + +int handler::read_multi_range_next(KEY_MULTI_RANGE **found_range_p) +{ + int result; + DBUG_ENTER("handler::read_multi_range_next"); + + /* We should not be called after the last call returned EOF. */ + DBUG_ASSERT(multi_range_curr < multi_range_end); + + do + { + /* Save a call if there can be only one row in range. */ + if (multi_range_curr->range_flag != (UNIQUE_RANGE | EQ_RANGE)) + { + result= read_range_next(); + + /* On success or non-EOF errors jump to the end. 
*/ + if (result != HA_ERR_END_OF_FILE) + break; + } + else + { + /* + We need to set this for the last range only, but checking this + condition is more expensive than just setting the result code. + */ + result= HA_ERR_END_OF_FILE; + } + + /* Try the next range(s) until one matches a record. */ + for (multi_range_curr++; + multi_range_curr < multi_range_end; + multi_range_curr++) + { + result= read_range_first(multi_range_curr->start_key.length ? + &multi_range_curr->start_key : 0, + multi_range_curr->end_key.length ? + &multi_range_curr->end_key : 0, + test(multi_range_curr->range_flag & EQ_RANGE), + multi_range_sorted); + if (result != HA_ERR_END_OF_FILE) + break; + } + } + while ((result == HA_ERR_END_OF_FILE) && + (multi_range_curr < multi_range_end)); + + *found_range_p= multi_range_curr; + DBUG_PRINT("exit",("handler::read_multi_range_next: result %d", result)); + DBUG_RETURN(result); +} + + +/* Read first row between two ranges. Store ranges for future calls to read_range_next @@ -1593,7 +2590,7 @@ int ha_table_exists_in_engine(THD* thd, const char* db, const char* name) read_range_first() start_key Start key. Is 0 if no min range end_key End key. Is 0 if no max range - eq_range_arg Set to 1 if start_key == end_key + eq_range_arg Set to 1 if start_key == end_key sorted Set to 1 if result should be sorted per key NOTES @@ -1631,7 +2628,7 @@ int handler::read_range_first(const key_range *start_key, start_key->length, start_key->flag); if (result) - DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND) + DBUG_RETURN((result == HA_ERR_KEY_NOT_FOUND) ? HA_ERR_END_OF_FILE : result); @@ -1679,7 +2676,7 @@ int handler::read_range_next() SYNOPSIS compare_key range range to compare to row. 
May be 0 for no range - + NOTES See key.cc::key_cmp() for details @@ -1719,7 +2716,7 @@ int handler::index_read_idx(byte * buf, uint index, const byte * key, SYNOPSIS ha_known_exts() - + NOTES No mutexes, worst case race is a minor surplus memory allocation We have to recreate the extension map if mysqld is restarted (for example @@ -1731,20 +2728,23 @@ int handler::index_read_idx(byte * buf, uint index, const byte * key, TYPELIB *ha_known_exts(void) { + MEM_ROOT *mem_root= current_thd->mem_root; if (!known_extensions.type_names || mysys_usage_id != known_extensions_id) { - show_table_type_st *types; + handlerton **types; List<char> found_exts; List_iterator_fast<char> it(found_exts); const char **ext, *old_ext; known_extensions_id= mysys_usage_id; - found_exts.push_back((char*) ".db"); - for (types= sys_table_types; types->type; types++) - { - if (*types->value == SHOW_OPTION_YES) + found_exts.push_back((char*) triggers_file_ext); + found_exts.push_back((char*) trigname_file_ext); + for (types= sys_table_types; *types; types++) + { + if ((*types)->state == SHOW_OPTION_YES) { - handler *file= get_new_handler(0,(enum db_type) types->db_type); + handler *file= get_new_handler(0, mem_root, + (enum db_type) (*types)->db_type); for (ext= file->bas_ext(); *ext; ext++) { while ((old_ext= it++)) @@ -1763,8 +2763,8 @@ TYPELIB *ha_known_exts(void) ext= (const char **) my_once_alloc(sizeof(char *)* (found_exts.elements+1), MYF(MY_WME | MY_FAE)); - - DBUG_ASSERT(ext); + + DBUG_ASSERT(ext != 0); known_extensions.count= found_exts.elements; known_extensions.type_names= ext; diff --git a/sql/handler.h b/sql/handler.h index 75ff3c2764e..9e381ca4482 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000,2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software 
Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -44,16 +43,30 @@ #define HA_ADMIN_INVALID -5 #define HA_ADMIN_REJECT -6 #define HA_ADMIN_TRY_ALTER -7 +#define HA_ADMIN_WRONG_CHECKSUM -8 +#define HA_ADMIN_NOT_BASE_TABLE -9 +#define HA_ADMIN_NEEDS_UPGRADE -10 +#define HA_ADMIN_NEEDS_ALTER -11 +#define HA_ADMIN_NEEDS_CHECK -12 /* Bits in table_flags() to show what database can do */ -#define HA_READ_RND_SAME (1 << 0) /* can switch index during the scan - with ::rnd_same() - not used yet. - see mi_rsame/heap_rsame/myrg_rsame */ + +/* + Can switch index during the scan with ::rnd_same() - not used yet. + see mi_rsame/heap_rsame/myrg_rsame +*/ +#define HA_READ_RND_SAME (1 << 0) +#define HA_PARTIAL_COLUMN_READ (1 << 1) /* read may not return all columns */ #define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */ #define HA_REC_NOT_IN_SEQ (1 << 3) /* ha_info don't return recnumber; It returns a position to ha_r_rnd */ #define HA_CAN_GEOMETRY (1 << 4) -#define HA_FAST_KEY_READ (1 << 5) /* no need for a record cache in filesort */ +/* + Reading keys in random order is as fast as reading keys in sort order + (Used in records.cc to decide if we should use a record cache and by + filesort to decide if we should sort key + data or key + pointer-to-row +*/ +#define HA_FAST_KEY_READ (1 << 5) #define HA_NULL_IN_KEY (1 << 7) /* One can have keys with NULL */ #define HA_DUPP_POS (1 << 8) /* ha_position() gives dup row */ #define HA_NO_BLOBS (1 << 9) /* Doesn't support blobs */ @@ -61,10 +74,13 @@ #define HA_AUTO_PART_KEY (1 << 11) /* auto-increment in multi-part key */ #define HA_REQUIRE_PRIMARY_KEY (1 << 12) /* .. 
and can't create a hidden one */ #define HA_NOT_EXACT_COUNT (1 << 13) -#define HA_CAN_INSERT_DELAYED (1 << 14) /* only handlers with table-level locks - need no special code to support - INSERT DELAYED */ +/* + INSERT_DELAYED only works with handlers that uses MySQL internal table + level locks +*/ +#define HA_CAN_INSERT_DELAYED (1 << 14) #define HA_PRIMARY_KEY_IN_READ_INDEX (1 << 15) +#define HA_CAN_RTREEKEYS (1 << 17) #define HA_NOT_DELETE_WITH_CACHE (1 << 18) #define HA_NO_PREFIX_CHAR_KEYS (1 << 20) #define HA_CAN_FULLTEXT (1 << 21) @@ -73,6 +89,9 @@ #define HA_HAS_CHECKSUM (1 << 24) /* Table data are stored in separate files (for lower_case_table_names) */ #define HA_FILE_BASED (1 << 26) +#define HA_NO_VARCHAR (1 << 27) +#define HA_CAN_BIT_FIELD (1 << 28) /* supports bit fields */ +#define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */ #define HA_ANY_INDEX_MAY_BE_UNIQUE (1 << 30) @@ -84,12 +103,26 @@ #define HA_ONLY_WHOLE_INDEX 16 /* Can't use part key searches */ #define HA_KEYREAD_ONLY 64 /* Support HA_EXTRA_KEYREAD */ +/* + Index scan will not return records in rowid order. Not guaranteed to be + set for unordered (e.g. HASH) indexes. +*/ +#define HA_KEY_SCAN_NOT_ROR 128 + + /* operations for disable/enable indexes */ #define HA_KEY_SWITCH_NONUNIQ 0 #define HA_KEY_SWITCH_ALL 1 #define HA_KEY_SWITCH_NONUNIQ_SAVE 2 #define HA_KEY_SWITCH_ALL_SAVE 3 +/* + Note: the following includes binlog and closing 0. 
+ so: innodb + bdb + ndb + binlog + myisam + myisammrg + archive + + example + csv + heap + blackhole + federated + 0 + (yes, the sum is deliberately inaccurate) +*/ +#define MAX_HA 14 /* Bits in index_ddl_flags(KEY *wanted_index) @@ -142,13 +175,13 @@ /* Options of START TRANSACTION statement (and later of SET TRANSACTION stmt) */ #define MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT 1 -enum db_type -{ +enum db_type +{ DB_TYPE_UNKNOWN=0,DB_TYPE_DIAB_ISAM=1, DB_TYPE_HASH,DB_TYPE_MISAM,DB_TYPE_PISAM, DB_TYPE_RMS_ISAM, DB_TYPE_HEAP, DB_TYPE_ISAM, DB_TYPE_MRG_ISAM, DB_TYPE_MYISAM, DB_TYPE_MRG_MYISAM, - DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB, + DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB, DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER, DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB, DB_TYPE_FEDERATED_DB, @@ -156,35 +189,235 @@ enum db_type DB_TYPE_DEFAULT // Must be last }; -struct show_table_type_st { - const char *type; - SHOW_COMP_OPTION *value; - const char *comment; - enum db_type db_type; -}; - enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED, - ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED}; + ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED, + ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT }; /* struct to hold information about the table that should be created */ /* Bits in used_fields */ -#define HA_CREATE_USED_AUTO 1 -#define HA_CREATE_USED_RAID 2 -#define HA_CREATE_USED_UNION 4 -#define HA_CREATE_USED_INSERT_METHOD 8 -#define HA_CREATE_USED_MIN_ROWS 16 -#define HA_CREATE_USED_MAX_ROWS 32 -#define HA_CREATE_USED_AVG_ROW_LENGTH 64 -#define HA_CREATE_USED_PACK_KEYS 128 -#define HA_CREATE_USED_CHARSET 256 -#define HA_CREATE_USED_DEFAULT_CHARSET 512 - -typedef struct st_thd_trans { - void *bdb_tid; - void *innobase_tid; - bool innodb_active_trans; - void *ndb_tid; +#define HA_CREATE_USED_AUTO (1L << 0) +#define HA_CREATE_USED_RAID (1L << 1) +#define HA_CREATE_USED_UNION (1L << 2) +#define HA_CREATE_USED_INSERT_METHOD (1L << 3) +#define HA_CREATE_USED_MIN_ROWS (1L << 4) +#define 
HA_CREATE_USED_MAX_ROWS (1L << 5) +#define HA_CREATE_USED_AVG_ROW_LENGTH (1L << 6) +#define HA_CREATE_USED_PACK_KEYS (1L << 7) +#define HA_CREATE_USED_CHARSET (1L << 8) +#define HA_CREATE_USED_DEFAULT_CHARSET (1L << 9) +#define HA_CREATE_USED_DATADIR (1L << 10) +#define HA_CREATE_USED_INDEXDIR (1L << 11) +#define HA_CREATE_USED_ENGINE (1L << 12) +#define HA_CREATE_USED_CHECKSUM (1L << 13) +#define HA_CREATE_USED_DELAY_KEY_WRITE (1L << 14) +#define HA_CREATE_USED_ROW_FORMAT (1L << 15) +#define HA_CREATE_USED_COMMENT (1L << 16) +#define HA_CREATE_USED_PASSWORD (1L << 17) +#define HA_CREATE_USED_CONNECTION (1L << 18) + +typedef ulonglong my_xid; // this line is the same as in log_event.h +#define MYSQL_XID_PREFIX "MySQLXid" +#define MYSQL_XID_PREFIX_LEN 8 // must be a multiple of 8 +#define MYSQL_XID_OFFSET (MYSQL_XID_PREFIX_LEN+sizeof(server_id)) +#define MYSQL_XID_GTRID_LEN (MYSQL_XID_OFFSET+sizeof(my_xid)) + +#define XIDDATASIZE 128 +#define MAXGTRIDSIZE 64 +#define MAXBQUALSIZE 64 + +struct xid_t { + long formatID; + long gtrid_length; + long bqual_length; + char data[XIDDATASIZE]; // not \0-terminated ! 
+ + xid_t() {} /* Remove gcc warning */ + bool eq(struct xid_t *xid) + { return eq(xid->gtrid_length, xid->bqual_length, xid->data); } + bool eq(long g, long b, const char *d) + { return g == gtrid_length && b == bqual_length && !memcmp(d, data, g+b); } + void set(struct xid_t *xid) + { memcpy(this, xid, xid->length()); } + void set(long f, const char *g, long gl, const char *b, long bl) + { + formatID= f; + memcpy(data, g, gtrid_length= gl); + memcpy(data+gl, b, bqual_length= bl); + } + void set(ulonglong xid) + { + my_xid tmp; + formatID= 1; + set(MYSQL_XID_PREFIX_LEN, 0, MYSQL_XID_PREFIX); + memcpy(data+MYSQL_XID_PREFIX_LEN, &server_id, sizeof(server_id)); + tmp= xid; + memcpy(data+MYSQL_XID_OFFSET, &tmp, sizeof(tmp)); + gtrid_length=MYSQL_XID_GTRID_LEN; + } + void set(long g, long b, const char *d) + { + formatID= 1; + gtrid_length= g; + bqual_length= b; + memcpy(data, d, g+b); + } + bool is_null() { return formatID == -1; } + void null() { formatID= -1; } + my_xid quick_get_my_xid() + { + my_xid tmp; + memcpy(&tmp, data+MYSQL_XID_OFFSET, sizeof(tmp)); + return tmp; + } + my_xid get_my_xid() + { + return gtrid_length == MYSQL_XID_GTRID_LEN && bqual_length == 0 && + !memcmp(data+MYSQL_XID_PREFIX_LEN, &server_id, sizeof(server_id)) && + !memcmp(data, MYSQL_XID_PREFIX, MYSQL_XID_PREFIX_LEN) ? 
+ quick_get_my_xid() : 0; + } + uint length() + { + return sizeof(formatID)+sizeof(gtrid_length)+sizeof(bqual_length)+ + gtrid_length+bqual_length; + } + byte *key() + { + return (byte *)>rid_length; + } + uint key_length() + { + return sizeof(gtrid_length)+sizeof(bqual_length)+gtrid_length+bqual_length; + } +}; +typedef struct xid_t XID; + +/* for recover() handlerton call */ +#define MIN_XID_LIST_SIZE 128 +#ifdef SAFEMALLOC +#define MAX_XID_LIST_SIZE 256 +#else +#define MAX_XID_LIST_SIZE (1024*128) +#endif + +/* + handlerton is a singleton structure - one instance per storage engine - + to provide access to storage engine functionality that works on the + "global" level (unlike handler class that works on a per-table basis) + + usually handlerton instance is defined statically in ha_xxx.cc as + + static handlerton { ... } xxx_hton; + + savepoint_*, prepare, recover, and *_by_xid pointers can be 0. +*/ +typedef struct +{ + /* + storage engine name as it should be printed to a user + */ + const char *name; + + /* + Historical marker for if the engine is available of not + */ + SHOW_COMP_OPTION state; + + /* + A comment used by SHOW to describe an engine. + */ + const char *comment; + + /* + Historical number used for frm file to determine the correct storage engine. + This is going away and new engines will just use "name" for this. + */ + enum db_type db_type; + /* + Method that initizlizes a storage engine + */ + bool (*init)(); + + /* + each storage engine has it's own memory area (actually a pointer) + in the thd, for storing per-connection information. + It is accessed as + + thd->ha_data[xxx_hton.slot] + + slot number is initialized by MySQL after xxx_init() is called. + */ + uint slot; + /* + to store per-savepoint data storage engine is provided with an area + of a requested size (0 is ok here). + savepoint_offset must be initialized statically to the size of + the needed memory to store per-savepoint information. 
+ After xxx_init it is changed to be an offset to savepoint storage + area and need not be used by storage engine. + see binlog_hton and binlog_savepoint_set/rollback for an example. + */ + uint savepoint_offset; + /* + handlerton methods: + + close_connection is only called if + thd->ha_data[xxx_hton.slot] is non-zero, so even if you don't need + this storage area - set it to something, so that MySQL would know + this storage engine was accessed in this connection + */ + int (*close_connection)(THD *thd); + /* + sv points to an uninitialized storage area of requested size + (see savepoint_offset description) + */ + int (*savepoint_set)(THD *thd, void *sv); + /* + sv points to a storage area, that was earlier passed + to the savepoint_set call + */ + int (*savepoint_rollback)(THD *thd, void *sv); + int (*savepoint_release)(THD *thd, void *sv); + /* + 'all' is true if it's a real commit, that makes persistent changes + 'all' is false if it's not in fact a commit but an end of the + statement that is part of the transaction. + NOTE 'all' is also false in auto-commit mode where 'end of statement' + and 'real commit' mean the same event. 
+ */ + int (*commit)(THD *thd, bool all); + int (*rollback)(THD *thd, bool all); + int (*prepare)(THD *thd, bool all); + int (*recover)(XID *xid_list, uint len); + int (*commit_by_xid)(XID *xid); + int (*rollback_by_xid)(XID *xid); + void *(*create_cursor_read_view)(); + void (*set_cursor_read_view)(void *); + void (*close_cursor_read_view)(void *); + uint32 flags; /* global handler flags */ +} handlerton; + +struct show_table_alias_st { + const char *alias; + const char *type; +}; + +/* Possible flags of a handlerton */ +#define HTON_NO_FLAGS 0 +#define HTON_CLOSE_CURSORS_AT_COMMIT (1 << 0) +#define HTON_ALTER_NOT_SUPPORTED (1 << 1) //Engine does not support alter +#define HTON_CAN_RECREATE (1 << 2) //Delete all is used fro truncate +#define HTON_HIDDEN (1 << 3) //Engine does not appear in lists + +typedef struct st_thd_trans +{ + /* number of entries in the ht[] */ + uint nht; + /* true is not all entries in the ht[] support 2pc */ + bool no_2pc; + /* storage engines that registered themselves for this transaction */ + handlerton *ht[MAX_HA]; } THD_TRANS; enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED, @@ -193,7 +426,9 @@ enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED, typedef struct st_ha_create_information { CHARSET_INFO *table_charset, *default_table_charset; - const char *comment,*password; + LEX_STRING connect_string; + LEX_STRING comment; + const char *password; const char *data_file_name, *index_file_name; const char *alias; ulonglong max_rows,min_rows; @@ -209,7 +444,10 @@ typedef struct st_ha_create_information uint options; /* OR of HA_CREATE_ options */ uint raid_type,raid_chunks; uint merge_insert_method; + uint extra_size; /* length of extra data segment */ bool table_existed; /* 1 in create if table existed */ + bool frm_only; /* 1 if no ha_create_table() */ + bool varchar; /* 1 if table has a VARCHAR */ } HA_CREATE_INFO; @@ -217,9 +455,18 @@ typedef struct st_ha_create_information struct st_table; typedef 
struct st_table TABLE; +struct st_foreign_key_info; +typedef struct st_foreign_key_info FOREIGN_KEY_INFO; + +typedef struct st_savepoint SAVEPOINT; +extern ulong savepoint_alloc_size; + +/* Forward declaration for condition pushdown to storage engine */ +typedef class Item COND; typedef struct st_ha_check_opt { + st_ha_check_opt() {} /* Remove gcc warning */ ulong sort_buffer_size; uint flags; /* isam layer flags (e.g. for myisamchk) */ uint sql_flags; /* sql layer flags - for something myisamchk cannot do */ @@ -228,6 +475,21 @@ typedef struct st_ha_check_opt } HA_CHECK_OPT; +/* + This is a buffer area that the handler can use to store rows. + 'end_of_used_area' should be kept updated after calls to + read-functions so that other parts of the code can use the + remaining area (until next read calls is issued). +*/ + +typedef struct st_handler_buffer +{ + const byte *buffer; /* Buffer one can start using */ + const byte *buffer_end; /* End of buffer */ + byte *end_of_used_area; /* End of area that was used by handler */ +} HANDLER_BUFFER; + + class handler :public Sql_alloc { protected: @@ -246,6 +508,7 @@ class handler :public Sql_alloc virtual int rnd_end() { return 0; } public: + const handlerton *ht; /* storage engine of this handler */ byte *ref; /* Pointer to current row */ byte *dupp_ref; /* Pointer to dupp row */ ulonglong data_file_length; /* Length off data file */ @@ -262,6 +525,12 @@ public: time_t check_time; time_t update_time; + /* The following are for read_multi_range */ + bool multi_range_sorted; + KEY_MULTI_RANGE *multi_range_curr; + KEY_MULTI_RANGE *multi_range_end; + HANDLER_BUFFER *multi_range_buffer; + /* The following are for read_range() */ key_range save_end_range, *end_range; KEY_PART_INFO *range_key_part; @@ -279,20 +548,24 @@ public: enum {NONE=0, INDEX, RND} inited; bool auto_increment_column_changed; bool implicit_emptied; /* Can be !=0 only if HEAP */ + const COND *pushed_cond; - - handler(TABLE *table_arg) :table(table_arg), + 
handler(const handlerton *ht_arg, TABLE *table_arg) :table(table_arg), + ht(ht_arg), ref(0), data_file_length(0), max_data_file_length(0), index_file_length(0), delete_length(0), auto_increment_value(0), records(0), deleted(0), mean_rec_length(0), create_time(0), check_time(0), update_time(0), key_used_on_scan(MAX_KEY), active_index(MAX_KEY), ref_length(sizeof(my_off_t)), block_size(0), - raid_type(0), ft_handler(0), inited(NONE), implicit_emptied(0) + raid_type(0), ft_handler(0), inited(NONE), implicit_emptied(0), + pushed_cond(NULL) {} virtual ~handler(void) { /* TODO: DBUG_ASSERT(inited == NONE); */ } + virtual handler *clone(MEM_ROOT *mem_root); int ha_open(const char *name, int mode, int test_if_locked); - void update_auto_increment(); + void adjust_next_insert_id_after_explicit_value(ulonglong nr); + int update_auto_increment(); virtual void print_error(int error, myf errflag); virtual bool get_error_message(int error, String *buf); uint get_dup_key(int error); @@ -304,7 +577,7 @@ public: virtual const key_map *keys_to_use_for_scanning() { return &key_map_empty; } virtual bool has_transactions(){ return 0;} virtual uint extra_rec_buf_length() { return 0; } - + /* Return upper bound of current number of records in the table (max. of how many records one will retrieve when doing a full table scan) @@ -314,33 +587,43 @@ public: virtual ha_rows estimate_rows_upper_bound() { return records+EXTRA_RECORDS; } + /* + Get the row type from the storage engine. If this method returns + ROW_TYPE_NOT_USED, the information in HA_CREATE_INFO should be used. 
+ */ + virtual enum row_type get_row_type() const { return ROW_TYPE_NOT_USED; } + virtual const char *index_type(uint key_number) { DBUG_ASSERT(0); return "";} int ha_index_init(uint idx) { + DBUG_ENTER("ha_index_init"); DBUG_ASSERT(inited==NONE); inited=INDEX; - return index_init(idx); + DBUG_RETURN(index_init(idx)); } int ha_index_end() { + DBUG_ENTER("ha_index_end"); DBUG_ASSERT(inited==INDEX); inited=NONE; - return index_end(); + DBUG_RETURN(index_end()); } int ha_rnd_init(bool scan) { + DBUG_ENTER("ha_rnd_init"); DBUG_ASSERT(inited==NONE || (inited==RND && scan)); inited=RND; - return rnd_init(scan); + DBUG_RETURN(rnd_init(scan)); } int ha_rnd_end() { + DBUG_ENTER("ha_rnd_end"); DBUG_ASSERT(inited==RND); inited=NONE; - return rnd_end(); + DBUG_RETURN(rnd_end()); } - /* this is neseccary in many places, e.g. in HANDLER command */ + /* this is necessary in many places, e.g. in HANDLER command */ int ha_index_or_rnd_end() { return inited == INDEX ? ha_index_end() : inited == RND ? ha_rnd_end() : 0; @@ -369,6 +652,10 @@ public: virtual int index_next_same(byte *buf, const byte *key, uint keylen); virtual int index_read_last(byte * buf, const byte * key, uint key_len) { return (my_errno=HA_ERR_WRONG_COMMAND); } + virtual int read_multi_range_first(KEY_MULTI_RANGE **found_range_p, + KEY_MULTI_RANGE *ranges, uint range_count, + bool sorted, HANDLER_BUFFER *buffer); + virtual int read_multi_range_next(KEY_MULTI_RANGE **found_range_p); virtual int read_range_first(const key_range *start_key, const key_range *end_key, bool eq_range, bool sorted); @@ -394,15 +681,15 @@ public: key_range *max_key) { return (ha_rows) 10; } virtual void position(const byte *record)=0; - virtual int info(uint)=0; + virtual int info(uint)=0; // see my_base.h for full description virtual int extra(enum ha_extra_function operation) { return 0; } virtual int extra_opt(enum ha_extra_function operation, ulong cache_size) { return extra(operation); } virtual int reset() { return 
extra(HA_EXTRA_RESET); } - virtual int external_lock(THD *thd, int lock_type)=0; + virtual int external_lock(THD *thd, int lock_type) { return 0; } virtual void unlock_row() {} - virtual int start_stmt(THD *thd) {return 0;} + virtual int start_stmt(THD *thd, thr_lock_type lock_type) {return 0;} /* This is called to delete all rows in a table If the handler don't support this, then this function will @@ -411,12 +698,39 @@ public: */ virtual int delete_all_rows() { return (my_errno=HA_ERR_WRONG_COMMAND); } - virtual longlong get_auto_increment(); + virtual ulonglong get_auto_increment(); + virtual void restore_auto_increment(); + + /* + Reset the auto-increment counter to the given value, i.e. the next row + inserted will get the given value. This is called e.g. after TRUNCATE + is emulated by doing a 'DELETE FROM t'. HA_ERR_WRONG_COMMAND is + returned by storage engines that don't support this operation. + */ + virtual int reset_auto_increment(ulonglong value) + { return HA_ERR_WRONG_COMMAND; } + virtual void update_create_info(HA_CREATE_INFO *create_info) {} +protected: + /* to be implemented in handlers */ /* admin commands - called from mysql_admin_table */ virtual int check(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } + + /* + in these two methods check_opt can be modified + to specify CHECK option to use to call check() + upon the table + */ + virtual int check_for_upgrade(HA_CHECK_OPT *check_opt) + { return 0; } +public: + int ha_check_for_upgrade(HA_CHECK_OPT *check_opt); + int check_old_types(); + /* to be actually called to get 'check()' functionality*/ + int ha_check(THD *thd, HA_CHECK_OPT *check_opt); + virtual int backup(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } /* @@ -425,8 +739,11 @@ public: */ virtual int restore(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } +protected: virtual int repair(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } +public: + 
int ha_repair(THD* thd, HA_CHECK_OPT* check_opt); virtual int optimize(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt) @@ -455,6 +772,8 @@ public: /* used in ALTER TABLE; 1 if changing storage engine is allowed */ virtual bool can_switch_engines() { return 1; } /* used in REPLACE; is > 0 if table is referred by a FOREIGN KEY */ + virtual int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) + { return 0; } virtual uint referenced_by_foreign_key() { return 0;} virtual void init_table_handle_for_HANDLER() { return; } /* prepare InnoDB for HANDLER */ @@ -500,7 +819,7 @@ public: */ virtual int rename_table(const char *from, const char *to); virtual int delete_table(const char *name); - + virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0; /* lock_count() can be more than one if the table is a MERGE */ @@ -511,70 +830,142 @@ public: /* Type of table for caching query */ virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; } - /* - Is query with this table cachable (have sense only for ASKTRANSACT - tables) - */ + /* ask handler about permission to cache table when query is to be cached */ + virtual my_bool register_query_cache_table(THD *thd, char *table_key, + uint key_length, + qc_engine_callback + *engine_callback, + ulonglong *engine_data) + { + *engine_callback= 0; + return 1; + } + /* + RETURN + true Primary key (if there is one) is clustered key covering all fields + false otherwise + */ + virtual bool primary_key_is_clustered() { return FALSE; } + + virtual int cmp_ref(const byte *ref1, const byte *ref2) + { + return memcmp(ref1, ref2, ref_length); + } + + /* + Condition pushdown to storage engines + */ + + /* + Push condition down to the table handler. + SYNOPSIS + cond_push() + cond Condition to be pushed. The condition tree must not be + modified by the by the caller. 
+ RETURN + The 'remainder' condition that caller must use to filter out records. + NULL means the handler will not return rows that do not match the + passed condition. + NOTES + The pushed conditions form a stack (from which one can remove the + last pushed condition using cond_pop). + The table handler filters out rows using (pushed_cond1 AND pushed_cond2 + AND ... AND pushed_condN) + or less restrictive condition, depending on handler's capabilities. + + handler->extra(HA_EXTRA_RESET) call empties the condition stack. + Calls to rnd_init/rnd_end, index_init/index_end etc do not affect the + condition stack. + */ + virtual const COND *cond_push(const COND *cond) { return cond; }; + /* + Pop the top condition from the condition stack of the handler instance. + SYNOPSIS + cond_pop() + Pops the top if condition stack, if stack is not empty + */ + virtual void cond_pop() { return; }; }; /* Some extern variables used with handlers */ -extern struct show_table_type_st sys_table_types[]; +extern handlerton *sys_table_types[]; extern const char *ha_row_type[]; extern TYPELIB tx_isolation_typelib; extern TYPELIB myisam_stats_method_typelib; +extern ulong total_ha, total_ha_2pc; /* Wrapper functions */ -#define ha_commit_stmt(thd) (ha_commit_trans((thd), &((thd)->transaction.stmt))) -#define ha_rollback_stmt(thd) (ha_rollback_trans((thd), &((thd)->transaction.stmt))) -#define ha_commit(thd) (ha_commit_trans((thd), &((thd)->transaction.all))) -#define ha_rollback(thd) (ha_rollback_trans((thd), &((thd)->transaction.all))) - -#define ha_supports_generate(T) (T != DB_TYPE_INNODB && \ - T != DB_TYPE_BERKELEY_DB && \ - T != DB_TYPE_ARCHIVE_DB && \ - T != DB_TYPE_FEDERATED_DB) - -bool ha_caching_allowed(THD* thd, char* table_key, - uint key_length, uint8 cache_type); +#define ha_commit_stmt(thd) (ha_commit_trans((thd), FALSE)) +#define ha_rollback_stmt(thd) (ha_rollback_trans((thd), FALSE)) +#define ha_commit(thd) (ha_commit_trans((thd), TRUE)) +#define ha_rollback(thd) 
(ha_rollback_trans((thd), TRUE)) + +/* lookups */ enum db_type ha_resolve_by_name(const char *name, uint namelen); const char *ha_get_storage_engine(enum db_type db_type); -handler *get_new_handler(TABLE *table, enum db_type db_type); -my_off_t ha_get_ptr(byte *ptr, uint pack_length); -void ha_store_ptr(byte *buff, uint pack_length, my_off_t pos); +handler *get_new_handler(TABLE *table, MEM_ROOT *alloc, enum db_type db_type); +enum db_type ha_checktype(THD *thd, enum db_type database_type, + bool no_substitute, bool report_error); +bool ha_check_storage_engine_flag(enum db_type db_type, uint32 flag); + +/* basic stuff */ int ha_init(void); +TYPELIB *ha_known_exts(void); int ha_panic(enum ha_panic_function flag); +int ha_update_statistics(); void ha_close_connection(THD* thd); -enum db_type ha_checktype(enum db_type database_type); my_bool ha_storage_engine_is_enabled(enum db_type database_type); +bool ha_flush_logs(void); +void ha_drop_database(char* path); int ha_create_table(const char *name, HA_CREATE_INFO *create_info, bool update_create_info); +int ha_delete_table(THD *thd, enum db_type db_type, const char *path, + const char *alias, bool generate_warning); + +/* discovery */ int ha_create_table_from_engine(THD* thd, const char *db, const char *name); -int ha_delete_table(enum db_type db_type, const char *path); -void ha_drop_database(char* path); +int ha_discover(THD* thd, const char* dbname, const char* name, + const void** frmblob, uint* frmlen); +int ha_find_files(THD *thd,const char *db,const char *path, + const char *wild, bool dir,List<char>* files); +int ha_table_exists_in_engine(THD* thd, const char* db, const char* name); + +/* key cache */ int ha_init_key_cache(const char *name, KEY_CACHE *key_cache); int ha_resize_key_cache(KEY_CACHE *key_cache); int ha_change_key_cache_param(KEY_CACHE *key_cache); +int ha_change_key_cache(KEY_CACHE *old_key_cache, KEY_CACHE *new_key_cache); int ha_end_key_cache(KEY_CACHE *key_cache); -int ha_start_stmt(THD *thd); 
-int ha_report_binlog_offset_and_commit(THD *thd, char *log_file_name, - my_off_t end_offset); -int ha_commit_complete(THD *thd); + +/* report to InnoDB that control passes to the client */ int ha_release_temporary_latches(THD *thd); -int ha_commit_trans(THD *thd, THD_TRANS *trans); -int ha_rollback_trans(THD *thd, THD_TRANS *trans); -int ha_rollback_to_savepoint(THD *thd, char *savepoint_name); -int ha_savepoint(THD *thd, char *savepoint_name); + +/* transactions: interface to handlerton functions */ +int ha_start_consistent_snapshot(THD *thd); +int ha_commit_or_rollback_by_xid(XID *xid, bool commit); +int ha_commit_one_phase(THD *thd, bool all); +int ha_rollback_trans(THD *thd, bool all); +int ha_prepare(THD *thd); +int ha_recover(HASH *commit_list); + +/* transactions: these functions never call handlerton functions directly */ +int ha_commit_trans(THD *thd, bool all); int ha_autocommit_or_rollback(THD *thd, int error); -void ha_set_spin_retries(uint retries); -bool ha_flush_logs(void); int ha_enable_transaction(THD *thd, bool on); -int ha_change_key_cache(KEY_CACHE *old_key_cache, - KEY_CACHE *new_key_cache); -int ha_discover(THD* thd, const char* dbname, const char* name, - const void** frmblob, uint* frmlen); -int ha_find_files(THD *thd,const char *db,const char *path, - const char *wild, bool dir,List<char>* files); -int ha_table_exists_in_engine(THD* thd, const char* db, const char* name); -TYPELIB *ha_known_exts(void); -int ha_start_consistent_snapshot(THD *thd); + +/* savepoints */ +int ha_rollback_to_savepoint(THD *thd, SAVEPOINT *sv); +int ha_savepoint(THD *thd, SAVEPOINT *sv); +int ha_release_savepoint(THD *thd, SAVEPOINT *sv); + +/* these are called by storage engines */ +void trans_register_ha(THD *thd, bool all, handlerton *ht); + +/* + Storage engine has to assume the transaction will end up with 2pc if + - there is more than one 2pc-capable storage engine available + - in the current transaction 2pc was not disabled yet +*/ +#define 
trans_need_2pc(thd, all) ((total_ha_2pc > 1) && \ + !((all ? &thd->transaction.all : &thd->transaction.stmt)->no_2pc)) diff --git a/sql/hash_filo.cc b/sql/hash_filo.cc index ec200768222..9303120e18a 100644 --- a/sql/hash_filo.cc +++ b/sql/hash_filo.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001, 2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/hash_filo.h b/sql/hash_filo.h index fc48c3b1540..c25af67b572 100644 --- a/sql/hash_filo.h +++ b/sql/hash_filo.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003, 2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/hostname.cc b/sql/hostname.cc index 32c4bb8533d..3b5f3adf88a 100644 --- a/sql/hostname.cc +++ b/sql/hostname.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
+ the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -140,10 +139,10 @@ my_string ip_to_hostname(struct in_addr *in, uint *errors) uint i; host_entry *entry; DBUG_ENTER("ip_to_hostname"); - *errors= 0; + *errors=0; /* We always treat the loopback address as "localhost". */ - if (in->s_addr == htonl(INADDR_LOOPBACK)) + if (in->s_addr == htonl(INADDR_LOOPBACK)) // is expanded inline by gcc DBUG_RETURN((char *)my_localhost); /* Check first if we have name in cache */ diff --git a/sql/init.cc b/sql/init.cc index 5e1b6532c75..ad55a2a8b24 100644 --- a/sql/init.cc +++ b/sql/init.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -39,7 +38,7 @@ void unireg_init(ulong options) #endif VOID(strmov(reg_ext,".frm")); - specialflag=SPECIAL_SAME_DB_NAME; + specialflag=SPECIAL_SAME_DB_NAME | options; /* Set options from argv */ /* Make a tab of powers of 10 */ for (i=0,nr=1.0; i < array_elements(log_10) ; i++) { /* It's used by filesort... 
*/ @@ -51,6 +50,5 @@ void unireg_init(ulong options) log_01[i]= nr; nr*= 0.1; } - specialflag|=options; /* Set options from argv */ DBUG_VOID_RETURN; } diff --git a/sql/item.cc b/sql/item.cc index bf96fdf3f43..8568a44c547 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -21,13 +20,138 @@ #include "mysql_priv.h" #include <m_ctype.h> #include "my_dir.h" - -static void mark_as_dependent(THD *thd, - SELECT_LEX *last, SELECT_LEX *current, - Item_ident *item); +#include "sp_rcontext.h" +#include "sp_head.h" +#include "sql_trigger.h" +#include "sql_select.h" const String my_null_string("NULL", 4, default_charset_info); +/****************************************************************************/ + +/* Hybrid_type_traits {_real} */ + +void Hybrid_type_traits::fix_length_and_dec(Item *item, Item *arg) const +{ + item->decimals= NOT_FIXED_DEC; + item->max_length= item->float_length(arg->decimals); +} + +static const Hybrid_type_traits real_traits_instance; + +const Hybrid_type_traits *Hybrid_type_traits::instance() +{ + return &real_traits_instance; +} + + +my_decimal * +Hybrid_type_traits::val_decimal(Hybrid_type *val, my_decimal *to) const +{ + double2my_decimal(E_DEC_FATAL_ERROR, val->real, val->dec_buf); + return val->dec_buf; +} + + +String * +Hybrid_type_traits::val_str(Hybrid_type *val, String *to, uint8 decimals) const +{ + to->set(val->real, decimals, &my_charset_bin); + return to; +} + +/* Hybrid_type_traits_decimal */ 
+static const Hybrid_type_traits_decimal decimal_traits_instance; + +const Hybrid_type_traits_decimal *Hybrid_type_traits_decimal::instance() +{ + return &decimal_traits_instance; +} + + +void +Hybrid_type_traits_decimal::fix_length_and_dec(Item *item, Item *arg) const +{ + item->decimals= arg->decimals; + item->max_length= min(arg->max_length + DECIMAL_LONGLONG_DIGITS, + DECIMAL_MAX_STR_LENGTH); +} + + +void Hybrid_type_traits_decimal::set_zero(Hybrid_type *val) const +{ + my_decimal_set_zero(&val->dec_buf[0]); + val->used_dec_buf_no= 0; +} + + +void Hybrid_type_traits_decimal::add(Hybrid_type *val, Field *f) const +{ + my_decimal_add(E_DEC_FATAL_ERROR, + &val->dec_buf[val->used_dec_buf_no ^ 1], + &val->dec_buf[val->used_dec_buf_no], + f->val_decimal(&val->dec_buf[2])); + val->used_dec_buf_no^= 1; +} + + +void Hybrid_type_traits_decimal::div(Hybrid_type *val, ulonglong u) const +{ + int2my_decimal(E_DEC_FATAL_ERROR, u, TRUE, &val->dec_buf[2]); + /* XXX: what is '4' for scale? */ + my_decimal_div(E_DEC_FATAL_ERROR, + &val->dec_buf[val->used_dec_buf_no ^ 1], + &val->dec_buf[val->used_dec_buf_no], + &val->dec_buf[2], 4); + val->used_dec_buf_no^= 1; +} + + +longlong +Hybrid_type_traits_decimal::val_int(Hybrid_type *val, bool unsigned_flag) const +{ + longlong result; + my_decimal2int(E_DEC_FATAL_ERROR, &val->dec_buf[val->used_dec_buf_no], + unsigned_flag, &result); + return result; +} + + +double +Hybrid_type_traits_decimal::val_real(Hybrid_type *val) const +{ + my_decimal2double(E_DEC_FATAL_ERROR, &val->dec_buf[val->used_dec_buf_no], + &val->real); + return val->real; +} + + +String * +Hybrid_type_traits_decimal::val_str(Hybrid_type *val, String *to, + uint8 decimals) const +{ + my_decimal_round(E_DEC_FATAL_ERROR, &val->dec_buf[val->used_dec_buf_no], + decimals, FALSE, &val->dec_buf[2]); + my_decimal2string(E_DEC_FATAL_ERROR, &val->dec_buf[2], 0, 0, 0, to); + return to; +} + +/* Hybrid_type_traits_integer */ +static const Hybrid_type_traits_integer 
integer_traits_instance; + +const Hybrid_type_traits_integer *Hybrid_type_traits_integer::instance() +{ + return &integer_traits_instance; +} + +void +Hybrid_type_traits_integer::fix_length_and_dec(Item *item, Item *arg) const +{ + item->decimals= 0; + item->max_length= MY_INT64_NUM_DECIMAL_DIGITS; + item->unsigned_flag= 0; +} + /***************************************************************************** ** Item functions *****************************************************************************/ @@ -39,15 +163,189 @@ void item_init(void) item_user_lock_init(); } + +/* +TODO: make this functions class dependent +*/ + +bool Item::val_bool() +{ + switch(result_type()) { + case INT_RESULT: + return val_int() != 0; + case DECIMAL_RESULT: + { + my_decimal decimal_value; + my_decimal *val= val_decimal(&decimal_value); + if (val) + return !my_decimal_is_zero(val); + return 0; + } + case REAL_RESULT: + case STRING_RESULT: + return val_real() != 0.0; + case ROW_RESULT: + default: + DBUG_ASSERT(0); + return 0; // Wrong (but safe) + } +} + + +String *Item::val_string_from_real(String *str) +{ + double nr= val_real(); + if (null_value) + return 0; /* purecov: inspected */ + str->set(nr,decimals, &my_charset_bin); + return str; +} + + +String *Item::val_string_from_int(String *str) +{ + longlong nr= val_int(); + if (null_value) + return 0; + if (unsigned_flag) + str->set((ulonglong) nr, &my_charset_bin); + else + str->set(nr, &my_charset_bin); + return str; +} + + +String *Item::val_string_from_decimal(String *str) +{ + my_decimal dec_buf, *dec= val_decimal(&dec_buf); + if (null_value) + return 0; + my_decimal_round(E_DEC_FATAL_ERROR, dec, decimals, FALSE, &dec_buf); + my_decimal2string(E_DEC_FATAL_ERROR, &dec_buf, 0, 0, 0, str); + return str; +} + + +my_decimal *Item::val_decimal_from_real(my_decimal *decimal_value) +{ + double nr= val_real(); + if (null_value) + return 0; + double2my_decimal(E_DEC_FATAL_ERROR, nr, decimal_value); + return (decimal_value); +} + + 
+my_decimal *Item::val_decimal_from_int(my_decimal *decimal_value) +{ + longlong nr= val_int(); + if (null_value) + return 0; + int2my_decimal(E_DEC_FATAL_ERROR, nr, unsigned_flag, decimal_value); + return decimal_value; +} + + +my_decimal *Item::val_decimal_from_string(my_decimal *decimal_value) +{ + String *res; + char *end_ptr; + if (!(res= val_str(&str_value))) + return 0; // NULL or EOM + + end_ptr= (char*) res->ptr()+ res->length(); + if (str2my_decimal(E_DEC_FATAL_ERROR & ~E_DEC_BAD_NUM, + res->ptr(), res->length(), res->charset(), + decimal_value) & E_DEC_BAD_NUM) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), "DECIMAL", + str_value.c_ptr()); + } + return decimal_value; +} + + +my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + if (get_date(<ime, TIME_FUZZY_DATE)) + { + my_decimal_set_zero(decimal_value); + return 0; + } + return date2my_decimal(<ime, decimal_value); +} + + +my_decimal *Item::val_decimal_from_time(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed == 1); + TIME ltime; + if (get_time(<ime)) + { + my_decimal_set_zero(decimal_value); + return 0; + } + return date2my_decimal(<ime, decimal_value); +} + + +double Item::val_real_from_decimal() +{ + /* Note that fix_fields may not be called for Item_avg_field items */ + double result; + my_decimal value_buff, *dec_val= val_decimal(&value_buff); + if (null_value) + return 0.0; + my_decimal2double(E_DEC_FATAL_ERROR, dec_val, &result); + return result; +} + + +longlong Item::val_int_from_decimal() +{ + /* Note that fix_fields may not be called for Item_avg_field items */ + longlong result; + my_decimal value, *dec_val= val_decimal(&value); + if (null_value) + return 0; + my_decimal2int(E_DEC_FATAL_ERROR, dec_val, unsigned_flag, &result); + return result; +} + +int Item::save_time_in_field(Field *field) +{ + TIME ltime; + if (get_time(<ime)) + return 
set_field_to_null(field); + field->set_notnull(); + return field->store_time(<ime, MYSQL_TIMESTAMP_TIME); +} + + +int Item::save_date_in_field(Field *field) +{ + TIME ltime; + if (get_date(<ime, TIME_FUZZY_DATE)) + return set_field_to_null(field); + field->set_notnull(); + return field->store_time(<ime, MYSQL_TIMESTAMP_DATETIME); +} + + Item::Item(): - fixed(0) + rsize(0), name(0), orig_name(0), name_length(0), fixed(0), + is_autogenerated_name(TRUE), + collation(&my_charset_bin, DERIVATION_COERCIBLE) { marker= 0; maybe_null=null_value=with_sum_func=unsigned_flag=0; - collation.set(&my_charset_bin, DERIVATION_COERCIBLE); - name= 0; decimals= 0; max_length= 0; with_subselect= 0; + cmp_context= (Item_result)-1; /* Put item in free list so that we can free all items at end */ THD *thd= current_thd; @@ -69,13 +367,15 @@ Item::Item(): } /* - Constructor used by Item_field, Item_*_ref & agregate (sum) functions. + Constructor used by Item_field, Item_*_ref & aggregate (sum) functions. Used for duplicating lists in processing queries with temporary tables */ Item::Item(THD *thd, Item *item): + rsize(0), str_value(item->str_value), name(item->name), + orig_name(item->orig_name), max_length(item->max_length), marker(item->marker), decimals(item->decimals), @@ -84,45 +384,153 @@ Item::Item(THD *thd, Item *item): unsigned_flag(item->unsigned_flag), with_sum_func(item->with_sum_func), fixed(item->fixed), - collation(item->collation) + collation(item->collation), + cmp_context(item->cmp_context) { next= thd->free_list; // Put in free list thd->free_list= this; } +uint Item::decimal_precision() const +{ + Item_result restype= result_type(); + + if ((restype == DECIMAL_RESULT) || (restype == INT_RESULT)) + return min(my_decimal_length_to_precision(max_length, decimals, unsigned_flag), + DECIMAL_MAX_PRECISION); + return min(max_length, DECIMAL_MAX_PRECISION); +} + + void Item::print_item_w_name(String *str) { print(str); if (name) { - str->append(" AS `", 5); - str->append(name); 
- str->append('`'); + THD *thd= current_thd; + str->append(STRING_WITH_LEN(" AS ")); + append_identifier(thd, str, name, (uint) strlen(name)); } } -Item_ident::Item_ident(const char *db_name_par,const char *table_name_par, - const char *field_name_par) - :orig_db_name(db_name_par), orig_table_name(table_name_par), - orig_field_name(field_name_par), - db_name(db_name_par), table_name(table_name_par), - field_name(field_name_par), cached_field_index(NO_CACHED_FIELD_INDEX), +void Item::cleanup() +{ + DBUG_ENTER("Item::cleanup"); + fixed=0; + marker= 0; + if (orig_name) + name= orig_name; + DBUG_VOID_RETURN; +} + + +/* + cleanup() item if it is 'fixed' + + SYNOPSIS + cleanup_processor() + arg - a dummy parameter, is not used here +*/ + +bool Item::cleanup_processor(byte *arg) +{ + if (fixed) + cleanup(); + return FALSE; +} + + +/* + rename item (used for views, cleanup() return original name) + + SYNOPSIS + Item::rename() + new_name new name of item; +*/ + +void Item::rename(char *new_name) +{ + /* + we can compare pointers to names here, because if name was not changed, + pointer will be same + */ + if (!orig_name && new_name != name) + orig_name= name; + name= new_name; +} + + +/* + Traverse item tree possibly transforming it (replacing items). + + SYNOPSIS + Item::transform() + transformer functor that performs transformation of a subtree + arg opaque argument passed to the functor + + DESCRIPTION + This function is designed to ease transformation of Item trees. + + Re-execution note: every such transformation is registered for + rollback by THD::change_item_tree() and is rolled back at the end + of execution by THD::rollback_item_tree_changes(). + + Therefore: + + - this function can not be used at prepared statement prepare + (in particular, in fix_fields!), as only permanent + transformation of Item trees are allowed at prepare. 
+ + - the transformer function shall allocate new Items in execution + memory root (thd->mem_root) and not anywhere else: allocated + items will be gone in the end of execution. + + If you don't need to transform an item tree, but only traverse + it, please use Item::walk() instead. + + + RETURN VALUE + Returns pointer to the new subtree root. THD::change_item_tree() + should be called for it if transformation took place, i.e. if a + pointer to newly allocated item is returned. +*/ + +Item* Item::transform(Item_transformer transformer, byte *arg) +{ + DBUG_ASSERT(!current_thd->is_stmt_prepare()); + + return (this->*transformer)(arg); +} + + +Item_ident::Item_ident(Name_resolution_context *context_arg, + const char *db_name_arg,const char *table_name_arg, + const char *field_name_arg) + :orig_db_name(db_name_arg), orig_table_name(table_name_arg), + orig_field_name(field_name_arg), context(context_arg), + db_name(db_name_arg), table_name(table_name_arg), + field_name(field_name_arg), + alias_name_used(FALSE), cached_field_index(NO_CACHED_FIELD_INDEX), cached_table(0), depended_from(0) { - name = (char*) field_name_par; + name = (char*) field_name_arg; } -// Constructor used by Item_field & Item_*_ref (see Item comment) + +/* Constructor used by Item_field & Item_*_ref (see Item comment) */ + Item_ident::Item_ident(THD *thd, Item_ident *item) :Item(thd, item), orig_db_name(item->orig_db_name), orig_table_name(item->orig_table_name), orig_field_name(item->orig_field_name), + context(item->context), db_name(item->db_name), table_name(item->table_name), field_name(item->field_name), + alias_name_used(item->alias_name_used), cached_field_index(item->cached_field_index), cached_table(item->cached_table), depended_from(item->depended_from) @@ -131,14 +539,19 @@ Item_ident::Item_ident(THD *thd, Item_ident *item) void Item_ident::cleanup() { DBUG_ENTER("Item_ident::cleanup"); - DBUG_PRINT("enter", ("b:%s(%s), t:%s(%s), f:%s(%s)", - db_name, orig_db_name, - table_name, 
orig_table_name, - field_name, orig_field_name)); +#ifdef CANT_BE_USED_AS_MEMORY_IS_FREED + db_name ? db_name : "(null)", + orig_db_name ? orig_db_name : "(null)", + table_name ? table_name : "(null)", + orig_table_name ? orig_table_name : "(null)", + field_name ? field_name : "(null)", + orig_field_name ? orig_field_name : "(null)")); +#endif Item::cleanup(); db_name= orig_db_name; table_name= orig_table_name; field_name= orig_field_name; + depended_from= 0; DBUG_VOID_RETURN; } @@ -151,6 +564,78 @@ bool Item_ident::remove_dependence_processor(byte * arg) } +/* + Store the pointer to this item field into a list if not already there. + + SYNOPSIS + Item_field::collect_item_field_processor() + arg pointer to a List<Item_field> + + DESCRIPTION + The method is used by Item::walk to collect all unique Item_field objects + from a tree of Items into a set of items represented as a list. + + IMPLEMENTATION + Item_cond::walk() and Item_func::walk() stop the evaluation of the + processor function for its arguments once the processor returns + true.Therefore in order to force this method being called for all item + arguments in a condition the method must return false. + + RETURN + FALSE to force the evaluation of collect_item_field_processor + for the subsequent items. +*/ + +bool Item_field::collect_item_field_processor(byte *arg) +{ + DBUG_ENTER("Item_field::collect_item_field_processor"); + DBUG_PRINT("info", ("%s", field->field_name ? field->field_name : "noname")); + List<Item_field> *item_list= (List<Item_field>*) arg; + List_iterator<Item_field> item_list_it(*item_list); + Item_field *curr_item; + while ((curr_item= item_list_it++)) + { + if (curr_item->eq(this, 1)) + DBUG_RETURN(FALSE); /* Already in the set. */ + } + item_list->push_back(this); + DBUG_RETURN(FALSE); +} + + +/* + Check if an Item_field references some field from a list of fields. 
+ + SYNOPSIS + Item_field::find_item_in_field_list_processor + arg Field being compared, arg must be of type Field + + DESCRIPTION + Check whether the Item_field represented by 'this' references any + of the fields in the keyparts passed via 'arg'. Used with the + method Item::walk() to test whether any keypart in a sequence of + keyparts is referenced in an expression. + + RETURN + TRUE if 'this' references the field 'arg' + FALSE otherwise +*/ + +bool Item_field::find_item_in_field_list_processor(byte *arg) +{ + KEY_PART_INFO *first_non_group_part= *((KEY_PART_INFO **) arg); + KEY_PART_INFO *last_part= *(((KEY_PART_INFO **) arg) + 1); + KEY_PART_INFO *cur_part; + + for (cur_part= first_non_group_part; cur_part != last_part; cur_part++) + { + if (field->eq(cur_part->field)) + return TRUE; + } + return FALSE; +} + + bool Item::check_cols(uint c) { if (c != 1) @@ -168,27 +653,36 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs) { /* Empty string, used by AS or internal function like last_insert_id() */ name= (char*) str; + name_length= 0; return; } if (cs->ctype) { - // This will probably need a better implementation in the future: - // a function in CHARSET_INFO structure. + uint orig_len= length; + /* + This will probably need a better implementation in the future: + a function in CHARSET_INFO structure. 
+ */ while (length && !my_isgraph(cs,*str)) { // Fix problem with yacc length--; str++; } + if (orig_len != length && !is_autogenerated_name) + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_REMOVED_SPACES, ER(ER_REMOVED_SPACES), + str + length - orig_len); + } if (!my_charset_same(cs, system_charset_info)) { uint32 res_length; - name= sql_strmake_with_convert(str, length, cs, + name= sql_strmake_with_convert(str, name_length= length, cs, MAX_ALIAS_NAME, system_charset_info, &res_length); } else - name=sql_strmake(str, min(length,MAX_ALIAS_NAME)); + name= sql_strmake(str, (name_length= min(length,MAX_ALIAS_NAME))); } @@ -236,7 +730,23 @@ Item *Item_num::safe_charset_converter(CHARSET_INFO *tocs) if ((conv= new Item_string(s->ptr(), s->length(), s->charset()))) { conv->str_value.copy(); - conv->str_value.shrink_to_length(); + conv->str_value.mark_as_const(); + } + return conv; +} + + +Item *Item_static_float_func::safe_charset_converter(CHARSET_INFO *tocs) +{ + Item_string *conv; + char buf[64]; + String *s, tmp(buf, sizeof(buf), &my_charset_bin); + s= val_str(&tmp); + if ((conv= new Item_static_string_func(func_name, s->ptr(), s->length(), + s->charset()))) + { + conv->str_value.copy(); + conv->str_value.mark_as_const(); } return conv; } @@ -261,18 +771,8 @@ Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs) return NULL; } conv->str_value.copy(); - /* - The above line executes str_value.realloc() internally, - which alligns Alloced_length using ALLIGN_SIZE. - In the case of Item_string::str_value we don't want - Alloced_length to be longer than str_length. - Otherwise, some functions like Item_func_concat::val_str() - try to reuse str_value as a buffer for concatenation result - for optimization purposes, so our string constant become - corrupted. See bug#8785 for more details. - Let's shrink Alloced_length to str_length to avoid this problem. 
- */ - conv->str_value.shrink_to_length(); + /* Ensure that no one is going to change the result string */ + conv->str_value.mark_as_const(); return conv; } @@ -281,25 +781,44 @@ Item *Item_param::safe_charset_converter(CHARSET_INFO *tocs) { if (const_item()) { - Item_string *conv; - uint conv_errors; - char buf[MAX_FIELD_WIDTH]; - String tmp(buf, sizeof(buf), &my_charset_bin); - String cstr, *ostr= val_str(&tmp); + uint cnv_errors; + String *ostr= val_str(&cnvstr); + cnvitem->str_value.copy(ostr->ptr(), ostr->length(), + ostr->charset(), tocs, &cnv_errors); + if (cnv_errors) + return NULL; + cnvitem->str_value.mark_as_const(); + cnvitem->max_length= cnvitem->str_value.numchars() * tocs->mbmaxlen; + return cnvitem; + } + return NULL; +} + + +Item *Item_static_string_func::safe_charset_converter(CHARSET_INFO *tocs) +{ + Item_string *conv; + uint conv_errors; + String tmp, cstr, *ostr= val_str(&tmp); + cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); + if (conv_errors || + !(conv= new Item_static_string_func(func_name, + cstr.ptr(), cstr.length(), + cstr.charset(), + collation.derivation))) + { /* - As safe_charset_converter is not executed for - a parameter bound to NULL, ostr should never be 0. + Safe conversion is not possible (or EOM). + We could not convert a string into the requested character set + without data loss. The target charset does not cover all the + characters from the string. Operation cannot be done correctly. 
*/ - cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); - if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(), - cstr.charset(), - collation.derivation))) - return NULL; - conv->str_value.copy(); - conv->str_value.shrink_to_length(); - return conv; + return NULL; } - return NULL; + conv->str_value.copy(); + /* Ensure that no one is going to change the result string */ + conv->str_value.mark_as_const(); + return conv; } @@ -309,7 +828,8 @@ bool Item_string::eq(const Item *item, bool binary_cmp) const { if (binary_cmp) return !stringcmp(&str_value, &item->str_value); - return !sortcmp(&str_value, &item->str_value, collation.collation); + return (collation.collation == item->collation.collation && + !sortcmp(&str_value, &item->str_value, collation.collation)); } return 0; } @@ -358,6 +878,337 @@ CHARSET_INFO *Item::default_charset() } +int Item::save_in_field_no_warnings(Field *field, bool no_conversions) +{ + int res; + THD *thd= field->table->in_use; + enum_check_fields tmp= thd->count_cuted_fields; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; + res= save_in_field(field, no_conversions); + thd->count_cuted_fields= tmp; + return res; +} + + +/***************************************************************************** + Item_sp_variable methods +*****************************************************************************/ + +Item_sp_variable::Item_sp_variable(char *sp_var_name_str, + uint sp_var_name_length) + :m_thd(0) +#ifndef DBUG_OFF + , m_sp(0) +#endif +{ + m_name.str= sp_var_name_str; + m_name.length= sp_var_name_length; +} + + +bool Item_sp_variable::fix_fields(THD *thd, Item **) +{ + Item *it; + + m_thd= thd; /* NOTE: this must be set before any this_xxx() */ + it= this_item(); + + DBUG_ASSERT(it->fixed); + + max_length= it->max_length; + decimals= it->decimals; + unsigned_flag= it->unsigned_flag; + fixed= 1; + collation.set(it->collation.collation, it->collation.derivation); + + return FALSE; +} + + +double 
Item_sp_variable::val_real() +{ + DBUG_ASSERT(fixed); + Item *it= this_item(); + double ret= it->val_real(); + null_value= it->null_value; + return ret; +} + + +longlong Item_sp_variable::val_int() +{ + DBUG_ASSERT(fixed); + Item *it= this_item(); + longlong ret= it->val_int(); + null_value= it->null_value; + return ret; +} + + +String *Item_sp_variable::val_str(String *sp) +{ + DBUG_ASSERT(fixed); + Item *it= this_item(); + String *res= it->val_str(sp); + + null_value= it->null_value; + + if (!res) + return NULL; + + /* + This way we mark returned value of val_str as const, + so that various functions (e.g. CONCAT) won't try to + modify the value of the Item. Analogous mechanism is + implemented for Item_param. + Without this trick Item_splocal could be changed as a + side-effect of expression computation. Here is an example + of what happens without it: suppose x is varchar local + variable in a SP with initial value 'ab' Then + select concat(x,'c'); + would change x's value to 'abc', as Item_func_concat::val_str() + would use x's internal buffer to compute the result. + This is intended behaviour of Item_func_concat. Comments to + Item_param class contain some more details on the topic. 
+ */ + + if (res != &str_value) + str_value.set(res->ptr(), res->length(), res->charset()); + else + res->mark_as_const(); + + return &str_value; +} + + +my_decimal *Item_sp_variable::val_decimal(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed); + Item *it= this_item(); + my_decimal *val= it->val_decimal(decimal_value); + null_value= it->null_value; + return val; +} + + +bool Item_sp_variable::is_null() +{ + return this_item()->is_null(); +} + + +/***************************************************************************** + Item_splocal methods +*****************************************************************************/ + +Item_splocal::Item_splocal(const LEX_STRING &sp_var_name, + uint sp_var_idx, + enum_field_types sp_var_type, + uint pos_in_q) + :Item_sp_variable(sp_var_name.str, sp_var_name.length), + m_var_idx(sp_var_idx), pos_in_query(pos_in_q) +{ + maybe_null= TRUE; + + m_type= sp_map_item_type(sp_var_type); + m_result_type= sp_map_result_type(sp_var_type); +} + + +Item * +Item_splocal::this_item() +{ + DBUG_ASSERT(m_sp == m_thd->spcont->sp); + + return m_thd->spcont->get_item(m_var_idx); +} + +const Item * +Item_splocal::this_item() const +{ + DBUG_ASSERT(m_sp == m_thd->spcont->sp); + + return m_thd->spcont->get_item(m_var_idx); +} + + +Item ** +Item_splocal::this_item_addr(THD *thd, Item **) +{ + DBUG_ASSERT(m_sp == thd->spcont->sp); + + return thd->spcont->get_item_addr(m_var_idx); +} + + +void Item_splocal::print(String *str) +{ + str->reserve(m_name.length+8); + str->append(m_name.str, m_name.length); + str->append('@'); + str->qs_append(m_var_idx); +} + + +bool Item_splocal::set_value(THD *thd, sp_rcontext *ctx, Item **it) +{ + return ctx->set_variable(thd, get_var_idx(), it); +} + + +/***************************************************************************** + Item_case_expr methods +*****************************************************************************/ + +Item_case_expr::Item_case_expr(int case_expr_id) + :Item_sp_variable((char *) 
STRING_WITH_LEN("case_expr")), + m_case_expr_id(case_expr_id) +{ +} + + +Item * +Item_case_expr::this_item() +{ + DBUG_ASSERT(m_sp == m_thd->spcont->sp); + + return m_thd->spcont->get_case_expr(m_case_expr_id); +} + + + +const Item * +Item_case_expr::this_item() const +{ + DBUG_ASSERT(m_sp == m_thd->spcont->sp); + + return m_thd->spcont->get_case_expr(m_case_expr_id); +} + + +Item ** +Item_case_expr::this_item_addr(THD *thd, Item **) +{ + DBUG_ASSERT(m_sp == thd->spcont->sp); + + return thd->spcont->get_case_expr_addr(m_case_expr_id); +} + + +void Item_case_expr::print(String *str) +{ + VOID(str->append(STRING_WITH_LEN("case_expr@"))); + str->qs_append(m_case_expr_id); +} + + +/***************************************************************************** + Item_name_const methods +*****************************************************************************/ + +double Item_name_const::val_real() +{ + DBUG_ASSERT(fixed); + double ret= value_item->val_real(); + null_value= value_item->null_value; + return ret; +} + + +longlong Item_name_const::val_int() +{ + DBUG_ASSERT(fixed); + longlong ret= value_item->val_int(); + null_value= value_item->null_value; + return ret; +} + + +String *Item_name_const::val_str(String *sp) +{ + DBUG_ASSERT(fixed); + String *ret= value_item->val_str(sp); + null_value= value_item->null_value; + return ret; +} + + +my_decimal *Item_name_const::val_decimal(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed); + my_decimal *val= value_item->val_decimal(decimal_value); + null_value= value_item->null_value; + return val; +} + + +bool Item_name_const::is_null() +{ + return value_item->is_null(); +} + +Item::Type Item_name_const::type() const +{ + return value_item->type(); +} + + +bool Item_name_const::fix_fields(THD *thd, Item **ref) +{ + char buf[128]; + String *item_name; + String s(buf, sizeof(buf), &my_charset_bin); + s.length(0); + + if (value_item->fix_fields(thd, &value_item) || + name_item->fix_fields(thd, &name_item)) + return TRUE; + if 
(!(value_item->const_item() && name_item->const_item())) + return TRUE; + + if (!(item_name= name_item->val_str(&s))) + return TRUE; /* Can't have a NULL name */ + + set_name(item_name->ptr(), (uint) item_name->length(), system_charset_info); + max_length= value_item->max_length; + decimals= value_item->decimals; + fixed= 1; + return FALSE; +} + + +void Item_name_const::print(String *str) +{ + str->append(STRING_WITH_LEN("NAME_CONST(")); + name_item->print(str); + str->append(','); + value_item->print(str); + str->append(')'); +} + + +/* + need a special class to adjust printing : references to aggregate functions + must not be printed as refs because the aggregate functions that are added to + the front of select list are not printed as well. +*/ +class Item_aggregate_ref : public Item_ref +{ +public: + Item_aggregate_ref(Name_resolution_context *context_arg, Item **item, + const char *table_name_arg, const char *field_name_arg) + :Item_ref(context_arg, item, table_name_arg, field_name_arg) {} + + void print (String *str) + { + if (ref) + (*ref)->print(str); + else + Item_ident::print(str); + } +}; + + /* Move SUM items out from item tree and replace with reference @@ -367,6 +1218,7 @@ CHARSET_INFO *Item::default_charset() ref_pointer_array Pointer to array of reference fields fields All fields in select ref Pointer to item + skip_registered <=> function be must skipped for registered SUM items NOTES This is from split_sum_func2() for items that should be split @@ -379,17 +1231,25 @@ CHARSET_INFO *Item::default_charset() void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, - List<Item> &fields, Item **ref) -{ - if (type() != SUM_FUNC_ITEM && with_sum_func) + List<Item> &fields, Item **ref, + bool skip_registered) +{ + /* An item of type Item_sum is registered <=> ref_by != 0 */ + if (type() == SUM_FUNC_ITEM && skip_registered && + ((Item_sum *) this)->ref_by) + return; + if ((type() != SUM_FUNC_ITEM && with_sum_func) || + (type() == FUNC_ITEM && + 
(((Item_func *) this)->functype() == Item_func::ISNOTNULLTEST_FUNC || + ((Item_func *) this)->functype() == Item_func::TRIG_COND_FUNC))) { /* Will split complicated items and ignore simple ones */ split_sum_func(thd, ref_pointer_array, fields); } - else if ((type() == SUM_FUNC_ITEM || - (used_tables() & ~PARAM_TABLE_BIT)) && + else if ((type() == SUM_FUNC_ITEM || (used_tables() & ~PARAM_TABLE_BIT)) && type() != SUBSELECT_ITEM && - type() != REF_ITEM) + (type() != REF_ITEM || + ((Item_ref*)this)->ref_type() == Item_ref::VIEW_REF)) { /* Replace item with a reference so that we can easily calculate @@ -398,15 +1258,21 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, The test above is to ensure we don't do a reference for things that are constants (PARAM_TABLE_BIT is in effect a constant) or already referenced (for example an item in HAVING) + Exception is Item_direct_view_ref which we need to convert to + Item_ref to allow fields from view being stored in tmp table. */ + Item_aggregate_ref *item_ref; uint el= fields.elements; - Item *new_item; - ref_pointer_array[el]= this; - if (!(new_item= new Item_ref(ref_pointer_array + el, 0, name))) + Item *real_itm= real_item(); + + ref_pointer_array[el]= real_itm; + if (!(item_ref= new Item_aggregate_ref(&thd->lex->current_select->context, + ref_pointer_array + el, 0, name))) return; // fatal_error is set - fields.push_front(this); - ref_pointer_array[el]= this; - thd->change_item_tree(ref, new_item); + if (type() == SUM_FUNC_ITEM) + item_ref->depended_from= ((Item_sum *) this)->depended_from(); + fields.push_front(real_itm); + thd->change_item_tree(ref, item_ref); } } @@ -415,7 +1281,7 @@ void Item::split_sum_func2(THD *thd, Item **ref_pointer_array, Aggregate two collations together taking into account their coercibility (aka derivation): - 0 == DERIVATION_EXPLICIT - an explicitely written COLLATE clause + 0 == DERIVATION_EXPLICIT - an explicitly written COLLATE clause 1 == DERIVATION_NONE - a mix of two 
different collations 2 == DERIVATION_IMPLICIT - a column 3 == DERIVATION_COERCIBLE - a string constant @@ -453,7 +1319,7 @@ bool DTCollation::aggregate(DTCollation &dt, uint flags) /* We do allow to use binary strings (like BLOBS) together with character strings. - Binaries have more precedance than a character + Binaries have more precedence than a character string of the same derivation. */ if (collation == &my_charset_bin) @@ -531,10 +1397,8 @@ bool DTCollation::aggregate(DTCollation &dt, uint flags) return 1; } if (collation->state & MY_CS_BINSORT) - { return 0; - } - else if (dt.collation->state & MY_CS_BINSORT) + if (dt.collation->state & MY_CS_BINSORT) { set(dt); return 0; @@ -571,35 +1435,37 @@ void my_coll_agg_error(DTCollation &c1, DTCollation &c2, DTCollation &c3, static -void my_coll_agg_error(Item** args, uint count, const char *fname) +void my_coll_agg_error(Item** args, uint count, const char *fname, + int item_sep) { if (count == 2) - my_coll_agg_error(args[0]->collation, args[1]->collation, fname); + my_coll_agg_error(args[0]->collation, args[item_sep]->collation, fname); else if (count == 3) - my_coll_agg_error(args[0]->collation, args[1]->collation, - args[2]->collation, fname); + my_coll_agg_error(args[0]->collation, args[item_sep]->collation, + args[2*item_sep]->collation, fname); else my_error(ER_CANT_AGGREGATE_NCOLLATIONS,MYF(0),fname); } bool agg_item_collations(DTCollation &c, const char *fname, - Item **av, uint count, uint flags) + Item **av, uint count, uint flags, int item_sep) { uint i; + Item **arg; c.set(av[0]->collation); - for (i= 1; i < count; i++) + for (i= 1, arg= &av[item_sep]; i < count; i++, arg++) { - if (c.aggregate(av[i]->collation, flags)) + if (c.aggregate((*arg)->collation, flags)) { - my_coll_agg_error(av, count, fname); + my_coll_agg_error(av, count, fname, item_sep); return TRUE; } } if ((flags & MY_COLL_DISALLOW_NONE) && c.derivation == DERIVATION_NONE) { - my_coll_agg_error(av, count, fname); + 
my_coll_agg_error(av, count, fname, item_sep); return TRUE; } return FALSE; @@ -610,7 +1476,7 @@ bool agg_item_collations_for_comparison(DTCollation &c, const char *fname, Item **av, uint count, uint flags) { return (agg_item_collations(c, fname, av, count, - flags | MY_COLL_DISALLOW_NONE)); + flags | MY_COLL_DISALLOW_NONE, 1)); } @@ -633,13 +1499,22 @@ bool agg_item_collations_for_comparison(DTCollation &c, const char *fname, For functions with more than two arguments: collect(A,B,C) ::= collect(collect(A,B),C) + + Since this function calls THD::change_item_tree() on the passed Item ** + pointers, it is necessary to pass the original Item **'s, not copies. + Otherwise their values will not be properly restored (see BUG#20769). + If the items are not consecutive (eg. args[2] and args[5]), use the + item_sep argument, ie. + + agg_item_charsets(coll, fname, &args[2], 2, flags, 3) + */ bool agg_item_charsets(DTCollation &coll, const char *fname, - Item **args, uint nargs, uint flags) + Item **args, uint nargs, uint flags, int item_sep) { - Item **arg, **last, *safe_args[2]; - if (agg_item_collations(coll, fname, args, nargs, flags)) + Item **arg, *safe_args[2]; + if (agg_item_collations(coll, fname, args, nargs, flags, item_sep)) return TRUE; /* @@ -649,22 +1524,26 @@ bool agg_item_charsets(DTCollation &coll, const char *fname, doesn't display each argument's characteristics. - if nargs is 1, then this error cannot happen. */ + LINT_INIT(safe_args[0]); + LINT_INIT(safe_args[1]); if (nargs >=2 && nargs <= 3) { safe_args[0]= args[0]; - safe_args[1]= args[1]; + safe_args[1]= args[item_sep]; } THD *thd= current_thd; - Item_arena *arena, backup; + Query_arena *arena, backup; bool res= FALSE; + uint i; /* In case we're in statement prepare, create conversion item in its memory: it will be reused on each execute. */ - arena= thd->change_arena_if_needed(&backup); + arena= thd->is_stmt_prepare() ? 
thd->activate_stmt_arena_if_needed(&backup) + : NULL; - for (arg= args, last= args + nargs; arg < last; arg++) + for (i= 0, arg= args; i < nargs; i++, arg+= item_sep) { Item* conv; uint32 dummy_offset; @@ -679,13 +1558,14 @@ bool agg_item_charsets(DTCollation &coll, const char *fname, { /* restore the original arguments for better error message */ args[0]= safe_args[0]; - args[1]= safe_args[1]; + args[item_sep]= safe_args[1]; } - my_coll_agg_error(args, nargs, fname); + my_coll_agg_error(args, nargs, fname, item_sep); res= TRUE; break; // we cannot return here, we need to restore "arena". } - conv->fix_fields(thd, 0, &conv); + if ((*arg)->type() == Item::FIELD_ITEM) + ((Item_field *)(*arg))->no_const_subst= 1; /* If in statement prepare, then we create a converter for two constant items, do it once and then reuse it. @@ -700,19 +1580,37 @@ bool agg_item_charsets(DTCollation &coll, const char *fname, *arg= conv; else thd->change_item_tree(arg, conv); + /* + We do not check conv->fixed, because Item_func_conv_charset which can + be return by safe_charset_converter can't be fixed at creation + */ + conv->fix_fields(thd, arg); } if (arena) - thd->restore_backup_item_arena(arena, &backup); + thd->restore_active_arena(arena, &backup); return res; } - +void Item_ident_for_show::make_field(Send_field *tmp_field) +{ + tmp_field->table_name= tmp_field->org_table_name= table_name; + tmp_field->db_name= db_name; + tmp_field->col_name= tmp_field->org_col_name= field->field_name; + tmp_field->charsetnr= field->charset()->number; + tmp_field->length=field->field_length; + tmp_field->type=field->type(); + tmp_field->flags= field->table->maybe_null ? 
+ (field->flags & ~NOT_NULL_FLAG) : field->flags; + tmp_field->decimals= 0; +} /**********************************************/ Item_field::Item_field(Field *f) - :Item_ident(NullS, f->table_name, f->field_name) + :Item_ident(0, NullS, *f->table_name, f->field_name), + item_equal(0), no_const_subst(0), + have_privileges(0), any_privileges(0), fixed_as_field(0) { set_field(f); /* @@ -722,8 +1620,11 @@ Item_field::Item_field(Field *f) orig_table_name= orig_field_name= ""; } -Item_field::Item_field(THD *thd, Field *f) - :Item_ident(f->table->table_cache_key, f->table_name, f->field_name) +Item_field::Item_field(THD *thd, Name_resolution_context *context_arg, + Field *f) + :Item_ident(context_arg, f->table->s->db, *f->table_name, f->field_name), + item_equal(0), no_const_subst(0), + have_privileges(0), any_privileges(0), fixed_as_field(0) { /* We always need to provide Item_field with a fully qualified field @@ -740,7 +1641,7 @@ Item_field::Item_field(THD *thd, Field *f) structure can go away and pop up again between subsequent executions of a prepared statement). 
*/ - if (thd->current_arena->is_stmt_prepare()) + if (thd->stmt_arena->is_stmt_prepare_or_first_sp_execute()) { if (db_name) orig_db_name= thd->strdup(db_name); @@ -756,11 +1657,30 @@ Item_field::Item_field(THD *thd, Field *f) set_field(f); } + +Item_field::Item_field(Name_resolution_context *context_arg, + const char *db_arg,const char *table_name_arg, + const char *field_name_arg) + :Item_ident(context_arg, db_arg,table_name_arg,field_name_arg), + field(0), result_field(0), item_equal(0), no_const_subst(0), + have_privileges(0), any_privileges(0), fixed_as_field(0) +{ + SELECT_LEX *select= current_thd->lex->current_select; + collation.set(DERIVATION_IMPLICIT); + if (select && select->parsing_place != IN_HAVING) + select->select_n_where_fields++; +} + // Constructor need to process subselect with temporary tables (see Item) Item_field::Item_field(THD *thd, Item_field *item) :Item_ident(thd, item), field(item->field), - result_field(item->result_field) + result_field(item->result_field), + item_equal(item->item_equal), + no_const_subst(item->no_const_subst), + have_privileges(item->have_privileges), + any_privileges(item->any_privileges), + fixed_as_field(item->fixed_as_field) { collation.set(DERIVATION_IMPLICIT); } @@ -769,13 +1689,14 @@ void Item_field::set_field(Field *field_par) { field=result_field=field_par; // for easy coding with fields maybe_null=field->maybe_null(); - max_length=field_par->max_length(); decimals= field->decimals(); - table_name=field_par->table_name; - field_name=field_par->field_name; - db_name=field_par->table->table_cache_key; + max_length= field_par->max_display_length(); + table_name= *field_par->table_name; + field_name= field_par->field_name; + db_name= field_par->table->s->db; + alias_name_used= field_par->table->alias_name_used; unsigned_flag=test(field_par->flags & UNSIGNED_FLAG); - collation.set(field_par->charset(), DERIVATION_IMPLICIT); + collation.set(field_par->charset(), field_par->derivation()); fixed= 1; } @@ -818,6 
+1739,60 @@ const char *Item_ident::full_name() const return tmp; } +void Item_ident::print(String *str) +{ + THD *thd= current_thd; + char d_name_buff[MAX_ALIAS_NAME], t_name_buff[MAX_ALIAS_NAME]; + const char *d_name= db_name, *t_name= table_name; + if (lower_case_table_names== 1 || + (lower_case_table_names == 2 && !alias_name_used)) + { + if (table_name && table_name[0]) + { + strmov(t_name_buff, table_name); + my_casedn_str(files_charset_info, t_name_buff); + t_name= t_name_buff; + } + if (db_name && db_name[0]) + { + strmov(d_name_buff, db_name); + my_casedn_str(files_charset_info, d_name_buff); + d_name= d_name_buff; + } + } + + if (!table_name || !field_name || !field_name[0]) + { + const char *nm= (field_name && field_name[0]) ? + field_name : name ? name : "tmp_field"; + append_identifier(thd, str, nm, (uint) strlen(nm)); + return; + } + if (db_name && db_name[0] && !alias_name_used) + { + if (!(cached_table && cached_table->belong_to_view && + cached_table->belong_to_view->compact_view_format)) + { + append_identifier(thd, str, d_name, (uint)strlen(d_name)); + str->append('.'); + } + append_identifier(thd, str, t_name, (uint)strlen(t_name)); + str->append('.'); + append_identifier(thd, str, field_name, (uint)strlen(field_name)); + } + else + { + if (table_name[0]) + { + append_identifier(thd, str, t_name, (uint) strlen(t_name)); + str->append('.'); + append_identifier(thd, str, field_name, (uint) strlen(field_name)); + } + else + append_identifier(thd, str, field_name, (uint) strlen(field_name)); + } +} + /* ARGSUSED */ String *Item_field::val_str(String *str) { @@ -828,7 +1803,8 @@ String *Item_field::val_str(String *str) return field->val_str(str,&str_value); } -double Item_field::val() + +double Item_field::val_real() { DBUG_ASSERT(fixed == 1); if ((null_value=field->is_null())) @@ -836,6 +1812,7 @@ double Item_field::val() return field->val_real(); } + longlong Item_field::val_int() { DBUG_ASSERT(fixed == 1); @@ -845,6 +1822,14 @@ longlong 
Item_field::val_int() } +my_decimal *Item_field::val_decimal(my_decimal *decimal_value) +{ + if ((null_value= field->is_null())) + return 0; + return field->val_decimal(decimal_value); +} + + String *Item_field::str_result(String *str) { if ((null_value=result_field->is_null())) @@ -899,13 +1884,47 @@ longlong Item_field::val_int_result() } +my_decimal *Item_field::val_decimal_result(my_decimal *decimal_value) +{ + if ((null_value= result_field->is_null())) + return 0; + return result_field->val_decimal(decimal_value); +} + + +bool Item_field::val_bool_result() +{ + if ((null_value= result_field->is_null())) + return FALSE; + switch (result_field->result_type()) { + case INT_RESULT: + return result_field->val_int() != 0; + case DECIMAL_RESULT: + { + my_decimal decimal_value; + my_decimal *val= result_field->val_decimal(&decimal_value); + if (val) + return !my_decimal_is_zero(val); + return 0; + } + case REAL_RESULT: + case STRING_RESULT: + return result_field->val_real() != 0.0; + case ROW_RESULT: + default: + DBUG_ASSERT(0); + return 0; // Shut up compiler + } +} + + bool Item_field::eq(const Item *item, bool binary_cmp) const { if (item->type() != FIELD_ITEM) return 0; Item_field *item_field= (Item_field*) item; - if (item_field->field) + if (item_field->field && field) return item_field->field == field; /* We may come here when we are trying to find a function in a GROUP BY @@ -919,10 +1938,10 @@ bool Item_field::eq(const Item *item, bool binary_cmp) const */ return (!my_strcasecmp(system_charset_info, item_field->name, field_name) && - (!item_field->table_name || + (!item_field->table_name || !table_name || (!my_strcasecmp(table_alias_charset, item_field->table_name, table_name) && - (!item_field->db_name || + (!item_field->db_name || !db_name || (item_field->db_name && !strcmp(item_field->db_name, db_name)))))); } @@ -946,8 +1965,9 @@ Item *Item_field::get_tmp_table_item(THD *thd) /* - Create an item from a string we KNOW points to a valid longlong/ulonglong - 
end \0 terminated number string + Create an item from a string we KNOW points to a valid longlong + end \0 terminated number string. + This is always 'signed'. Unsigned values are created with Item_uint() */ Item_int::Item_int(const char *str_arg, uint length) @@ -961,6 +1981,12 @@ Item_int::Item_int(const char *str_arg, uint length) } +my_decimal *Item_int::val_decimal(my_decimal *decimal_value) +{ + int2my_decimal(E_DEC_FATAL_ERROR, value, unsigned_flag, decimal_value); + return decimal_value; +} + String *Item_int::val_str(String *str) { // following assert is redundant, because fixed=1 assigned in constructor @@ -1008,7 +2034,126 @@ void Item_uint::print(String *str) } -String *Item_real::val_str(String *str) +Item_decimal::Item_decimal(const char *str_arg, uint length, + CHARSET_INFO *charset) +{ + str2my_decimal(E_DEC_FATAL_ERROR, str_arg, length, charset, &decimal_value); + name= (char*) str_arg; + decimals= (uint8) decimal_value.frac; + fixed= 1; + max_length= my_decimal_precision_to_length(decimal_value.intg + decimals, + decimals, unsigned_flag); +} + +Item_decimal::Item_decimal(longlong val, bool unsig) +{ + int2my_decimal(E_DEC_FATAL_ERROR, val, unsig, &decimal_value); + decimals= (uint8) decimal_value.frac; + fixed= 1; + max_length= my_decimal_precision_to_length(decimal_value.intg + decimals, + decimals, unsigned_flag); +} + + +Item_decimal::Item_decimal(double val, int precision, int scale) +{ + double2my_decimal(E_DEC_FATAL_ERROR, val, &decimal_value); + decimals= (uint8) decimal_value.frac; + fixed= 1; + max_length= my_decimal_precision_to_length(decimal_value.intg + decimals, + decimals, unsigned_flag); +} + + +Item_decimal::Item_decimal(const char *str, const my_decimal *val_arg, + uint decimal_par, uint length) +{ + my_decimal2decimal(val_arg, &decimal_value); + name= (char*) str; + decimals= (uint8) decimal_par; + max_length= length; + fixed= 1; +} + + +Item_decimal::Item_decimal(my_decimal *value_par) +{ + my_decimal2decimal(value_par, 
&decimal_value); + decimals= (uint8) decimal_value.frac; + fixed= 1; + max_length= my_decimal_precision_to_length(decimal_value.intg + decimals, + decimals, unsigned_flag); +} + + +Item_decimal::Item_decimal(const char *bin, int precision, int scale) +{ + binary2my_decimal(E_DEC_FATAL_ERROR, bin, + &decimal_value, precision, scale); + decimals= (uint8) decimal_value.frac; + fixed= 1; + max_length= my_decimal_precision_to_length(precision, decimals, + unsigned_flag); +} + + +longlong Item_decimal::val_int() +{ + longlong result; + my_decimal2int(E_DEC_FATAL_ERROR, &decimal_value, unsigned_flag, &result); + return result; +} + +double Item_decimal::val_real() +{ + double result; + my_decimal2double(E_DEC_FATAL_ERROR, &decimal_value, &result); + return result; +} + +String *Item_decimal::val_str(String *result) +{ + result->set_charset(&my_charset_bin); + my_decimal2string(E_DEC_FATAL_ERROR, &decimal_value, 0, 0, 0, result); + return result; +} + +void Item_decimal::print(String *str) +{ + my_decimal2string(E_DEC_FATAL_ERROR, &decimal_value, 0, 0, 0, &str_value); + str->append(str_value); +} + + +bool Item_decimal::eq(const Item *item, bool binary_cmp) const +{ + if (type() == item->type() && item->basic_const_item()) + { + /* + We need to cast off const to call val_decimal(). This should + be OK for a basic constant. Additionally, we can pass 0 as + a true decimal constant will return its internal decimal + storage and ignore the argument. 
+ */ + Item *arg= (Item*) item; + my_decimal *value= arg->val_decimal(0); + return !my_decimal_cmp(&decimal_value, value); + } + return 0; +} + + +void Item_decimal::set_decimal_value(my_decimal *value_par) +{ + my_decimal2decimal(value_par, &decimal_value); + decimals= (uint8) decimal_value.frac; + unsigned_flag= !decimal_value.sign(); + max_length= my_decimal_precision_to_length(decimal_value.intg + decimals, + decimals, unsigned_flag); +} + + +String *Item_float::val_str(String *str) { // following assert is redundant, because fixed=1 assigned in constructor DBUG_ASSERT(fixed == 1); @@ -1017,6 +2162,15 @@ String *Item_real::val_str(String *str) } +my_decimal *Item_float::val_decimal(my_decimal *decimal_value) +{ + // following assert is redundant, because fixed=1 assigned in constructor + DBUG_ASSERT(fixed == 1); + double2my_decimal(E_DEC_FATAL_ERROR, value, decimal_value); + return (decimal_value); +} + + void Item_string::print(String *str) { str->append('_'); @@ -1026,9 +2180,70 @@ void Item_string::print(String *str) str->append('\''); } + +double Item_string::val_real() +{ + DBUG_ASSERT(fixed == 1); + int error; + char *end, *org_end; + double tmp; + CHARSET_INFO *cs= str_value.charset(); + + org_end= (char*) str_value.ptr() + str_value.length(); + tmp= my_strntod(cs, (char*) str_value.ptr(), str_value.length(), &end, + &error); + if (error || (end != org_end && !check_if_only_end_space(cs, end, org_end))) + { + /* + We can use str_value.ptr() here as Item_string is gurantee to put an + end \0 here. 
+ */ + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), "DOUBLE", + str_value.ptr()); + } + return tmp; +} + + +longlong Item_string::val_int() +{ + DBUG_ASSERT(fixed == 1); + int err; + longlong tmp; + char *end= (char*) str_value.ptr()+ str_value.length(); + char *org_end= end; + CHARSET_INFO *cs= str_value.charset(); + + tmp= (*(cs->cset->strtoll10))(cs, str_value.ptr(), &end, &err); + /* + TODO: Give error if we wanted a signed integer and we got an unsigned + one + */ + if (err > 0 || + (end != org_end && !check_if_only_end_space(cs, end, org_end))) + { + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER", + str_value.ptr()); + } + return tmp; +} + + +my_decimal *Item_string::val_decimal(my_decimal *decimal_value) +{ + return val_decimal_from_string(decimal_value); +} + + bool Item_null::eq(const Item *item, bool binary_cmp) const { return item->type() == type(); } -double Item_null::val() + + +double Item_null::val_real() { // following assert is redundant, because fixed=1 assigned in constructor DBUG_ASSERT(fixed == 1); @@ -1051,6 +2266,11 @@ String *Item_null::val_str(String *str) return 0; } +my_decimal *Item_null::val_decimal(my_decimal *decimal_value) +{ + return 0; +} + Item *Item_null::safe_charset_converter(CHARSET_INFO *tocs) { @@ -1073,12 +2293,13 @@ default_set_param_func(Item_param *param, param->set_null(); } + Item_param::Item_param(unsigned pos_in_query_arg) : state(NO_VALUE), item_result_type(STRING_RESULT), /* Don't pretend to be a literal unless value for this item is set. */ item_type(PARAM_ITEM), - param_type(MYSQL_TYPE_STRING), + param_type(MYSQL_TYPE_VARCHAR), pos_in_query(pos_in_query_arg), set_param_func(default_set_param_func) { @@ -1089,13 +2310,15 @@ Item_param::Item_param(unsigned pos_in_query_arg) : value is set. 
*/ maybe_null= 1; + cnvitem= new Item_string("", 0, &my_charset_bin, DERIVATION_COERCIBLE); + cnvstr.set(cnvbuf, sizeof(cnvbuf), &my_charset_bin); } + void Item_param::set_null() { DBUG_ENTER("Item_param::set_null"); /* These are cleared after each execution by reset() method */ - max_length= 0; null_value= 1; /* Because of NULL and string values we need to set max_length for each new @@ -1133,6 +2356,36 @@ void Item_param::set_double(double d) /* + Set decimal parameter value from string. + + SYNOPSIS + set_decimal() + str - character string + length - string length + + NOTE + as we use character strings to send decimal values in + binary protocol, we use str2my_decimal to convert it to + internal decimal value. +*/ + +void Item_param::set_decimal(const char *str, ulong length) +{ + char *end; + DBUG_ENTER("Item_param::set_decimal"); + + end= (char*) str+length; + str2my_decimal(E_DEC_FATAL_ERROR, str, &decimal_value, &end); + state= DECIMAL_VALUE; + decimals= decimal_value.frac; + max_length= my_decimal_precision_to_length(decimal_value.precision(), + decimals, unsigned_flag); + maybe_null= 0; + DBUG_VOID_RETURN; +} + + +/* Set parameter value from TIME value. SYNOPSIS @@ -1147,21 +2400,22 @@ void Item_param::set_double(double d) the fact that even wrong value sent over binary protocol fits into MAX_DATE_STRING_REP_LENGTH buffer. 
*/ -void Item_param::set_time(TIME *tm, timestamp_type type, uint32 max_length_arg) +void Item_param::set_time(TIME *tm, timestamp_type time_type, + uint32 max_length_arg) { DBUG_ENTER("Item_param::set_time"); value.time= *tm; - value.time.time_type= type; + value.time.time_type= time_type; if (value.time.year > 9999 || value.time.month > 12 || value.time.day > 31 || - type != MYSQL_TIMESTAMP_TIME && value.time.hour > 23 || + time_type != MYSQL_TIMESTAMP_TIME && value.time.hour > 23 || value.time.minute > 59 || value.time.second > 59) { char buff[MAX_DATE_STRING_REP_LENGTH]; uint length= my_TIME_to_str(&value.time, buff); - make_truncated_value_warning(current_thd, buff, length, type); + make_truncated_value_warning(current_thd, buff, length, time_type, 0); set_zero_time(&value.time, MYSQL_TIMESTAMP_ERROR); } @@ -1185,6 +2439,7 @@ bool Item_param::set_str(const char *str, ulong length) &dummy_errors)) DBUG_RETURN(TRUE); state= STRING_VALUE; + max_length= length; maybe_null= 0; /* max_length and decimals are set after charset conversion */ /* sic: str may be not null-terminated, don't add DBUG_PRINT here */ @@ -1224,7 +2479,7 @@ bool Item_param::set_longdata(const char *str, ulong length) RETURN 0 OK - 1 Out of memort + 1 Out of memory */ bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry) @@ -1240,7 +2495,7 @@ bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry) item_result_type= REAL_RESULT; break; case INT_RESULT: - set_int(*(longlong*)entry->value, 21); + set_int(*(longlong*)entry->value, MY_INT64_NUM_DECIMAL_DIGITS); item_type= Item::INT_ITEM; item_result_type= INT_RESULT; break; @@ -1270,6 +2525,16 @@ bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry) DBUG_RETURN(1); break; } + case DECIMAL_RESULT: + { + const my_decimal *ent_value= (const my_decimal *)entry->value; + my_decimal2decimal(ent_value, &decimal_value); + state= DECIMAL_VALUE; + decimals= ent_value->frac; + max_length= 
my_decimal_precision_to_length(ent_value->precision(), + decimals, unsigned_flag); + break; + } default: DBUG_ASSERT(0); set_null(); @@ -1294,6 +2559,7 @@ bool Item_param::set_from_user_var(THD *thd, const user_var_entry *entry) void Item_param::reset() { + DBUG_ENTER("Item_param::reset"); /* Shrink string buffer if it's bigger than max possible CHAR column */ if (str_value.alloced_length() > MAX_CHAR_WIDTH) str_value.free(); @@ -1301,7 +2567,7 @@ void Item_param::reset() str_value.length(0); str_value_ptr.length(0); /* - We must prevent all charset conversions untill data has been written + We must prevent all charset conversions until data has been written to the binary log. */ str_value.set_charset(&my_charset_bin); @@ -1318,6 +2584,7 @@ void Item_param::reset() DBUG_ASSERTS(state != NO_VALUE) in all Item_param::get_* methods). */ + DBUG_VOID_RETURN; } @@ -1327,9 +2594,11 @@ int Item_param::save_in_field(Field *field, bool no_conversions) switch (state) { case INT_VALUE: - return field->store(value.integer); + return field->store(value.integer, unsigned_flag); case REAL_VALUE: return field->store(value.real); + case DECIMAL_VALUE: + return field->store_decimal(&decimal_value); case TIME_VALUE: field->store_time(&value.time, value.time.time_type); return 0; @@ -1373,21 +2642,27 @@ bool Item_param::get_date(TIME *res, uint fuzzydate) } -double Item_param::val() +double Item_param::val_real() { switch (state) { case REAL_VALUE: return value.real; case INT_VALUE: return (double) value.integer; + case DECIMAL_VALUE: + { + double result; + my_decimal2double(E_DEC_FATAL_ERROR, &decimal_value, &result); + return result; + } case STRING_VALUE: case LONG_DATA_VALUE: - { - int dummy_err; - char *end_not_used; - return my_strntod(str_value.charset(), (char*) str_value.ptr(), - str_value.length(), &end_not_used, &dummy_err); - } + { + int dummy_err; + char *end_not_used; + return my_strntod(str_value.charset(), (char*) str_value.ptr(), + str_value.length(), &end_not_used, 
&dummy_err); + } case TIME_VALUE: /* This works for example when user says SELECT ?+0.0 and supplies @@ -1407,9 +2682,15 @@ longlong Item_param::val_int() { switch (state) { case REAL_VALUE: - return (longlong) (value.real + (value.real > 0 ? 0.5 : -0.5)); + return (longlong) rint(value.real); case INT_VALUE: return value.integer; + case DECIMAL_VALUE: + { + longlong i; + my_decimal2int(E_DEC_FATAL_ERROR, &decimal_value, unsigned_flag, &i); + return i; + } case STRING_VALUE: case LONG_DATA_VALUE: { @@ -1428,6 +2709,36 @@ longlong Item_param::val_int() } +my_decimal *Item_param::val_decimal(my_decimal *dec) +{ + switch (state) { + case DECIMAL_VALUE: + return &decimal_value; + case REAL_VALUE: + double2my_decimal(E_DEC_FATAL_ERROR, value.real, dec); + return dec; + case INT_VALUE: + int2my_decimal(E_DEC_FATAL_ERROR, value.integer, unsigned_flag, dec); + return dec; + case STRING_VALUE: + case LONG_DATA_VALUE: + string2my_decimal(E_DEC_FATAL_ERROR, &str_value, dec); + return dec; + case TIME_VALUE: + { + longlong i= (longlong) TIME_to_ulonglong(&value.time); + int2my_decimal(E_DEC_FATAL_ERROR, i, 0, dec); + return dec; + } + case NULL_VALUE: + return 0; + default: + DBUG_ASSERT(0); + } + return 0; +} + + String *Item_param::val_str(String* str) { switch (state) { @@ -1440,6 +2751,11 @@ String *Item_param::val_str(String* str) case INT_VALUE: str->set(value.integer, &my_charset_bin); return str; + case DECIMAL_VALUE: + if (my_decimal2string(E_DEC_FATAL_ERROR, &decimal_value, + 0, 0, 0, str) <= 1) + return str; + return NULL; case TIME_VALUE: { if (str->reserve(MAX_DATE_STRING_REP_LENGTH)) @@ -1472,6 +2788,11 @@ const String *Item_param::query_val_str(String* str) const case REAL_VALUE: str->set(value.real, NOT_FIXED_DEC, &my_charset_bin); break; + case DECIMAL_VALUE: + if (my_decimal2string(E_DEC_FATAL_ERROR, &decimal_value, + 0, 0, 0, str) > 1) + return &my_null_string; + break; case TIME_VALUE: { char *buf, *ptr; @@ -1495,25 +2816,8 @@ const String 
*Item_param::query_val_str(String* str) const case STRING_VALUE: case LONG_DATA_VALUE: { - char *buf, *ptr; str->length(0); - if (str->reserve(str_value.length()*2+3)) - break; - - buf= str->c_ptr_quick(); - ptr= buf; - if (value.cs_info.character_set_client->escape_with_backslash_is_dangerous) - { - ptr= str_to_hex(ptr, str_value.ptr(), str_value.length()); - } - else - { - *ptr++= '\''; - ptr+= escape_string_for_mysql(str_value.charset(), ptr, - str_value.ptr(), str_value.length()); - *ptr++='\''; - } - str->length(ptr - buf); + append_query_string(value.cs_info.character_set_client, &str_value, str); break; } case NULL_VALUE: @@ -1574,8 +2878,9 @@ bool Item_param::basic_const_item() const return TRUE; } + Item * -Item_param::new_item() +Item_param::clone_item() { /* see comments in the header file */ switch (state) { @@ -1586,7 +2891,7 @@ Item_param::new_item() new Item_uint(name, value.integer, max_length) : new Item_int(name, value.integer, max_length)); case REAL_VALUE: - return new Item_real(name, value.real, decimals, max_length); + return new Item_float(name, value.real, decimals, max_length); case STRING_VALUE: case LONG_DATA_VALUE: return new Item_string(name, str_value.c_ptr_quick(), str_value.length(), @@ -1620,7 +2925,7 @@ Item_param::eq(const Item *arg, bool binary_cmp) const return value.integer == item->val_int() && unsigned_flag == item->unsigned_flag; case REAL_VALUE: - return value.real == item->val(); + return value.real == item->val_real(); case STRING_VALUE: case LONG_DATA_VALUE: if (binary_cmp) @@ -1634,6 +2939,26 @@ Item_param::eq(const Item *arg, bool binary_cmp) const /* End of Item_param related */ +void Item_param::print(String *str) +{ + if (state == NO_VALUE) + { + str->append('?'); + } + else + { + char buffer[STRING_BUFFER_USUAL_SIZE]; + String tmp(buffer, sizeof(buffer), &my_charset_bin); + const String *res; + res= query_val_str(&tmp); + str->append(*res); + } +} + + 
+/**************************************************************************** + Item_copy_string +****************************************************************************/ void Item_copy_string::copy() { @@ -1653,6 +2978,17 @@ String *Item_copy_string::val_str(String *str) } +my_decimal *Item_copy_string::val_decimal(my_decimal *decimal_value) +{ + // Item_copy_string is used without fix_fields call + if (null_value) + return 0; + string2my_decimal(E_DEC_FATAL_ERROR, &str_value, decimal_value); + return (decimal_value); +} + + + int Item_copy_string::save_in_field(Field *field, bool no_conversions) { if (null_value) @@ -1667,18 +3003,16 @@ int Item_copy_string::save_in_field(Field *field, bool no_conversions) */ /* ARGSUSED */ -bool Item::fix_fields(THD *thd, - struct st_table_list *list, - Item ** ref) +bool Item::fix_fields(THD *thd, Item **ref) { // We do not check fields which are fixed during construction DBUG_ASSERT(fixed == 0 || basic_const_item()); fixed= 1; - return 0; + return FALSE; } -double Item_ref_null_helper::val() +double Item_ref_null_helper::val_real() { DBUG_ASSERT(fixed == 1); double tmp= (*ref)->val_result(); @@ -1696,6 +3030,24 @@ longlong Item_ref_null_helper::val_int() } +my_decimal *Item_ref_null_helper::val_decimal(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed == 1); + my_decimal *val= (*ref)->val_decimal_result(decimal_value); + owner->was_null|= null_value= (*ref)->null_value; + return val; +} + + +bool Item_ref_null_helper::val_bool() +{ + DBUG_ASSERT(fixed == 1); + bool val= (*ref)->val_bool_result(); + owner->was_null|= null_value= (*ref)->null_value; + return val; +} + + String* Item_ref_null_helper::val_str(String* s) { DBUG_ASSERT(fixed == 1); @@ -1712,29 +3064,37 @@ bool Item_ref_null_helper::get_date(TIME *ltime, uint fuzzydate) /* - Mark item and SELECT_LEXs as dependent if it is not outer resolving + Mark item and SELECT_LEXs as dependent if item was resolved in outer SELECT SYNOPSIS mark_as_dependent() thd - thread 
handler last - select from which current item depend current - current select - item - item which should be marked + resolved_item - item which was resolved in outer SELECT(for warning) + mark_item - item which should be marked (can be differ in case of + substitution) */ static void mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current, - Item_ident *item) -{ - // store pointer on SELECT_LEX from which item is dependent - item->depended_from= last; + Item_ident *resolved_item, + Item_ident *mark_item) +{ + const char *db_name= (resolved_item->db_name ? + resolved_item->db_name : ""); + const char *table_name= (resolved_item->table_name ? + resolved_item->table_name : ""); + /* store pointer on SELECT_LEX from which item is dependent */ + if (mark_item) + mark_item->depended_from= last; current->mark_as_dependent(last); if (thd->lex->describe & DESCRIBE_EXTENDED) { char warn_buff[MYSQL_ERRMSG_SIZE]; sprintf(warn_buff, ER(ER_WARN_FIELD_RESOLVED), - (item->db_name?item->db_name:""), (item->db_name?".":""), - (item->table_name?item->table_name:""), (item->table_name?".":""), - item->field_name, + db_name, (db_name[0] ? "." : ""), + table_name, (table_name [0] ? "." 
: ""), + resolved_item->field_name, current->select_number, last->select_number); push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_WARN_FIELD_RESOLVED, warn_buff); @@ -1742,28 +3102,673 @@ static void mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current, } -bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +/* + Mark range of selects and resolved identifier (field/reference) item as + dependent + + SYNOPSIS + mark_select_range_as_dependent() + thd - thread handler + last_select - select where resolved_item was resolved + current_sel - current select (select where resolved_item was placed) + found_field - field which was found during resolving + found_item - Item which was found during resolving (if resolved + identifier belongs to VIEW) + resolved_item - Identifier which was resolved + + NOTE: + We have to mark all items between current_sel (including) and + last_select (excluding) as dependend (select before last_select should + be marked with actual table mask used by resolved item, all other with + OUTER_REF_TABLE_BIT) and also write dependence information to Item of + resolved identifier. 
+*/ + +void mark_select_range_as_dependent(THD *thd, + SELECT_LEX *last_select, + SELECT_LEX *current_sel, + Field *found_field, Item *found_item, + Item_ident *resolved_item) +{ + /* + Go from current SELECT to SELECT where field was resolved (it + have to be reachable from current SELECT, because it was already + done once when we resolved this field and cached result of + resolving) + */ + SELECT_LEX *previous_select= current_sel; + for (; previous_select->outer_select() != last_select; + previous_select= previous_select->outer_select()) + { + Item_subselect *prev_subselect_item= + previous_select->master_unit()->item; + prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; + prev_subselect_item->const_item_cache= 0; + } + { + Item_subselect *prev_subselect_item= + previous_select->master_unit()->item; + Item_ident *dependent= resolved_item; + if (found_field == view_ref_found) + { + Item::Type type= found_item->type(); + prev_subselect_item->used_tables_cache|= + found_item->used_tables(); + dependent= ((type == Item::REF_ITEM || type == Item::FIELD_ITEM) ? + (Item_ident*) found_item : + 0); + } + else + prev_subselect_item->used_tables_cache|= + found_field->table->map; + prev_subselect_item->const_item_cache= 0; + mark_as_dependent(thd, last_select, current_sel, resolved_item, + dependent); + } +} + + +/* + Search a GROUP BY clause for a field with a certain name. + + SYNOPSIS + find_field_in_group_list() + find_item the item being searched for + group_list GROUP BY clause + + DESCRIPTION + Search the GROUP BY list for a column named as find_item. When searching + preference is given to columns that are qualified with the same table (and + database) name as the one being searched for. 
+ + RETURN + - the found item on success + - NULL if find_item is not in group_list +*/ + +static Item** find_field_in_group_list(Item *find_item, ORDER *group_list) +{ + const char *db_name; + const char *table_name; + const char *field_name; + ORDER *found_group= NULL; + int found_match_degree= 0; + Item_ident *cur_field; + int cur_match_degree= 0; + char name_buff[NAME_LEN+1]; + + if (find_item->type() == Item::FIELD_ITEM || + find_item->type() == Item::REF_ITEM) + { + db_name= ((Item_ident*) find_item)->db_name; + table_name= ((Item_ident*) find_item)->table_name; + field_name= ((Item_ident*) find_item)->field_name; + } + else + return NULL; + + if (db_name && lower_case_table_names) + { + /* Convert database to lower case for comparison */ + strmake(name_buff, db_name, sizeof(name_buff)-1); + my_casedn_str(files_charset_info, name_buff); + db_name= name_buff; + } + + DBUG_ASSERT(field_name != 0); + + for (ORDER *cur_group= group_list ; cur_group ; cur_group= cur_group->next) + { + if ((*(cur_group->item))->real_item()->type() == Item::FIELD_ITEM) + { + cur_field= (Item_ident*) *cur_group->item; + cur_match_degree= 0; + + DBUG_ASSERT(cur_field->field_name != 0); + + if (!my_strcasecmp(system_charset_info, + cur_field->field_name, field_name)) + ++cur_match_degree; + else + continue; + + if (cur_field->table_name && table_name) + { + /* If field_name is qualified by a table name. */ + if (strcmp(cur_field->table_name, table_name)) + /* Same field names, different tables. */ + return NULL; + + ++cur_match_degree; + if (cur_field->db_name && db_name) + { + /* If field_name is also qualified by a database name. */ + if (strcmp(cur_field->db_name, db_name)) + /* Same field names, different databases. */ + return NULL; + ++cur_match_degree; + } + } + + if (cur_match_degree > found_match_degree) + { + found_match_degree= cur_match_degree; + found_group= cur_group; + } + else if (found_group && (cur_match_degree == found_match_degree) && + ! 
(*(found_group->item))->eq(cur_field, 0)) + { + /* + If the current resolve candidate matches equally well as the current + best match, they must reference the same column, otherwise the field + is ambiguous. + */ + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find_item->full_name(), current_thd->where); + return NULL; + } + } + } + + if (found_group) + return found_group->item; + else + return NULL; +} + + +/* + Resolve a column reference in a sub-select. + + SYNOPSIS + resolve_ref_in_select_and_group() + thd current thread + ref column reference being resolved + select the sub-select that ref is resolved against + + DESCRIPTION + Resolve a column reference (usually inside a HAVING clause) against the + SELECT and GROUP BY clauses of the query described by 'select'. The name + resolution algorithm searches both the SELECT and GROUP BY clauses, and in + case of a name conflict prefers GROUP BY column names over SELECT names. If + both clauses contain different fields with the same names, a warning is + issued that name of 'ref' is ambiguous. We extend ANSI SQL in that when no + GROUP BY column is found, then a HAVING name is resolved as a possibly + derived SELECT column. This extension is allowed only if the + MODE_ONLY_FULL_GROUP_BY sql mode isn't enabled. + + NOTES + The resolution procedure is: + - Search for a column or derived column named col_ref_i [in table T_j] + in the SELECT clause of Q. + - Search for a column named col_ref_i [in table T_j] + in the GROUP BY clause of Q. + - If found different columns with the same name in GROUP BY and SELECT + - issue a warning and return the GROUP BY column, + - otherwise + - if the MODE_ONLY_FULL_GROUP_BY mode is enabled return error + - else return the found SELECT column. 
+ + + RETURN + NULL - there was an error, and the error was already reported + not_found_item - the item was not resolved, no error was reported + resolved item - if the item was resolved +*/ + +static Item** +resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) +{ + Item **group_by_ref= NULL; + Item **select_ref= NULL; + ORDER *group_list= (ORDER*) select->group_list.first; + bool ambiguous_fields= FALSE; + uint counter; + enum_resolution_type resolution; + + /* + Search for a column or derived column named as 'ref' in the SELECT + clause of the current select. + */ + if (!(select_ref= find_item_in_list(ref, *(select->get_item_list()), + &counter, REPORT_EXCEPT_NOT_FOUND, + &resolution))) + return NULL; /* Some error occurred. */ + if (resolution == RESOLVED_AGAINST_ALIAS) + ref->alias_name_used= TRUE; + + /* If this is a non-aggregated field inside HAVING, search in GROUP BY. */ + if (select->having_fix_field && !ref->with_sum_func && group_list) + { + group_by_ref= find_field_in_group_list(ref, group_list); + + /* Check if the fields found in SELECT and GROUP BY are the same field. */ + if (group_by_ref && (select_ref != not_found_item) && + !((*group_by_ref)->eq(*select_ref, 0))) + { + ambiguous_fields= TRUE; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR, + ER(ER_NON_UNIQ_ERROR), ref->full_name(), + current_thd->where); + + } + } + + if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY && + select_ref != not_found_item && !group_by_ref) + { + /* + Report the error if fields was found only in the SELECT item list and + the strict mode is enabled. 
+ */ + my_error(ER_NON_GROUPING_FIELD_USED, MYF(0), + ref->name, "HAVING"); + return NULL; + } + if (select_ref != not_found_item || group_by_ref) + { + if (select_ref != not_found_item && !ambiguous_fields) + { + DBUG_ASSERT(*select_ref != 0); + if (!select->ref_pointer_array[counter]) + { + my_error(ER_ILLEGAL_REFERENCE, MYF(0), + ref->name, "forward reference in item list"); + return NULL; + } + DBUG_ASSERT((*select_ref)->fixed); + return (select->ref_pointer_array + counter); + } + if (group_by_ref) + return group_by_ref; + DBUG_ASSERT(FALSE); + return NULL; /* So there is no compiler warning. */ + } + + return (Item**) not_found_item; +} + + +/* + Resolve the name of an outer select column reference. + + SYNOPSIS + Item_field::fix_outer_field() + thd [in] current thread + from_field [in/out] found field reference or (Field*)not_found_field + reference [in/out] view column if this item was resolved to a view column + + DESCRIPTION + The method resolves the column reference represented by 'this' as a column + present in outer selects that contain current select. + + NOTES + This is the inner loop of Item_field::fix_fields: + + for each outer query Q_k beginning from the inner-most one + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q_k; + + if such a column is not found + Search for a column or derived column named col_ref_i + [in table T_j] in the SELECT and GROUP clauses of Q_k. + } + + IMPLEMENTATION + In prepared statements, because of cache, find_field_in_tables() + can resolve fields even if they don't belong to current context. + In this case this method only finds appropriate context and marks + current select as dependent. The found reference of field should be + provided in 'from_field'. + + RETURN + 1 - column succefully resolved and fix_fields() should continue. 
+ 0 - column fully fixed and fix_fields() should return FALSE + -1 - error occured +*/ +int +Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) { enum_parsing_place place= NO_MATTER; + bool field_found= (*from_field != not_found_field); + bool upward_lookup= FALSE; + + /* + If there are outer contexts (outer selects, but current select is + not derived table or view) try to resolve this reference in the + outer contexts. + + We treat each subselect as a separate namespace, so that different + subselects may contain columns with the same names. The subselects + are searched starting from the innermost. + */ + Name_resolution_context *last_checked_context= context; + Item **ref= (Item **) not_found_item; + SELECT_LEX *current_sel= (SELECT_LEX *) thd->lex->current_select; + Name_resolution_context *outer_context= 0; + /* Currently derived tables cannot be correlated */ + if (current_sel->master_unit()->first_select()->linkage != + DERIVED_TABLE_TYPE) + outer_context= context->outer_context; + for (; + outer_context; + outer_context= outer_context->outer_context) + { + SELECT_LEX *select= outer_context->select_lex; + Item_subselect *prev_subselect_item= + last_checked_context->select_lex->master_unit()->item; + last_checked_context= outer_context; + upward_lookup= TRUE; + + place= prev_subselect_item->parsing_place; + /* + If outer_field is set, field was already found by first call + to find_field_in_tables(). Only need to find appropriate context. + */ + if (field_found && outer_context->select_lex != + cached_table->select_lex) + continue; + /* + In case of a view, find_field_in_tables() writes the pointer to + the found view field into '*reference', in other words, it + substitutes this Item_field with the found expression. 
+ */ + if (field_found || (*from_field= find_field_in_tables(thd, this, + outer_context-> + first_name_resolution_table, + outer_context-> + last_name_resolution_table, + reference, + IGNORE_EXCEPT_NON_UNIQUE, + TRUE, TRUE)) != + not_found_field) + { + if (*from_field) + { + if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY && + select->cur_pos_in_select_list != UNDEF_POS) + { + /* + As this is an outer field it should be added to the list of + non aggregated fields of the outer select. + */ + marker= select->cur_pos_in_select_list; + select->non_agg_fields.push_back(this); + } + if (*from_field != view_ref_found) + { + + prev_subselect_item->used_tables_cache|= (*from_field)->table->map; + prev_subselect_item->const_item_cache= 0; + if (!last_checked_context->select_lex->having_fix_field && + !fixed_as_field) + { + Item_outer_ref *rf; + Query_arena *arena= 0, backup; + /* + Each outer field is replaced for an Item_outer_ref object. + This is done in order to get correct results when the outer + select employs a temporary table. + The original fields are saved in the inner_fields_list of the + outer select. This list is created by the following reasons: + 1. We can't add field items to the outer select list directly + because the outer select hasn't been fully fixed yet. + 2. We need a location to refer to in the Item_ref object + so the inner_fields_list is used as such temporary + reference storage. + The new Item_outer_ref object replaces the original field and is + also saved in the inner_refs_list of the outer select. Here + it is only created. It can be fixed only after the original + field has been fixed and this is done in the fix_inner_refs() + function. 
+ */ + set_field(*from_field); + arena= thd->activate_stmt_arena_if_needed(&backup); + rf= new Item_outer_ref(context, this); + if (!rf) + { + if (arena) + thd->restore_active_arena(arena, &backup); + return -1; + } + *reference= rf; + select->inner_refs_list.push_back(rf); + if (arena) + thd->restore_active_arena(arena, &backup); + fixed_as_field= 1; + } + if (thd->lex->in_sum_func && + thd->lex->in_sum_func->nest_level == + thd->lex->current_select->nest_level) + { + Item::Type ref_type= (*reference)->type(); + set_if_bigger(thd->lex->in_sum_func->max_arg_level, + select->nest_level); + set_field(*from_field); + fixed= 1; + mark_as_dependent(thd, last_checked_context->select_lex, + context->select_lex, this, + ((ref_type == REF_ITEM || + ref_type == FIELD_ITEM) ? + (Item_ident*) (*reference) : 0)); + return 0; + } + } + else + { + Item::Type ref_type= (*reference)->type(); + prev_subselect_item->used_tables_cache|= + (*reference)->used_tables(); + prev_subselect_item->const_item_cache&= + (*reference)->const_item(); + mark_as_dependent(thd, last_checked_context->select_lex, + context->select_lex, this, + ((ref_type == REF_ITEM || ref_type == FIELD_ITEM) ? + (Item_ident*) (*reference) : + 0)); + /* + A reference to a view field had been found and we + substituted it instead of this Item (find_field_in_tables + does it by assigning the new value to *reference), so now + we can return from this function. + */ + return 0; + } + } + break; + } + + /* Search in SELECT and GROUP lists of the outer select. */ + if (outer_context->resolve_in_select_list) + { + if (!(ref= resolve_ref_in_select_and_group(thd, this, select))) + return -1; /* Some error occurred (e.g. ambiguous names). 
*/ + if (ref != not_found_item) + { + DBUG_ASSERT(*ref && (*ref)->fixed); + prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); + prev_subselect_item->const_item_cache&= (*ref)->const_item(); + break; + } + } + + /* + Reference is not found in this select => this subquery depend on + outer select (or we just trying to find wrong identifier, in this + case it does not matter which used tables bits we set) + */ + prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; + prev_subselect_item->const_item_cache= 0; + } + + DBUG_ASSERT(ref != 0); + if (!*from_field) + return -1; + if (ref == not_found_item && *from_field == not_found_field) + { + if (upward_lookup) + { + // We can't say exactly what absent table or field + my_error(ER_BAD_FIELD_ERROR, MYF(0), full_name(), thd->where); + } + else + { + /* Call find_field_in_tables only to report the error */ + find_field_in_tables(thd, this, + context->first_name_resolution_table, + context->last_name_resolution_table, + reference, REPORT_ALL_ERRORS, + !any_privileges && + TRUE, TRUE); + } + return -1; + } + else if (ref != not_found_item) + { + Item *save; + Item_ref *rf; + + /* Should have been checked in resolve_ref_in_select_and_group(). */ + DBUG_ASSERT(*ref && (*ref)->fixed); + /* + Here, a subset of actions performed by Item_ref::set_properties + is not enough. So we pass ptr to NULL into Item_[direct]_ref + constructor, so no initialization is performed, and call + fix_fields() below. + */ + save= *ref; + *ref= NULL; // Don't call set_properties() + rf= (place == IN_HAVING ? 
+ new Item_ref(context, ref, (char*) table_name, + (char*) field_name, alias_name_used) : + new Item_direct_ref(context, ref, (char*) table_name, + (char*) field_name, alias_name_used)); + *ref= save; + if (!rf) + return -1; + thd->change_item_tree(reference, rf); + /* + rf is Item_ref => never substitute other items (in this case) + during fix_fields() => we can use rf after fix_fields() + */ + DBUG_ASSERT(!rf->fixed); // Assured by Item_ref() + if (rf->fix_fields(thd, reference) || rf->check_cols(1)) + return -1; + + mark_as_dependent(thd, last_checked_context->select_lex, + context->select_lex, this, + rf); + return 0; + } + else + { + mark_as_dependent(thd, last_checked_context->select_lex, + context->select_lex, + this, (Item_ident*)*reference); + if (last_checked_context->select_lex->having_fix_field) + { + Item_ref *rf; + rf= new Item_ref(context, + (cached_table->db[0] ? cached_table->db : 0), + (char*) cached_table->alias, (char*) field_name); + if (!rf) + return -1; + thd->change_item_tree(reference, rf); + /* + rf is Item_ref => never substitute other items (in this case) + during fix_fields() => we can use rf after fix_fields() + */ + DBUG_ASSERT(!rf->fixed); // Assured by Item_ref() + if (rf->fix_fields(thd, reference) || rf->check_cols(1)) + return -1; + return 0; + } + } + return 1; +} + + +/* + Resolve the name of a column reference. + + SYNOPSIS + Item_field::fix_fields() + thd [in] current thread + reference [in/out] view column if this item was resolved to a view column + + DESCRIPTION + The method resolves the column reference represented by 'this' as a column + present in one of: FROM clause, SELECT clause, GROUP BY clause of a query + Q, or in outer queries that contain Q. 
+ + NOTES + The name resolution algorithm used is (where [T_j] is an optional table + name that qualifies the column name): + + resolve_column_reference([T_j].col_ref_i) + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q; + + if such a column is NOT found AND // Lookup in outer queries. + there are outer queries + { + for each outer query Q_k beginning from the inner-most one + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q_k; + + if such a column is not found + Search for a column or derived column named col_ref_i + [in table T_j] in the SELECT and GROUP clauses of Q_k. + } + } + } + + Notice that compared to Item_ref::fix_fields, here we first search the FROM + clause, and then we search the SELECT and GROUP BY clauses. + + RETURN + TRUE if error + FALSE on success +*/ + +bool Item_field::fix_fields(THD *thd, Item **reference) +{ DBUG_ASSERT(fixed == 0); + Field *from_field= (Field *)not_found_field; + bool outer_fixed= false; + if (!field) // If field is not checked { - TABLE_LIST *where= 0; - bool upward_lookup= 0; - Field *tmp= (Field *)not_found_field; - if ((tmp= find_field_in_tables(thd, this, tables, &where, 0)) == + /* + In case of view, find_field_in_tables() write pointer to view field + expression to 'reference', i.e. 
it substitute that expression instead + of this Item_field + */ + if ((from_field= find_field_in_tables(thd, this, + context->first_name_resolution_table, + context->last_name_resolution_table, + reference, + IGNORE_EXCEPT_NON_UNIQUE, + !any_privileges, + TRUE)) == not_found_field) { + int ret; /* Look up in current select's item_list to find aliased fields */ if (thd->lex->current_select->is_item_list_lookup) { uint counter; - bool not_used; + enum_resolution_type resolution; Item** res= find_item_in_list(this, thd->lex->current_select->item_list, &counter, REPORT_EXCEPT_NOT_FOUND, - ¬_used); + &resolution); if (!res) return 1; + if (resolution == RESOLVED_AGAINST_ALIAS) + alias_name_used= TRUE; if (res != (Item **)not_found_item) { if ((*res)->type() == Item::FIELD_ITEM) @@ -1795,179 +3800,61 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) Item_ref to point to the Item in the select list and replace the Item_field created by the parser with the new Item_ref. */ - Item_ref *rf= new Item_ref(db_name,table_name,field_name); + Item_ref *rf= new Item_ref(context, db_name,table_name,field_name); if (!rf) return 1; - thd->change_item_tree(ref, rf); + thd->change_item_tree(reference, rf); /* Because Item_ref never substitutes itself with other items in Item_ref::fix_fields(), we can safely use the original pointer to it even after fix_fields() */ - return rf->fix_fields(thd, tables, ref) || rf->check_cols(1); + return rf->fix_fields(thd, reference) || rf->check_cols(1); } } } + if ((ret= fix_outer_field(thd, &from_field, reference)) < 0) + goto error; + else if (!ret) + return FALSE; + outer_fixed= TRUE; + } + else if (!from_field) + goto error; - /* - We can't find table field in table list of current select, - consequently we have to find it in outer subselect(s). - We can't join lists of outer & current select, because of scope - of view rules. 
For example if both tables (outer & current) have - field 'field' it is not mistake to refer to this field without - mention of table name, but if we join tables in one list it will - cause error ER_NON_UNIQ_ERROR in find_field_in_tables. - */ - SELECT_LEX *last= 0; -#ifdef EMBEDDED_LIBRARY - thd->net.last_errno= 0; -#endif - TABLE_LIST *table_list; - Item **refer= (Item **)not_found_item; - uint counter; - bool not_used; - // Prevent using outer fields in subselects, that is not supported now - SELECT_LEX *cursel= (SELECT_LEX *) thd->lex->current_select; - if (cursel->master_unit()->first_select()->linkage != DERIVED_TABLE_TYPE) - { - SELECT_LEX_UNIT *prev_unit= cursel->master_unit(); - for (SELECT_LEX *sl= prev_unit->outer_select(); - sl; - sl= (prev_unit= sl->master_unit())->outer_select()) - { - upward_lookup= 1; - table_list= (last= sl)->get_table_list(); - if (sl->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) - { - /* - it is primary INSERT st_select_lex => skip first table - resolving - */ - table_list= table_list->next; - } + /* + if it is not expression from merged VIEW we will set this field. - Item_subselect *prev_subselect_item= prev_unit->item; - place= prev_subselect_item->parsing_place; - /* - check table fields only if subquery used somewhere out of HAVING - or outer SELECT do not use groupping (i.e. 
tables are - accessable) - */ - if ((place != IN_HAVING || - (sl->with_sum_func == 0 && sl->group_list.elements == 0)) && - (tmp= find_field_in_tables(thd, this, - table_list, &where, - 0)) != not_found_field) - { - if (!tmp) - return -1; - prev_subselect_item->used_tables_cache|= tmp->table->map; - prev_subselect_item->const_item_cache= 0; - break; - } - if (sl->resolve_mode == SELECT_LEX::SELECT_MODE && - (refer= find_item_in_list(this, sl->item_list, &counter, - REPORT_EXCEPT_NOT_FOUND, - ¬_used)) != - (Item **) not_found_item) - { - if (refer && (*refer)->fixed) // Avoid crash in case of error - { - prev_subselect_item->used_tables_cache|= (*refer)->used_tables(); - prev_subselect_item->const_item_cache&= (*refer)->const_item(); - } - break; - } - - // Reference is not found => depend from outer (or just error) - prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; - prev_subselect_item->const_item_cache= 0; - - if (sl->master_unit()->first_select()->linkage == - DERIVED_TABLE_TYPE) - break; // do not look over derived table - } - } - if (!tmp) - return -1; - else if (!refer) - return 1; - else if (tmp == not_found_field && refer == (Item **)not_found_item) - { - if (upward_lookup) - { - // We can't say exactly what absend table or field - my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), - full_name(), thd->where); - } - else - { - // Call to report error - find_field_in_tables(thd, this, tables, &where, 1); - } - return -1; - } - else if (refer != (Item **)not_found_item) - { - if (!last->ref_pointer_array[counter]) - { - my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, - "forward reference in item list"); - return -1; - } - DBUG_ASSERT((*refer)->fixed); - /* - Here, a subset of actions performed by Item_ref::set_properties - is not enough. So we pass ptr to NULL into Item_[direct]_ref - constructor, so no initialization is performed, and call - fix_fields() below. 
- */ - Item *save= last->ref_pointer_array[counter]; - last->ref_pointer_array[counter]= NULL; - Item_ref *rf= (place == IN_HAVING ? - new Item_ref(last->ref_pointer_array + counter, - (char *)table_name, - (char *)field_name) : - new Item_direct_ref(last->ref_pointer_array + counter, - (char *)table_name, - (char *)field_name)); - if (!rf) - return 1; - thd->change_item_tree(ref, rf); - last->ref_pointer_array[counter]= save; - /* - rf is Item_ref => never substitute other items (in this case) - during fix_fields() => we can use rf after fix_fields() - */ - if (rf->fix_fields(thd, tables, ref) || rf->check_cols(1)) - return 1; - - mark_as_dependent(thd, last, cursel, rf); - return 0; - } - else - { - mark_as_dependent(thd, last, cursel, this); - if (last->having_fix_field) - { - Item_ref *rf; - rf= new Item_ref((where->db[0] ? where->db : 0), - (char*) where->alias, (char*) field_name); - if (!rf) - return 1; - thd->change_item_tree(ref, rf); - /* - rf is Item_ref => never substitute other items (in this case) - during fix_fields() => we can use rf after fix_fields() - */ - return rf->fix_fields(thd, tables, ref) || rf->check_cols(1); - } - } + We can leave expression substituted from view for next PS/SP rexecution + (i.e. do not register this substitution for reverting on cleupup() + (register_item_tree_changing())), because this subtree will be + fix_field'ed during setup_tables()->setup_underlying() (i.e. before + all other expressions of query, and references on tables which do + not present in query will not make problems. + + Also we suppose that view can't be changed during PS/SP life. 
+ */ + if (from_field == view_ref_found) + return FALSE; + + if (!outer_fixed && cached_table && cached_table->select_lex && + context->select_lex && + cached_table->select_lex != context->select_lex) + { + int ret; + if ((ret= fix_outer_field(thd, &from_field, reference)) < 0) + goto error; + else if (!ret) + return FALSE; + outer_fixed= 1; } - else if (!tmp) - return -1; - set_field(tmp); + set_field(from_field); + if (thd->lex->in_sum_func && + thd->lex->in_sum_func->nest_level == + thd->lex->current_select->nest_level) + set_if_bigger(thd->lex->in_sum_func->max_arg_level, + thd->lex->current_select->nest_level); } else if (thd->set_query_id && field->query_id != thd->query_id) { @@ -1976,26 +3863,259 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) field->query_id=thd->query_id; table->used_fields++; table->used_keys.intersect(field->part_of_key); - fixed= 1; } - return 0; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (any_privileges) + { + char *db, *tab; + if (cached_table->view) + { + db= cached_table->view_db.str; + tab= cached_table->view_name.str; + } + else + { + db= cached_table->db; + tab= cached_table->table_name; + } + if (!(have_privileges= (get_column_grant(thd, &field->table->grant, + db, tab, field_name) & + VIEW_ANY_ACL))) + { + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), + "ANY", thd->security_ctx->priv_user, + thd->security_ctx->host_or_ip, field_name, tab); + goto error; + } + } +#endif + fixed= 1; + if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY && + !outer_fixed && !thd->lex->in_sum_func && + thd->lex->current_select->cur_pos_in_select_list != UNDEF_POS) + { + thd->lex->current_select->non_agg_fields.push_back(this); + marker= thd->lex->current_select->cur_pos_in_select_list; + } + return FALSE; + +error: + context->process_error(thd); + return TRUE; } + +Item *Item_field::safe_charset_converter(CHARSET_INFO *tocs) +{ + no_const_subst= 1; + return Item::safe_charset_converter(tocs); +} + + void 
Item_field::cleanup() { DBUG_ENTER("Item_field::cleanup"); Item_ident::cleanup(); /* Even if this object was created by direct link to field in setup_wild() - it will be linked correctly next tyme by name of field and table alias. + it will be linked correctly next time by name of field and table alias. I.e. we can drop 'field'. */ field= result_field= 0; + null_value= FALSE; DBUG_VOID_RETURN; } +/* + Find a field among specified multiple equalities + + SYNOPSIS + find_item_equal() + cond_equal reference to list of multiple equalities where + the field (this object) is to be looked for + + DESCRIPTION + The function first searches the field among multiple equalities + of the current level (in the cond_equal->current_level list). + If it fails, it continues searching in upper levels accessed + through a pointer cond_equal->upper_levels. + The search terminates as soon as a multiple equality containing + the field is found. + + RETURN VALUES + First Item_equal containing the field, if success + 0, otherwise +*/ +Item_equal *Item_field::find_item_equal(COND_EQUAL *cond_equal) +{ + Item_equal *item= 0; + while (cond_equal) + { + List_iterator_fast<Item_equal> li(cond_equal->current_level); + while ((item= li++)) + { + if (item->contains(field)) + return item; + } + /* + The field is not found in any of the multiple equalities + of the current level. Look for it in upper levels + */ + cond_equal= cond_equal->upper_levels; + } + return 0; +} + + +/* + Check whether a field can be substituted by an equal item + + SYNOPSIS + equal_fields_propagator() + arg - *arg != NULL <-> the field is in the context where + substitution for an equal item is valid + + DESCRIPTION + The function checks whether a substitution of the field + occurrence for an equal item is valid. + + NOTES + The following statement is not always true: + x=y => F(x)=F(x/y). + This means substitution of an item for an equal item not always + yields an equavalent condition. 
+ Here's an example: + 'a'='a ' + (LENGTH('a')=1) != (LENGTH('a ')=2) + Such a substitution is surely valid if either the substituted + field is not of a STRING type or if it is an argument of + a comparison predicate. + + RETURN + TRUE substitution is valid + FALSE otherwise +*/ + +bool Item_field::subst_argument_checker(byte **arg) +{ + return (result_type() != STRING_RESULT) || (*arg); +} + + +/* + Set a pointer to the multiple equality the field reference belongs to + (if any) + + SYNOPSIS + equal_fields_propagator() + arg - reference to list of multiple equalities where + the field (this object) is to be looked for + + DESCRIPTION + The function looks for a multiple equality containing the field item + among those referenced by arg. + In the case such equality exists the function does the following. + If the found multiple equality contains a constant, then the field + reference is substituted for this constant, otherwise it sets a pointer + to the multiple equality in the field item. + + NOTES + This function is supposed to be called as a callback parameter in calls + of the compile method. + + RETURN VALUES + pointer to the replacing constant item, if the field item was substituted + pointer to the field item, otherwise. +*/ + +Item *Item_field::equal_fields_propagator(byte *arg) +{ + if (no_const_subst) + return this; + item_equal= find_item_equal((COND_EQUAL *) arg); + Item *item= 0; + if (item_equal) + item= item_equal->get_const(); + /* + Disable const propagation for items used in different comparison contexts. + This must be done because, for example, Item_hex_string->val_int() is not + the same as (Item_hex_string->val_str() in BINARY column)->val_int(). + We cannot simply disable the replacement in a particular context ( + e.g. <bin_col> = <int_col> AND <bin_col> = <hex_string>) since + Items don't know the context they are in and there are functions like + IF (<hex_string>, 'yes', 'no'). 
+ The same problem occurs when comparing a DATE/TIME field with a + DATE/TIME represented as an int and as a string. + */ + if (!item || + (cmp_context != (Item_result)-1 && item->cmp_context != cmp_context)) + item= this; + return item; +} + + +/* + Mark the item to not be part of substitution if it's not a binary item + See comments in Arg_comparator::set_compare_func() for details +*/ + +bool Item_field::set_no_const_sub(byte *arg) +{ + if (field->charset() != &my_charset_bin) + no_const_subst=1; + return FALSE; +} + + +/* + Replace an Item_field for an equal Item_field that evaluated earlier + (if any) + + SYNOPSIS + replace_equal_field_() + arg - a dummy parameter, is not used here + + DESCRIPTION + The function returns a pointer to an item that is taken from + the very beginning of the item_equal list which the Item_field + object refers to (belongs to) unless item_equal contains a constant + item. In this case the function returns this constant item, + (if the substitution does not require conversion). + If the Item_field object does not refer any Item_equal object + 'this' is returned + + NOTES + This function is supposed to be called as a callback parameter in calls + of the thransformer method. + + RETURN VALUES + pointer to a replacement Item_field if there is a better equal item or + a pointer to a constant equal item; + this - otherwise. 
+*/ + +Item *Item_field::replace_equal_field(byte *arg) +{ + if (item_equal) + { + Item *const_item= item_equal->get_const(); + if (const_item) + { + if (cmp_context != (Item_result)-1 && + const_item->cmp_context != cmp_context) + return this; + return const_item; + } + Item_field *subst= item_equal->get_first(); + if (subst && !field->eq(subst->field)) + return subst; + } + return this; +} + + void Item::init_make_field(Send_field *tmp_field, - enum enum_field_types field_type) + enum enum_field_types field_type_arg) { char *empty_name= (char*) ""; tmp_field->db_name= empty_name; @@ -2007,7 +4127,7 @@ void Item::init_make_field(Send_field *tmp_field, tmp_field->flags= (maybe_null ? 0 : NOT_NULL_FLAG) | (my_binary_compare(collation.collation) ? BINARY_FLAG : 0); - tmp_field->type=field_type; + tmp_field->type= field_type_arg; tmp_field->length=max_length; tmp_field->decimals=decimals; if (unsigned_flag) @@ -2022,18 +4142,71 @@ void Item::make_field(Send_field *tmp_field) void Item_empty_string::make_field(Send_field *tmp_field) { - init_make_field(tmp_field,FIELD_TYPE_VAR_STRING); + enum_field_types f_type= FIELD_TYPE_VAR_STRING; + if (max_length >= 16777216) + f_type= FIELD_TYPE_LONG_BLOB; + else if (max_length >= 65536) + f_type= FIELD_TYPE_MEDIUM_BLOB; + init_make_field(tmp_field, f_type); } enum_field_types Item::field_type() const { - return ((result_type() == STRING_RESULT) ? FIELD_TYPE_VAR_STRING : - (result_type() == INT_RESULT) ? 
FIELD_TYPE_LONGLONG : - FIELD_TYPE_DOUBLE); + switch (result_type()) { + case STRING_RESULT: return MYSQL_TYPE_VARCHAR; + case INT_RESULT: return FIELD_TYPE_LONGLONG; + case DECIMAL_RESULT: return FIELD_TYPE_NEWDECIMAL; + case REAL_RESULT: return FIELD_TYPE_DOUBLE; + case ROW_RESULT: + default: + DBUG_ASSERT(0); + return MYSQL_TYPE_VARCHAR; + } } +/* + Create a field to hold a string value from an item + + SYNOPSIS + make_string_field() + table Table for which the field is created + + IMPLEMENTATION + If max_length > CONVERT_IF_BIGGER_TO_BLOB create a blob + If max_length > 0 create a varchar + If max_length == 0 create a CHAR(0) +*/ + + +Field *Item::make_string_field(TABLE *table) +{ + DBUG_ASSERT(collation.collation); + if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB) + return new Field_blob(max_length, maybe_null, name, table, + collation.collation); + /* Item_type_holder holds the exact type, do not change it */ + if (max_length > 0 && + (type() != Item::TYPE_HOLDER || field_type() != MYSQL_TYPE_STRING)) + return new Field_varstring(max_length, maybe_null, name, table, + collation.collation); + return new Field_string(max_length, maybe_null, name, table, + collation.collation); +} + + +/* + Create a field based on field_type of argument + + For now, this is only used to create a field for + IFNULL(x,something) + + RETURN + 0 error + # Created field +*/ + Field *Item::tmp_table_field_from_field_type(TABLE *table) { /* @@ -2043,8 +4216,10 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table) switch (field_type()) { case MYSQL_TYPE_DECIMAL: - return new Field_decimal((char*) 0, max_length, null_ptr, 0, Field::NONE, - name, table, decimals, 0, unsigned_flag); + case MYSQL_TYPE_NEWDECIMAL: + return new Field_new_decimal((char*) 0, max_length, null_ptr, 0, + Field::NONE, name, table, decimals, 0, + unsigned_flag); case MYSQL_TYPE_TINY: return new Field_tiny((char*) 0, max_length, null_ptr, 0, Field::NONE, name, table, 0, 
unsigned_flag); @@ -2077,40 +4252,41 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table) case MYSQL_TYPE_TIME: return new Field_time(maybe_null, name, table, &my_charset_bin); case MYSQL_TYPE_TIMESTAMP: + return new Field_timestamp(maybe_null, name, table, &my_charset_bin); case MYSQL_TYPE_DATETIME: return new Field_datetime(maybe_null, name, table, &my_charset_bin); case MYSQL_TYPE_YEAR: return new Field_year((char*) 0, max_length, null_ptr, 0, Field::NONE, name, table); + case MYSQL_TYPE_BIT: + return new Field_bit_as_char(NULL, max_length, null_ptr, 0, + Field::NONE, name, table); default: - /* This case should never be choosen */ + /* This case should never be chosen */ DBUG_ASSERT(0); /* If something goes awfully wrong, it's better to get a string than die */ case MYSQL_TYPE_ENUM: case MYSQL_TYPE_SET: - case MYSQL_TYPE_VAR_STRING: - DBUG_ASSERT(collation.collation); - if (max_length/collation.collation->mbmaxlen > 255) - break; // If blob - return new Field_varstring(max_length, maybe_null, name, table, - collation.collation); case MYSQL_TYPE_STRING: - DBUG_ASSERT(collation.collation); - if (max_length/collation.collation->mbmaxlen > 255) // If blob - break; - return new Field_string(max_length, maybe_null, name, table, - collation.collation); + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_VARCHAR: + return make_string_field(table); case MYSQL_TYPE_TINY_BLOB: case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_BLOB: - case MYSQL_TYPE_GEOMETRY: + if (this->type() == Item::TYPE_HOLDER) + return new Field_blob(max_length, maybe_null, name, table, + collation.collation, 1); + else + return new Field_blob(max_length, maybe_null, name, table, + collation.collation); break; // Blob handled outside of case + case MYSQL_TYPE_GEOMETRY: + return new Field_geom(max_length, maybe_null, name, table, + (Field::geometry_type) + ((Item_geometry_func *)this)->get_geometry_type()); } - - /* blob is special as it's generated for both blobs and long 
strings */ - return new Field_blob(max_length, maybe_null, name, table, - collation.collation); } @@ -2118,9 +4294,13 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table) void Item_field::make_field(Send_field *tmp_field) { field->make_field(tmp_field); - DBUG_ASSERT(tmp_field->table_name); + DBUG_ASSERT(tmp_field->table_name != 0); if (name) tmp_field->col_name=name; // Use user supplied name + if (table_name) + tmp_field->table_name= table_name; + if (db_name) + tmp_field->db_name= db_name; } @@ -2145,18 +4325,19 @@ void Item_field::save_org_in_field(Field *to) int Item_field::save_in_field(Field *to, bool no_conversions) { + int res; if (result_field->is_null()) { null_value=1; - return set_field_to_null_with_conversions(to, no_conversions); + res= set_field_to_null_with_conversions(to, no_conversions); } else { to->set_notnull(); - field_conv(to,result_field); + res= field_conv(to,result_field); null_value=0; } - return 0; + return res; } @@ -2190,7 +4371,7 @@ int Item_null::save_in_field(Field *field, bool no_conversions) field Field where we want to store NULL RETURN VALUES - 0 ok + 0 OK 1 Field doesn't support NULL values */ @@ -2217,25 +4398,37 @@ int Item::save_in_field(Field *field, bool no_conversions) str_value.set_quick(0, 0, cs); return set_field_to_null_with_conversions(field, no_conversions); } + + /* NOTE: If null_value == FALSE, "result" must be not NULL. 
*/ + field->set_notnull(); error=field->store(result->ptr(),result->length(),cs); str_value.set_quick(0, 0, cs); } else if (result_type() == REAL_RESULT) { - double nr=val(); + double nr= val_real(); if (null_value) return set_field_to_null(field); field->set_notnull(); error=field->store(nr); } + else if (result_type() == DECIMAL_RESULT) + { + my_decimal decimal_value; + my_decimal *value= val_decimal(&decimal_value); + if (null_value) + return set_field_to_null(field); + field->set_notnull(); + error=field->store_decimal(value); + } else { longlong nr=val_int(); if (null_value) return set_field_to_null_with_conversions(field, no_conversions); field->set_notnull(); - error=field->store(nr); + error=field->store(nr, unsigned_flag); } return error; } @@ -2251,12 +4444,10 @@ int Item_string::save_in_field(Field *field, bool no_conversions) return field->store(result->ptr(),result->length(),collation.collation); } + int Item_uint::save_in_field(Field *field, bool no_conversions) { - /* - TODO: To be fixed when wen have a - field->store(longlong, unsigned_flag) method - */ + /* Item_int::save_in_field handles both signed and unsigned. 
*/ return Item_int::save_in_field(field, no_conversions); } @@ -2267,7 +4458,14 @@ int Item_int::save_in_field(Field *field, bool no_conversions) if (null_value) return set_field_to_null(field); field->set_notnull(); - return field->store(nr); + return field->store(nr, unsigned_flag); +} + + +int Item_decimal::save_in_field(Field *field, bool no_conversions) +{ + field->set_notnull(); + return field->store_decimal(&decimal_value); } @@ -2287,7 +4485,7 @@ bool Item_int::eq(const Item *arg, bool binary_cmp) const } -Item *Item_int_with_ref::new_item() +Item *Item_int_with_ref::clone_item() { DBUG_ASSERT(ref->const_item()); /* @@ -2302,12 +4500,64 @@ Item *Item_int_with_ref::new_item() Item_num *Item_uint::neg() { - return new Item_real(name, - ((double) value), 0, max_length); + Item_decimal *item= new Item_decimal(value, 1); + return item->neg(); +} + + +static uint nr_of_decimals(const char *str, const char *end) +{ + const char *decimal_point; + + /* Find position for '.' */ + for (;;) + { + if (str == end) + return 0; + if (*str == 'e' || *str == 'E') + return NOT_FIXED_DEC; + if (*str++ == '.') + break; + } + decimal_point= str; + for (; my_isdigit(system_charset_info, *str) ; str++) + ; + if (*str == 'e' || *str == 'E') + return NOT_FIXED_DEC; + return (uint) (str - decimal_point); +} + + +/* + This function is only called during parsing. 
We will signal an error if + value is not a true double value (overflow) +*/ + +Item_float::Item_float(const char *str_arg, uint length) +{ + int error; + char *end_not_used; + value= my_strntod(&my_charset_bin, (char*) str_arg, length, &end_not_used, + &error); + if (error) + { + /* + Note that we depend on that str_arg is null terminated, which is true + when we are in the parser + */ + DBUG_ASSERT(str_arg[length] == 0); + my_error(ER_ILLEGAL_VALUE_FOR_TYPE, MYF(0), "double", (char*) str_arg); + } + presentation= name=(char*) str_arg; + decimals=(uint8) nr_of_decimals(str_arg, str_arg+length); + max_length=length; + fixed= 1; } -int Item_real::save_in_field(Field *field, bool no_conversions) + +int Item_float::save_in_field(Field *field, bool no_conversions) { - double nr=val(); + double nr= val_real(); if (null_value) return set_field_to_null(field); field->set_notnull(); @@ -2315,7 +4565,27 @@ int Item_real::save_in_field(Field *field, bool no_conversions) } -bool Item_real::eq(const Item *arg, bool binary_cmp) const +void Item_float::print(String *str) +{ + if (presentation) + { + str->append(presentation); + return; + } + char buffer[20]; + String num(buffer, sizeof(buffer), &my_charset_bin); + num.set(value, decimals, &my_charset_bin); + str->append(num); +} + + +/* + hex item + In string context this is a binary string. + In number context this is a longlong value. +*/ + +bool Item_float::eq(const Item *arg, bool binary_cmp) const { if (arg->basic_const_item() && arg->type() == type()) { @@ -2324,16 +4594,11 @@ bool Item_real::eq(const Item *arg, bool binary_cmp) const a basic constant. */ Item *item= (Item*) arg; - return item->val() == value; + return item->val_real() == value; } return FALSE; } -/**************************************************************************** -** varbinary item -** In string context this is a binary string -** In number context this is a longlong value. 
-****************************************************************************/ inline uint char_val(char X) { @@ -2343,7 +4608,7 @@ inline uint char_val(char X) } -Item_varbinary::Item_varbinary(const char *str, uint str_length) +Item_hex_string::Item_hex_string(const char *str, uint str_length) { name=(char*) str-2; // Lex makes this start with 0x max_length=(str_length+1)/2; @@ -2365,7 +4630,7 @@ Item_varbinary::Item_varbinary(const char *str, uint str_length) unsigned_flag= 1; } -longlong Item_varbinary::val_int() +longlong Item_hex_string::val_int() { // following assert is redundant, because fixed=1 assigned in constructor DBUG_ASSERT(fixed == 1); @@ -2379,7 +4644,17 @@ longlong Item_varbinary::val_int() } -int Item_varbinary::save_in_field(Field *field, bool no_conversions) +my_decimal *Item_hex_string::val_decimal(my_decimal *decimal_value) +{ + // following assert is redundant, because fixed=1 assigned in constructor + DBUG_ASSERT(fixed == 1); + ulonglong value= (ulonglong)val_int(); + int2my_decimal(E_DEC_FATAL_ERROR, value, TRUE, decimal_value); + return (decimal_value); +} + + +int Item_hex_string::save_in_field(Field *field, bool no_conversions) { field->set_notnull(); if (field->result_type() == STRING_RESULT) @@ -2399,17 +4674,17 @@ int Item_varbinary::save_in_field(Field *field, bool no_conversions) nr= LONGLONG_MAX; goto warn; } - return field->store((longlong) nr); + return field->store((longlong) nr, TRUE); // Assume hex numbers are unsigned warn: - if (!field->store((longlong) nr)) + if (!field->store((longlong) nr, TRUE)) field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } -bool Item_varbinary::eq(const Item *arg, bool binary_cmp) const +bool Item_hex_string::eq(const Item *arg, bool binary_cmp) const { if (arg->basic_const_item() && arg->type() == type()) { @@ -2421,7 +4696,7 @@ bool Item_varbinary::eq(const Item *arg, bool binary_cmp) const } -Item *Item_varbinary::safe_charset_converter(CHARSET_INFO 
*tocs) +Item *Item_hex_string::safe_charset_converter(CHARSET_INFO *tocs) { Item_string *conv; String tmp, *str= val_str(&tmp); @@ -2429,12 +4704,50 @@ Item *Item_varbinary::safe_charset_converter(CHARSET_INFO *tocs) if (!(conv= new Item_string(str->ptr(), str->length(), tocs))) return NULL; conv->str_value.copy(); - conv->str_value.shrink_to_length(); + conv->str_value.mark_as_const(); return conv; } /* + bin item. + In string context this is a binary string. + In number context this is a longlong value. +*/ + +Item_bin_string::Item_bin_string(const char *str, uint str_length) +{ + const char *end= str + str_length - 1; + uchar bits= 0; + uint power= 1; + + name= (char*) str - 2; + max_length= (str_length + 7) >> 3; + char *ptr= (char*) sql_alloc(max_length + 1); + if (!ptr) + return; + str_value.set(ptr, max_length, &my_charset_bin); + ptr+= max_length - 1; + ptr[1]= 0; // Set end null for string + for (; end >= str; end--) + { + if (power == 256) + { + power= 1; + *ptr--= bits; + bits= 0; + } + if (*end == '1') + bits|= power; + power<<= 1; + } + *ptr= (char) bits; + collation.set(&my_charset_bin, DERIVATION_COERCIBLE); + fixed= 1; +} + + +/* Pack data in buffer for sending */ @@ -2450,10 +4763,10 @@ bool Item_null::send(Protocol *protocol, String *packet) bool Item::send(Protocol *protocol, String *buffer) { bool result; - enum_field_types type; - LINT_INIT(result); + enum_field_types f_type; + LINT_INIT(result); // Will be set if null_value == 0 - switch ((type=field_type())) { + switch ((f_type=field_type())) { default: case MYSQL_TYPE_NULL: case MYSQL_TYPE_DECIMAL: @@ -2466,6 +4779,9 @@ bool Item::send(Protocol *protocol, String *buffer) case MYSQL_TYPE_GEOMETRY: case MYSQL_TYPE_STRING: case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_NEWDECIMAL: { String *res; if ((res=val_str(buffer))) @@ -2481,6 +4797,7 @@ bool Item::send(Protocol *protocol, String *buffer) break; } case MYSQL_TYPE_SHORT: + case 
MYSQL_TYPE_YEAR: { longlong nr; nr= val_int(); @@ -2508,15 +4825,14 @@ bool Item::send(Protocol *protocol, String *buffer) case MYSQL_TYPE_FLOAT: { float nr; - nr= (float) val(); + nr= (float) val_real(); if (!null_value) result= protocol->store(nr, decimals, buffer); break; } case MYSQL_TYPE_DOUBLE: { - double nr; - nr= val(); + double nr= val_real(); if (!null_value) result= protocol->store(nr, decimals, buffer); break; @@ -2529,7 +4845,7 @@ bool Item::send(Protocol *protocol, String *buffer) get_date(&tm, TIME_FUZZY_DATE); if (!null_value) { - if (type == MYSQL_TYPE_DATE) + if (f_type == MYSQL_TYPE_DATE) return protocol->store_date(&tm); else result= protocol->store(&tm); @@ -2557,213 +4873,585 @@ bool Item_field::send(Protocol *protocol, String *buffer) } +void Item_field::update_null_value() +{ + /* + need to set no_errors to prevent warnings about type conversion + popping up. + */ + THD *thd= field->table->in_use; + int no_errors; + + no_errors= thd->no_errors; + thd->no_errors= 1; + Item::update_null_value(); + thd->no_errors= no_errors; +} + + +/* + Add the field to the select list and substitute it for the reference to + the field. + + SYNOPSIS + Item_field::update_value_transformer() + select_arg current select + + DESCRIPTION + If the field doesn't belong to the table being inserted into then it is + added to the select list, pointer to it is stored in the ref_pointer_array + of the select and the field itself is substituted for the Item_ref object. + This is done in order to get correct values from update fields that + belongs to the SELECT part in the INSERT .. SELECT .. ON DUPLICATE KEY + UPDATE statement. 
+ + RETURN + 0 if error occured + ref if all conditions are met + this field otherwise +*/ + +Item *Item_field::update_value_transformer(byte *select_arg) +{ + SELECT_LEX *select= (SELECT_LEX*)select_arg; + DBUG_ASSERT(fixed); + + if (field->table != select->context.table_list->table && + type() != Item::TRIGGER_FIELD_ITEM) + { + List<Item> *all_fields= &select->join->all_fields; + Item **ref_pointer_array= select->ref_pointer_array; + int el= all_fields->elements; + Item_ref *ref; + + ref_pointer_array[el]= (Item*)this; + all_fields->push_front((Item*)this); + ref= new Item_ref(&select->context, ref_pointer_array + el, + table_name, field_name); + return ref; + } + return this; +} + + +void Item_field::print(String *str) +{ + if (field && field->table->const_table) + { + char buff[MAX_FIELD_WIDTH]; + String tmp(buff,sizeof(buff),str->charset()); + field->val_str(&tmp); + str->append('\''); + str->append(tmp); + str->append('\''); + return; + } + Item_ident::print(str); +} + + +Item_ref::Item_ref(Name_resolution_context *context_arg, + Item **item, const char *table_name_arg, + const char *field_name_arg, + bool alias_name_used_arg) + :Item_ident(context_arg, NullS, table_name_arg, field_name_arg), + result_field(0), ref(item) +{ + alias_name_used= alias_name_used_arg; + /* + This constructor used to create some internals references over fixed items + */ + if (ref && *ref && (*ref)->fixed) + set_properties(); +} + + /* - This is used for HAVING clause - Find field in select list having the same name + Resolve the name of a reference to a column reference. + + SYNOPSIS + Item_ref::fix_fields() + thd [in] current thread + reference [in/out] view column if this item was resolved to a view column + + DESCRIPTION + The method resolves the column reference represented by 'this' as a column + present in one of: GROUP BY clause, SELECT clause, outer queries. It is + used typically for columns in the HAVING clause which are not under + aggregate functions. 
+ + NOTES + The name resolution algorithm used is (where [T_j] is an optional table + name that qualifies the column name): + + resolve_extended([T_j].col_ref_i) + { + Search for a column or derived column named col_ref_i [in table T_j] + in the SELECT and GROUP clauses of Q. + + if such a column is NOT found AND // Lookup in outer queries. + there are outer queries + { + for each outer query Q_k beginning from the inner-most one + { + Search for a column or derived column named col_ref_i + [in table T_j] in the SELECT and GROUP clauses of Q_k. + + if such a column is not found AND + - Q_k is not a group query AND + - Q_k is not inside an aggregate function + OR + - Q_(k-1) is not in a HAVING or SELECT clause of Q_k + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q_k; + } + } + } + } + + This procedure treats GROUP BY and SELECT clauses as one namespace for + column references in HAVING. Notice that compared to + Item_field::fix_fields, here we first search the SELECT and GROUP BY + clauses, and then we search the FROM clause. 
+ + POSTCONDITION + Item_ref::ref is 0 or points to a valid item + + RETURN + TRUE if error + FALSE on success */ -bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables, Item **reference) +bool Item_ref::fix_fields(THD *thd, Item **reference) { - DBUG_ASSERT(fixed == 0); - uint counter; enum_parsing_place place= NO_MATTER; - bool not_used; - if (!ref) + DBUG_ASSERT(fixed == 0); + SELECT_LEX *current_sel= thd->lex->current_select; + + if (!ref || ref == not_found_item) { - TABLE_LIST *where= 0, *table_list; - SELECT_LEX_UNIT *prev_unit= thd->lex->current_select->master_unit(); - SELECT_LEX *sl= prev_unit->outer_select(); - /* - Finding only in current select will be performed for selects that have - not outer one and for derived tables (which not support using outer - fields for now) - */ - if ((ref= find_item_in_list(this, - *(thd->lex->current_select->get_item_list()), - &counter, - ((sl && - thd->lex->current_select->master_unit()-> - first_select()->linkage != - DERIVED_TABLE_TYPE) ? - REPORT_EXCEPT_NOT_FOUND : - REPORT_ALL_ERRORS ), ¬_used)) == - (Item **)not_found_item) + if (!(ref= resolve_ref_in_select_and_group(thd, this, + context->select_lex))) + goto error; /* Some error occurred (e.g. ambiguous names). */ + + if (ref == not_found_item) /* This reference was not resolved. */ { - Field *tmp= (Field*) not_found_field; - SELECT_LEX *last= 0; + Name_resolution_context *last_checked_context= context; + Name_resolution_context *outer_context= context->outer_context; + Field *from_field; + ref= 0; + + if (!outer_context) + { + /* The current reference cannot be resolved in this query. */ + my_error(ER_BAD_FIELD_ERROR,MYF(0), + this->full_name(), current_thd->where); + goto error; + } + /* - We can't find table field in select list of current select, - consequently we have to find it in outer subselect(s). - We can't join lists of outer & current select, because of scope - of view rules. 
For example if both tables (outer & current) have - field 'field' it is not mistake to refer to this field without - mention of table name, but if we join tables in one list it will - cause error ER_NON_UNIQ_ERROR in find_item_in_list. + If there is an outer context (select), and it is not a derived table + (which do not support the use of outer fields for now), try to + resolve this reference in the outer select(s). + + We treat each subselect as a separate namespace, so that different + subselects may contain columns with the same names. The subselects are + searched starting from the innermost. */ - for ( ; sl ; sl= (prev_unit= sl->master_unit())->outer_select()) + from_field= (Field*) not_found_field; + + do { - last= sl; - Item_subselect *prev_subselect_item= prev_unit->item; - if (sl->resolve_mode == SELECT_LEX::SELECT_MODE && - (ref= find_item_in_list(this, sl->item_list, - &counter, REPORT_EXCEPT_NOT_FOUND, - ¬_used)) != - (Item **)not_found_item) - { - if (ref && (*ref)->fixed) // Avoid crash in case of error - { - prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); - prev_subselect_item->const_item_cache&= (*ref)->const_item(); - } - break; - } - table_list= sl->get_table_list(); - if (sl->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) - { - // it is primary INSERT st_select_lex => skip first table resolving - table_list= table_list->next; - } + SELECT_LEX *select= outer_context->select_lex; + Item_subselect *prev_subselect_item= + last_checked_context->select_lex->master_unit()->item; + last_checked_context= outer_context; + + /* Search in the SELECT and GROUP lists of the outer select. */ + if (outer_context->resolve_in_select_list) + { + if (!(ref= resolve_ref_in_select_and_group(thd, this, select))) + goto error; /* Some error occurred (e.g. ambiguous names). 
*/ + if (ref != not_found_item) + { + DBUG_ASSERT(*ref && (*ref)->fixed); + prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); + prev_subselect_item->const_item_cache&= (*ref)->const_item(); + break; + } + /* + Set ref to 0 to ensure that we get an error in case we replaced + this item with another item and still use this item in some + other place of the parse tree. + */ + ref= 0; + } + place= prev_subselect_item->parsing_place; /* - check table fields only if subquery used somewhere out of HAVING - or SELECT list or outer SELECT do not use groupping (i.e. tables - are accessable) + Check table fields only if the subquery is used somewhere out of + HAVING or the outer SELECT does not use grouping (i.e. tables are + accessible). + TODO: + Here we could first find the field anyway, and then test this + condition, so that we can give a better error message - + ER_WRONG_FIELD_WITH_GROUP, instead of the less informative + ER_BAD_FIELD_ERROR which we produce now. */ if ((place != IN_HAVING || - (sl->with_sum_func == 0 && sl->group_list.elements == 0)) && - (tmp= find_field_in_tables(thd, this, - table_list, &where, - 0)) != not_found_field) + (!select->with_sum_func && + select->group_list.elements == 0))) { - prev_subselect_item->used_tables_cache|= tmp->table->map; - prev_subselect_item->const_item_cache= 0; - break; + /* + In case of view, find_field_in_tables() write pointer to view + field expression to 'reference', i.e. it substitute that + expression instead of this Item_ref + */ + from_field= find_field_in_tables(thd, this, + outer_context-> + first_name_resolution_table, + outer_context-> + last_name_resolution_table, + reference, + IGNORE_EXCEPT_NON_UNIQUE, + TRUE, TRUE); + if (! 
from_field) + goto error; + if (from_field == view_ref_found) + { + Item::Type refer_type= (*reference)->type(); + prev_subselect_item->used_tables_cache|= + (*reference)->used_tables(); + prev_subselect_item->const_item_cache&= + (*reference)->const_item(); + DBUG_ASSERT((*reference)->type() == REF_ITEM); + mark_as_dependent(thd, last_checked_context->select_lex, + context->select_lex, this, + ((refer_type == REF_ITEM || + refer_type == FIELD_ITEM) ? + (Item_ident*) (*reference) : + 0)); + /* + view reference found, we substituted it instead of this + Item, so can quit + */ + return FALSE; + } + if (from_field != not_found_field) + { + if (cached_table && cached_table->select_lex && + outer_context->select_lex && + cached_table->select_lex != outer_context->select_lex) + { + /* + Due to cache, find_field_in_tables() can return field which + doesn't belong to provided outer_context. In this case we have + to find proper field context in order to fix field correcly. + */ + do + { + outer_context= outer_context->outer_context; + select= outer_context->select_lex; + prev_subselect_item= + last_checked_context->select_lex->master_unit()->item; + last_checked_context= outer_context; + } while (outer_context && outer_context->select_lex && + cached_table->select_lex != outer_context->select_lex); + } + prev_subselect_item->used_tables_cache|= from_field->table->map; + prev_subselect_item->const_item_cache= 0; + break; + } } - // Reference is not found => depend from outer (or just error) - prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; - prev_subselect_item->const_item_cache= 0; + DBUG_ASSERT(from_field == not_found_field); - if (sl->master_unit()->first_select()->linkage == - DERIVED_TABLE_TYPE) - break; // do not look over derived table - } + /* Reference is not found => depend on outer (or just error). 
*/ + prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; + prev_subselect_item->const_item_cache= 0; - if (!ref) - return 1; - if (!tmp) - return -1; - if (ref == (Item **)not_found_item && tmp == not_found_field) - { - // We can't say exactly what absend (table or field) - my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), - full_name(), thd->where); - ref= 0; // Safety - return 1; - } - if (tmp != not_found_field) + outer_context= outer_context->outer_context; + } while (outer_context); + + DBUG_ASSERT(from_field != 0 && from_field != view_ref_found); + if (from_field != not_found_field) { Item_field* fld; - /* - Set ref to 0 as we are replacing this item with the found item - and this will ensure we get an error if this item would be - used elsewhere - */ - ref= 0; // Safety - if (!(fld= new Item_field(tmp))) - return 1; - thd->change_item_tree(reference, fld); - mark_as_dependent(thd, last, thd->lex->current_select, fld); - return 0; + if (!(fld= new Item_field(from_field))) + goto error; + thd->change_item_tree(reference, fld); + mark_as_dependent(thd, last_checked_context->select_lex, + thd->lex->current_select, this, fld); + return FALSE; } - if (!last->ref_pointer_array[counter]) + if (ref == 0) { - my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, - "forward reference in item list"); - return -1; + /* The item was not a table field and not a reference */ + my_error(ER_BAD_FIELD_ERROR, MYF(0), + this->full_name(), current_thd->where); + goto error; } - DBUG_ASSERT((*ref)->fixed); - mark_as_dependent(thd, last, thd->lex->current_select, - this); - if (place == IN_HAVING) - { - Item_ref *rf; - if (!(rf= new Item_direct_ref(last->ref_pointer_array + counter, - (char *)table_name, - (char *)field_name))) - return 1; - ref= 0; // Safety - if (rf->fix_fields(thd, tables, ref) || rf->check_cols(1)) - return 1; - thd->change_item_tree(reference, rf); - return 0; - } - ref= last->ref_pointer_array + counter; - } - else if (!ref) - return 1; - 
else - { - if (!(*ref)->fixed) - { - my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, - "forward reference in item list"); - return -1; - } - ref= thd->lex->current_select->ref_pointer_array + counter; + /* Should be checked in resolve_ref_in_select_and_group(). */ + DBUG_ASSERT(*ref && (*ref)->fixed); + mark_as_dependent(thd, last_checked_context->select_lex, + context->select_lex, this, this); } } + DBUG_ASSERT(*ref); /* - The following conditional is changed as to correctly identify - incorrect references in group functions or forward references - with sub-select's / derived tables, while it prevents this - check when Item_ref is created in an expression involving - summing function, which is to be placed in the user variable. + Check if this is an incorrect reference in a group function or forward + reference. Do not issue an error if this is an unnamed reference inside an + aggregate function. */ if (((*ref)->with_sum_func && name && - (depended_from || - !(thd->lex->current_select->linkage != GLOBAL_OPTIONS_TYPE && - thd->lex->current_select->having_fix_field))) || + !(current_sel->linkage != GLOBAL_OPTIONS_TYPE && + current_sel->having_fix_field)) || !(*ref)->fixed) { - my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, - ((*ref)->with_sum_func? - "reference on group function": - "forward reference in item list")); - return 1; + my_error(ER_ILLEGAL_REFERENCE, MYF(0), + name, ((*ref)->with_sum_func? + "reference to group function": + "forward reference in item list")); + goto error; } set_properties(); - if (ref && (*ref)->check_cols(1)) - return 1; - return 0; + if ((*ref)->check_cols(1)) + goto error; + return FALSE; + +error: + context->process_error(thd); + return TRUE; } + void Item_ref::set_properties() { max_length= (*ref)->max_length; maybe_null= (*ref)->maybe_null; decimals= (*ref)->decimals; collation.set((*ref)->collation); + /* + We have to remember if we refer to a sum function, to ensure that + split_sum_func() doesn't try to change the reference. 
+ */ with_sum_func= (*ref)->with_sum_func; unsigned_flag= (*ref)->unsigned_flag; fixed= 1; + if (alias_name_used) + return; + if ((*ref)->type() == FIELD_ITEM) + alias_name_used= ((Item_ident *) (*ref))->alias_name_used; + else + alias_name_used= TRUE; // it is not field, so it is was resolved by alias +} + + +void Item_ref::cleanup() +{ + DBUG_ENTER("Item_ref::cleanup"); + Item_ident::cleanup(); + result_field= 0; + DBUG_VOID_RETURN; } + void Item_ref::print(String *str) { - if (ref && *ref) - (*ref)->print(str); + if (ref) + { + if ((*ref)->type() != Item::CACHE_ITEM && ref_type() != VIEW_REF && + !table_name && name && alias_name_used) + { + THD *thd= current_thd; + append_identifier(thd, str, name, (uint) strlen(name)); + } + else + (*ref)->print(str); + } else Item_ident::print(str); } +bool Item_ref::send(Protocol *prot, String *tmp) +{ + if (result_field) + return prot->store(result_field); + return (*ref)->send(prot, tmp); +} + + +double Item_ref::val_result() +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0.0; + return result_field->val_real(); + } + return val_real(); +} + + +longlong Item_ref::val_int_result() +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0; + return result_field->val_int(); + } + return val_int(); +} + + +String *Item_ref::str_result(String* str) +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0; + str->set_charset(str_value.charset()); + return result_field->val_str(str, &str_value); + } + return val_str(str); +} + + +my_decimal *Item_ref::val_decimal_result(my_decimal *decimal_value) +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0; + return result_field->val_decimal(decimal_value); + } + return val_decimal(decimal_value); +} + + +bool Item_ref::val_bool_result() +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0; + switch (result_field->result_type()) { + case 
INT_RESULT: + return result_field->val_int() != 0; + case DECIMAL_RESULT: + { + my_decimal decimal_value; + my_decimal *val= result_field->val_decimal(&decimal_value); + if (val) + return !my_decimal_is_zero(val); + return 0; + } + case REAL_RESULT: + case STRING_RESULT: + return result_field->val_real() != 0.0; + case ROW_RESULT: + default: + DBUG_ASSERT(0); + } + } + return val_bool(); +} + + +double Item_ref::val_real() +{ + DBUG_ASSERT(fixed); + double tmp=(*ref)->val_result(); + null_value=(*ref)->null_value; + return tmp; +} + + +longlong Item_ref::val_int() +{ + DBUG_ASSERT(fixed); + longlong tmp=(*ref)->val_int_result(); + null_value=(*ref)->null_value; + return tmp; +} + + +bool Item_ref::val_bool() +{ + DBUG_ASSERT(fixed); + bool tmp= (*ref)->val_bool_result(); + null_value= (*ref)->null_value; + return tmp; +} + + +String *Item_ref::val_str(String* tmp) +{ + DBUG_ASSERT(fixed); + tmp=(*ref)->str_result(tmp); + null_value=(*ref)->null_value; + return tmp; +} + + +bool Item_ref::is_null() +{ + DBUG_ASSERT(fixed); + return (*ref)->is_null(); +} + + +bool Item_ref::get_date(TIME *ltime,uint fuzzydate) +{ + return (null_value=(*ref)->get_date_result(ltime,fuzzydate)); +} + + +my_decimal *Item_ref::val_decimal(my_decimal *decimal_value) +{ + my_decimal *val= (*ref)->val_decimal_result(decimal_value); + null_value= (*ref)->null_value; + return val; +} + +int Item_ref::save_in_field(Field *to, bool no_conversions) +{ + int res; + DBUG_ASSERT(!result_field); + res= (*ref)->save_in_field(to, no_conversions); + null_value= (*ref)->null_value; + return res; +} + + +void Item_ref::save_org_in_field(Field *field) +{ + (*ref)->save_org_in_field(field); +} + + +void Item_ref::make_field(Send_field *field) +{ + (*ref)->make_field(field); + /* Non-zero in case of a view */ + if (name) + field->col_name= name; + if (table_name) + field->table_name= table_name; + if (db_name) + field->db_name= db_name; +} + + void Item_ref_null_helper::print(String *str) { - 
str->append("<ref_null_helper>(", 18); - if (ref && *ref) + str->append(STRING_WITH_LEN("<ref_null_helper>(")); + if (ref) (*ref)->print(str); else str->append('?'); @@ -2771,6 +5459,143 @@ void Item_ref_null_helper::print(String *str) } +double Item_direct_ref::val_real() +{ + double tmp=(*ref)->val_real(); + null_value=(*ref)->null_value; + return tmp; +} + + +longlong Item_direct_ref::val_int() +{ + longlong tmp=(*ref)->val_int(); + null_value=(*ref)->null_value; + return tmp; +} + + +String *Item_direct_ref::val_str(String* tmp) +{ + tmp=(*ref)->val_str(tmp); + null_value=(*ref)->null_value; + return tmp; +} + + +my_decimal *Item_direct_ref::val_decimal(my_decimal *decimal_value) +{ + my_decimal *tmp= (*ref)->val_decimal(decimal_value); + null_value=(*ref)->null_value; + return tmp; +} + + +bool Item_direct_ref::val_bool() +{ + bool tmp= (*ref)->val_bool(); + null_value=(*ref)->null_value; + return tmp; +} + + +bool Item_direct_ref::is_null() +{ + return (*ref)->is_null(); +} + + +bool Item_direct_ref::get_date(TIME *ltime,uint fuzzydate) +{ + return (null_value=(*ref)->get_date(ltime,fuzzydate)); +} + + +/* + Prepare referenced field then call usual Item_direct_ref::fix_fields + + SYNOPSIS + Item_direct_view_ref::fix_fields() + thd thread handler + reference reference on reference where this item stored + + RETURN + FALSE OK + TRUE Error +*/ + +bool Item_direct_view_ref::fix_fields(THD *thd, Item **reference) +{ + /* view fild reference must be defined */ + DBUG_ASSERT(*ref); + /* (*ref)->check_cols() will be made in Item_direct_ref::fix_fields */ + if (!(*ref)->fixed && + ((*ref)->fix_fields(thd, ref))) + return TRUE; + return Item_direct_ref::fix_fields(thd, reference); +} + +/* + Prepare referenced outer field then call usual Item_direct_ref::fix_fields + + SYNOPSIS + Item_outer_ref::fix_fields() + thd thread handler + reference reference on reference where this item stored + + RETURN + FALSE OK + TRUE Error +*/ + +bool Item_outer_ref::fix_fields(THD *thd, 
Item **reference) +{ + DBUG_ASSERT(*ref); + /* outer_field->check_cols() will be made in Item_direct_ref::fix_fields */ + outer_field->fixed_as_field= 1; + if (!outer_field->fixed && + (outer_field->fix_fields(thd, reference))) + return TRUE; + table_name= outer_field->table_name; + return Item_direct_ref::fix_fields(thd, reference); +} + +/* + Compare two view column references for equality. + + SYNOPSIS + Item_direct_view_ref::eq() + item item to compare with + binary_cmp make binary comparison + + DESCRIPTION + A view column reference is considered equal to another column + reference if the second one is a view column and if both column + references resolve to the same item. It is assumed that both + items are of the same type. + + RETURN + TRUE Referenced item is equal to given item + FALSE otherwise +*/ + + +bool Item_direct_view_ref::eq(const Item *item, bool binary_cmp) const +{ + if (item->type() == REF_ITEM) + { + Item_ref *item_ref= (Item_ref*) item; + if (item_ref->ref_type() == VIEW_REF) + { + Item *item_ref_ref= *(item_ref->ref); + DBUG_ASSERT((*ref)->real_item()->type() == + item_ref_ref->real_item()->type()); + return ((*ref)->real_item() == item_ref_ref->real_item()); + } + } + return FALSE; +} + bool Item_default_value::eq(const Item *item, bool binary_cmp) const { return item->type() == DEFAULT_VALUE_ITEM && @@ -2778,51 +5603,127 @@ bool Item_default_value::eq(const Item *item, bool binary_cmp) const } -bool Item_default_value::fix_fields(THD *thd, - struct st_table_list *table_list, - Item **items) +bool Item_default_value::fix_fields(THD *thd, Item **items) { + Item *real_arg; + Item_field *field_arg; + Field *def_field; DBUG_ASSERT(fixed == 0); + if (!arg) { fixed= 1; - return 0; + return FALSE; } - if (!arg->fixed && arg->fix_fields(thd, table_list, &arg)) - return 1; - - if (arg->type() == REF_ITEM) + if (!arg->fixed && arg->fix_fields(thd, &arg)) + goto error; + + + real_arg= arg->real_item(); + if (real_arg->type() != FIELD_ITEM) { - 
Item_ref *ref= (Item_ref *)arg; - if (ref->ref[0]->type() != FIELD_ITEM) - { - return 1; - } - arg= ref->ref[0]; + my_error(ER_NO_DEFAULT_FOR_FIELD, MYF(0), arg->name); + goto error; } - Item_field *field_arg= (Item_field *)arg; - Field *def_field= (Field*) sql_alloc(field_arg->field->size_of()); - if (!def_field) - return 1; + + field_arg= (Item_field *)real_arg; + if (field_arg->field->flags & NO_DEFAULT_VALUE_FLAG) + { + my_error(ER_NO_DEFAULT_FOR_FIELD, MYF(0), field_arg->field->field_name); + goto error; + } + if (!(def_field= (Field*) sql_alloc(field_arg->field->size_of()))) + goto error; memcpy(def_field, field_arg->field, field_arg->field->size_of()); - def_field->move_field(def_field->table->default_values - + def_field->move_field(def_field->table->s->default_values - def_field->table->record[0]); set_field(def_field); - return 0; + return FALSE; + +error: + context->process_error(thd); + return TRUE; } + void Item_default_value::print(String *str) { if (!arg) { - str->append("default", 7); + str->append(STRING_WITH_LEN("default")); return; } - str->append("default(", 8); + str->append(STRING_WITH_LEN("default(")); arg->print(str); str->append(')'); } + +int Item_default_value::save_in_field(Field *field_arg, bool no_conversions) +{ + if (!arg) + { + if (field_arg->flags & NO_DEFAULT_VALUE_FLAG) + { + if (field_arg->reset()) + { + my_message(ER_CANT_CREATE_GEOMETRY_OBJECT, + ER(ER_CANT_CREATE_GEOMETRY_OBJECT), MYF(0)); + return -1; + } + + if (context->error_processor == &view_error_processor) + { + TABLE_LIST *view= cached_table->top_table(); + push_warning_printf(field_arg->table->in_use, + MYSQL_ERROR::WARN_LEVEL_WARN, + ER_NO_DEFAULT_FOR_VIEW_FIELD, + ER(ER_NO_DEFAULT_FOR_VIEW_FIELD), + view->view_db.str, + view->view_name.str); + } + else + { + push_warning_printf(field_arg->table->in_use, + MYSQL_ERROR::WARN_LEVEL_WARN, + ER_NO_DEFAULT_FOR_FIELD, + ER(ER_NO_DEFAULT_FOR_FIELD), + field_arg->field_name); + } + return 1; + } + field_arg->set_default(); 
+ return 0; + } + return Item_field::save_in_field(field_arg, no_conversions); +} + + +/* + This method like the walk method traverses the item tree, but at the + same time it can replace some nodes in the tree +*/ + +Item *Item_default_value::transform(Item_transformer transformer, byte *args) +{ + DBUG_ASSERT(!current_thd->is_stmt_prepare()); + + Item *new_item= arg->transform(transformer, args); + if (!new_item) + return 0; + + /* + THD::change_item_tree() should be called only if the tree was + really transformed, i.e. when a new item has been created. + Otherwise we'll be allocating a lot of unnecessary memory for + change records at each execution. + */ + if (arg != new_item) + current_thd->change_item_tree(&arg, new_item); + return (this->*transformer)(args); +} + + bool Item_insert_value::eq(const Item *item, bool binary_cmp) const { return item->type() == INSERT_VALUE_ITEM && @@ -2830,36 +5731,44 @@ bool Item_insert_value::eq(const Item *item, bool binary_cmp) const } -bool Item_insert_value::fix_fields(THD *thd, - struct st_table_list *table_list, - Item **items) +bool Item_insert_value::fix_fields(THD *thd, Item **items) { DBUG_ASSERT(fixed == 0); - st_table_list *orig_next_table= table_list->next; - table_list->next= 0; - if (!arg->fixed && arg->fix_fields(thd, table_list, &arg)) + /* We should only check that arg is in first table */ + if (!arg->fixed) { - table_list->next= orig_next_table; - return 1; + bool res; + st_table_list *orig_next_table= context->last_name_resolution_table; + context->last_name_resolution_table= context->first_name_resolution_table; + res= arg->fix_fields(thd, &arg); + context->last_name_resolution_table= orig_next_table; + if (res) + return TRUE; } - table_list->next= orig_next_table; if (arg->type() == REF_ITEM) { Item_ref *ref= (Item_ref *)arg; if (ref->ref[0]->type() != FIELD_ITEM) { - return 1; + my_error(ER_BAD_FIELD_ERROR, MYF(0), "", "VALUES() function"); + return TRUE; } arg= ref->ref[0]; } + /* + According to our 
SQL grammar, VALUES() function can reference + only to a column. + */ + DBUG_ASSERT(arg->type() == FIELD_ITEM); + Item_field *field_arg= (Item_field *)arg; if (field_arg->field->table->insert_values) { Field *def_field= (Field*) sql_alloc(field_arg->field->size_of()); if (!def_field) - return 1; + return TRUE; memcpy(def_field, field_arg->field, field_arg->field->size_of()); def_field->move_field(def_field->table->insert_values - def_field->table->record[0]); @@ -2872,16 +5781,155 @@ bool Item_insert_value::fix_fields(THD *thd, set_field(new Field_null(0, 0, Field::NONE, tmp_field->field_name, tmp_field->table, &my_charset_bin)); } - return 0; + return FALSE; } void Item_insert_value::print(String *str) { - str->append("values(", 7); + str->append(STRING_WITH_LEN("values(")); arg->print(str); str->append(')'); } + +/* + Find index of Field object which will be appropriate for item + representing field of row being changed in trigger. + + SYNOPSIS + setup_field() + thd - current thread context + table - table of trigger (and where we looking for fields) + table_grant_info - GRANT_INFO of the subject table + + NOTE + This function does almost the same as fix_fields() for Item_field + but is invoked right after trigger definition parsing. Since at + this stage we can't say exactly what Field object (corresponding + to TABLE::record[0] or TABLE::record[1]) should be bound to this + Item, we only find out index of the Field and then select concrete + Field object in fix_fields() (by that time Table_trigger_list::old_field/ + new_field should point to proper array of Fields). + It also binds Item_trigger_field to Table_triggers_list object for + table of trigger which uses this item. 
+*/ + +void Item_trigger_field::setup_field(THD *thd, TABLE *table, + GRANT_INFO *table_grant_info) +{ + /* + There is no sense in marking fields used by trigger with current value + of THD::query_id since it is completely unrelated to the THD::query_id + value for statements which will invoke trigger. So instead we use + Table_triggers_list::mark_fields_used() method which is called during + execution of these statements. + */ + bool save_set_query_id= thd->set_query_id; + thd->set_query_id= 0; + /* + Try to find field by its name and if it will be found + set field_idx properly. + */ + (void)find_field_in_table(thd, table, field_name, (uint) strlen(field_name), + 0, &field_idx); + thd->set_query_id= save_set_query_id; + triggers= table->triggers; + table_grants= table_grant_info; +} + + +bool Item_trigger_field::eq(const Item *item, bool binary_cmp) const +{ + return item->type() == TRIGGER_FIELD_ITEM && + row_version == ((Item_trigger_field *)item)->row_version && + !my_strcasecmp(system_charset_info, field_name, + ((Item_trigger_field *)item)->field_name); +} + + +void Item_trigger_field::set_required_privilege(bool rw) +{ + /* + Require SELECT and UPDATE privilege if this field will be read and + set, and only UPDATE privilege for setting the field. + */ + want_privilege= (rw ? SELECT_ACL | UPDATE_ACL : UPDATE_ACL); +} + + +bool Item_trigger_field::set_value(THD *thd, sp_rcontext * /*ctx*/, Item **it) +{ + Item *item= sp_prepare_func_item(thd, it); + + return (!item || (!fixed && fix_fields(thd, 0)) || + (item->save_in_field(field, 0) < 0)); +} + + +bool Item_trigger_field::fix_fields(THD *thd, Item **items) +{ + /* + Since trigger is object tightly associated with TABLE object most + of its set up can be performed during trigger loading i.e. trigger + parsing! So we have little to do in fix_fields. :) + */ + + DBUG_ASSERT(fixed == 0); + + /* Set field. 
*/ + + if (field_idx != (uint)-1) + { +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* + Check access privileges for the subject table. We check privileges only + in runtime. + */ + + if (table_grants) + { + table_grants->want_privilege= want_privilege; + + if (check_grant_column(thd, table_grants, triggers->trigger_table->s->db, + triggers->trigger_table->s->table_name, + field_name, + strlen(field_name), thd->security_ctx)) + return TRUE; + } +#endif // NO_EMBEDDED_ACCESS_CHECKS + + field= (row_version == OLD_ROW) ? triggers->old_field[field_idx] : + triggers->new_field[field_idx]; + set_field(field); + fixed= 1; + return FALSE; + } + + my_error(ER_BAD_FIELD_ERROR, MYF(0), field_name, + (row_version == NEW_ROW) ? "NEW" : "OLD"); + return TRUE; +} + + +void Item_trigger_field::print(String *str) +{ + str->append((row_version == NEW_ROW) ? "NEW" : "OLD", 3); + str->append('.'); + str->append(field_name); +} + + +void Item_trigger_field::cleanup() +{ + want_privilege= original_privilege; + /* + Since special nature of Item_trigger_field we should not do most of + things from Item_field::cleanup() or Item_ident::cleanup() here. 
+ */ + Item::cleanup(); +} + + /* If item is a const function, calculate it and return a const item The original item is freed if not returned @@ -2895,6 +5943,9 @@ Item_result item_cmp_type(Item_result a,Item_result b) return INT_RESULT; else if (a == ROW_RESULT || b == ROW_RESULT) return ROW_RESULT; + if ((a == INT_RESULT || a == DECIMAL_RESULT) && + (b == INT_RESULT || b == DECIMAL_RESULT)) + return DECIMAL_RESULT; return REAL_RESULT; } @@ -2909,7 +5960,8 @@ void resolve_const_item(THD *thd, Item **ref, Item *comp_item) item->result_type()); char *name=item->name; // Alloced by sql_alloc - if (res_type == STRING_RESULT) + switch (res_type) { + case STRING_RESULT: { char buff[MAX_FIELD_WIDTH]; String tmp(buff,sizeof(buff),&my_charset_bin),*result; @@ -2922,17 +5974,19 @@ void resolve_const_item(THD *thd, Item **ref, Item *comp_item) char *tmp_str= sql_strmake(result->ptr(), length); new_item= new Item_string(name, tmp_str, length, result->charset()); } + break; } - else if (res_type == INT_RESULT) + case INT_RESULT: { longlong result=item->val_int(); uint length=item->max_length; bool null_value=item->null_value; new_item= (null_value ? (Item*) new Item_null(name) : (Item*) new Item_int(name, result, length)); + break; } - else if (res_type == ROW_RESULT && item->type() == Item::ROW_ITEM && - comp_item->type() == Item::ROW_ITEM) + case ROW_RESULT: + if (item->type() == Item::ROW_ITEM && comp_item->type() == Item::ROW_ITEM) { /* Substitute constants only in Item_rows. 
Don't affect other Items @@ -2956,15 +6010,33 @@ void resolve_const_item(THD *thd, Item **ref, Item *comp_item) DBUG_ASSERT(item_row->cols() == comp_item_row->cols()); col= item_row->cols(); while (col-- > 0) - resolve_const_item(thd, item_row->addr(col), comp_item_row->el(col)); + resolve_const_item(thd, item_row->addr(col), + comp_item_row->element_index(col)); + break; } - else if (res_type == REAL_RESULT) + /* Fallthrough */ + case REAL_RESULT: { // It must REAL_RESULT - double result=item->val(); + double result= item->val_real(); uint length=item->max_length,decimals=item->decimals; bool null_value=item->null_value; new_item= (null_value ? (Item*) new Item_null(name) : (Item*) - new Item_real(name, result, decimals, length)); + new Item_float(name, result, decimals, length)); + break; + } + case DECIMAL_RESULT: + { + my_decimal decimal_value; + my_decimal *result= item->val_decimal(&decimal_value); + uint length= item->max_length, decimals= item->decimals; + bool null_value= item->null_value; + new_item= (null_value ? 
+ (Item*) new Item_null(name) : + (Item*) new Item_decimal(name, result, length, decimals)); + break; + } + default: + DBUG_ASSERT(0); } if (new_item) thd->change_item_tree(ref, new_item); @@ -2995,7 +6067,17 @@ bool field_is_equal_to_item(Field *field,Item *item) } if (res_type == INT_RESULT) return 1; // Both where of type int - double result=item->val(); + if (res_type == DECIMAL_RESULT) + { + my_decimal item_buf, *item_val, + field_buf, *field_val; + item_val= item->val_decimal(&item_buf); + if (item->null_value) + return 1; // This must be true + field_val= field->val_decimal(&field_buf); + return !my_decimal_cmp(item_val, field_val); + } + double result= item->val_real(); if (item->null_value) return 1; return result == field->val_real(); @@ -3003,12 +6085,13 @@ bool field_is_equal_to_item(Field *field,Item *item) Item_cache* Item_cache::get_cache(Item_result type) { - switch (type) - { + switch (type) { case INT_RESULT: return new Item_cache_int(); case REAL_RESULT: return new Item_cache_real(); + case DECIMAL_RESULT: + return new Item_cache_decimal(); case STRING_RESULT: return new Item_cache_str(); case ROW_RESULT: @@ -3023,7 +6106,7 @@ Item_cache* Item_cache::get_cache(Item_result type) void Item_cache::print(String *str) { - str->append("<cache>(", 8); + str->append(STRING_WITH_LEN("<cache>(")); if (example) example->print(str); else @@ -3036,6 +6119,23 @@ void Item_cache_int::store(Item *item) { value= item->val_int_result(); null_value= item->null_value; + unsigned_flag= item->unsigned_flag; +} + + +String *Item_cache_int::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + str->set(value, default_charset()); + return str; +} + + +my_decimal *Item_cache_int::val_decimal(my_decimal *decimal_val) +{ + DBUG_ASSERT(fixed == 1); + int2my_decimal(E_DEC_FATAL_ERROR, value, unsigned_flag, decimal_val); + return decimal_val; } @@ -3046,6 +6146,68 @@ void Item_cache_real::store(Item *item) } +longlong Item_cache_real::val_int() +{ + DBUG_ASSERT(fixed == 1); + 
return (longlong) rint(value); +} + + +String* Item_cache_real::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + str->set(value, decimals, default_charset()); + return str; +} + + +my_decimal *Item_cache_real::val_decimal(my_decimal *decimal_val) +{ + DBUG_ASSERT(fixed == 1); + double2my_decimal(E_DEC_FATAL_ERROR, value, decimal_val); + return decimal_val; +} + + +void Item_cache_decimal::store(Item *item) +{ + my_decimal *val= item->val_decimal_result(&decimal_value); + if (!(null_value= item->null_value) && val != &decimal_value) + my_decimal2decimal(val, &decimal_value); +} + +double Item_cache_decimal::val_real() +{ + DBUG_ASSERT(fixed); + double res; + my_decimal2double(E_DEC_FATAL_ERROR, &decimal_value, &res); + return res; +} + +longlong Item_cache_decimal::val_int() +{ + DBUG_ASSERT(fixed); + longlong res; + my_decimal2int(E_DEC_FATAL_ERROR, &decimal_value, unsigned_flag, &res); + return res; +} + +String* Item_cache_decimal::val_str(String *str) +{ + DBUG_ASSERT(fixed); + my_decimal_round(E_DEC_FATAL_ERROR, &decimal_value, decimals, FALSE, + &decimal_value); + my_decimal2string(E_DEC_FATAL_ERROR, &decimal_value, 0, 0, 0, str); + return str; +} + +my_decimal *Item_cache_decimal::val_decimal(my_decimal *val) +{ + DBUG_ASSERT(fixed); + return &decimal_value; +} + + void Item_cache_str::store(Item *item) { value_buff.set(buffer, sizeof(buffer), item->collation.collation); @@ -3067,18 +6229,15 @@ void Item_cache_str::store(Item *item) } } - -double Item_cache_str::val() +double Item_cache_str::val_real() { DBUG_ASSERT(fixed == 1); - int err; + int err_not_used; + char *end_not_used; if (value) - { - char *end_not_used; return my_strntod(value->charset(), (char*) value->ptr(), - value->length(), &end_not_used, &err); - } - return (double)0; + value->length(), &end_not_used, &err_not_used); + return (double) 0; } @@ -3093,6 +6252,16 @@ longlong Item_cache_str::val_int() return (longlong)0; } +my_decimal *Item_cache_str::val_decimal(my_decimal *decimal_val) +{ 
+ DBUG_ASSERT(fixed == 1); + if (value) + string2my_decimal(E_DEC_FATAL_ERROR, value, decimal_val); + else + decimal_val= 0; + return decimal_val; +} + bool Item_cache_row::allocate(uint num) { @@ -3110,7 +6279,7 @@ bool Item_cache_row::setup(Item * item) return 1; for (uint i= 0; i < item_count; i++) { - Item *el= item->el(i); + Item *el= item->element_index(i); Item_cache *tmp; if (!(tmp= values[i]= Item_cache::get_cache(el->result_type()))) return 1; @@ -3126,7 +6295,7 @@ void Item_cache_row::store(Item * item) item->bring_value(); for (uint i= 0; i < item_count; i++) { - values[i]->store(item->el(i)); + values[i]->store(item->element_index(i)); null_value|= values[i]->null_value; } } @@ -3164,7 +6333,7 @@ bool Item_cache_row::null_inside() } else { - values[i]->val_int(); + values[i]->update_null_value(); if (values[i]->null_value) return 1; } @@ -3185,11 +6354,13 @@ Item_type_holder::Item_type_holder(THD *thd, Item *item) :Item(thd, item), enum_set_typelib(0), fld_type(get_real_type(item)) { DBUG_ASSERT(item->fixed); - - max_length= display_length(item); maybe_null= item->maybe_null; collation.set(item->collation); get_full_info(item); + /* fix variable decimals which always is NOT_FIXED_DEC */ + if (Field::result_merge_type(fld_type) == INT_RESULT) + decimals= 0; + prev_decimal_int_part= item->decimal_int_part(); } @@ -3248,7 +6419,7 @@ enum_field_types Item_type_holder::get_real_type(Item *item) break; } case FUNC_ITEM: - if (((Item_func *) item)->functype() == Item_func::VAR_VALUE_FUNC) + if (((Item_func *) item)->functype() == Item_func::GUSERVAR_FUNC) { /* There are work around of problem with changing variable type on the @@ -3256,14 +6427,15 @@ enum_field_types Item_type_holder::get_real_type(Item *item) acceptable information for client in send_field, so we make field type from expression type. 
*/ - switch (item->result_type()) - { + switch (item->result_type()) { case STRING_RESULT: return MYSQL_TYPE_VAR_STRING; case INT_RESULT: return MYSQL_TYPE_LONGLONG; case REAL_RESULT: return MYSQL_TYPE_DOUBLE; + case DECIMAL_RESULT: + return MYSQL_TYPE_NEWDECIMAL; case ROW_RESULT: default: DBUG_ASSERT(0); @@ -3295,14 +6467,37 @@ bool Item_type_holder::join_types(THD *thd, Item *item) { uint max_length_orig= max_length; uint decimals_orig= decimals; - max_length= max(max_length, display_length(item)); - decimals= max(decimals, item->decimals); + DBUG_ENTER("Item_type_holder::join_types"); + DBUG_PRINT("info:", ("was type %d len %d, dec %d name %s", + fld_type, max_length, decimals, + (name ? name : "<NULL>"))); + DBUG_PRINT("info:", ("in type %d len %d, dec %d", + get_real_type(item), + item->max_length, item->decimals)); fld_type= Field::field_type_merge(fld_type, get_real_type(item)); + { + int item_decimals= item->decimals; + /* fix variable decimals which always is NOT_FIXED_DEC */ + if (Field::result_merge_type(fld_type) == INT_RESULT) + item_decimals= 0; + decimals= max(decimals, item_decimals); + } + if (Field::result_merge_type(fld_type) == DECIMAL_RESULT) + { + decimals= min(max(decimals, item->decimals), DECIMAL_MAX_SCALE); + int precision= min(max(prev_decimal_int_part, item->decimal_int_part()) + + decimals, DECIMAL_MAX_PRECISION); + unsigned_flag&= item->unsigned_flag; + max_length= my_decimal_precision_to_length(precision, decimals, + unsigned_flag); + } + switch (Field::result_merge_type(fld_type)) { case STRING_RESULT: { const char *old_cs, *old_derivation; + uint32 old_max_chars= max_length / collation.collation->mbmaxlen; old_cs= collation.collation->name; old_derivation= collation.derivation_name(); if (collation.aggregate(item->collation, MY_COLL_ALLOW_CONV)) @@ -3312,8 +6507,16 @@ bool Item_type_holder::join_types(THD *thd, Item *item) item->collation.collation->name, item->collation.derivation_name(), "UNION"); - return TRUE; + 
DBUG_RETURN(TRUE); } + /* + To figure out max_length, we have to take into account possible + expansion of the size of the values because of character set + conversions. + */ + max_length= max(old_max_chars * collation.collation->mbmaxlen, + display_length(item) / item->collation.collation->mbmaxlen * + collation.collation->mbmaxlen); break; } case REAL_RESULT: @@ -3322,21 +6525,33 @@ bool Item_type_holder::join_types(THD *thd, Item *item) { int delta1= max_length_orig - decimals_orig; int delta2= item->max_length - item->decimals; - if (fld_type == MYSQL_TYPE_DECIMAL) - max_length= max(delta1, delta2) + decimals; - else - max_length= min(max(delta1, delta2) + decimals, - (fld_type == MYSQL_TYPE_FLOAT) ? FLT_DIG+6 : DBL_DIG+7); + max_length= max(delta1, delta2) + decimals; + if (fld_type == MYSQL_TYPE_FLOAT && max_length > FLT_DIG + 2) + { + max_length= FLT_DIG + 6; + decimals= NOT_FIXED_DEC; + } + if (fld_type == MYSQL_TYPE_DOUBLE && max_length > DBL_DIG + 2) + { + max_length= DBL_DIG + 7; + decimals= NOT_FIXED_DEC; + } } else max_length= (fld_type == MYSQL_TYPE_FLOAT) ? 
FLT_DIG+6 : DBL_DIG+7; break; } - default:; + default: + max_length= max(max_length, display_length(item)); }; maybe_null|= item->maybe_null; get_full_info(item); - return FALSE; + + /* Remember decimal integer part to be used in DECIMAL_RESULT handleng */ + prev_decimal_int_part= decimal_int_part(); + DBUG_PRINT("info", ("become type: %d len: %u dec: %u", + (int) fld_type, max_length, (uint) decimals)); + DBUG_RETURN(FALSE); } /* @@ -3364,6 +6579,9 @@ uint32 Item_type_holder::display_length(Item *item) case MYSQL_TYPE_DATETIME: case MYSQL_TYPE_YEAR: case MYSQL_TYPE_NEWDATE: + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_NEWDECIMAL: case MYSQL_TYPE_ENUM: case MYSQL_TYPE_SET: case MYSQL_TYPE_TINY_BLOB: @@ -3379,13 +6597,13 @@ uint32 Item_type_holder::display_length(Item *item) case MYSQL_TYPE_SHORT: return 6; case MYSQL_TYPE_LONG: - return 11; + return MY_INT32_NUM_DECIMAL_DIGITS; case MYSQL_TYPE_FLOAT: return 25; case MYSQL_TYPE_DOUBLE: return 53; case MYSQL_TYPE_NULL: - return 4; + return 0; case MYSQL_TYPE_LONGLONG: return 20; case MYSQL_TYPE_INT24: @@ -3429,10 +6647,6 @@ Field *Item_type_holder::make_field_by_type(TABLE *table) Field::NONE, name, table, get_set_pack_length(enum_set_typelib->count), enum_set_typelib, collation.collation); - case MYSQL_TYPE_VAR_STRING: - table->db_create_options|= HA_OPTION_PACK_RECORD; - fld_type= MYSQL_TYPE_STRING; - break; default: break; } @@ -3476,7 +6690,7 @@ void Item_type_holder::get_full_info(Item *item) } -double Item_type_holder::val() +double Item_type_holder::val_real() { DBUG_ASSERT(0); // should never be called return 0.0; @@ -3489,6 +6703,11 @@ longlong Item_type_holder::val_int() return 0; } +my_decimal *Item_type_holder::val_decimal(my_decimal *) +{ + DBUG_ASSERT(0); // should never be called + return 0; +} String *Item_type_holder::val_str(String*) { @@ -3504,13 +6723,42 @@ void Item_result_field::cleanup() DBUG_VOID_RETURN; } +/* + Dummy error processor used by default by 
Name_resolution_context + + SYNOPSIS + dummy_error_processor() + + NOTE + do nothing +*/ + +void dummy_error_processor(THD *thd, void *data) +{} + +/* + Wrapper of hide_view_error call for Name_resolution_context error processor + + SYNOPSIS + view_error_processor() + + NOTE + hide view underlying tables details in error messages +*/ + +void view_error_processor(THD *thd, void *data) +{ + ((TABLE_LIST *)data)->hide_view_error(thd); +} + /***************************************************************************** ** Instantiate templates *****************************************************************************/ -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class List<Item>; template class List_iterator<Item>; template class List_iterator_fast<Item>; +template class List_iterator_fast<Item_field>; template class List<List_item>; #endif diff --git a/sql/item.h b/sql/item.h index f2136c4997a..7b0d18e19c5 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000-2003 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -22,30 +21,18 @@ class Protocol; struct st_table_list; void item_init(void); /* Init item functions */ - +class Item_field; /* "Declared Type Collation" - A combination of collation and its deriviation. -*/ - -enum Derivation -{ - DERIVATION_IGNORABLE= 5, - DERIVATION_COERCIBLE= 4, - DERIVATION_SYSCONST= 3, - DERIVATION_IMPLICIT= 2, - DERIVATION_NONE= 1, - DERIVATION_EXPLICIT= 0 -}; + A combination of collation and its derivation. 
-/* Flags for collation aggregation modes: MY_COLL_ALLOW_SUPERSET_CONV - allow conversion to a superset MY_COLL_ALLOW_COERCIBLE_CONV - allow conversion of a coercible value (i.e. constant). MY_COLL_ALLOW_CONV - allow any kind of conversion - (combintion of the above two) + (combination of the above two) MY_COLL_DISALLOW_NONE - don't allow return DERIVATION_NONE (e.g. when aggregating for comparison) MY_COLL_CMP_CONV - combination of MY_COLL_ALLOW_CONV @@ -105,77 +92,411 @@ public: } }; -typedef bool (Item::*Item_processor)(byte *arg); + +/*************************************************************************/ +/* + A framework to easily handle different return types for hybrid items + (hybrid item is an item whose operand can be of any type, e.g. integer, + real, decimal). +*/ + +struct Hybrid_type_traits; + +struct Hybrid_type +{ + longlong integer; + + double real; + /* + Use two decimal buffers interchangeably to speed up += operation + which has no native support in decimal library. + Hybrid_type+= arg is implemented as dec_buf[1]= dec_buf[0] + arg. + The third decimal is used as a handy temporary storage. + */ + my_decimal dec_buf[3]; + int used_dec_buf_no; + + /* + Traits moved to a separate class to + a) be able to easily change object traits in runtime + b) they work as a differentiator for the union above + */ + const Hybrid_type_traits *traits; + + Hybrid_type() {} + /* XXX: add traits->copy() when needed */ + Hybrid_type(const Hybrid_type &rhs) :traits(rhs.traits) {} +}; + + +/* Hybryd_type_traits interface + default implementation for REAL_RESULT */ + +struct Hybrid_type_traits +{ + virtual Item_result type() const { return REAL_RESULT; } + + virtual void + fix_length_and_dec(Item *item, Item *arg) const; + + /* Hybrid_type operations. 
*/ + virtual void set_zero(Hybrid_type *val) const { val->real= 0.0; } + virtual void add(Hybrid_type *val, Field *f) const + { val->real+= f->val_real(); } + virtual void div(Hybrid_type *val, ulonglong u) const + { val->real/= ulonglong2double(u); } + + virtual longlong val_int(Hybrid_type *val, bool unsigned_flag) const + { return (longlong) rint(val->real); } + virtual double val_real(Hybrid_type *val) const { return val->real; } + virtual my_decimal *val_decimal(Hybrid_type *val, my_decimal *buf) const; + virtual String *val_str(Hybrid_type *val, String *buf, uint8 decimals) const; + static const Hybrid_type_traits *instance(); + Hybrid_type_traits() {} + virtual ~Hybrid_type_traits() {} +}; + + +struct Hybrid_type_traits_decimal: public Hybrid_type_traits +{ + virtual Item_result type() const { return DECIMAL_RESULT; } + + virtual void + fix_length_and_dec(Item *arg, Item *item) const; + + /* Hybrid_type operations. */ + virtual void set_zero(Hybrid_type *val) const; + virtual void add(Hybrid_type *val, Field *f) const; + virtual void div(Hybrid_type *val, ulonglong u) const; + + virtual longlong val_int(Hybrid_type *val, bool unsigned_flag) const; + virtual double val_real(Hybrid_type *val) const; + virtual my_decimal *val_decimal(Hybrid_type *val, my_decimal *buf) const + { return &val->dec_buf[val->used_dec_buf_no]; } + virtual String *val_str(Hybrid_type *val, String *buf, uint8 decimals) const; + static const Hybrid_type_traits_decimal *instance(); + Hybrid_type_traits_decimal() {}; +}; + + +struct Hybrid_type_traits_integer: public Hybrid_type_traits +{ + virtual Item_result type() const { return INT_RESULT; } + + virtual void + fix_length_and_dec(Item *arg, Item *item) const; + + /* Hybrid_type operations. 
*/ + virtual void set_zero(Hybrid_type *val) const + { val->integer= 0; } + virtual void add(Hybrid_type *val, Field *f) const + { val->integer+= f->val_int(); } + virtual void div(Hybrid_type *val, ulonglong u) const + { val->integer/= (longlong) u; } + + virtual longlong val_int(Hybrid_type *val, bool unsigned_flag) const + { return val->integer; } + virtual double val_real(Hybrid_type *val) const + { return (double) val->integer; } + virtual my_decimal *val_decimal(Hybrid_type *val, my_decimal *buf) const + { + int2my_decimal(E_DEC_FATAL_ERROR, val->integer, 0, &val->dec_buf[2]); + return &val->dec_buf[2]; + } + virtual String *val_str(Hybrid_type *val, String *buf, uint8 decimals) const + { buf->set(val->integer, &my_charset_bin); return buf;} + static const Hybrid_type_traits_integer *instance(); + Hybrid_type_traits_integer() {}; +}; + + +void dummy_error_processor(THD *thd, void *data); + +void view_error_processor(THD *thd, void *data); + +/* + Instances of Name_resolution_context store the information necesary for + name resolution of Items and other context analysis of a query made in + fix_fields(). + + This structure is a part of SELECT_LEX, a pointer to this structure is + assigned when an item is created (which happens mostly during parsing + (sql_yacc.yy)), but the structure itself will be initialized after parsing + is complete + + TODO: move subquery of INSERT ... SELECT and CREATE ... SELECT to + separate SELECT_LEX which allow to remove tricks of changing this + structure before and after INSERT/CREATE and its SELECT to make correct + field name resolution. +*/ +struct Name_resolution_context: Sql_alloc +{ + /* + The name resolution context to search in when an Item cannot be + resolved in this context (the context of an outer select) + */ + Name_resolution_context *outer_context; + + /* + List of tables used to resolve the items of this context. Usually these + are tables from the FROM clause of SELECT statement. The exceptions are + INSERT ... 
SELECT and CREATE ... SELECT statements, where SELECT + subquery is not moved to a separate SELECT_LEX. For these types of + statements we have to change this member dynamically to ensure correct + name resolution of different parts of the statement. + */ + TABLE_LIST *table_list; + /* + In most cases the two table references below replace 'table_list' above + for the purpose of name resolution. The first and last name resolution + table references allow us to search only in a sub-tree of the nested + join tree in a FROM clause. This is needed for NATURAL JOIN, JOIN ... USING + and JOIN ... ON. + */ + TABLE_LIST *first_name_resolution_table; + /* + Last table to search in the list of leaf table references that begins + with first_name_resolution_table. + */ + TABLE_LIST *last_name_resolution_table; + + /* + SELECT_LEX item belong to, in case of merged VIEW it can differ from + SELECT_LEX where item was created, so we can't use table_list/field_list + from there + */ + st_select_lex *select_lex; + + /* + Processor of errors caused during Item name resolving, now used only to + hide underlying tables in errors about views (i.e. it substitute some + errors for views) + */ + void (*error_processor)(THD *, void *); + void *error_processor_data; + + /* + When TRUE items are resolved in this context both against the + SELECT list and this->table_list. If FALSE, items are resolved + only against this->table_list. + */ + bool resolve_in_select_list; + + /* + Security context of this name resolution context. It's used for views + and is non-zero only if the view is defined with SQL SECURITY DEFINER. 
+ */ + Security_context *security_ctx; + + Name_resolution_context() + :outer_context(0), table_list(0), select_lex(0), + error_processor_data(0), + security_ctx(0) + {} + + void init() + { + resolve_in_select_list= FALSE; + error_processor= &dummy_error_processor; + first_name_resolution_table= NULL; + last_name_resolution_table= NULL; + } + + void resolve_in_table_list_only(TABLE_LIST *tables) + { + table_list= first_name_resolution_table= tables; + resolve_in_select_list= FALSE; + } + + void process_error(THD *thd) + { + (*error_processor)(thd, error_processor_data); + } +}; + + +/* + Store and restore the current state of a name resolution context. +*/ + +class Name_resolution_context_state +{ +private: + TABLE_LIST *save_table_list; + TABLE_LIST *save_first_name_resolution_table; + TABLE_LIST *save_next_name_resolution_table; + bool save_resolve_in_select_list; + TABLE_LIST *save_next_local; + +public: + Name_resolution_context_state() {} /* Remove gcc warning */ + +public: + /* Save the state of a name resolution context. */ + void save_state(Name_resolution_context *context, TABLE_LIST *table_list) + { + save_table_list= context->table_list; + save_first_name_resolution_table= context->first_name_resolution_table; + save_resolve_in_select_list= context->resolve_in_select_list; + save_next_local= table_list->next_local; + save_next_name_resolution_table= table_list->next_name_resolution_table; + } + + /* Restore a name resolution context from saved state. 
*/ + void restore_state(Name_resolution_context *context, TABLE_LIST *table_list) + { + table_list->next_local= save_next_local; + table_list->next_name_resolution_table= save_next_name_resolution_table; + context->table_list= save_table_list; + context->first_name_resolution_table= save_first_name_resolution_table; + context->resolve_in_select_list= save_resolve_in_select_list; + } + + TABLE_LIST *get_first_name_resolution_table() + { + return save_first_name_resolution_table; + } +}; + +/*************************************************************************/ + +class sp_rcontext; + + +class Settable_routine_parameter +{ +public: + /* + Set required privileges for accessing the parameter. + + SYNOPSIS + set_required_privilege() + rw if 'rw' is true then we are going to read and set the + parameter, so SELECT and UPDATE privileges might be + required, otherwise we only reading it and SELECT + privilege might be required. + */ + Settable_routine_parameter() {} + virtual ~Settable_routine_parameter() {} + virtual void set_required_privilege(bool rw) {}; + + /* + Set parameter value. + + SYNOPSIS + set_value() + thd thread handle + ctx context to which parameter belongs (if it is local + variable). + it item which represents new value + + RETURN + FALSE if parameter value has been set, + TRUE if error has occured. 
+ */ + virtual bool set_value(THD *thd, sp_rcontext *ctx, Item **it)= 0; +}; + + +typedef bool (Item::*Item_processor) (byte *arg); +/* + Analyzer function + SYNOPSIS + argp in/out IN: Analysis parameter + OUT: Parameter to be passed to the transformer + + RETURN + TRUE Invoke the transformer + FALSE Don't do it + +*/ +typedef bool (Item::*Item_analyzer) (byte **argp); +typedef Item* (Item::*Item_transformer) (byte *arg); +typedef void (*Cond_traverser) (const Item *item, void *arg); + class Item { Item(const Item &); /* Prevent use of these */ void operator=(Item &); public: - static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } + static void *operator new(size_t size) + { return (void*) sql_alloc((uint) size); } static void *operator new(size_t size, MEM_ROOT *mem_root) { return (void*) alloc_root(mem_root, (uint) size); } - static void operator delete(void *ptr,size_t size) {} + static void operator delete(void *ptr,size_t size) { TRASH(ptr, size); } static void operator delete(void *ptr, MEM_ROOT *mem_root) {} - enum Type {FIELD_ITEM, FUNC_ITEM, SUM_FUNC_ITEM, STRING_ITEM, + enum Type {FIELD_ITEM= 0, FUNC_ITEM, SUM_FUNC_ITEM, STRING_ITEM, INT_ITEM, REAL_ITEM, NULL_ITEM, VARBIN_ITEM, COPY_STR_ITEM, FIELD_AVG_ITEM, DEFAULT_VALUE_ITEM, PROC_ITEM,COND_ITEM, REF_ITEM, FIELD_STD_ITEM, FIELD_VARIANCE_ITEM, INSERT_VALUE_ITEM, SUBSELECT_ITEM, ROW_ITEM, CACHE_ITEM, TYPE_HOLDER, - PARAM_ITEM}; + PARAM_ITEM, TRIGGER_FIELD_ITEM, DECIMAL_ITEM, + VIEW_FIXER_ITEM}; enum cond_result { COND_UNDEF,COND_OK,COND_TRUE,COND_FALSE }; + + enum traverse_order { POSTFIX, PREFIX }; + /* Reuse size, only used by SP local variable assignment, otherwize 0 */ + uint rsize; + /* str_values's main purpose is to be used to cache the value in save_in_field */ String str_value; my_string name; /* Name from select */ + /* Original item name (if it was renamed)*/ + my_string orig_name; Item *next; uint32 max_length; - uint8 marker,decimals; + uint name_length; /* Length 
of name */ + int8 marker; + uint8 decimals; my_bool maybe_null; /* If item may be null */ my_bool null_value; /* if item is null */ my_bool unsigned_flag; my_bool with_sum_func; my_bool fixed; /* If item fixed with fix_fields */ + my_bool is_autogenerated_name; /* indicate was name of this Item + autogenerated or set by user */ DTCollation collation; my_bool with_subselect; /* If this item is a subselect or some of its arguments is or contains a subselect */ - + Item_result cmp_context; /* Comparison context */ // alloc & destruct is done as start of select using sql_alloc Item(); /* - Constructor used by Item_field, Item_ref & agregate (sum) functions. + Constructor used by Item_field, Item_ref & aggregate (sum) functions. Used for duplicating lists in processing queries with temporary tables Also it used for Item_cond_and/Item_cond_or for creating - top AND/OR ctructure of WHERE clause to protect it of + top AND/OR structure of WHERE clause to protect it of optimisation changes in prepared statements */ Item(THD *thd, Item *item); - virtual ~Item() { name=0; } /*lint -e1509 */ - void set_name(const char *str,uint length, CHARSET_INFO *cs); - void init_make_field(Send_field *tmp_field,enum enum_field_types type); - virtual void cleanup() + virtual ~Item() { - DBUG_ENTER("Item::cleanup"); - DBUG_PRINT("info", ("Type: %d", (int)type())); - fixed=0; - marker= 0; - DBUG_VOID_RETURN; - } +#ifdef EXTRA_DEBUG + name=0; +#endif + } /*lint -e1509 */ + void set_name(const char *str, uint length, CHARSET_INFO *cs); + void rename(char *new_name); + void init_make_field(Send_field *tmp_field,enum enum_field_types type); + virtual void cleanup(); virtual void make_field(Send_field *field); - virtual bool fix_fields(THD *, struct st_table_list *, Item **); + Field *make_string_field(TABLE *table); + virtual bool fix_fields(THD *, Item **); /* should be used in case where we are sure that we do not need complete fix_fields() procedure. 
*/ inline void quick_fix_field() { fixed= 1; } /* Function returns 1 on overflow and -1 on fatal errors */ + int save_in_field_no_warnings(Field *field, bool no_conversions); virtual int save_in_field(Field *field, bool no_conversions); virtual void save_org_in_field(Field *field) { (void) save_in_field(field, 1); } @@ -188,40 +509,128 @@ public: virtual enum_field_types field_type() const; virtual enum Type type() const =0; /* valXXX methods must return NULL or 0 or 0.0 if null_value is set. */ - virtual double val()=0; + /* + Return double precision floating point representation of item. + + SYNOPSIS + val_real() + + RETURN + In case of NULL value return 0.0 and set null_value flag to TRUE. + If value is not null null_value flag will be reset to FALSE. + */ + virtual double val_real()=0; + /* + Return integer representation of item. + + SYNOPSIS + val_int() + + RETURN + In case of NULL value return 0 and set null_value flag to TRUE. + If value is not null null_value flag will be reset to FALSE. + */ virtual longlong val_int()=0; /* + This is just a shortcut to avoid the cast. You should still use + unsigned_flag to check the sign of the item. + */ + inline ulonglong val_uint() { return (ulonglong) val_int(); } + /* Return string representation of this item object. - The argument to val_str() is an allocated buffer this or any - nested Item object can use to store return value of this method. - This buffer should only be used if the item itself doesn't have an - own String buffer. In case when the item maintains it's own string - buffer, it's preferrable to return it instead to minimize number of - mallocs/memcpys. - The caller of this method can modify returned string, but only in - case when it was allocated on heap, (is_alloced() is true). This - allows the caller to efficiently use a buffer allocated by a child - without having to allocate a buffer of it's own. 
The buffer, given - to val_str() as agrument, belongs to the caller and is later used - by the caller at it's own choosing. - A few implications from the above: - - unless you return a string object which only points to your buffer - but doesn't manages it you should be ready that it will be - modified. - - even for not allocated strings (is_alloced() == false) the caller - can change charset (see Item_func_{typecast/binary}. XXX: is this - a bug? - - still you should try to minimize data copying and return internal - object whenever possible. + SYNOPSIS + val_str() + str an allocated buffer this or any nested Item object can use to + store return value of this method. + + NOTE + Buffer passed via argument should only be used if the item itself + doesn't have an own String buffer. In case when the item maintains + it's own string buffer, it's preferable to return it instead to + minimize number of mallocs/memcpys. + The caller of this method can modify returned string, but only in case + when it was allocated on heap, (is_alloced() is true). This allows + the caller to efficiently use a buffer allocated by a child without + having to allocate a buffer of it's own. The buffer, given to + val_str() as argument, belongs to the caller and is later used by the + caller at it's own choosing. + A few implications from the above: + - unless you return a string object which only points to your buffer + but doesn't manages it you should be ready that it will be + modified. + - even for not allocated strings (is_alloced() == false) the caller + can change charset (see Item_func_{typecast/binary}. XXX: is this + a bug? + - still you should try to minimize data copying and return internal + object whenever possible. + + RETURN + In case of NULL value return 0 (NULL pointer) and set null_value flag + to TRUE. + If value is not null null_value flag will be reset to FALSE. + */ + virtual String *val_str(String *str)=0; + /* + Return decimal representation of item with fixed point. 
+ + SYNOPSIS + val_decimal() + decimal_buffer buffer which can be used by Item for returning value + (but can be not) + + NOTE + Returned value should not be changed if it is not the same which was + passed via argument. + + RETURN + Return pointer on my_decimal (it can be other then passed via argument) + if value is not NULL (null_value flag will be reset to FALSE). + In case of NULL value it return 0 pointer and set null_value flag + to TRUE. */ - virtual String *val_str(String*)=0; + virtual my_decimal *val_decimal(my_decimal *decimal_buffer)= 0; + /* + Return boolean value of item. + + RETURN + FALSE value is false or NULL + TRUE value is true (not equal to 0) + */ + virtual bool val_bool(); + /* Helper functions, see item_sum.cc */ + String *val_string_from_real(String *str); + String *val_string_from_int(String *str); + String *val_string_from_decimal(String *str); + my_decimal *val_decimal_from_real(my_decimal *decimal_value); + my_decimal *val_decimal_from_int(my_decimal *decimal_value); + my_decimal *val_decimal_from_string(my_decimal *decimal_value); + my_decimal *val_decimal_from_date(my_decimal *decimal_value); + my_decimal *val_decimal_from_time(my_decimal *decimal_value); + longlong val_int_from_decimal(); + double val_real_from_decimal(); + + int save_time_in_field(Field *field); + int save_date_in_field(Field *field); + virtual Field *get_tmp_table_field() { return 0; } + /* This is also used to create fields in CREATE ... SELECT: */ virtual Field *tmp_table_field(TABLE *t_arg) { return 0; } virtual const char *full_name() const { return name ? name : "???"; } - virtual double val_result() { return val(); } + + /* + *result* family of methods is analog of *val* family (see above) but + return value of result_field of item if it is present. If Item have not + result field, it return val(). This methods set null_value flag in same + way as *val* methods do it. 
+ */ + virtual double val_result() { return val_real(); } virtual longlong val_int_result() { return val_int(); } virtual String *str_result(String* tmp) { return val_str(tmp); } + virtual my_decimal *val_decimal_result(my_decimal *val) + { return val_decimal(val); } + virtual bool val_bool_result() { return val_bool(); } + /* bit map of tables used by item */ virtual table_map used_tables() const { return (table_map) 0L; } /* @@ -242,10 +651,13 @@ public: */ virtual bool basic_const_item() const { return 0; } /* cloning of constant items (0 if it is not const) */ - virtual Item *new_item() { return 0; } + virtual Item *clone_item() { return 0; } virtual cond_result eq_cmp_result() const { return COND_OK; } inline uint float_length(uint decimals_par) const { return decimals != NOT_FIXED_DEC ? (DBL_DIG+2+decimals_par) : DBL_DIG+8;} + virtual uint decimal_precision() const; + inline int decimal_int_part() const + { return my_decimal_int_part(decimal_precision(), decimals); } /* Returns true if this is constant (during query execution, i.e. its value will not change until next fix_fields) and its value is known. @@ -257,6 +669,18 @@ public: */ virtual bool const_during_execution() const { return (used_tables() & ~PARAM_TABLE_BIT) == 0; } + /* + This is an essential method for correct functioning of VIEWS. + To save a view in an .frm file we need its unequivocal + definition in SQL that takes into account sql_mode and + environmental settings. Currently such definition is restored + by traversing through the parsed tree of a view and + print()'ing SQL syntax of every node to a String buffer. This + method is used to print the SQL definition of an item. The + second use of this method is for EXPLAIN EXTENDED, to print + the SQL of a query after all optimizations of the parsed tree + have been done. 
+ */ virtual void print(String *str_arg) { str_arg->append(full_name()); } void print_item_w_name(String *); virtual void update_used_tables() {} @@ -264,7 +688,7 @@ public: List<Item> &fields) {} /* Called for items that really have to be split */ void split_sum_func2(THD *thd, Item **ref_pointer_array, List<Item> &fields, - Item **ref); + Item **ref, bool skip_registered); virtual bool get_date(TIME *ltime,uint fuzzydate); virtual bool get_time(TIME *ltime); virtual bool get_date_result(TIME *ltime,uint fuzzydate) @@ -280,6 +704,11 @@ public: virtual bool is_null() { return 0; } /* + Make sure the null_value member has a correct value. + */ + virtual void update_null_value () { (void) val_int(); } + + /* Inform the item that there will be no distinction between its result being FALSE or NULL. @@ -292,14 +721,14 @@ public: virtual void top_level_item() {} /* set field of temporary table for Item which can be switched on temporary - table during query processing (groupping and so on) + table during query processing (grouping and so on) */ virtual void set_result_field(Field *field) {} virtual bool is_result_field() { return 0; } virtual bool is_bool_func() { return 0; } virtual void save_in_result_field(bool no_conversions) {} /* - set value of aggegate function in case of no rows for groupping were found + set value of aggregate function in case of no rows for grouping were found */ virtual void no_rows_in_result() {} virtual Item *copy_or_same(THD *thd) { return this; } @@ -315,12 +744,73 @@ public: return (this->*processor)(arg); } + virtual Item* transform(Item_transformer transformer, byte *arg); + + /* + This function performs a generic "compilation" of the Item tree. + The process of compilation is assumed to go as follows: + + compile() + { + if (this->*some_analyzer(...)) + { + compile children if any; + this->*some_transformer(...); + } + } + + i.e. analysis is performed top-down while transformation is done + bottom-up. 
+ */ + virtual Item* compile(Item_analyzer analyzer, byte **arg_p, + Item_transformer transformer, byte *arg_t) + { + if ((this->*analyzer) (arg_p)) + return ((this->*transformer) (arg_t)); + return 0; + } + + virtual void traverse_cond(Cond_traverser traverser, + void *arg, traverse_order order) + { + (*traverser)(this, arg); + } + virtual bool remove_dependence_processor(byte * arg) { return 0; } virtual bool remove_fixed(byte * arg) { fixed= 0; return 0; } - + virtual bool cleanup_processor(byte *arg); + virtual bool collect_item_field_processor(byte * arg) { return 0; } + virtual bool find_item_in_field_list_processor(byte *arg) { return 0; } + virtual bool change_context_processor(byte *context) { return 0; } + virtual bool reset_query_id_processor(byte *query_id_arg) { return 0; } + virtual bool is_expensive_processor(byte *arg) { return 0; } + virtual bool subst_argument_checker(byte **arg) + { + if (*arg) + *arg= NULL; + return TRUE; + } + + virtual Item *equal_fields_propagator(byte * arg) { return this; } + virtual bool set_no_const_sub(byte *arg) { return FALSE; } + virtual Item *replace_equal_field(byte * arg) { return this; } + + /* + For SP local variable returns pointer to Item representing its + current value and pointer to current Item otherwise. + */ + virtual Item *this_item() { return this; } + virtual const Item *this_item() const { return this; } + + /* + For SP local variable returns address of pointer to Item representing its + current value and pointer passed via parameter otherwise. 
+ */ + virtual Item **this_item_addr(THD *thd, Item **addr_arg) { return addr_arg; } + // Row emulation virtual uint cols() { return 1; } - virtual Item* el(uint i) { return this; } + virtual Item* element_index(uint i) { return this; } virtual Item** addr(uint i) { return 0; } virtual bool check_cols(uint c); // It is not row => null inside is impossible @@ -329,14 +819,27 @@ public: virtual void bring_value() {} Field *tmp_table_field_from_field_type(TABLE *table); + virtual Item_field *filed_for_view_update() { return 0; } virtual Item *neg_transformer(THD *thd) { return NULL; } + virtual Item *update_value_transformer(byte *select_arg) { return this; } virtual Item *safe_charset_converter(CHARSET_INFO *tocs); void delete_self() { cleanup(); delete this; } + + virtual bool is_splocal() { return 0; } /* Needed for error checking */ + + /* + Return Settable_routine_parameter interface of the Item. Return 0 + if this Item is not Settable_routine_parameter. + */ + virtual Settable_routine_parameter *get_settable_routine_parameter() + { + return 0; + } /* result_as_longlong() must return TRUE for Items representing DATE/TIME functions and DATE/TIME table fields. @@ -348,18 +851,281 @@ public: }; +class sp_head; + + +/***************************************************************************** + The class is a base class for representation of stored routine variables in + the Item-hierarchy. There are the following kinds of SP-vars: + - local variables (Item_splocal); + - CASE expression (Item_case_expr); +*****************************************************************************/ + +class Item_sp_variable :public Item +{ +protected: + /* + THD, which is stored in fix_fields() and is used in this_item() to avoid + current_thd use. + */ + THD *m_thd; + +public: + LEX_STRING m_name; + +public: +#ifndef DBUG_OFF + /* + Routine to which this Item_splocal belongs. Used for checking if correct + runtime context is used for variable handling. 
+ */ + sp_head *m_sp; +#endif + +public: + Item_sp_variable(char *sp_var_name_str, uint sp_var_name_length); + +public: + bool fix_fields(THD *thd, Item **); + + double val_real(); + longlong val_int(); + String *val_str(String *sp); + my_decimal *val_decimal(my_decimal *decimal_value); + bool is_null(); + +public: + inline void make_field(Send_field *field); + + inline bool const_item() const; + + inline int save_in_field(Field *field, bool no_conversions); + inline bool send(Protocol *protocol, String *str); +}; + +/***************************************************************************** + Item_sp_variable inline implementation. +*****************************************************************************/ + +inline void Item_sp_variable::make_field(Send_field *field) +{ + Item *it= this_item(); + + if (name) + it->set_name(name, (uint) strlen(name), system_charset_info); + else + it->set_name(m_name.str, m_name.length, system_charset_info); + it->make_field(field); +} + +inline bool Item_sp_variable::const_item() const +{ + return TRUE; +} + +inline int Item_sp_variable::save_in_field(Field *field, bool no_conversions) +{ + return this_item()->save_in_field(field, no_conversions); +} + +inline bool Item_sp_variable::send(Protocol *protocol, String *str) +{ + return this_item()->send(protocol, str); +} + + +/***************************************************************************** + A reference to local SP variable (incl. reference to SP parameter), used in + runtime. +*****************************************************************************/ + +class Item_splocal :public Item_sp_variable, + private Settable_routine_parameter +{ + uint m_var_idx; + + Type m_type; + Item_result m_result_type; + +public: + /* + Position of this reference to SP variable in the statement (the + statement itself is in sp_instr_stmt::m_query). + This is valid only for references to SP variables in statements, + excluding DECLARE CURSOR statement. 
It is used to replace references to SP + variables with NAME_CONST calls when putting statements into the binary + log. + Value of 0 means that this object doesn't corresponding to reference to + SP variable in query text. + */ + uint pos_in_query; + + Item_splocal(const LEX_STRING &sp_var_name, uint sp_var_idx, + enum_field_types sp_var_type, uint pos_in_q= 0); + + bool is_splocal() { return 1; } /* Needed for error checking */ + + Item *this_item(); + const Item *this_item() const; + Item **this_item_addr(THD *thd, Item **); + + void print(String *str); + +public: + inline const LEX_STRING *my_name() const; + + inline uint get_var_idx() const; + + inline enum Type type() const; + inline Item_result result_type() const; + +private: + bool set_value(THD *thd, sp_rcontext *ctx, Item **it); + +public: + Settable_routine_parameter *get_settable_routine_parameter() + { + return this; + } +}; + +/***************************************************************************** + Item_splocal inline implementation. +*****************************************************************************/ + +inline const LEX_STRING *Item_splocal::my_name() const +{ + return &m_name; +} + +inline uint Item_splocal::get_var_idx() const +{ + return m_var_idx; +} + +inline enum Item::Type Item_splocal::type() const +{ + return m_type; +} + +inline Item_result Item_splocal::result_type() const +{ + return m_result_type; +} + + +/***************************************************************************** + A reference to case expression in SP, used in runtime. 
+*****************************************************************************/ + +class Item_case_expr :public Item_sp_variable +{ +public: + Item_case_expr(int case_expr_id); + +public: + Item *this_item(); + const Item *this_item() const; + Item **this_item_addr(THD *thd, Item **); + + inline enum Type type() const; + inline Item_result result_type() const; + +public: + /* + NOTE: print() is intended to be used from views and for debug. + Item_case_expr can not occur in views, so here it is only for debug + purposes. + */ + void print(String *str); + +private: + int m_case_expr_id; +}; + +/***************************************************************************** + Item_case_expr inline implementation. +*****************************************************************************/ + +inline enum Item::Type Item_case_expr::type() const +{ + return this_item()->type(); +} + +inline Item_result Item_case_expr::result_type() const +{ + return this_item()->result_type(); +} + + +/* + NAME_CONST(given_name, const_value). + This 'function' has all properties of the supplied const_value (which is + assumed to be a literal constant), and the name given_name. + + This is used to replace references to SP variables when we write PROCEDURE + statements into the binary log. + + TODO + Together with Item_splocal and Item::this_item() we can actually extract + common a base of this class and Item_splocal. Maybe it is possible to + extract a common base with class Item_ref, too. 
+*/ + +class Item_name_const : public Item +{ + Item *value_item; + Item *name_item; +public: + Item_name_const(Item *name_arg, Item *val): + value_item(val), name_item(name_arg) + { + Item::maybe_null= TRUE; + } + + bool fix_fields(THD *, Item **); + + enum Type type() const; + double val_real(); + longlong val_int(); + String *val_str(String *sp); + my_decimal *val_decimal(my_decimal *); + bool is_null(); + void print(String *str); + + Item_result result_type() const + { + return value_item->result_type(); + } + + bool const_item() const + { + return TRUE; + } + + int save_in_field(Field *field, bool no_conversions) + { + return value_item->save_in_field(field, no_conversions); + } + + bool send(Protocol *protocol, String *str) + { + return value_item->send(protocol, str); + } +}; + bool agg_item_collations(DTCollation &c, const char *name, - Item **items, uint nitems, uint flags= 0); + Item **items, uint nitems, uint flags, int item_sep); bool agg_item_collations_for_comparison(DTCollation &c, const char *name, - Item **items, uint nitems, - uint flags= 0); + Item **items, uint nitems, uint flags); bool agg_item_charsets(DTCollation &c, const char *name, - Item **items, uint nitems, uint flags= 0); + Item **items, uint nitems, uint flags, int item_sep); class Item_num: public Item { public: + Item_num() {} /* Remove gcc warning */ virtual Item_num *neg()= 0; Item *safe_charset_converter(CHARSET_INFO *tocs); }; @@ -379,10 +1145,13 @@ protected: const char *orig_db_name; const char *orig_table_name; const char *orig_field_name; + public: + Name_resolution_context *context; const char *db_name; const char *table_name; const char *field_name; + bool alias_name_used; /* true if item was resolved against alias */ /* Cached value of index for this field in table->field array, used by prep. stmts for speeding up their re-execution. 
Holds NO_CACHED_FIELD_INDEX @@ -396,26 +1165,66 @@ public: */ TABLE_LIST *cached_table; st_select_lex *depended_from; - Item_ident(const char *db_name_par,const char *table_name_par, - const char *field_name_par); + Item_ident(Name_resolution_context *context_arg, + const char *db_name_arg, const char *table_name_arg, + const char *field_name_arg); Item_ident(THD *thd, Item_ident *item); const char *full_name() const; void cleanup(); bool remove_dependence_processor(byte * arg); + void print(String *str); + virtual bool change_context_processor(byte *cntx) + { context= (Name_resolution_context *)cntx; return FALSE; } + friend bool insert_fields(THD *thd, Name_resolution_context *context, + const char *db_name, + const char *table_name, List_iterator<Item> *it, + bool any_privileges); +}; + + +class Item_ident_for_show :public Item +{ +public: + Field *field; + const char *db_name; + const char *table_name; + + Item_ident_for_show(Field *par_field, const char *db_arg, + const char *table_name_arg) + :field(par_field), db_name(db_arg), table_name(table_name_arg) + {} + + enum Type type() const { return FIELD_ITEM; } + double val_real() { return field->val_real(); } + longlong val_int() { return field->val_int(); } + String *val_str(String *str) { return field->val_str(str); } + my_decimal *val_decimal(my_decimal *dec) { return field->val_decimal(dec); } + void make_field(Send_field *tmp_field); }; +class Item_equal; +class COND_EQUAL; + class Item_field :public Item_ident { +protected: void set_field(Field *field); public: Field *field,*result_field; - - Item_field(const char *db_par,const char *table_name_par, - const char *field_name_par) - :Item_ident(db_par,table_name_par,field_name_par), - field(0), result_field(0) - { collation.set(DERIVATION_IMPLICIT); } + Item_equal *item_equal; + bool no_const_subst; + /* + if any_privileges set to TRUE then here real effective privileges will + be stored + */ + uint have_privileges; + /* field need any privileges (for VIEW 
creation) */ + bool any_privileges; + bool fixed_as_field; + Item_field(Name_resolution_context *context_arg, + const char *db_arg,const char *table_name_arg, + const char *field_name_arg); /* Constructor needed to process subselect with temporary tables (see Item) */ @@ -425,7 +1234,7 @@ public: and database names will live as long as Item_field (this is important in prepared statements). */ - Item_field(THD *thd, Field *field); + Item_field(THD *thd, Name_resolution_context *context_arg, Field *field); /* If this constructor is used, fix_fields() won't work, because db_name, table_name and column_name are unknown. It's necessary to call @@ -434,15 +1243,18 @@ public: Item_field(Field *field); enum Type type() const { return FIELD_ITEM; } bool eq(const Item *item, bool binary_cmp) const; - double val(); + double val_real(); longlong val_int(); + my_decimal *val_decimal(my_decimal *); String *val_str(String*); double val_result(); longlong val_int_result(); String *str_result(String* tmp); + my_decimal *val_decimal_result(my_decimal *); + bool val_bool_result(); bool send(Protocol *protocol, String *str_arg); void reset_field(Field *f); - bool fix_fields(THD *, struct st_table_list *, Item **); + bool fix_fields(THD *, Item **); void make_field(Send_field *tmp_field); int save_in_field(Field *field,bool no_conversions); void save_org_in_field(Field *field); @@ -465,13 +1277,33 @@ public: bool get_date_result(TIME *ltime,uint fuzzydate); bool get_time(TIME *ltime); bool is_null() { return field->is_null(); } + void update_null_value(); Item *get_tmp_table_item(THD *thd); + bool collect_item_field_processor(byte * arg); + bool find_item_in_field_list_processor(byte *arg); + bool reset_query_id_processor(byte *arg) + { + field->query_id= *((query_id_t *) arg); + if (result_field) + result_field->query_id= field->query_id; + return 0; + } void cleanup(); - inline uint32 max_disp_length() { return field->max_length(); } bool result_as_longlong() { return 
field->can_be_compared_as_longlong(); } + Item_equal *find_item_equal(COND_EQUAL *cond_equal); + bool subst_argument_checker(byte **arg); + Item *equal_fields_propagator(byte *arg); + bool set_no_const_sub(byte *arg); + Item *replace_equal_field(byte *arg); + inline uint32 max_disp_length() { return field->max_display_length(); } + Item_field *filed_for_view_update() { return this; } + Item *safe_charset_converter(CHARSET_INFO *tocs); + int fix_outer_field(THD *thd, Field **field, Item **reference); + virtual Item *update_value_transformer(byte *select_arg); + void print(String *str); friend class Item_default_value; friend class Item_insert_value; friend class st_select_lex_unit; @@ -490,20 +1322,21 @@ public: } enum Type type() const { return NULL_ITEM; } bool eq(const Item *item, bool binary_cmp) const; - double val(); + double val_real(); longlong val_int(); String *val_str(String *str); + my_decimal *val_decimal(my_decimal *); int save_in_field(Field *field, bool no_conversions); int save_safe_in_field(Field *field); bool send(Protocol *protocol, String *str); enum Item_result result_type () const { return STRING_RESULT; } enum_field_types field_type() const { return MYSQL_TYPE_NULL; } - // to prevent drop fixed flag (no need parent cleanup call) + /* to prevent drop fixed flag (no need parent cleanup call) */ void cleanup() {} bool basic_const_item() const { return 1; } - Item *new_item() { return new Item_null(name); } + Item *clone_item() { return new Item_null(name); } bool is_null() { return 1; } - void print(String *str) { str->append("NULL", 4); } + void print(String *str) { str->append(STRING_WITH_LEN("NULL")); } Item *safe_charset_converter(CHARSET_INFO *tocs); }; @@ -523,11 +1356,16 @@ public: class Item_param :public Item { + char cnvbuf[MAX_FIELD_WIDTH]; + String cnvstr; + Item *cnvitem; public: + enum enum_item_param_state { NO_VALUE, NULL_VALUE, INT_VALUE, REAL_VALUE, - STRING_VALUE, TIME_VALUE, LONG_DATA_VALUE + STRING_VALUE, TIME_VALUE, 
LONG_DATA_VALUE, + DECIMAL_VALUE } state; /* @@ -541,6 +1379,7 @@ public: Can not be declared inside the union as it's not a POD type. */ String str_value_ptr; + my_decimal decimal_value; union { longlong integer; @@ -591,8 +1430,9 @@ public: enum Type type() const { return item_type; } enum_field_types field_type() const { return param_type; } - double val(); + double val_real(); longlong val_int(); + my_decimal *val_decimal(my_decimal*); String *val_str(String*); bool get_time(TIME *tm); bool get_date(TIME *tm, uint fuzzydate); @@ -601,6 +1441,7 @@ public: void set_null(); void set_int(longlong i, uint32 max_length_arg); void set_double(double i); + void set_decimal(const char *str, ulong length); bool set_str(const char *str, ulong length); bool set_longdata(const char *str, ulong length); void set_time(TIME *tm, timestamp_type type, uint32 max_length_arg); @@ -625,7 +1466,7 @@ public: */ virtual table_map used_tables() const { return state != NO_VALUE ? (table_map)0 : PARAM_TABLE_BIT; } - void print(String *str) { str->append('?'); } + void print(String *str); bool is_null() { DBUG_ASSERT(state != NO_VALUE); return state == NULL_VALUE; } bool basic_const_item() const; @@ -639,8 +1480,8 @@ public: constant, assert otherwise. This method is called only if basic_const_item returned TRUE. */ - Item *new_item(); Item *safe_charset_converter(CHARSET_INFO *tocs); + Item *clone_item(); /* Implement by-value equality evaluation if parameter value is set and is a basic constant (integer, real or string). 
@@ -654,12 +1495,15 @@ class Item_int :public Item_num { public: longlong value; - Item_int(int32 i,uint length=11) :value((longlong) i) + Item_int(int32 i,uint length= MY_INT32_NUM_DECIMAL_DIGITS) + :value((longlong) i) { max_length=length; fixed= 1; } -#ifdef HAVE_LONG_LONG - Item_int(longlong i,uint length=21) :value(i) - { max_length=length; fixed= 1;} -#endif + Item_int(longlong i,uint length= MY_INT64_NUM_DECIMAL_DIGITS) + :value(i) + { max_length=length; fixed= 1; } + Item_int(ulonglong i, uint length= MY_INT64_NUM_DECIMAL_DIGITS) + :value((longlong)i) + { max_length=length; fixed= 1; unsigned_flag= 1; } Item_int(const char *str_arg,longlong i,uint length) :value(i) { max_length=length; name=(char*) str_arg; fixed= 1; } Item_int(const char *str_arg, uint length=64); @@ -667,15 +1511,18 @@ public: enum Item_result result_type () const { return INT_RESULT; } enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } - double val() { DBUG_ASSERT(fixed == 1); return (double) value; } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } + my_decimal *val_decimal(my_decimal *); String *val_str(String*); int save_in_field(Field *field, bool no_conversions); bool basic_const_item() const { return 1; } - Item *new_item() { return new Item_int(name,value,max_length); } + Item *clone_item() { return new Item_int(name,value,max_length); } // to prevent drop fixed flag (no need parent cleanup call) void cleanup() {} void print(String *str); Item_num *neg() { value= -value; return this; } + uint decimal_precision() const + { return (uint)(max_length - test(value < 0)); } bool eq(const Item *, bool binary_cmp) const; }; @@ -684,44 +1531,82 @@ class Item_uint :public Item_int { public: Item_uint(const char *str_arg, uint length); + Item_uint(ulonglong i) :Item_int((ulonglong) i, 10) {} Item_uint(const char *str_arg, longlong i, uint length); - Item_uint(uint32 i) :Item_int((longlong) 
i, 10) - { unsigned_flag= 1; } - double val() + double val_real() { DBUG_ASSERT(fixed == 1); return ulonglong2double((ulonglong)value); } String *val_str(String*); - Item *new_item() { return new Item_uint(name,max_length); } + Item *clone_item() { return new Item_uint(name,max_length); } int save_in_field(Field *field, bool no_conversions); void print(String *str); Item_num *neg (); + uint decimal_precision() const { return max_length; } }; -class Item_real :public Item_num +/* decimal (fixed point) constant */ +class Item_decimal :public Item_num { +protected: + my_decimal decimal_value; public: - double value; - // Item_real() :value(0) {} - Item_real(const char *str_arg, uint length) :value(my_atof(str_arg)) + Item_decimal(const char *str_arg, uint length, CHARSET_INFO *charset); + Item_decimal(const char *str, const my_decimal *val_arg, + uint decimal_par, uint length); + Item_decimal(my_decimal *value_par); + Item_decimal(longlong val, bool unsig); + Item_decimal(double val, int precision, int scale); + Item_decimal(const char *bin, int precision, int scale); + + enum Type type() const { return DECIMAL_ITEM; } + enum Item_result result_type () const { return DECIMAL_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_NEWDECIMAL; } + longlong val_int(); + double val_real(); + String *val_str(String*); + my_decimal *val_decimal(my_decimal *val) { return &decimal_value; } + int save_in_field(Field *field, bool no_conversions); + bool basic_const_item() const { return 1; } + Item *clone_item() { - name=(char*) str_arg; - decimals=(uint8) nr_of_decimals(str_arg); - max_length=length; - fixed= 1; + return new Item_decimal(name, &decimal_value, decimals, max_length); + } + // to prevent drop fixed flag (no need parent cleanup call) + void cleanup() {} + void print(String *str); + Item_num *neg() + { + my_decimal_neg(&decimal_value); + unsigned_flag= !decimal_value.sign(); + return this; } - Item_real(const char *str,double val_arg,uint 
decimal_par,uint length) + uint decimal_precision() const { return decimal_value.precision(); } + bool eq(const Item *, bool binary_cmp) const; + void set_decimal_value(my_decimal *value_par); +}; + + +class Item_float :public Item_num +{ + char *presentation; +public: + double value; + // Item_real() :value(0) {} + Item_float(const char *str_arg, uint length); + Item_float(const char *str,double val_arg,uint decimal_par,uint length) :value(val_arg) { - name=(char*) str; + presentation= name=(char*) str; decimals=(uint8) decimal_par; max_length=length; fixed= 1; } - Item_real(double value_par) :value(value_par) { fixed= 1; } + Item_float(double value_par) :presentation(0), value(value_par) { fixed= 1; } + int save_in_field(Field *field, bool no_conversions); enum Type type() const { return REAL_ITEM; } enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } - double val() { DBUG_ASSERT(fixed == 1); return value; } + double val_real() { DBUG_ASSERT(fixed == 1); return value; } longlong val_int() { DBUG_ASSERT(fixed == 1); @@ -733,28 +1618,34 @@ public: { return LONGLONG_MAX; } - return (longlong) (value+(value > 0 ? 
0.5 : -0.5)); + return (longlong) rint(value); } String *val_str(String*); + my_decimal *val_decimal(my_decimal *); bool basic_const_item() const { return 1; } // to prevent drop fixed flag (no need parent cleanup call) void cleanup() {} - Item *new_item() { return new Item_real(name,value,decimals,max_length); } + Item *clone_item() + { return new Item_float(name, value, decimals, max_length); } Item_num *neg() { value= -value; return this; } + void print(String *str); bool eq(const Item *, bool binary_cmp) const; }; -class Item_float :public Item_real +class Item_static_float_func :public Item_float { + const char *func_name; public: - Item_float(const char *str,uint length) :Item_real(str,length) - { - decimals=NOT_FIXED_DEC; - max_length=DBL_DIG+8; - } + Item_static_float_func(const char *str, double val_arg, uint decimal_par, + uint length) + :Item_float(NullS, val_arg, decimal_par, length), func_name(str) + {} + void print(String *str) { str->append(func_name); } + Item *safe_charset_converter(CHARSET_INFO *tocs); }; + class Item_string :public Item { public: @@ -776,63 +1667,87 @@ public: // it is constant => can be used without fix_fields (and frequently used) fixed= 1; } + /* Just create an item and do not fill string representation */ + Item_string(CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE) + { + collation.set(cs, dv); + max_length= 0; + set_name(NULL, 0, cs); + decimals= NOT_FIXED_DEC; + fixed= 1; + } Item_string(const char *name_par, const char *str, uint length, CHARSET_INFO *cs, Derivation dv= DERIVATION_COERCIBLE) { collation.set(cs, dv); str_value.set_or_copy_aligned(str,length,cs); max_length= str_value.numchars()*cs->mbmaxlen; - set_name(name_par,0,cs); + set_name(name_par, 0, cs); decimals=NOT_FIXED_DEC; // it is constant => can be used without fix_fields (and frequently used) fixed= 1; } - enum Type type() const { return STRING_ITEM; } - double val() - { - DBUG_ASSERT(fixed == 1); - int err; - char *end_not_used; - return 
my_strntod(str_value.charset(), (char*) str_value.ptr(), - str_value.length(), &end_not_used, &err); - } - longlong val_int() + /* + This is used in stored procedures to avoid memory leaks and + does a deep copy of its argument. + */ + void set_str_with_copy(const char *str_arg, uint length_arg) { - DBUG_ASSERT(fixed == 1); - int err; - return my_strntoll(str_value.charset(), str_value.ptr(), - str_value.length(), 10, (char**) 0, &err); + str_value.copy(str_arg, length_arg, collation.collation); + max_length= str_value.numchars() * collation.collation->mbmaxlen; } + enum Type type() const { return STRING_ITEM; } + double val_real(); + longlong val_int(); String *val_str(String*) { DBUG_ASSERT(fixed == 1); return (String*) &str_value; } + my_decimal *val_decimal(my_decimal *); int save_in_field(Field *field, bool no_conversions); enum Item_result result_type () const { return STRING_RESULT; } - enum_field_types field_type() const { return MYSQL_TYPE_STRING; } + enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } bool basic_const_item() const { return 1; } bool eq(const Item *item, bool binary_cmp) const; - Item *new_item() + Item *clone_item() { return new Item_string(name, str_value.ptr(), str_value.length(), collation.collation); } Item *safe_charset_converter(CHARSET_INFO *tocs); - String *const_string() { return &str_value; } - inline void append(char *str, uint length) { str_value.append(str, length); } + inline void append(char *str, uint length) + { + str_value.append(str, length); + max_length= str_value.numchars() * collation.collation->mbmaxlen; + } void print(String *str); // to prevent drop fixed flag (no need parent cleanup call) void cleanup() {} }; + +class Item_static_string_func :public Item_string +{ + const char *func_name; +public: + Item_static_string_func(const char *name_par, const char *str, uint length, + CHARSET_INFO *cs, + Derivation dv= DERIVATION_COERCIBLE) + :Item_string(NullS, str, length, cs, dv), func_name(name_par) + 
{} + Item *safe_charset_converter(CHARSET_INFO *tocs); + void print(String *str) { str->append(func_name); } +}; + + /* for show tables */ class Item_datetime :public Item_string { public: Item_datetime(const char *item_name): Item_string(item_name,"",0, - &my_charset_bin) + &my_charset_bin) { max_length=19;} enum_field_types field_type() const { return MYSQL_TYPE_DATETIME; } }; @@ -850,9 +1765,9 @@ class Item_return_int :public Item_int { enum_field_types int_field_type; public: - Item_return_int(const char *name, uint length, + Item_return_int(const char *name_arg, uint length, enum_field_types field_type_arg) - :Item_int(name, 0, length), int_field_type(field_type_arg) + :Item_int(name_arg, 0, length), int_field_type(field_type_arg) { unsigned_flag=1; } @@ -860,20 +1775,25 @@ public: }; -class Item_varbinary :public Item +class Item_hex_string: public Item { public: - Item_varbinary(const char *str,uint str_length); + Item_hex_string(): Item() {} + Item_hex_string(const char *str,uint str_length); enum Type type() const { return VARBIN_ITEM; } - double val() - { DBUG_ASSERT(fixed == 1); return (double) Item_varbinary::val_int(); } + double val_real() + { + DBUG_ASSERT(fixed == 1); + return (double) (ulonglong) Item_hex_string::val_int(); + } longlong val_int(); bool basic_const_item() const { return 1; } String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; } + my_decimal *val_decimal(my_decimal *); int save_in_field(Field *field, bool no_conversions); enum Item_result result_type () const { return STRING_RESULT; } enum Item_result cast_to_int_type() const { return INT_RESULT; } - enum_field_types field_type() const { return MYSQL_TYPE_STRING; } + enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } // to prevent drop fixed flag (no need parent cleanup call) void cleanup() {} bool eq(const Item *item, bool binary_cmp) const; @@ -881,6 +1801,12 @@ public: }; +class Item_bin_string: public Item_hex_string +{ +public: + 
Item_bin_string(const char *str,uint str_length); +}; + class Item_result_field :public Item /* Item with result field */ { public: @@ -910,11 +1836,14 @@ class Item_ref :public Item_ident protected: void set_properties(); public: + enum Ref_Type { REF, DIRECT_REF, VIEW_REF, OUTER_REF }; Field *result_field; /* Save result here */ Item **ref; - Item_ref(const char *db_par, const char *table_name_par, - const char *field_name_par) - :Item_ident(db_par, table_name_par, field_name_par), ref(0) {} + Item_ref(Name_resolution_context *context_arg, + const char *db_arg, const char *table_name_arg, + const char *field_name_arg) + :Item_ident(context_arg, db_arg, table_name_arg, field_name_arg), + result_field(0), ref(0) {} /* This constructor is used in two scenarios: A) *item = NULL @@ -929,76 +1858,76 @@ public: TODO we probably fix a superset of problems like in BUG#6658. Check this with Bar, and if we have a more broader set of problems like this. */ - Item_ref(Item **item, const char *table_name_par, const char *field_name_par) - :Item_ident(NullS, table_name_par, field_name_par), ref(item) - { - DBUG_ASSERT(item); - if (*item) - set_properties(); - } + Item_ref(Name_resolution_context *context_arg, Item **item, + const char *table_name_arg, const char *field_name_arg, + bool alias_name_used_arg= FALSE); /* Constructor need to process subselect with temporary tables (see Item) */ - Item_ref(THD *thd, Item_ref *item) :Item_ident(thd, item), ref(item->ref) {} + Item_ref(THD *thd, Item_ref *item) + :Item_ident(thd, item), result_field(item->result_field), ref(item->ref) {} enum Type type() const { return REF_ITEM; } bool eq(const Item *item, bool binary_cmp) const - { return ref && (*ref)->eq(item, binary_cmp); } - double val() - { - DBUG_ASSERT(fixed); - double tmp=(*ref)->val_result(); - null_value=(*ref)->null_value; - return tmp; - } - longlong val_int() - { - DBUG_ASSERT(fixed); - longlong tmp=(*ref)->val_int_result(); - null_value=(*ref)->null_value; - return tmp; 
- } - String *val_str(String* tmp) - { - DBUG_ASSERT(fixed); - tmp=(*ref)->str_result(tmp); - null_value=(*ref)->null_value; - return tmp; - } - bool is_null() - { - DBUG_ASSERT(fixed); - (void) (*ref)->val_int_result(); - return (*ref)->null_value; - } - bool get_date(TIME *ltime,uint fuzzydate) - { - DBUG_ASSERT(fixed); - return (null_value=(*ref)->get_date_result(ltime,fuzzydate)); + { + Item *it= ((Item *) item)->real_item(); + return ref && (*ref)->eq(it, binary_cmp); } - bool send(Protocol *prot, String *tmp){ return (*ref)->send(prot, tmp); } - void make_field(Send_field *field) { (*ref)->make_field(field); } - bool fix_fields(THD *, struct st_table_list *, Item **); - int save_in_field(Field *field, bool no_conversions) - { return (*ref)->save_in_field(field, no_conversions); } - void save_org_in_field(Field *field) { (*ref)->save_org_in_field(field); } + double val_real(); + longlong val_int(); + my_decimal *val_decimal(my_decimal *); + bool val_bool(); + String *val_str(String* tmp); + bool is_null(); + bool get_date(TIME *ltime,uint fuzzydate); + double val_result(); + longlong val_int_result(); + String *str_result(String* tmp); + my_decimal *val_decimal_result(my_decimal *); + bool val_bool_result(); + bool send(Protocol *prot, String *tmp); + void make_field(Send_field *field); + bool fix_fields(THD *, Item **); + int save_in_field(Field *field, bool no_conversions); + void save_org_in_field(Field *field); enum Item_result result_type () const { return (*ref)->result_type(); } enum_field_types field_type() const { return (*ref)->field_type(); } - Field *get_tmp_table_field() { return result_field; } - table_map used_tables() const + Field *get_tmp_table_field() + { return result_field ? result_field : (*ref)->get_tmp_table_field(); } + Item *get_tmp_table_item(THD *thd) { + return (result_field ? new Item_field(result_field) : + (*ref)->get_tmp_table_item(thd)); + } + table_map used_tables() const + { return depended_from ? 
OUTER_REF_TABLE_BIT : (*ref)->used_tables(); } + void update_used_tables() + { + if (!depended_from) + (*ref)->update_used_tables(); + } + table_map not_null_tables() const { return (*ref)->not_null_tables(); } void set_result_field(Field *field) { result_field= field; } bool is_result_field() { return 1; } void save_in_result_field(bool no_conversions) { (*ref)->save_in_field(result_field, no_conversions); } - Item *real_item() { return *ref; } + Item *real_item() + { + return ref ? (*ref)->real_item() : this; + } + bool walk(Item_processor processor, byte *arg) + { return (*ref)->walk(processor, arg); } void print(String *str); bool result_as_longlong() { return (*ref)->result_as_longlong(); } + void cleanup(); + Item_field *filed_for_view_update() + { return (*ref)->filed_for_view_update(); } + virtual Ref_Type ref_type() { return REF; } }; @@ -1009,55 +1938,108 @@ public: class Item_direct_ref :public Item_ref { public: - Item_direct_ref(Item **item, const char *table_name_par, - const char *field_name_par) - :Item_ref(item, table_name_par, field_name_par) {} + Item_direct_ref(Name_resolution_context *context_arg, Item **item, + const char *table_name_arg, + const char *field_name_arg, + bool alias_name_used_arg= FALSE) + :Item_ref(context_arg, item, table_name_arg, + field_name_arg, alias_name_used_arg) + {} /* Constructor need to process subselect with temporary tables (see Item) */ Item_direct_ref(THD *thd, Item_direct_ref *item) : Item_ref(thd, item) {} - double val() - { - double tmp=(*ref)->val(); - null_value=(*ref)->null_value; - return tmp; - } - longlong val_int() + double val_real(); + longlong val_int(); + String *val_str(String* tmp); + my_decimal *val_decimal(my_decimal *); + bool val_bool(); + bool is_null(); + bool get_date(TIME *ltime,uint fuzzydate); + virtual Ref_Type ref_type() { return DIRECT_REF; } +}; + +/* + Class for view fields, the same as Item_direct_ref, but call fix_fields + of reference if it is not called yet +*/ +class 
Item_direct_view_ref :public Item_direct_ref +{ +public: + Item_direct_view_ref(Name_resolution_context *context_arg, Item **item, + const char *table_name_arg, + const char *field_name_arg) + :Item_direct_ref(context_arg, item, table_name_arg, field_name_arg) {} + /* Constructor need to process subselect with temporary tables (see Item) */ + Item_direct_view_ref(THD *thd, Item_direct_ref *item) + :Item_direct_ref(thd, item) {} + + bool fix_fields(THD *, Item **); + bool eq(const Item *item, bool binary_cmp) const; + virtual Ref_Type ref_type() { return VIEW_REF; } +}; + + +class Item_outer_ref :public Item_direct_ref +{ +public: + Item_field *outer_field; + Item_outer_ref(Name_resolution_context *context_arg, + Item_field *outer_field_arg) + :Item_direct_ref(context_arg, 0, outer_field_arg->table_name, + outer_field_arg->field_name), + outer_field(outer_field_arg) { - longlong tmp=(*ref)->val_int(); - null_value=(*ref)->null_value; - return tmp; + ref= (Item**)&outer_field; + set_properties(); + fixed= 0; } - String *val_str(String* tmp) + void cleanup() { - tmp=(*ref)->val_str(tmp); - null_value=(*ref)->null_value; - return tmp; + ref= (Item**)&outer_field; + fixed= 0; + Item_direct_ref::cleanup(); + outer_field->cleanup(); } - bool is_null() + void save_in_result_field(bool no_conversions) { - (void) (*ref)->val_int(); - return (*ref)->null_value; + outer_field->save_org_in_field(result_field); } - bool get_date(TIME *ltime,uint fuzzydate) + bool fix_fields(THD *, Item **); + table_map used_tables() const { - return (null_value=(*ref)->get_date(ltime,fuzzydate)); + return (*ref)->const_item() ? 0 : OUTER_REF_TABLE_BIT; } + virtual Ref_Type ref_type() { return OUTER_REF; } }; class Item_in_subselect; + +/* + An object of this class: + - Converts val_XXX() calls to ref->val_XXX_result() calls, like Item_ref. + - Sets owner->was_null=TRUE if it has returned a NULL value from any + val_XXX() function. 
This allows to inject an Item_ref_null_helper + object into subquery and then check if the subquery has produced a row + with NULL value. +*/ + class Item_ref_null_helper: public Item_ref { protected: Item_in_subselect* owner; public: - Item_ref_null_helper(Item_in_subselect* master, Item **item, - const char *table_name_par, const char *field_name_par): - Item_ref(item, table_name_par, field_name_par), owner(master) {} - double val(); + Item_ref_null_helper(Name_resolution_context *context_arg, + Item_in_subselect* master, Item **item, + const char *table_name_arg, const char *field_name_arg) + :Item_ref(context_arg, item, table_name_arg, field_name_arg), + owner(master) {} + double val_real(); longlong val_int(); String* val_str(String* s); + my_decimal *val_decimal(my_decimal *); + bool val_bool(); bool get_date(TIME *ltime, uint fuzzydate); void print(String *str); /* @@ -1084,15 +2066,17 @@ class Item_int_with_ref :public Item_int { Item *ref; public: - Item_int_with_ref(longlong i, Item *ref_arg) :Item_int(i), ref(ref_arg) + Item_int_with_ref(longlong i, Item *ref_arg, my_bool unsigned_arg) : + Item_int(i), ref(ref_arg) { - unsigned_flag= ref_arg->unsigned_flag; + unsigned_flag= unsigned_arg; } int save_in_field(Field *field, bool no_conversions) { return ref->save_in_field(field, no_conversions); } - Item *new_item(); + Item *clone_item(); + virtual Item *real_item() { return ref; } }; @@ -1124,20 +2108,23 @@ public: enum Type type() const { return COPY_STR_ITEM; } enum Item_result result_type () const { return STRING_RESULT; } enum_field_types field_type() const { return cached_field_type; } - double val() + double val_real() { - int err; + int err_not_used; char *end_not_used; return (null_value ? 0.0 : my_strntod(str_value.charset(), (char*) str_value.ptr(), - str_value.length(), &end_not_used, &err)); + str_value.length(), &end_not_used, &err_not_used)); } longlong val_int() - { + { int err; - return null_value ? 
LL(0) : my_strntoll(str_value.charset(),str_value.ptr(),str_value.length(),10, (char**) 0,&err); + return null_value ? LL(0) : my_strntoll(str_value.charset(),str_value.ptr(), + str_value.length(),10, (char**) 0, + &err); } String *val_str(String*); + my_decimal *val_decimal(my_decimal *); void make_field(Send_field *field) { item->make_field(field); } void copy(); int save_in_field(Field *field, bool no_conversions); @@ -1147,55 +2134,64 @@ public: }; -class Item_buff :public Sql_alloc +class Cached_item :public Sql_alloc { public: my_bool null_value; - Item_buff() :null_value(0) {} + Cached_item() :null_value(0) {} virtual bool cmp(void)=0; - virtual ~Item_buff(); /*line -e1509 */ + virtual ~Cached_item(); /*line -e1509 */ }; -class Item_str_buff :public Item_buff +class Cached_item_str :public Cached_item { Item *item; String value,tmp_value; public: - Item_str_buff(THD *thd, Item *arg); + Cached_item_str(THD *thd, Item *arg); bool cmp(void); - ~Item_str_buff(); // Deallocate String:s + ~Cached_item_str(); // Deallocate String:s }; -class Item_real_buff :public Item_buff +class Cached_item_real :public Cached_item { Item *item; double value; public: - Item_real_buff(Item *item_par) :item(item_par),value(0.0) {} + Cached_item_real(Item *item_par) :item(item_par),value(0.0) {} bool cmp(void); }; -class Item_int_buff :public Item_buff +class Cached_item_int :public Cached_item { Item *item; longlong value; public: - Item_int_buff(Item *item_par) :item(item_par),value(0) {} + Cached_item_int(Item *item_par) :item(item_par),value(0) {} bool cmp(void); }; -class Item_field_buff :public Item_buff +class Cached_item_decimal :public Cached_item +{ + Item *item; + my_decimal value; +public: + Cached_item_decimal(Item *item_par); + bool cmp(void); +}; + +class Cached_item_field :public Cached_item { char *buff; Field *field; uint length; public: - Item_field_buff(Item_field *item) + Cached_item_field(Item_field *item) { - field=item->field; + field= item->field; buff= 
(char*) sql_calloc(length=field->pack_length()); } bool cmp(void); @@ -1205,40 +2201,50 @@ class Item_default_value : public Item_field { public: Item *arg; - Item_default_value() : - Item_field((const char *)NULL, (const char *)NULL, (const char *)NULL), arg(NULL) {} - Item_default_value(Item *a) : - Item_field((const char *)NULL, (const char *)NULL, (const char *)NULL), arg(a) {} + Item_default_value(Name_resolution_context *context_arg) + :Item_field(context_arg, (const char *)NULL, (const char *)NULL, + (const char *)NULL), + arg(NULL) {} + Item_default_value(Name_resolution_context *context_arg, Item *a) + :Item_field(context_arg, (const char *)NULL, (const char *)NULL, + (const char *)NULL), + arg(a) {} enum Type type() const { return DEFAULT_VALUE_ITEM; } bool eq(const Item *item, bool binary_cmp) const; - bool fix_fields(THD *, struct st_table_list *, Item **); + bool fix_fields(THD *, Item **); void print(String *str); - int save_in_field(Field *field_arg, bool no_conversions) - { - if (!arg) - { - field_arg->set_default(); - return 0; - } - return Item_field::save_in_field(field_arg, no_conversions); - } + int save_in_field(Field *field_arg, bool no_conversions); table_map used_tables() const { return (table_map)0L; } - + bool walk(Item_processor processor, byte *args) { return arg->walk(processor, args) || (this->*processor)(args); } + + Item *transform(Item_transformer transformer, byte *args); }; +/* + Item_insert_value -- an implementation of VALUES() function. + You can use the VALUES(col_name) function in the UPDATE clause + to refer to column values from the INSERT portion of the INSERT + ... UPDATE statement. In other words, VALUES(col_name) in the + UPDATE clause refers to the value of col_name that would be + inserted, had no duplicate-key conflict occurred. + In all other places this function returns NULL. 
+*/ + class Item_insert_value : public Item_field { public: Item *arg; - Item_insert_value(Item *a) : - Item_field((const char *)NULL, (const char *)NULL, (const char *)NULL), arg(a) {} + Item_insert_value(Name_resolution_context *context_arg, Item *a) + :Item_field(context_arg, (const char *)NULL, (const char *)NULL, + (const char *)NULL), + arg(a) {} bool eq(const Item *item, bool binary_cmp) const; - bool fix_fields(THD *, struct st_table_list *, Item **); + bool fix_fields(THD *, Item **); void print(String *str); int save_in_field(Field *field_arg, bool no_conversions) { @@ -1257,6 +2263,111 @@ public: } }; + +/* + We need this two enums here instead of sql_lex.h because + at least one of them is used by Item_trigger_field interface. + + Time when trigger is invoked (i.e. before or after row actually + inserted/updated/deleted). +*/ +enum trg_action_time_type +{ + TRG_ACTION_BEFORE= 0, TRG_ACTION_AFTER= 1, TRG_ACTION_MAX +}; + +/* + Event on which trigger is invoked. +*/ +enum trg_event_type +{ + TRG_EVENT_INSERT= 0 , TRG_EVENT_UPDATE= 1, TRG_EVENT_DELETE= 2, TRG_EVENT_MAX +}; + +class Table_triggers_list; + +/* + Represents NEW/OLD version of field of row which is + changed/read in trigger. + + Note: For this item main part of actual binding to Field object happens + not during fix_fields() call (like for Item_field) but right after + parsing of trigger definition, when table is opened, with special + setup_field() call. On fix_fields() stage we simply choose one of + two Field instances representing either OLD or NEW version of this + field. +*/ +class Item_trigger_field : public Item_field, + private Settable_routine_parameter +{ +public: + /* Is this item represents row from NEW or OLD row ? 
*/ + enum row_version_type {OLD_ROW, NEW_ROW}; + row_version_type row_version; + /* Next in list of all Item_trigger_field's in trigger */ + Item_trigger_field *next_trg_field; + /* Index of the field in the TABLE::field array */ + uint field_idx; + /* Pointer to Table_trigger_list object for table of this trigger */ + Table_triggers_list *triggers; + + Item_trigger_field(Name_resolution_context *context_arg, + row_version_type row_ver_arg, + const char *field_name_arg, + ulong priv, const bool ro) + :Item_field(context_arg, + (const char *)NULL, (const char *)NULL, field_name_arg), + row_version(row_ver_arg), field_idx((uint)-1), original_privilege(priv), + want_privilege(priv), table_grants(NULL), read_only (ro) + {} + void setup_field(THD *thd, TABLE *table, GRANT_INFO *table_grant_info); + enum Type type() const { return TRIGGER_FIELD_ITEM; } + bool eq(const Item *item, bool binary_cmp) const; + bool fix_fields(THD *, Item **); + void print(String *str); + table_map used_tables() const { return (table_map)0L; } + Field *get_tmp_table_field() { return 0; } + Item *copy_or_same(THD *thd) { return this; } + Item *get_tmp_table_item(THD *thd) { return copy_or_same(thd); } + void cleanup(); + +private: + void set_required_privilege(bool rw); + bool set_value(THD *thd, sp_rcontext *ctx, Item **it); + +public: + Settable_routine_parameter *get_settable_routine_parameter() + { + return (read_only ? 0 : this); + } + + bool set_value(THD *thd, Item **it) + { + return set_value(thd, NULL, it); + } + +private: + /* + 'want_privilege' holds privileges required to perform operation on + this trigger field (SELECT_ACL if we are going to read it and + UPDATE_ACL if we are going to update it). 
It is initialized at + parse time but can be updated later if this trigger field is used + as OUT or INOUT parameter of stored routine (in this case + set_required_privilege() is called to appropriately update + want_privilege and cleanup() is responsible for restoring of + original want_privilege once parameter's value is updated). + */ + ulong original_privilege; + ulong want_privilege; + GRANT_INFO *table_grants; + /* + Trigger field is read-only unless it belongs to the NEW row in a + BEFORE INSERT of BEFORE UPDATE trigger. + */ + bool read_only; +}; + + class Item_cache: public Item { protected: @@ -1274,6 +2385,7 @@ public: max_length= item->max_length; decimals= item->decimals; collation.set(item->collation); + unsigned_flag= item->unsigned_flag; return 0; }; virtual void store(Item *)= 0; @@ -1286,24 +2398,23 @@ public: void print(String *str); }; + class Item_cache_int: public Item_cache { +protected: longlong value; public: Item_cache_int(): Item_cache(), value(0) {} - + void store(Item *item); - double val() { DBUG_ASSERT(fixed == 1); return (double) value; } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } - String* val_str(String *str) - { - DBUG_ASSERT(fixed == 1); - str->set(value, default_charset()); - return str; - } + String* val_str(String *str); + my_decimal *val_decimal(my_decimal *); enum Item_result result_type() const { return INT_RESULT; } }; + class Item_cache_real: public Item_cache { double value; @@ -1311,31 +2422,42 @@ public: Item_cache_real(): Item_cache(), value(0) {} void store(Item *item); - double val() { DBUG_ASSERT(fixed == 1); return value; } - longlong val_int() - { - DBUG_ASSERT(fixed == 1); - return (longlong) (value+(value > 0 ? 
0.5 : -0.5)); - } - String* val_str(String *str) - { - str->set(value, decimals, default_charset()); - return str; - } + double val_real() { DBUG_ASSERT(fixed == 1); return value; } + longlong val_int(); + String* val_str(String *str); + my_decimal *val_decimal(my_decimal *); enum Item_result result_type() const { return REAL_RESULT; } }; + +class Item_cache_decimal: public Item_cache +{ +protected: + my_decimal decimal_value; +public: + Item_cache_decimal(): Item_cache() {} + + void store(Item *item); + double val_real(); + longlong val_int(); + String* val_str(String *str); + my_decimal *val_decimal(my_decimal *); + enum Item_result result_type() const { return DECIMAL_RESULT; } +}; + + class Item_cache_str: public Item_cache { - char buffer[80]; + char buffer[STRING_BUFFER_USUAL_SIZE]; String *value, value_buff; public: Item_cache_str(): Item_cache(), value(0) { } - + void store(Item *item); - double val(); + double val_real(); longlong val_int(); String* val_str(String *) { DBUG_ASSERT(fixed == 1); return value; } + my_decimal *val_decimal(my_decimal *); enum Item_result result_type() const { return STRING_RESULT; } CHARSET_INFO *charset() const { return value->charset(); }; }; @@ -1365,7 +2487,7 @@ public: { illegal_method_call((const char*)"make_field"); }; - double val() + double val_real() { illegal_method_call((const char*)"val"); return 0; @@ -1380,11 +2502,17 @@ public: illegal_method_call((const char*)"val_str"); return 0; }; + my_decimal *val_decimal(my_decimal *val) + { + illegal_method_call((const char*)"val_decimal"); + return 0; + }; + enum Item_result result_type() const { return ROW_RESULT; } uint cols() { return item_count; } - Item* el(uint i) { return values[i]; } - Item** addr(uint i) { return (Item **) (values + i); } + Item *element_index(uint i) { return values[i]; } + Item **addr(uint i) { return (Item **) (values + i); } bool check_cols(uint c); bool null_inside(); void bring_value(); @@ -1416,14 +2544,18 @@ protected: enum_field_types 
fld_type; void get_full_info(Item *item); + + /* It is used to count decimal precision in join_types */ + int prev_decimal_int_part; public: Item_type_holder(THD*, Item*); Item_result result_type() const; - virtual enum_field_types field_type() const { return fld_type; }; + enum_field_types field_type() const { return fld_type; }; enum Type type() const { return TYPE_HOLDER; } - double val(); + double val_real(); longlong val_int(); + my_decimal *val_decimal(my_decimal *); String *val_str(String*); bool join_types(THD *thd, Item *); Field *make_field_by_type(TABLE *table); @@ -1432,7 +2564,14 @@ public: }; -extern Item_buff *new_Item_buff(THD *thd, Item *item); +class st_select_lex; +void mark_select_range_as_dependent(THD *thd, + st_select_lex *last_select, + st_select_lex *current_sel, + Field *found_field, Item *found_item, + Item_ident *resolved_item); + +extern Cached_item *new_Cached_item(THD *thd, Item *item); extern Item_result item_cmp_type(Item_result a,Item_result b); extern void resolve_const_item(THD *thd, Item **ref, Item *cmp_item); extern bool field_is_equal_to_item(Field *field,Item *item); diff --git a/sql/item_buff.cc b/sql/item_buff.cc index 8298ce2cfb7..c162b84f457 100644 --- a/sql/item_buff.cc +++ b/sql/item_buff.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -20,33 +19,42 @@ #include "mysql_priv.h" /* -** Create right type of item_buffer for an item +** Create right type of Cached_item for an item */ -Item_buff *new_Item_buff(THD *thd, Item *item) +Cached_item *new_Cached_item(THD *thd, Item *item) { - if (item->type() == Item::FIELD_ITEM && - !(((Item_field *) item)->field->flags & BLOB_FLAG)) - return new Item_field_buff((Item_field *) item); - if (item->result_type() == STRING_RESULT) - return new Item_str_buff(thd, (Item_field *) item); - if (item->result_type() == INT_RESULT) - return new Item_int_buff((Item_field *) item); - return new Item_real_buff(item); + if (item->real_item()->type() == Item::FIELD_ITEM && + !(((Item_field *) (item->real_item()))->field->flags & BLOB_FLAG)) + return new Cached_item_field((Item_field *) (item->real_item())); + switch (item->result_type()) { + case STRING_RESULT: + return new Cached_item_str(thd, (Item_field *) item); + case INT_RESULT: + return new Cached_item_int((Item_field *) item); + case REAL_RESULT: + return new Cached_item_real(item); + case DECIMAL_RESULT: + return new Cached_item_decimal(item); + case ROW_RESULT: + default: + DBUG_ASSERT(0); + return 0; + } } -Item_buff::~Item_buff() {} +Cached_item::~Cached_item() {} /* ** Compare with old value and replace value with new value ** Return true if values have changed */ -Item_str_buff::Item_str_buff(THD *thd, Item *arg) +Cached_item_str::Cached_item_str(THD *thd, Item *arg) :item(arg), value(min(arg->max_length, thd->variables.max_sort_length)) {} -bool Item_str_buff::cmp(void) +bool Cached_item_str::cmp(void) { String *res; bool tmp; @@ -68,14 +76,14 @@ bool Item_str_buff::cmp(void) return tmp; } -Item_str_buff::~Item_str_buff() +Cached_item_str::~Cached_item_str() { item=0; // Safety } -bool Item_real_buff::cmp(void) +bool Cached_item_real::cmp(void) { - double nr=item->val(); + double 
nr= item->val_real(); if (null_value != item->null_value || nr != value) { null_value= item->null_value; @@ -85,7 +93,7 @@ bool Item_real_buff::cmp(void) return FALSE; } -bool Item_int_buff::cmp(void) +bool Cached_item_int::cmp(void) { longlong nr=item->val_int(); if (null_value != item->null_value || nr != value) @@ -98,7 +106,7 @@ bool Item_int_buff::cmp(void) } -bool Item_field_buff::cmp(void) +bool Cached_item_field::cmp(void) { bool tmp= field->cmp(buff) != 0; // This is not a blob! if (tmp) @@ -112,11 +120,38 @@ bool Item_field_buff::cmp(void) } +Cached_item_decimal::Cached_item_decimal(Item *it) + :item(it) +{ + my_decimal_set_zero(&value); +} + + +bool Cached_item_decimal::cmp() +{ + my_decimal tmp; + my_decimal *ptmp= item->val_decimal(&tmp); + if (null_value != item->null_value || + (!item->null_value && my_decimal_cmp(&value, ptmp))) + { + null_value= item->null_value; + /* Save only not null values */ + if (!null_value) + { + my_decimal2decimal(ptmp, &value); + return TRUE; + } + return FALSE; + } + return FALSE; +} + + /***************************************************************************** ** Instansiate templates *****************************************************************************/ -#ifdef __GNUC__ -template class List<Item_buff>; -template class List_iterator<Item_buff>; +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION +template class List<Cached_item>; +template class List_iterator<Cached_item>; #endif diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index ffb60754381..8943b9d3586 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000-2003 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
+ the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -27,12 +26,18 @@ static bool convert_constant_item(THD *thd, Field *field, Item **item); -static Item_result item_store_type(Item_result a,Item_result b) +static Item_result item_store_type(Item_result a, Item *item, + my_bool unsigned_flag) { + Item_result b= item->result_type(); + if (a == STRING_RESULT || b == STRING_RESULT) return STRING_RESULT; else if (a == REAL_RESULT || b == REAL_RESULT) return REAL_RESULT; + else if (a == DECIMAL_RESULT || b == DECIMAL_RESULT || + unsigned_flag != item->unsigned_flag) + return DECIMAL_RESULT; else return INT_RESULT; } @@ -40,6 +45,7 @@ static Item_result item_store_type(Item_result a,Item_result b) static void agg_result_type(Item_result *type, Item **items, uint nitems) { Item **item, **item_end; + my_bool unsigned_flag= 0; *type= STRING_RESULT; /* Skip beginning NULL items */ @@ -48,6 +54,7 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems) if ((*item)->type() != Item::NULL_ITEM) { *type= (*item)->result_type(); + unsigned_flag= (*item)->unsigned_flag; item++; break; } @@ -56,7 +63,7 @@ static void agg_result_type(Item_result *type, Item **items, uint nitems) for (; item < item_end; item++) { if ((*item)->type() != Item::NULL_ITEM) - *type= item_store_type(type[0], (*item)->result_type()); + *type= item_store_type(*type, *item, unsigned_flag); } } @@ -85,13 +92,14 @@ static void agg_cmp_type(THD *thd, Item_result *type, Item **items, uint nitems) type[0]= item_cmp_type(type[0], items[i]->result_type()); } + static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname) { - my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0), - c1.collation->name,c1.derivation_name(), - c2.collation->name,c2.derivation_name(), - fname); + my_error(ER_CANT_AGGREGATE_2COLLATIONS, MYF(0), + 
c1.collation->name,c1.derivation_name(), + c2.collation->name,c2.derivation_name(), + fname); } @@ -139,23 +147,39 @@ Item_bool_func2* Le_creator::create(Item *a, Item *b) const longlong Item_func_not::val_int() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + bool value= args[0]->val_bool(); null_value=args[0]->null_value; return ((!null_value && value == 0) ? 1 : 0); } /* + We put any NOT expression into parenthesis to avoid + possible problems with internal view representations where + any '!' is converted to NOT. It may cause a problem if + '!' is used in an expression together with other operators + whose precedence is lower than the precedence of '!' yet + higher than the precedence of NOT. +*/ + +void Item_func_not::print(String *str) +{ + str->append('('); + Item_func::print(str); + str->append(')'); +} + +/* special NOT for ALL subquery */ longlong Item_func_not_all::val_int() { DBUG_ASSERT(fixed == 1); - double value= args[0]->val(); + bool value= args[0]->val_bool(); /* - return TRUE if there was records in underlaying select in max/min - optimisation (ALL subquery) + return TRUE if there was records in underlying select in max/min + optimization (ALL subquery) */ if (empty_underlying_subquery()) return 1; @@ -182,18 +206,18 @@ void Item_func_not_all::print(String *str) /* Special NOP (No OPeration) for ALL subquery it is like Item_func_not_all - (return TRUE if underlaying sudquery do not return rows) but if subquery + (return TRUE if underlying subquery do not return rows) but if subquery returns some rows it return same value as argument (TRUE/FALSE). 
*/ longlong Item_func_nop_all::val_int() { DBUG_ASSERT(fixed == 1); - double value= args[0]->val(); + longlong value= args[0]->val_int(); /* - return FALSE if there was records in underlaying select in max/min - optimisation (SAME/ANY subquery) + return FALSE if there was records in underlying select in max/min + optimization (SAME/ANY subquery) */ if (empty_underlying_subquery()) return 0; @@ -204,10 +228,28 @@ longlong Item_func_nop_all::val_int() /* - Convert a constant expression or string to an integer. - This is done when comparing DATE's of different formats and - also when comparing bigint to strings (in which case the string - is converted once to a bigint). + Convert a constant item to an int and replace the original item + + SYNOPSIS + convert_constant_item() + thd thread handle + field item will be converted using the type of this field + item [in/out] reference to the item to convert + + DESCRIPTION + The function converts a constant expression or string to an integer. + On successful conversion the original item is substituted for the + result of the item evaluation. + This is done when comparing DATE/TIME of different formats and + also when comparing bigint to strings (in which case strings + are converted to bigints). + + NOTES + This function is called only at prepare stage. + As all derived tables are filled only after all derived tables + are prepared we do not evaluate items with subselects here because + they can contain derived tables and thus we may attempt to use a + table that has not been populated yet. 
RESULT VALUES 0 Can't convert item @@ -216,15 +258,21 @@ longlong Item_func_nop_all::val_int() static bool convert_constant_item(THD *thd, Field *field, Item **item) { - if ((*item)->const_item()) + if (!(*item)->with_subselect && (*item)->const_item()) { + /* For comparison purposes allow invalid dates like 2000-01-32 */ + ulong orig_sql_mode= thd->variables.sql_mode; + thd->variables.sql_mode|= MODE_INVALID_DATES; if (!(*item)->save_in_field(field, 1) && !((*item)->null_value)) { - Item *tmp=new Item_int_with_ref(field->val_int(), *item); + Item *tmp=new Item_int_with_ref(field->val_int(), *item, + test(field->flags & UNSIGNED_FLAG)); + thd->variables.sql_mode= orig_sql_mode; if (tmp) thd->change_item_tree(item, tmp); return 1; // Item was replaced } + thd->variables.sql_mode= orig_sql_mode; } return 0; } @@ -233,7 +281,7 @@ static bool convert_constant_item(THD *thd, Field *field, Item **item) void Item_bool_func2::fix_length_and_dec() { max_length= 1; // Function returns 0 or 1 - THD *thd= current_thd; + THD *thd; /* As some compare functions are generated after sql_yacc, @@ -242,12 +290,27 @@ void Item_bool_func2::fix_length_and_dec() if (!args[0] || !args[1]) return; + /* + We allow to convert to Unicode character sets in some cases. + The conditions when conversion is possible are: + - arguments A and B have different charsets + - A wins according to coercibility rules + - character set of A is superset for character set of B + + If all of the above is true, then it's possible to convert + B into the character set of A, and then compare according + to the collation of A. 
+ */ + + DTCollation coll; if (args[0]->result_type() == STRING_RESULT && args[1]->result_type() == STRING_RESULT && - agg_arg_charsets(coll, args, 2, MY_COLL_CMP_CONV)) + agg_arg_charsets(coll, args, 2, MY_COLL_CMP_CONV, 1)) return; - + + args[0]->cmp_context= args[1]->cmp_context= + item_cmp_type(args[0]->result_type(), args[1]->result_type()); // Make a special case of compare with fields to get nicer DATE comparisons if (functype() == LIKE_FUNC) // Disable conversion in case of LIKE function. @@ -255,30 +318,38 @@ void Item_bool_func2::fix_length_and_dec() set_cmp_func(); return; } - - if (args[0]->type() == FIELD_ITEM) + + thd= current_thd; + if (!thd->is_context_analysis_only()) { - Field *field=((Item_field*) args[0])->field; - if (field->can_be_compared_as_longlong()) + Item *arg_real_item= args[0]->real_item(); + if (arg_real_item->type() == FIELD_ITEM) { - if (convert_constant_item(thd, field,&args[1])) + Field *field=((Item_field*) arg_real_item)->field; + if (field->can_be_compared_as_longlong()) { - cmp.set_cmp_func(this, tmp_arg, tmp_arg+1, - INT_RESULT); // Works for all types. - return; + if (convert_constant_item(thd, field,&args[1])) + { + cmp.set_cmp_func(this, tmp_arg, tmp_arg+1, + INT_RESULT); // Works for all types. + args[0]->cmp_context= args[1]->cmp_context= INT_RESULT; + return; + } } } - } - if (args[1]->type() == FIELD_ITEM) - { - Field *field=((Item_field*) args[1])->field; - if (field->can_be_compared_as_longlong()) + arg_real_item= args[1]->real_item(); + if (arg_real_item->type() == FIELD_ITEM) { - if (convert_constant_item(thd, field,&args[0])) + Field *field=((Item_field*) arg_real_item)->field; + if (field->can_be_compared_as_longlong()) { - cmp.set_cmp_func(this, tmp_arg, tmp_arg+1, - INT_RESULT); // Works for all types. - return; + if (convert_constant_item(thd, field,&args[0])) + { + cmp.set_cmp_func(this, tmp_arg, tmp_arg+1, + INT_RESULT); // Works for all types. 
+ args[0]->cmp_context= args[1]->cmp_context= INT_RESULT; + return; + } } } } @@ -291,7 +362,8 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) owner= item; func= comparator_matrix[type] [test(owner->functype() == Item_func::EQUAL_FUNC)]; - if (type == ROW_RESULT) + switch (type) { + case ROW_RESULT: { uint n= (*a)->cols(); if (n != (*b)->cols()) @@ -304,15 +376,16 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) return 1; for (uint i=0; i < n; i++) { - if ((*a)->el(i)->cols() != (*b)->el(i)->cols()) + if ((*a)->element_index(i)->cols() != (*b)->element_index(i)->cols()) { - my_error(ER_OPERAND_COLUMNS, MYF(0), (*a)->el(i)->cols()); + my_error(ER_OPERAND_COLUMNS, MYF(0), (*a)->element_index(i)->cols()); return 1; } comparators[i].set_cmp_func(owner, (*a)->addr(i), (*b)->addr(i)); } + break; } - else if (type == STRING_RESULT) + case STRING_RESULT: { /* We must set cmp_charset here as we may be called from for an automatic @@ -334,15 +407,28 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) func= &Arg_comparator::compare_binary_string; else if (func == &Arg_comparator::compare_e_string) func= &Arg_comparator::compare_e_binary_string; + + /* + As this is binary compassion, mark all fields that they can't be + transformed. Otherwise we would get into trouble with comparisons + like: + WHERE col= 'j' AND col LIKE BINARY 'j' + which would be transformed to: + WHERE col= 'j' + */ + (*a)->walk(&Item::set_no_const_sub, (byte*) 0); + (*b)->walk(&Item::set_no_const_sub, (byte*) 0); } + break; } - else if (type == INT_RESULT) + case INT_RESULT: { if (func == &Arg_comparator::compare_int_signed) { if ((*a)->unsigned_flag) - func= ((*b)->unsigned_flag)? &Arg_comparator::compare_int_unsigned : - &Arg_comparator::compare_int_unsigned_signed; + func= (((*b)->unsigned_flag)? 
+ &Arg_comparator::compare_int_unsigned : + &Arg_comparator::compare_int_unsigned_signed); else if ((*b)->unsigned_flag) func= &Arg_comparator::compare_int_signed_unsigned; } @@ -351,8 +437,11 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) if ((*a)->unsigned_flag ^ (*b)->unsigned_flag) func= &Arg_comparator::compare_e_int_diff_signedness; } + break; } - else if (type == REAL_RESULT) + case DECIMAL_RESULT: + break; + case REAL_RESULT: { if ((*a)->decimals < NOT_FIXED_DEC && (*b)->decimals < NOT_FIXED_DEC) { @@ -362,6 +451,10 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) else if (func == &Arg_comparator::compare_e_real) func= &Arg_comparator::compare_e_real_fixed; } + break; + } + default: + DBUG_ASSERT(0); } return 0; } @@ -445,10 +538,10 @@ int Arg_comparator::compare_real() performing the comparison. */ volatile double val1, val2; - val1= (*a)->val(); + val1= (*a)->val_real(); if (!(*a)->null_value) { - val2= (*b)->val(); + val2= (*b)->val_real(); if (!(*b)->null_value) { owner->null_value= 0; @@ -461,15 +554,43 @@ int Arg_comparator::compare_real() return -1; } +int Arg_comparator::compare_decimal() +{ + my_decimal value1; + my_decimal *val1= (*a)->val_decimal(&value1); + if (!(*a)->null_value) + { + my_decimal value2; + my_decimal *val2= (*b)->val_decimal(&value2); + if (!(*b)->null_value) + { + owner->null_value= 0; + return my_decimal_cmp(val1, val2); + } + } + owner->null_value= 1; + return -1; +} + int Arg_comparator::compare_e_real() { - double val1= (*a)->val(); - double val2= (*b)->val(); + double val1= (*a)->val_real(); + double val2= (*b)->val_real(); if ((*a)->null_value || (*b)->null_value) return test((*a)->null_value && (*b)->null_value); return test(val1 == val2); } +int Arg_comparator::compare_e_decimal() +{ + my_decimal value1, value2; + my_decimal *val1= (*a)->val_decimal(&value1); + my_decimal *val2= (*b)->val_decimal(&value2); + if ((*a)->null_value || (*b)->null_value) + 
return test((*a)->null_value && (*b)->null_value); + return test(my_decimal_cmp(val1, val2) == 0); +} + int Arg_comparator::compare_real_fixed() { @@ -479,10 +600,10 @@ int Arg_comparator::compare_real_fixed() performing the comparison. */ volatile double val1, val2; - val1= (*a)->val(); + val1= (*a)->val_real(); if (!(*a)->null_value) { - val2= (*b)->val(); + val2= (*b)->val_real(); if (!(*b)->null_value) { owner->null_value= 0; @@ -500,8 +621,8 @@ int Arg_comparator::compare_real_fixed() int Arg_comparator::compare_e_real_fixed() { - double val1= (*a)->val(); - double val2= (*b)->val(); + double val1= (*a)->val_real(); + double val2= (*b)->val_real(); if ((*a)->null_value || (*b)->null_value) return test((*a)->null_value && (*b)->null_value); return test(val1 == val2 || fabs(val1 - val2) < precision); @@ -626,19 +747,38 @@ int Arg_comparator::compare_e_int_diff_signedness() int Arg_comparator::compare_row() { int res= 0; + bool was_null= 0; (*a)->bring_value(); (*b)->bring_value(); uint n= (*a)->cols(); for (uint i= 0; i<n; i++) { - if ((res= comparators[i].compare())) - return res; + res= comparators[i].compare(); if (owner->null_value) - return -1; + { + // NULL was compared + if (owner->abort_on_null) + return -1; // We do not need correct NULL returning + was_null= 1; + owner->null_value= 0; + res= 0; // continue comparison (maybe we will meet explicit difference) + } + else if (res) + return res; } - return res; + if (was_null) + { + /* + There was NULL(s) in comparison in some parts, but there was not + explicit difference in other parts, so we have to return NULL + */ + owner->null_value= 1; + return -1; + } + return 0; } + int Arg_comparator::compare_e_row() { (*a)->bring_value(); @@ -653,21 +793,66 @@ int Arg_comparator::compare_e_row() } -bool Item_in_optimizer::fix_left(THD *thd, - struct st_table_list *tables, - Item **ref) +void Item_func_truth::fix_length_and_dec() { - if (!args[0]->fixed && args[0]->fix_fields(thd, tables, args) || + maybe_null= 0; 
+ null_value= 0; + decimals= 0; + max_length= 1; +} + + +void Item_func_truth::print(String *str) +{ + str->append('('); + args[0]->print(str); + str->append(STRING_WITH_LEN(" is ")); + if (! affirmative) + str->append(STRING_WITH_LEN("not ")); + if (value) + str->append(STRING_WITH_LEN("true")); + else + str->append(STRING_WITH_LEN("false")); + str->append(')'); +} + + +bool Item_func_truth::val_bool() +{ + bool val= args[0]->val_bool(); + if (args[0]->null_value) + { + /* + NULL val IS {TRUE, FALSE} --> FALSE + NULL val IS NOT {TRUE, FALSE} --> TRUE + */ + return (! affirmative); + } + + if (affirmative) + { + /* {TRUE, FALSE} val IS {TRUE, FALSE} value */ + return (val == value); + } + + /* {TRUE, FALSE} val IS NOT {TRUE, FALSE} value */ + return (val != value); +} + + +longlong Item_func_truth::val_int() +{ + return (val_bool() ? 1 : 0); +} + + +bool Item_in_optimizer::fix_left(THD *thd, Item **ref) +{ + if (!args[0]->fixed && args[0]->fix_fields(thd, args) || !cache && !(cache= Item_cache::get_cache(args[0]->result_type()))) return 1; cache->setup(args[0]); - /* - If it is preparation PS only then we do not know values of parameters => - cant't get there values and do not need that values. - */ - if (! 
thd->current_arena->is_stmt_prepare()) - cache->store(args[0]); if (cache->cols() == 1) { if ((used_tables_cache= args[0]->used_tables())) @@ -680,10 +865,10 @@ bool Item_in_optimizer::fix_left(THD *thd, uint n= cache->cols(); for (uint i= 0; i < n; i++) { - if (args[0]->el(i)->used_tables()) - ((Item_cache *)cache->el(i))->set_used_tables(OUTER_REF_TABLE_BIT); + if (args[0]->element_index(i)->used_tables()) + ((Item_cache *)cache->element_index(i))->set_used_tables(OUTER_REF_TABLE_BIT); else - ((Item_cache *)cache->el(i))->set_used_tables(0); + ((Item_cache *)cache->element_index(i))->set_used_tables(0); } used_tables_cache= args[0]->used_tables(); } @@ -694,22 +879,21 @@ bool Item_in_optimizer::fix_left(THD *thd, } -bool Item_in_optimizer::fix_fields(THD *thd, struct st_table_list *tables, - Item ** ref) +bool Item_in_optimizer::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); - if (fix_left(thd, tables, ref)) - return 1; + if (fix_left(thd, ref)) + return TRUE; if (args[0]->maybe_null) maybe_null=1; - if (!args[1]->fixed && args[1]->fix_fields(thd, tables, args+1)) - return 1; + if (!args[1]->fixed && args[1]->fix_fields(thd, args+1)) + return TRUE; Item_in_subselect * sub= (Item_in_subselect *)args[1]; if (args[0]->cols() != sub->engine->cols()) { my_error(ER_OPERAND_COLUMNS, MYF(0), args[0]->cols()); - return 1; + return TRUE; } if (args[1]->maybe_null) maybe_null=1; @@ -718,20 +902,77 @@ bool Item_in_optimizer::fix_fields(THD *thd, struct st_table_list *tables, not_null_tables_cache|= args[1]->not_null_tables(); const_item_cache&= args[1]->const_item(); fixed= 1; - return 0; + return FALSE; } longlong Item_in_optimizer::val_int() { + bool tmp; DBUG_ASSERT(fixed == 1); cache->store(args[0]); + if (cache->null_value) { - null_value= 1; + if (((Item_in_subselect*)args[1])->is_top_level_item()) + { + /* + We're evaluating "NULL IN (SELECT ...)". The result can be NULL or + FALSE, and we can return one instead of another. Just return NULL. 
+ */ + null_value= 1; + } + else + { + if (!((Item_in_subselect*)args[1])->is_correlated && + result_for_null_param != UNKNOWN) + { + /* Use cached value from previous execution */ + null_value= result_for_null_param; + } + else + { + /* + We're evaluating "NULL IN (SELECT ...)". The result is: + FALSE if SELECT produces an empty set, or + NULL otherwise. + We disable the predicates we've pushed down into subselect, run the + subselect and see if it has produced any rows. + */ + Item_in_subselect *item_subs=(Item_in_subselect*)args[1]; + if (cache->cols() == 1) + { + item_subs->set_cond_guard_var(0, FALSE); + (void) args[1]->val_bool_result(); + result_for_null_param= null_value= !item_subs->engine->no_rows(); + item_subs->set_cond_guard_var(0, TRUE); + } + else + { + uint i; + uint ncols= cache->cols(); + /* + Turn off the predicates that are based on column compares for + which the left part is currently NULL + */ + for (i= 0; i < ncols; i++) + { + if (cache->element_index(i)->null_value) + item_subs->set_cond_guard_var(i, FALSE); + } + + (void) args[1]->val_bool_result(); + result_for_null_param= null_value= !item_subs->engine->no_rows(); + + /* Turn all predicates back on */ + for (i= 0; i < ncols; i++) + item_subs->set_cond_guard_var(i, TRUE); + } + } + } return 0; } - longlong tmp= args[1]->val_int_result(); + tmp= args[1]->val_bool_result(); null_value= args[1]->null_value; return tmp; } @@ -840,20 +1081,53 @@ longlong Item_func_strcmp::val_int() void Item_func_interval::fix_length_and_dec() { + use_decimal_comparison= (row->element_index(0)->result_type() == DECIMAL_RESULT) || + (row->element_index(0)->result_type() == INT_RESULT); if (row->cols() > 8) { bool consts=1; for (uint i=1 ; consts && i < row->cols() ; i++) { - consts&= row->el(i)->const_item(); + consts&= row->element_index(i)->const_item(); } if (consts && - (intervals=(double*) sql_alloc(sizeof(double)*(row->cols()-1)))) + (intervals= + (interval_range*) 
sql_alloc(sizeof(interval_range)*(row->cols()-1)))) { - for (uint i=1 ; i < row->cols(); i++) - intervals[i-1]=row->el(i)->val(); + if (use_decimal_comparison) + { + for (uint i=1 ; i < row->cols(); i++) + { + Item *el= row->element_index(i); + interval_range *range= intervals + (i-1); + if ((el->result_type() == DECIMAL_RESULT) || + (el->result_type() == INT_RESULT)) + { + range->type= DECIMAL_RESULT; + range->dec.init(); + my_decimal *dec= el->val_decimal(&range->dec); + if (dec != &range->dec) + { + range->dec= *dec; + range->dec.fix_buffer_pointer(); + } + } + else + { + range->type= REAL_RESULT; + range->dbl= el->val_real(); + } + } + } + else + { + for (uint i=1 ; i < row->cols(); i++) + { + intervals[i-1].dbl= row->element_index(i)->val_real(); + } + } } } maybe_null= 0; @@ -866,20 +1140,43 @@ void Item_func_interval::fix_length_and_dec() /* - return -1 if null value, - 0 if lower than lowest - 1 - arg_count-1 if between args[n] and args[n+1] - arg_count if higher than biggest argument + Execute Item_func_interval() + + SYNOPSIS + Item_func_interval::val_int() + + NOTES + If we are doing a decimal comparison, we are + evaluating the first item twice. 
+ + RETURN + -1 if null value, + 0 if lower than lowest + 1 - arg_count-1 if between args[n] and args[n+1] + arg_count if higher than biggest argument */ longlong Item_func_interval::val_int() { DBUG_ASSERT(fixed == 1); - double value= row->el(0)->val(); + double value; + my_decimal dec_buf, *dec= NULL; uint i; - if (row->el(0)->null_value) - return -1; // -1 if null + if (use_decimal_comparison) + { + dec= row->element_index(0)->val_decimal(&dec_buf); + if (row->element_index(0)->null_value) + return -1; + my_decimal2double(E_DEC_FATAL_ERROR, dec, &value); + } + else + { + value= row->element_index(0)->val_real(); + if (row->element_index(0)->null_value) + return -1; + } + if (intervals) { // Use binary search to find interval uint start,end; @@ -888,17 +1185,40 @@ longlong Item_func_interval::val_int() while (start != end) { uint mid= (start + end + 1) / 2; - if (intervals[mid] <= value) + interval_range *range= intervals + mid; + my_bool cmp_result; + /* + The values in the range intervall may have different types, + Only do a decimal comparision of the first argument is a decimal + and we are comparing against a decimal + */ + if (dec && range->type == DECIMAL_RESULT) + cmp_result= my_decimal_cmp(&range->dec, dec) <= 0; + else + cmp_result= (range->dbl <= value); + if (cmp_result) start= mid; else end= mid - 1; } - return (value < intervals[start]) ? 0 : start + 1; + interval_range *range= intervals+start; + return ((dec && range->type == DECIMAL_RESULT) ? + my_decimal_cmp(dec, &range->dec) < 0 : + value < range->dbl) ? 
0 : start + 1; } for (i=1 ; i < row->cols() ; i++) { - if (row->el(i)->val() > value) + Item *el= row->element_index(i); + if (use_decimal_comparison && + ((el->result_type() == DECIMAL_RESULT) || + (el->result_type() == INT_RESULT))) + { + my_decimal e_dec_buf, *e_dec= row->element_index(i)->val_decimal(&e_dec_buf); + if (my_decimal_cmp(e_dec, dec) > 0) + return i-1; + } + else if (row->element_index(i)->val_real() > value) return i-1; } return i-1; @@ -934,12 +1254,13 @@ longlong Item_func_interval::val_int() 1 got error */ -bool Item_func_between::fix_fields(THD *thd, struct st_table_list *tables, - Item **ref) +bool Item_func_between::fix_fields(THD *thd, Item **ref) { - if (Item_func_opt_neg::fix_fields(thd, tables, ref)) + if (Item_func_opt_neg::fix_fields(thd, ref)) return 1; + thd->lex->current_select->between_count++; + /* not_null_tables_cache == union(T1(e),T1(e1),T1(e2)) */ if (pred_level && !negated) return 0; @@ -960,13 +1281,13 @@ void Item_func_between::fix_length_and_dec() /* As some compare functions are generated after sql_yacc, - we have to check for out of memory conditons here + we have to check for out of memory conditions here */ if (!args[0] || !args[1] || !args[2]) return; agg_cmp_type(thd, &cmp_type, args, 3); if (cmp_type == STRING_RESULT && - agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV)) + agg_arg_charsets(cmp_collation, args, 3, MY_COLL_CMP_CONV, 1)) return; /* @@ -974,9 +1295,11 @@ void Item_func_between::fix_length_and_dec() They are compared as integers, so for const item this time-consuming conversion can be done only once, not for every single comparison */ - if (args[0]->type() == FIELD_ITEM) + if (args[0]->real_item()->type() == FIELD_ITEM && + thd->lex->sql_command != SQLCOM_CREATE_VIEW && + thd->lex->sql_command != SQLCOM_SHOW_CREATE) { - Field *field=((Item_field*) args[0])->field; + Field *field=((Item_field*) (args[0]->real_item()))->field; if (field->can_be_compared_as_longlong()) { /* @@ -1022,7 +1345,7 @@ 
longlong Item_func_between::val_int() } else if (cmp_type == INT_RESULT) { - longlong value=args[0]->val_int(),a,b; + longlong value=args[0]->val_int(), a, b; if ((null_value=args[0]->null_value)) return 0; /* purecov: inspected */ a=args[1]->val_int(); @@ -1040,13 +1363,31 @@ longlong Item_func_between::val_int() null_value= value >= a; } } + else if (cmp_type == DECIMAL_RESULT) + { + my_decimal dec_buf, *dec= args[0]->val_decimal(&dec_buf), + a_buf, *a_dec, b_buf, *b_dec; + if ((null_value=args[0]->null_value)) + return 0; /* purecov: inspected */ + a_dec= args[1]->val_decimal(&a_buf); + b_dec= args[2]->val_decimal(&b_buf); + if (!args[1]->null_value && !args[2]->null_value) + return (longlong) ((my_decimal_cmp(dec, a_dec) >= 0 && + my_decimal_cmp(dec, b_dec) <= 0) != negated); + if (args[1]->null_value && args[2]->null_value) + null_value=1; + else if (args[1]->null_value) + null_value= (my_decimal_cmp(dec, b_dec) <= 0); + else + null_value= (my_decimal_cmp(dec, a_dec) >= 0); + } else { - double value=args[0]->val(),a,b; + double value= args[0]->val_real(),a,b; if ((null_value=args[0]->null_value)) return 0; /* purecov: inspected */ - a=args[1]->val(); - b=args[2]->val(); + a= args[1]->val_real(); + b= args[2]->val_real(); if (!args[1]->null_value && !args[2]->null_value) return (longlong) ((value >= a && value <= b) != negated); if (args[1]->null_value && args[2]->null_value) @@ -1069,10 +1410,10 @@ void Item_func_between::print(String *str) str->append('('); args[0]->print(str); if (negated) - str->append(" not", 4); - str->append(" between ", 9); + str->append(STRING_WITH_LEN(" not")); + str->append(STRING_WITH_LEN(" between ")); args[1]->print(str); - str->append(" and ", 5); + str->append(STRING_WITH_LEN(" and ")); args[2]->print(str); str->append(')'); } @@ -1080,20 +1421,41 @@ void Item_func_between::print(String *str) void Item_func_ifnull::fix_length_and_dec() { + agg_result_type(&hybrid_type, args, 2); maybe_null=args[1]->maybe_null; - 
max_length=max(args[0]->max_length,args[1]->max_length); - decimals=max(args[0]->decimals,args[1]->decimals); - agg_result_type(&cached_result_type, args, 2); - if (cached_result_type == STRING_RESULT) - agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV); - else if (cached_result_type != REAL_RESULT) + decimals= max(args[0]->decimals, args[1]->decimals); + max_length= (hybrid_type == DECIMAL_RESULT || hybrid_type == INT_RESULT) ? + (max(args[0]->max_length - args[0]->decimals, + args[1]->max_length - args[1]->decimals) + decimals) : + max(args[0]->max_length, args[1]->max_length); + + switch (hybrid_type) { + case STRING_RESULT: + agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1); + break; + case DECIMAL_RESULT: + case REAL_RESULT: + break; + case INT_RESULT: decimals= 0; - + break; + case ROW_RESULT: + default: + DBUG_ASSERT(0); + } cached_field_type= args[0]->field_type(); if (cached_field_type != args[1]->field_type()) cached_field_type= Item_func::field_type(); } + +uint Item_func_ifnull::decimal_precision() const +{ + int max_int_part=max(args[0]->decimal_int_part(),args[1]->decimal_int_part()); + return min(max_int_part + decimals, DECIMAL_MAX_PRECISION); +} + + enum_field_types Item_func_ifnull::field_type() const { return cached_field_type; @@ -1105,23 +1467,23 @@ Field *Item_func_ifnull::tmp_table_field(TABLE *table) } double -Item_func_ifnull::val() +Item_func_ifnull::real_op() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if (!args[0]->null_value) { null_value=0; return value; } - value=args[1]->val(); + value= args[1]->val_real(); if ((null_value=args[1]->null_value)) return 0.0; return value; } longlong -Item_func_ifnull::val_int() +Item_func_ifnull::int_op() { DBUG_ASSERT(fixed == 1); longlong value=args[0]->val_int(); @@ -1136,8 +1498,25 @@ Item_func_ifnull::val_int() return value; } + +my_decimal *Item_func_ifnull::decimal_op(my_decimal *decimal_value) +{ + 
DBUG_ASSERT(fixed == 1); + my_decimal *value= args[0]->val_decimal(decimal_value); + if (!args[0]->null_value) + { + null_value= 0; + return value; + } + value= args[1]->val_decimal(decimal_value); + if ((null_value= args[1]->null_value)) + return 0; + return value; +} + + String * -Item_func_ifnull::val_str(String *str) +Item_func_ifnull::str_op(String *str) { DBUG_ASSERT(fixed == 1); String *res =args[0]->val_str(str); @@ -1183,12 +1562,12 @@ Item_func_ifnull::val_str(String *str) */ bool -Item_func_if::fix_fields(THD *thd, struct st_table_list *tlist, Item **ref) +Item_func_if::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); args[0]->top_level_item(); - if (Item_func::fix_fields(thd, tlist, ref)) + if (Item_func::fix_fields(thd, ref)) return 1; not_null_tables_cache= (args[1]->not_null_tables() & @@ -1202,8 +1581,9 @@ void Item_func_if::fix_length_and_dec() { maybe_null=args[1]->maybe_null || args[2]->maybe_null; - max_length=max(args[1]->max_length,args[2]->max_length); - decimals=max(args[1]->decimals,args[2]->decimals); + decimals= max(args[1]->decimals, args[2]->decimals); + unsigned_flag=args[1]->unsigned_flag && args[2]->unsigned_flag; + enum Item_result arg1_type=args[1]->result_type(); enum Item_result arg2_type=args[2]->result_type(); bool null1=args[1]->const_item() && args[1]->null_value; @@ -1224,23 +1604,45 @@ Item_func_if::fix_length_and_dec() agg_result_type(&cached_result_type, args+1, 2); if (cached_result_type == STRING_RESULT) { - if (agg_arg_charsets(collation, args+1, 2, MY_COLL_ALLOW_CONV)) - return; + if (agg_arg_charsets(collation, args+1, 2, MY_COLL_ALLOW_CONV, 1)) + return; } else { collation.set(&my_charset_bin); // Number } } + + if ((cached_result_type == DECIMAL_RESULT ) + || (cached_result_type == INT_RESULT)) + { + int len1= args[1]->max_length - args[1]->decimals + - (args[1]->unsigned_flag ? 0 : 1); + + int len2= args[2]->max_length - args[2]->decimals + - (args[2]->unsigned_flag ? 
0 : 1); + + max_length=max(len1, len2) + decimals + (unsigned_flag ? 0 : 1); + } + else + max_length= max(args[1]->max_length, args[2]->max_length); +} + + +uint Item_func_if::decimal_precision() const +{ + int precision=(max(args[1]->decimal_int_part(),args[2]->decimal_int_part())+ + decimals); + return min(precision, DECIMAL_MAX_PRECISION); } double -Item_func_if::val() +Item_func_if::val_real() { DBUG_ASSERT(fixed == 1); - Item *arg= args[0]->val_int() ? args[1] : args[2]; - double value=arg->val(); + Item *arg= args[0]->val_bool() ? args[1] : args[2]; + double value= arg->val_real(); null_value=arg->null_value; return value; } @@ -1249,7 +1651,7 @@ longlong Item_func_if::val_int() { DBUG_ASSERT(fixed == 1); - Item *arg= args[0]->val_int() ? args[1] : args[2]; + Item *arg= args[0]->val_bool() ? args[1] : args[2]; longlong value=arg->val_int(); null_value=arg->null_value; return value; @@ -1259,7 +1661,7 @@ String * Item_func_if::val_str(String *str) { DBUG_ASSERT(fixed == 1); - Item *arg= args[0]->val_int() ? args[1] : args[2]; + Item *arg= args[0]->val_bool() ? args[1] : args[2]; String *res=arg->val_str(str); if (res) res->set_charset(collation.collation); @@ -1268,6 +1670,17 @@ Item_func_if::val_str(String *str) } +my_decimal * +Item_func_if::val_decimal(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed == 1); + Item *arg= args[0]->val_bool() ? 
args[1] : args[2]; + my_decimal *value= arg->val_decimal(decimal_value); + null_value= arg->null_value; + return value; +} + + void Item_func_nullif::fix_length_and_dec() { @@ -1277,13 +1690,15 @@ Item_func_nullif::fix_length_and_dec() { max_length=args[0]->max_length; decimals=args[0]->decimals; - agg_result_type(&cached_result_type, args, 2); + unsigned_flag= args[0]->unsigned_flag; + cached_result_type= args[0]->result_type(); if (cached_result_type == STRING_RESULT && - agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV)) + agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1)) return; } } + /* nullif () returns NULL if arguments are equal, else it returns the first argument. @@ -1292,7 +1707,7 @@ Item_func_nullif::fix_length_and_dec() */ double -Item_func_nullif::val() +Item_func_nullif::val_real() { DBUG_ASSERT(fixed == 1); double value; @@ -1301,7 +1716,7 @@ Item_func_nullif::val() null_value=1; return 0.0; } - value=args[0]->val(); + value= args[0]->val_real(); null_value=args[0]->null_value; return value; } @@ -1337,6 +1752,22 @@ Item_func_nullif::val_str(String *str) } +my_decimal * +Item_func_nullif::val_decimal(my_decimal * decimal_value) +{ + DBUG_ASSERT(fixed == 1); + my_decimal *res; + if (!cmp.compare()) + { + null_value=1; + return 0; + } + res= args[0]->val_decimal(decimal_value); + null_value= args[0]->null_value; + return res; +} + + bool Item_func_nullif::is_null() { @@ -1350,16 +1781,18 @@ Item_func_nullif::is_null() Item *Item_func_case::find_item(String *str) { - String *first_expr_str,*tmp; + String *first_expr_str, *tmp; + my_decimal *first_expr_dec, first_expr_dec_val; longlong first_expr_int; double first_expr_real; char buff[MAX_FIELD_WIDTH]; String buff_str(buff,sizeof(buff),default_charset()); - + /* These will be initialized later */ LINT_INIT(first_expr_str); LINT_INIT(first_expr_int); LINT_INIT(first_expr_real); + LINT_INIT(first_expr_dec); if (first_expr_num != -1) { @@ -1376,13 +1809,18 @@ Item 
*Item_func_case::find_item(String *str) return else_expr_num != -1 ? args[else_expr_num] : 0; break; case REAL_RESULT: - first_expr_real= args[first_expr_num]->val(); + first_expr_real= args[first_expr_num]->val_real(); if (args[first_expr_num]->null_value) return else_expr_num != -1 ? args[else_expr_num] : 0; break; + case DECIMAL_RESULT: + first_expr_dec= args[first_expr_num]->val_decimal(&first_expr_dec_val); + if (args[first_expr_num]->null_value) + return else_expr_num != -1 ? args[else_expr_num] : 0; + break; case ROW_RESULT: default: - // This case should never be choosen + // This case should never be chosen DBUG_ASSERT(0); break; } @@ -1394,7 +1832,7 @@ Item *Item_func_case::find_item(String *str) if (first_expr_num == -1) { // No expression between CASE and the first WHEN - if (args[i]->val_int()) + if (args[i]->val_bool()) return args[i+1]; continue; } @@ -1409,12 +1847,19 @@ Item *Item_func_case::find_item(String *str) return args[i+1]; break; case REAL_RESULT: - if (args[i]->val()==first_expr_real && !args[i]->null_value) + if (args[i]->val_real() == first_expr_real && !args[i]->null_value) + return args[i+1]; + break; + case DECIMAL_RESULT: + { + my_decimal value; + if (my_decimal_cmp(args[i]->val_decimal(&value), first_expr_dec) == 0) return args[i+1]; break; + } case ROW_RESULT: default: - // This case should never be choosen + // This case should never be chosen DBUG_ASSERT(0); break; } @@ -1424,7 +1869,6 @@ Item *Item_func_case::find_item(String *str) } - String *Item_func_case::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -1461,7 +1905,7 @@ longlong Item_func_case::val_int() return res; } -double Item_func_case::val() +double Item_func_case::val_real() { DBUG_ASSERT(fixed == 1); char buff[MAX_FIELD_WIDTH]; @@ -1474,21 +1918,66 @@ double Item_func_case::val() null_value=1; return 0; } - res=item->val(); + res= item->val_real(); null_value=item->null_value; return res; } + +my_decimal *Item_func_case::val_decimal(my_decimal *decimal_value) +{ 
+ DBUG_ASSERT(fixed == 1); + char buff[MAX_FIELD_WIDTH]; + String dummy_str(buff, sizeof(buff), default_charset()); + Item *item= find_item(&dummy_str); + my_decimal *res; + + if (!item) + { + null_value=1; + return 0; + } + + res= item->val_decimal(decimal_value); + null_value= item->null_value; + return res; +} + + +bool Item_func_case::fix_fields(THD *thd, Item **ref) +{ + /* + buff should match stack usage from + Item_func_case::val_int() -> Item_func_case::find_item() + */ +#ifndef EMBEDDED_LIBRARY + char buff[MAX_FIELD_WIDTH*2+sizeof(String)*2+sizeof(String*)*2+sizeof(double)*2+sizeof(longlong)*2]; +#endif + bool res= Item_func::fix_fields(thd, ref); + /* + Call check_stack_overrun after fix_fields to be sure that stack variable + is not optimized away + */ + if (check_stack_overrun(thd, STACK_MIN_SIZE, buff)) + return TRUE; // Fatal error flag is set! + return res; +} + + + void Item_func_case::fix_length_and_dec() { Item **agg; uint nagg; + THD *thd= current_thd; if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1)))) return; - // Aggregate all THEN and ELSE expression types - // and collations when string result + /* + Aggregate all THEN and ELSE expression types + and collations when string result + */ for (nagg= 0 ; nagg < ncases/2 ; nagg++) agg[nagg]= args[nagg*2+1]; @@ -1498,7 +1987,7 @@ void Item_func_case::fix_length_and_dec() agg_result_type(&cached_result_type, agg, nagg); if ((cached_result_type == STRING_RESULT) && - agg_arg_charsets(collation, agg, nagg, MY_COLL_ALLOW_CONV)) + agg_arg_charsets(collation, agg, nagg, MY_COLL_ALLOW_CONV, 1)) return; @@ -1512,10 +2001,10 @@ void Item_func_case::fix_length_and_dec() for (nagg= 0; nagg < ncases/2 ; nagg++) agg[nagg+1]= args[nagg*2]; nagg++; - agg_cmp_type(current_thd, &cmp_type, agg, nagg); + agg_cmp_type(thd, &cmp_type, agg, nagg); if ((cmp_type == STRING_RESULT) && - agg_arg_charsets(cmp_collation, agg, nagg, MY_COLL_CMP_CONV)) - return; + agg_arg_charsets(cmp_collation, agg, nagg, 
MY_COLL_CMP_CONV, 1)) + return; } if (else_expr_num == -1 || args[else_expr_num]->maybe_null) @@ -1536,11 +2025,23 @@ void Item_func_case::fix_length_and_dec() } +uint Item_func_case::decimal_precision() const +{ + int max_int_part=0; + for (uint i=0 ; i < ncases ; i+=2) + set_if_bigger(max_int_part, args[i+1]->decimal_int_part()); + + if (else_expr_num != -1) + set_if_bigger(max_int_part, args[else_expr_num]->decimal_int_part()); + return min(max_int_part + decimals, DECIMAL_MAX_PRECISION); +} + + /* TODO: Fix this so that it prints the whole CASE expression */ void Item_func_case::print(String *str) { - str->append("(case ", 6); + str->append(STRING_WITH_LEN("(case ")); if (first_expr_num != -1) { args[first_expr_num]->print(str); @@ -1548,26 +2049,26 @@ void Item_func_case::print(String *str) } for (uint i=0 ; i < ncases ; i+=2) { - str->append("when ", 5); + str->append(STRING_WITH_LEN("when ")); args[i]->print(str); - str->append(" then ", 6); + str->append(STRING_WITH_LEN(" then ")); args[i+1]->print(str); str->append(' '); } if (else_expr_num != -1) { - str->append("else ", 5); + str->append(STRING_WITH_LEN("else ")); args[else_expr_num]->print(str); str->append(' '); } - str->append("end)", 4); + str->append(STRING_WITH_LEN("end)")); } /* Coalesce - return first not NULL argument. 
*/ -String *Item_func_coalesce::val_str(String *str) +String *Item_func_coalesce::str_op(String *str) { DBUG_ASSERT(fixed == 1); null_value=0; @@ -1581,7 +2082,7 @@ String *Item_func_coalesce::val_str(String *str) return 0; } -longlong Item_func_coalesce::val_int() +longlong Item_func_coalesce::int_op() { DBUG_ASSERT(fixed == 1); null_value=0; @@ -1595,13 +2096,13 @@ longlong Item_func_coalesce::val_int() return 0; } -double Item_func_coalesce::val() +double Item_func_coalesce::real_op() { DBUG_ASSERT(fixed == 1); null_value=0; for (uint i=0 ; i < arg_count ; i++) { - double res=args[i]->val(); + double res= args[i]->val_real(); if (!args[i]->null_value) return res; } @@ -1610,29 +2111,144 @@ double Item_func_coalesce::val() } -void Item_func_coalesce::fix_length_and_dec() +my_decimal *Item_func_coalesce::decimal_op(my_decimal *decimal_value) { - max_length= 0; - decimals= 0; - agg_result_type(&cached_result_type, args, arg_count); - for (uint i=0 ; i < arg_count ; i++) + DBUG_ASSERT(fixed == 1); + null_value= 0; + for (uint i= 0; i < arg_count; i++) { - set_if_bigger(max_length,args[i]->max_length); - set_if_bigger(decimals,args[i]->decimals); + my_decimal *res= args[i]->val_decimal(decimal_value); + if (!args[i]->null_value) + return res; } - if (cached_result_type == STRING_RESULT) - agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV); - else if (cached_result_type != REAL_RESULT) + null_value=1; + return 0; +} + + +void Item_func_coalesce::fix_length_and_dec() +{ + agg_result_type(&hybrid_type, args, arg_count); + switch (hybrid_type) { + case STRING_RESULT: + count_only_length(); + decimals= NOT_FIXED_DEC; + agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1); + break; + case DECIMAL_RESULT: + count_decimal_length(); + break; + case REAL_RESULT: + count_real_length(); + break; + case INT_RESULT: + count_only_length(); decimals= 0; + break; + case ROW_RESULT: + default: + DBUG_ASSERT(0); + } } 
/**************************************************************************** Classes and function for the IN operator ****************************************************************************/ -static int cmp_longlong(void *cmp_arg, longlong *a,longlong *b) +/* + Determine which of the signed longlong arguments is bigger + + SYNOPSIS + cmp_longs() + a_val left argument + b_val right argument + + DESCRIPTION + This function will compare two signed longlong arguments + and will return -1, 0, or 1 if left argument is smaller than, + equal to or greater than the right argument. + + RETURN VALUE + -1 left argument is smaller than the right argument. + 0 left argument is equal to the right argument. + 1 left argument is greater than the right argument. +*/ +static inline int cmp_longs (longlong a_val, longlong b_val) { - return *a < *b ? -1 : *a == *b ? 0 : 1; + return a_val < b_val ? -1 : a_val == b_val ? 0 : 1; +} + + +/* + Determine which of the unsigned longlong arguments is bigger + + SYNOPSIS + cmp_ulongs() + a_val left argument + b_val right argument + + DESCRIPTION + This function will compare two unsigned longlong arguments + and will return -1, 0, or 1 if left argument is smaller than, + equal to or greater than the right argument. + + RETURN VALUE + -1 left argument is smaller than the right argument. + 0 left argument is equal to the right argument. + 1 left argument is greater than the right argument. +*/ +static inline int cmp_ulongs (ulonglong a_val, ulonglong b_val) +{ + return a_val < b_val ? -1 : a_val == b_val ? 0 : 1; +} + + +/* + Compare two integers in IN value list format (packed_longlong) + + SYNOPSIS + cmp_longlong() + cmp_arg an argument passed to the calling function (qsort2) + a left argument + b right argument + + DESCRIPTION + This function will compare two integer arguments in the IN value list + format and will return -1, 0, or 1 if left argument is smaller than, + equal to or greater than the right argument. 
+ It's used in sorting the IN values list and finding an element in it. + Depending on the signedness of the arguments cmp_longlong() will + compare them as either signed (using cmp_longs()) or unsigned (using + cmp_ulongs()). + + RETURN VALUE + -1 left argument is smaller than the right argument. + 0 left argument is equal to the right argument. + 1 left argument is greater than the right argument. +*/ +int cmp_longlong(void *cmp_arg, + in_longlong::packed_longlong *a, + in_longlong::packed_longlong *b) +{ + if (a->unsigned_flag != b->unsigned_flag) + { + /* + One of the args is unsigned and is too big to fit into the + positive signed range. Report no match. + */ + if (a->unsigned_flag && ((ulonglong) a->val) > (ulonglong) LONGLONG_MAX || + b->unsigned_flag && ((ulonglong) b->val) > (ulonglong) LONGLONG_MAX) + return a->unsigned_flag ? 1 : -1; + /* + Although the signedness differs both args can fit into the signed + positive range. Make them signed and compare as usual. + */ + return cmp_longs (a->val, b->val); + } + if (a->unsigned_flag) + return cmp_ulongs ((ulonglong) a->val, (ulonglong) b->val); + else + return cmp_longs (a->val, b->val); } static int cmp_double(void *cmp_arg, double *a,double *b) @@ -1640,11 +2256,24 @@ static int cmp_double(void *cmp_arg, double *a,double *b) return *a < *b ? -1 : *a == *b ? 
0 : 1; } -static int cmp_row(void *cmp_arg, cmp_item_row* a, cmp_item_row* b) +static int cmp_row(void *cmp_arg, cmp_item_row *a, cmp_item_row *b) { return a->compare(b); } + +static int cmp_decimal(void *cmp_arg, my_decimal *a, my_decimal *b) +{ + /* + We need call of fixing buffer pointer, because fast sort just copy + decimal buffers in memory and pointers left pointing on old buffer place + */ + a->fix_buffer_pointer(); + b->fix_buffer_pointer(); + return my_decimal_cmp(a, b); +} + + int in_vector::find(Item *item) { byte *result=get_value(item); @@ -1744,19 +2373,23 @@ void in_row::set(uint pos, Item *item) } in_longlong::in_longlong(uint elements) - :in_vector(elements,sizeof(longlong),(qsort2_cmp) cmp_longlong, 0) + :in_vector(elements,sizeof(packed_longlong),(qsort2_cmp) cmp_longlong, 0) {} void in_longlong::set(uint pos,Item *item) { - ((longlong*) base)[pos]=item->val_int(); + struct packed_longlong *buff= &((packed_longlong*) base)[pos]; + + buff->val= item->val_int(); + buff->unsigned_flag= item->unsigned_flag; } byte *in_longlong::get_value(Item *item) { - tmp= item->val_int(); + tmp.val= item->val_int(); if (item->null_value) return 0; + tmp.unsigned_flag= item->unsigned_flag; return (byte*) &tmp; } @@ -1766,28 +2399,59 @@ in_double::in_double(uint elements) void in_double::set(uint pos,Item *item) { - ((double*) base)[pos]=item->val(); + ((double*) base)[pos]= item->val_real(); } byte *in_double::get_value(Item *item) { - tmp= item->val(); + tmp= item->val_real(); if (item->null_value) return 0; /* purecov: inspected */ return (byte*) &tmp; } -cmp_item* cmp_item::get_comparator(Item *item) + +in_decimal::in_decimal(uint elements) + :in_vector(elements, sizeof(my_decimal),(qsort2_cmp) cmp_decimal, 0) +{} + + +void in_decimal::set(uint pos, Item *item) +{ + /* as far as 'item' is constant, we can store reference on my_decimal */ + my_decimal *dec= ((my_decimal *)base) + pos; + dec->len= DECIMAL_BUFF_LENGTH; + dec->fix_buffer_pointer(); + my_decimal 
*res= item->val_decimal(dec); + /* if item->val_decimal() is evaluated to NULL then res == 0 */ + if (!item->null_value && res != dec) + my_decimal2decimal(res, dec); +} + + +byte *in_decimal::get_value(Item *item) +{ + my_decimal *result= item->val_decimal(&val); + if (item->null_value) + return 0; + return (byte *)result; +} + + +cmp_item* cmp_item::get_comparator(Item_result type, + CHARSET_INFO *cs) { - switch (item->result_type()) { + switch (type) { case STRING_RESULT: - return new cmp_item_sort_string(item->collation.collation); + return new cmp_item_sort_string(cs); case INT_RESULT: return new cmp_item_int; case REAL_RESULT: return new cmp_item_real; case ROW_RESULT: return new cmp_item_row; + case DECIMAL_RESULT: + return new cmp_item_decimal; default: DBUG_ASSERT(0); break; @@ -1820,7 +2484,7 @@ cmp_item* cmp_item_row::make_same() cmp_item_row::~cmp_item_row() { DBUG_ENTER("~cmp_item_row"); - DBUG_PRINT("enter",("this: %lx", this)); + DBUG_PRINT("enter",("this: 0x%lx", (long) this)); if (comparators) { for (uint i= 0; i < n; i++) @@ -1846,10 +2510,12 @@ void cmp_item_row::store_value(Item *item) for (uint i=0; i < n; i++) { if (!comparators[i]) - if (!(comparators[i]= cmp_item::get_comparator(item->el(i)))) + if (!(comparators[i]= + cmp_item::get_comparator(item->element_index(i)->result_type(), + item->element_index(i)->collation.collation))) break; // new failed - comparators[i]->store_value(item->el(i)); - item->null_value|= item->el(i)->null_value; + comparators[i]->store_value(item->element_index(i)); + item->null_value|= item->element_index(i)->null_value; } } DBUG_VOID_RETURN; @@ -1874,8 +2540,8 @@ void cmp_item_row::store_value_by_template(cmp_item *t, Item *item) if (!(comparators[i]= tmpl->comparators[i]->make_same())) break; // new failed comparators[i]->store_value_by_template(tmpl->comparators[i], - item->el(i)); - item->null_value|= item->el(i)->null_value; + item->element_index(i)); + item->null_value|= item->element_index(i)->null_value; } 
} } @@ -1893,9 +2559,9 @@ int cmp_item_row::cmp(Item *arg) arg->bring_value(); for (uint i=0; i < n; i++) { - if (comparators[i]->cmp(arg->el(i))) + if (comparators[i]->cmp(arg->element_index(i))) { - if (!arg->el(i)->null_value) + if (!arg->element_index(i)->null_value) return 1; was_null= 1; } @@ -1906,17 +2572,48 @@ int cmp_item_row::cmp(Item *arg) int cmp_item_row::compare(cmp_item *c) { - cmp_item_row *cmp= (cmp_item_row *) c; + cmp_item_row *l_cmp= (cmp_item_row *) c; for (uint i=0; i < n; i++) { int res; - if ((res= comparators[i]->compare(cmp->comparators[i]))) + if ((res= comparators[i]->compare(l_cmp->comparators[i]))) return res; } return 0; } +void cmp_item_decimal::store_value(Item *item) +{ + my_decimal *val= item->val_decimal(&value); + /* val may be zero if item is nnull */ + if (val && val != &value) + my_decimal2decimal(val, &value); +} + + +int cmp_item_decimal::cmp(Item *arg) +{ + my_decimal tmp_buf, *tmp= arg->val_decimal(&tmp_buf); + if (arg->null_value) + return 1; + return my_decimal_cmp(&value, tmp); +} + + +int cmp_item_decimal::compare(cmp_item *arg) +{ + cmp_item_decimal *l_cmp= (cmp_item_decimal*) arg; + return my_decimal_cmp(&value, &l_cmp->value); +} + + +cmp_item* cmp_item_decimal::make_same() +{ + return new cmp_item_decimal(); +} + + bool Item_func_in::nulls_in_row() { Item **arg,**arg_end; @@ -1959,11 +2656,11 @@ bool Item_func_in::nulls_in_row() */ bool -Item_func_in::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +Item_func_in::fix_fields(THD *thd, Item **ref) { Item **arg, **arg_end; - if (Item_func_opt_neg::fix_fields(thd, tables, ref)) + if (Item_func_opt_neg::fix_fields(thd, ref)) return 1; /* not_null_tables_cache == union(T1(e),union(T1(ei))) */ @@ -1983,7 +2680,7 @@ static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y) { return cs->coll->strnncollsp(cs, (uchar *) x->ptr(),x->length(), - (uchar *) y->ptr(),y->length()); + (uchar *) y->ptr(),y->length(), 0); } @@ -1996,11 +2693,17 @@ void 
Item_func_in::fix_length_and_dec() agg_cmp_type(thd, &cmp_type, args, arg_count); if (cmp_type == STRING_RESULT && - agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV)) + agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1)) return; for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++) - const_itm&= arg[0]->const_item(); + { + if (!arg[0]->const_item()) + { + const_itm= 0; + break; + } + } /* Row item with NULLs inside can return NULL or FALSE => @@ -2008,6 +2711,31 @@ void Item_func_in::fix_length_and_dec() */ if (const_itm && !nulls_in_row()) { + /* + IN must compare INT/DATE/DATETIME/TIMESTAMP columns and constants + as int values (the same way as equality does). + So we must check here if the column on the left and all the constant + values on the right can be compared as integers and adjust the + comparison type accordingly. + */ + if (args[0]->real_item()->type() == FIELD_ITEM && + thd->lex->sql_command != SQLCOM_CREATE_VIEW && + thd->lex->sql_command != SQLCOM_SHOW_CREATE && + cmp_type != INT_RESULT) + { + Field *field= ((Item_field*) (args[0]->real_item()))->field; + if (field->can_be_compared_as_longlong()) + { + bool all_converted= TRUE; + for (arg=args+1, arg_end=args+arg_count; arg != arg_end ; arg++) + { + if (!convert_constant_item (thd, field, &arg[0])) + all_converted= FALSE; + } + if (all_converted) + cmp_type= INT_RESULT; + } + } switch (cmp_type) { case STRING_RESULT: array=new in_string(arg_count-1,(qsort2_cmp) srtcmp_in, @@ -2022,6 +2750,9 @@ void Item_func_in::fix_length_and_dec() case ROW_RESULT: array= new in_row(arg_count-1, args[0]); break; + case DECIMAL_RESULT: + array= new in_decimal(arg_count - 1); + break; default: DBUG_ASSERT(0); return; @@ -2043,7 +2774,7 @@ void Item_func_in::fix_length_and_dec() } else { - in_item= cmp_item::get_comparator(args[0]); + in_item= cmp_item::get_comparator(cmp_type, cmp_collation.collation); if (cmp_type == STRING_RESULT) in_item->cmp_charset= 
cmp_collation.collation; } @@ -2056,10 +2787,10 @@ void Item_func_in::print(String *str) str->append('('); args[0]->print(str); if (negated) - str->append(" not", 4); - str->append(" in (", 5); + str->append(STRING_WITH_LEN(" not")); + str->append(STRING_WITH_LEN(" in (")); print_args(str, 1); - str->append("))", 2); + str->append(STRING_WITH_LEN("))")); } @@ -2146,7 +2877,7 @@ void Item_cond::copy_andor_arguments(THD *thd, Item_cond *item) bool -Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +Item_cond::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); List_iterator<Item> li(list); @@ -2155,20 +2886,36 @@ Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) char buff[sizeof(char*)]; // Max local vars in function #endif not_null_tables_cache= used_tables_cache= 0; - const_item_cache= 0; + const_item_cache= 1; /* and_table_cache is the value that Item_cond_or() returns for not_null_tables() */ and_tables_cache= ~(table_map) 0; - if (check_stack_overrun(thd, buff)) - return 1; // Fatal error flag is set! + if (check_stack_overrun(thd, STACK_MIN_SIZE, buff)) + return TRUE; // Fatal error flag is set! + /* + The following optimization reduces the depth of an AND-OR tree. + E.g. a WHERE clause like + F1 AND (F2 AND (F2 AND F4)) + is parsed into a tree with the same nested structure as defined + by braces. This optimization will transform such tree into + AND (F1, F2, F3, F4). + Trees of OR items are flattened as well: + ((F1 OR F2) OR (F3 OR F4)) => OR (F1, F2, F3, F4) + Items for removed AND/OR levels will dangle until the death of the + entire statement. + The optimization is currently prepared statements and stored procedures + friendly as it doesn't allocate any memory and its effects are durable + (i.e. do not depend on PS/SP arguments). 
+ */ while ((item=li++)) { table_map tmp_table_map; while (item->type() == Item::COND_ITEM && - ((Item_cond*) item)->functype() == functype()) + ((Item_cond*) item)->functype() == functype() && + !((Item_cond*) item)->list.is_empty()) { // Identical function li.replace(((Item_cond*) item)->list); ((Item_cond*) item)->list.empty(); @@ -2179,14 +2926,19 @@ Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) // item can be substituted in fix_fields if ((!item->fixed && - item->fix_fields(thd, tables, li.ref())) || + item->fix_fields(thd, li.ref())) || (item= *li.ref())->check_cols(1)) - return 1; /* purecov: inspected */ + return TRUE; /* purecov: inspected */ used_tables_cache|= item->used_tables(); - tmp_table_map= item->not_null_tables(); - not_null_tables_cache|= tmp_table_map; - and_tables_cache&= tmp_table_map; - const_item_cache&= item->const_item(); + if (item->const_item()) + and_tables_cache= (table_map) 0; + else + { + tmp_table_map= item->not_null_tables(); + not_null_tables_cache|= tmp_table_map; + and_tables_cache&= tmp_table_map; + const_item_cache= FALSE; + } with_sum_func= with_sum_func || item->with_sum_func; with_subselect|= item->with_subselect; if (item->maybe_null) @@ -2195,7 +2947,7 @@ Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) thd->lex->current_select->cond_count+= list.elements; fix_length_and_dec(); fixed= 1; - return 0; + return FALSE; } bool Item_cond::walk(Item_processor processor, byte *arg) @@ -2210,6 +2962,124 @@ bool Item_cond::walk(Item_processor processor, byte *arg) /* + Transform an Item_cond object with a transformer callback function + + SYNOPSIS + transform() + transformer the transformer callback function to be applied to the nodes + of the tree of the object + arg parameter to be passed to the transformer + + DESCRIPTION + The function recursively applies the transform method to each + member item of the condition list. 
+ If the call of the method for a member item returns a new item + the old item is substituted for a new one. + After this the transformer is applied to the root node + of the Item_cond object. + + RETURN VALUES + Item returned as the result of transformation of the root node +*/ + +Item *Item_cond::transform(Item_transformer transformer, byte *arg) +{ + DBUG_ASSERT(!current_thd->is_stmt_prepare()); + + List_iterator<Item> li(list); + Item *item; + while ((item= li++)) + { + Item *new_item= item->transform(transformer, arg); + if (!new_item) + return 0; + + /* + THD::change_item_tree() should be called only if the tree was + really transformed, i.e. when a new item has been created. + Otherwise we'll be allocating a lot of unnecessary memory for + change records at each execution. + */ + if (new_item != item) + current_thd->change_item_tree(li.ref(), new_item); + } + return Item_func::transform(transformer, arg); +} + + +/* + Compile Item_cond object with a processor and a transformer callback functions + + SYNOPSIS + compile() + analyzer the analyzer callback function to be applied to the nodes + of the tree of the object + arg_p in/out parameter to be passed to the analyzer + transformer the transformer callback function to be applied to the nodes + of the tree of the object + arg_t parameter to be passed to the transformer + + DESCRIPTION + First the function applies the analyzer to the root node of + the Item_func object. Then if the analyzer succeeeds (returns TRUE) + the function recursively applies the compile method to member + item of the condition list. + If the call of the method for a member item returns a new item + the old item is substituted for a new one. + After this the transformer is applied to the root node + of the Item_cond object. 
+ + RETURN VALUES + Item returned as the result of transformation of the root node +*/ + +Item *Item_cond::compile(Item_analyzer analyzer, byte **arg_p, + Item_transformer transformer, byte *arg_t) +{ + if (!(this->*analyzer)(arg_p)) + return 0; + + List_iterator<Item> li(list); + Item *item; + while ((item= li++)) + { + /* + The same parameter value of arg_p must be passed + to analyze any argument of the condition formula. + */ + byte *arg_v= *arg_p; + Item *new_item= item->compile(analyzer, &arg_v, transformer, arg_t); + if (new_item && new_item != item) + li.replace(new_item); + } + return Item_func::transform(transformer, arg_t); +} + +void Item_cond::traverse_cond(Cond_traverser traverser, + void *arg, traverse_order order) +{ + List_iterator<Item> li(list); + Item *item; + + switch(order) { + case(PREFIX): + (*traverser)(this, arg); + while ((item= li++)) + { + item->traverse_cond(traverser, arg, order); + } + (*traverser)(NULL, arg); + break; + case(POSTFIX): + while ((item= li++)) + { + item->traverse_cond(traverser, arg, order); + } + (*traverser)(this, arg); + } +} + +/* Move SUM items out from item tree and replace with reference SYNOPSIS @@ -2234,7 +3104,7 @@ void Item_cond::split_sum_func(THD *thd, Item **ref_pointer_array, List_iterator<Item> li(list); Item *item; while ((item= li++)) - item->split_sum_func2(thd, ref_pointer_array, fields, li.ref()); + item->split_sum_func2(thd, ref_pointer_array, fields, li.ref(), TRUE); } @@ -2256,7 +3126,7 @@ void Item_cond::update_used_tables() { item->update_used_tables(); used_tables_cache|= item->used_tables(); - const_item_cache&= item->const_item(); + const_item_cache&= item->const_item(); } } @@ -2297,7 +3167,7 @@ void Item_cond::neg_arguments(THD *thd) /* - Evalution of AND(expr, expr, expr ...) + Evaluation of AND(expr, expr, expr ...) 
NOTES: abort_if_null is set for AND expressions for which we don't care if the @@ -2322,7 +3192,7 @@ longlong Item_cond_and::val_int() null_value= 0; while ((item=li++)) { - if (item->val_int() == 0) + if (!item->val_bool()) { if (abort_on_null || !(null_value= item->null_value)) return 0; // return FALSE @@ -2340,7 +3210,7 @@ longlong Item_cond_or::val_int() null_value=0; while ((item=li++)) { - if (item->val_int() != 0) + if (item->val_bool()) { null_value=0; return 1; @@ -2413,7 +3283,7 @@ longlong Item_is_not_null_test::val_int() if (!used_tables_cache && !with_subselect) { owner->was_null|= (!cached_value); - DBUG_PRINT("info", ("cached :%d", cached_value)); + DBUG_PRINT("info", ("cached :%ld", (long) cached_value)); DBUG_RETURN(cached_value); } if (args[0]->is_null()) @@ -2457,7 +3327,7 @@ void Item_func_isnotnull::print(String *str) { str->append('('); args[0]->print(str); - str->append(" is not null)", 13); + str->append(STRING_WITH_LEN(" is not null)")); } @@ -2507,17 +3377,17 @@ Item_func::optimize_type Item_func_like::select_optimize() const } -bool Item_func_like::fix_fields(THD *thd, TABLE_LIST *tlist, Item ** ref) +bool Item_func_like::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); - if (Item_bool_func2::fix_fields(thd, tlist, ref) || - escape_item->fix_fields(thd, tlist, &escape_item)) - return 1; + if (Item_bool_func2::fix_fields(thd, ref) || + escape_item->fix_fields(thd, &escape_item)) + return TRUE; if (!escape_item->const_during_execution()) { my_error(ER_WRONG_ARGUMENTS,MYF(0),"ESCAPE"); - return 1; + return TRUE; } if (escape_item->const_item()) @@ -2526,6 +3396,15 @@ bool Item_func_like::fix_fields(THD *thd, TABLE_LIST *tlist, Item ** ref) String *escape_str= escape_item->val_str(&tmp_value1); if (escape_str) { + if (escape_used_in_parsing && ( + (((thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) && + escape_str->numchars() != 1) || + escape_str->numchars() > 1))) + { + my_error(ER_WRONG_ARGUMENTS,MYF(0),"ESCAPE"); + return 
TRUE; + } + if (use_mb(cmp.cmp_collation.collation)) { CHARSET_INFO *cs= escape_str->charset(); @@ -2561,7 +3440,7 @@ bool Item_func_like::fix_fields(THD *thd, TABLE_LIST *tlist, Item ** ref) } else escape= '\\'; - + /* We could also do boyer-more for non-const items, but as we would have to recompute the tables for each row it's not worth it. @@ -2571,7 +3450,7 @@ bool Item_func_like::fix_fields(THD *thd, TABLE_LIST *tlist, Item ** ref) { String* res2 = args[1]->val_str(&tmp_value2); if (!res2) - return 0; // Null argument + return FALSE; // Null argument const size_t len = res2->length(); const char* first = res2->ptr(); @@ -2592,10 +3471,11 @@ bool Item_func_like::fix_fields(THD *thd, TABLE_LIST *tlist, Item ** ref) if (canDoTurboBM) { pattern = first + 1; - pattern_len = len - 2; + pattern_len = (int) len - 2; DBUG_PRINT("info", ("Initializing pattern: '%s'", first)); - int *suff = (int*) thd->alloc(sizeof(int)*((pattern_len + 1)*2+ - alphabet_size)); + int *suff = (int*) thd->alloc((int) (sizeof(int)* + ((pattern_len + 1)*2+ + alphabet_size))); bmGs = suff + pattern_len + 1; bmBc = bmGs + pattern_len + 1; turboBM_compute_good_suffix_shifts(suff); @@ -2604,7 +3484,7 @@ bool Item_func_like::fix_fields(THD *thd, TABLE_LIST *tlist, Item ** ref) } } } - return 0; + return FALSE; } void Item_func_like::cleanup() @@ -2616,20 +3496,20 @@ void Item_func_like::cleanup() #ifdef USE_REGEX bool -Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +Item_func_regex::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); if ((!args[0]->fixed && - args[0]->fix_fields(thd, tables, args)) || args[0]->check_cols(1) || - (!args[1]->fixed && - args[1]->fix_fields(thd,tables, args + 1)) || args[1]->check_cols(1)) - return 1; /* purecov: inspected */ + args[0]->fix_fields(thd, args)) || args[0]->check_cols(1) || + (!args[1]->fixed && + args[1]->fix_fields(thd, args + 1)) || args[1]->check_cols(1)) + return TRUE; /* purecov: inspected */ 
with_sum_func=args[0]->with_sum_func || args[1]->with_sum_func; max_length= 1; decimals= 0; - if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV)) - return 1; + if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1)) + return TRUE; used_tables_cache=args[0]->used_tables() | args[1]->used_tables(); not_null_tables_cache= (args[0]->not_null_tables() | @@ -2643,7 +3523,7 @@ Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) if (args[1]->null_value) { // Will always return NULL maybe_null=1; - return 0; + return FALSE; } int error; if ((error= my_regcomp(&preg,res->c_ptr(), @@ -2654,8 +3534,8 @@ Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) cmp_collation.collation))) { (void) my_regerror(error,&preg,buff,sizeof(buff)); - my_printf_error(ER_REGEXP_ERROR,ER(ER_REGEXP_ERROR),MYF(0),buff); - return 1; + my_error(ER_REGEXP_ERROR, MYF(0), buff); + return TRUE; } regex_compiled=regex_is_const=1; maybe_null=args[0]->maybe_null; @@ -2663,7 +3543,7 @@ Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) else maybe_null=1; fixed= 1; - return 0; + return FALSE; } @@ -2698,7 +3578,7 @@ longlong Item_func_regex::val_int() my_regfree(&preg); regex_compiled=0; } - if (my_regcomp(&preg,res2->c_ptr(), + if (my_regcomp(&preg,res2->c_ptr_safe(), ((cmp_collation.collation->state & (MY_CS_BINSORT | MY_CS_CSSORT)) ? REG_EXTENDED | REG_NOSUB : @@ -2712,7 +3592,7 @@ longlong Item_func_regex::val_int() } } null_value=0; - return my_regexec(&preg,res->c_ptr(),0,(my_regmatch_t*) 0,0) ? 0 : 1; + return my_regexec(&preg,res->c_ptr_safe(),0,(my_regmatch_t*) 0,0) ? 0 : 1; } @@ -2995,7 +3875,7 @@ longlong Item_cond_xor::val_int() /* Apply NOT transformation to the item and return a new one. 
- SYNPOSIS + SYNOPSIS neg_transformer() thd thread handler @@ -3128,3 +4008,332 @@ Item *Item_bool_rowready_func2::negated_item() DBUG_ASSERT(0); return 0; } + +Item_equal::Item_equal(Item_field *f1, Item_field *f2) + : Item_bool_func(), const_item(0), eval_item(0), cond_false(0) +{ + const_item_cache= 0; + fields.push_back(f1); + fields.push_back(f2); +} + +Item_equal::Item_equal(Item *c, Item_field *f) + : Item_bool_func(), eval_item(0), cond_false(0) +{ + const_item_cache= 0; + fields.push_back(f); + const_item= c; +} + + +Item_equal::Item_equal(Item_equal *item_equal) + : Item_bool_func(), eval_item(0), cond_false(0) +{ + const_item_cache= 0; + List_iterator_fast<Item_field> li(item_equal->fields); + Item_field *item; + while ((item= li++)) + { + fields.push_back(item); + } + const_item= item_equal->const_item; + cond_false= item_equal->cond_false; +} + +void Item_equal::add(Item *c) +{ + if (cond_false) + return; + if (!const_item) + { + const_item= c; + return; + } + Item_func_eq *func= new Item_func_eq(c, const_item); + func->set_cmp_func(); + func->quick_fix_field(); + if ((cond_false= !func->val_int())) + const_item_cache= 1; +} + +void Item_equal::add(Item_field *f) +{ + fields.push_back(f); +} + +uint Item_equal::members() +{ + return fields.elements; +} + + +/* + Check whether a field is referred in the multiple equality + + SYNOPSIS + contains() + field field whose occurrence is to be checked + + DESCRIPTION + The function checks whether field is occurred in the Item_equal object + + RETURN VALUES + 1 if nultiple equality contains a reference to field + 0 otherwise +*/ + +bool Item_equal::contains(Field *field) +{ + List_iterator_fast<Item_field> it(fields); + Item_field *item; + while ((item= it++)) + { + if (field->eq(item->field)) + return 1; + } + return 0; +} + + +/* + Join members of another Item_equal object + + SYNOPSIS + merge() + item multiple equality whose members are to be joined + + DESCRIPTION + The function actually merges two multiple 
equalities. + After this operation the Item_equal object additionally contains + the field items of another item of the type Item_equal. + If the optional constant items are not equal the cond_false flag is + set to 1. + + RETURN VALUES + none +*/ + +void Item_equal::merge(Item_equal *item) +{ + fields.concat(&item->fields); + Item *c= item->const_item; + if (c) + { + /* + The flag cond_false will be set to 1 after this, if + the multiple equality already contains a constant and its + value is not equal to the value of c. + */ + add(c); + } + cond_false|= item->cond_false; +} + + +/* + Order field items in multiple equality according to a sorting criteria + + SYNOPSIS + sort() + cmp function to compare field item + arg context extra parameter for the cmp function + + DESCRIPTION + The function perform ordering of the field items in the Item_equal + object according to the criteria determined by the cmp callback parameter. + If cmp(item_field1,item_field2,arg)<0 than item_field1 must be + placed after item_fiel2. + + IMPLEMENTATION + The function sorts field items by the exchange sort algorithm. + The list of field items is looked through and whenever two neighboring + members follow in a wrong order they are swapped. This is performed + again and again until we get all members in a right order. 
+ + RETURN VALUES + None +*/ + +void Item_equal::sort(Item_field_cmpfunc cmp, void *arg) +{ + bool swap; + List_iterator<Item_field> it(fields); + do + { + Item_field *item1= it++; + Item_field **ref1= it.ref(); + Item_field *item2; + + swap= FALSE; + while ((item2= it++)) + { + Item_field **ref2= it.ref(); + if (cmp(item1, item2, arg) < 0) + { + Item_field *item= *ref1; + *ref1= *ref2; + *ref2= item; + swap= TRUE; + } + else + { + item1= item2; + ref1= ref2; + } + } + it.rewind(); + } while (swap); +} + + +/* + Check appearance of new constant items in the multiple equality object + + SYNOPSIS + update_const() + + DESCRIPTION + The function checks appearance of new constant items among + the members of multiple equalities. Each new constant item is + compared with the designated constant item if there is any in the + multiple equality. If there is none the first new constant item + becomes designated. + + RETURN VALUES + none +*/ + +void Item_equal::update_const() +{ + List_iterator<Item_field> it(fields); + Item *item; + while ((item= it++)) + { + if (item->const_item()) + { + it.remove(); + add(item); + } + } +} + +bool Item_equal::fix_fields(THD *thd, Item **ref) +{ + List_iterator_fast<Item_field> li(fields); + Item *item; + not_null_tables_cache= used_tables_cache= 0; + const_item_cache= 0; + while ((item= li++)) + { + table_map tmp_table_map; + used_tables_cache|= item->used_tables(); + tmp_table_map= item->not_null_tables(); + not_null_tables_cache|= tmp_table_map; + if (item->maybe_null) + maybe_null=1; + } + fix_length_and_dec(); + fixed= 1; + return 0; +} + +void Item_equal::update_used_tables() +{ + List_iterator_fast<Item_field> li(fields); + Item *item; + not_null_tables_cache= used_tables_cache= 0; + if ((const_item_cache= cond_false)) + return; + while ((item=li++)) + { + item->update_used_tables(); + used_tables_cache|= item->used_tables(); + const_item_cache&= item->const_item(); + } +} + +longlong Item_equal::val_int() +{ + Item_field 
*item_field; + if (cond_false) + return 0; + List_iterator_fast<Item_field> it(fields); + Item *item= const_item ? const_item : it++; + if ((null_value= item->null_value)) + return 0; + eval_item->store_value(item); + while ((item_field= it++)) + { + /* Skip fields of non-const tables. They haven't been read yet */ + if (item_field->field->table->const_table) + { + if ((null_value= item_field->null_value) || eval_item->cmp(item_field)) + return 0; + } + } + return 1; +} + +void Item_equal::fix_length_and_dec() +{ + Item *item= get_first(); + eval_item= cmp_item::get_comparator(item->result_type(), + item->collation.collation); +} + +bool Item_equal::walk(Item_processor processor, byte *arg) +{ + List_iterator_fast<Item_field> it(fields); + Item *item; + while ((item= it++)) + if (item->walk(processor, arg)) + return 1; + return Item_func::walk(processor, arg); +} + +Item *Item_equal::transform(Item_transformer transformer, byte *arg) +{ + DBUG_ASSERT(!current_thd->is_stmt_prepare()); + + List_iterator<Item_field> it(fields); + Item *item; + while ((item= it++)) + { + Item *new_item= item->transform(transformer, arg); + if (!new_item) + return 0; + + /* + THD::change_item_tree() should be called only if the tree was + really transformed, i.e. when a new item has been created. + Otherwise we'll be allocating a lot of unnecessary memory for + change records at each execution. 
+ */ + if (new_item != item) + current_thd->change_item_tree((Item **) it.ref(), new_item); + } + return Item_func::transform(transformer, arg); +} + +void Item_equal::print(String *str) +{ + str->append(func_name()); + str->append('('); + List_iterator_fast<Item_field> it(fields); + Item *item; + if (const_item) + const_item->print(str); + else + { + item= it++; + item->print(str); + } + while ((item= it++)) + { + str->append(','); + str->append(' '); + item->print(str); + } + str->append(')'); +} + diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index a13be83e093..f6c6f612c5b 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -27,6 +26,8 @@ class Arg_comparator; typedef int (Arg_comparator::*arg_cmp_func)(); +typedef int (*Item_field_cmpfunc)(Item_field *f1, Item_field *f2, void *arg); + class Arg_comparator: public Sql_alloc { Item **a, **b; @@ -67,6 +68,7 @@ public: int compare_string(); // compare args[0] & args[1] int compare_binary_string(); // compare args[0] & args[1] int compare_real(); // compare args[0] & args[1] + int compare_decimal(); // compare args[0] & args[1] int compare_int_signed(); // compare args[0] & args[1] int compare_int_signed_unsigned(); int compare_int_unsigned_signed(); @@ -75,13 +77,14 @@ public: int compare_e_string(); // compare args[0] & args[1] int compare_e_binary_string(); // compare args[0] & args[1] int compare_e_real(); // compare args[0] & args[1] + int compare_e_decimal(); // compare args[0] & args[1] int compare_e_int(); // compare args[0] & args[1] int 
compare_e_int_diff_signedness(); int compare_e_row(); // compare args[0] & args[1] int compare_real_fixed(); int compare_e_real_fixed(); - static arg_cmp_func comparator_matrix [4][2]; + static arg_cmp_func comparator_matrix [5][2]; friend class Item_func; }; @@ -95,28 +98,134 @@ public: Item_bool_func(THD *thd, Item_bool_func *item) :Item_int_func(thd, item) {} bool is_bool_func() { return 1; } void fix_length_and_dec() { decimals=0; max_length=1; } + uint decimal_precision() const { return 1; } }; + +/** + Abstract Item class, to represent <code>X IS [NOT] (TRUE | FALSE)</code> + boolean predicates. +*/ + +class Item_func_truth : public Item_bool_func +{ +public: + virtual bool val_bool(); + virtual longlong val_int(); + virtual void fix_length_and_dec(); + virtual void print(String *str); + +protected: + Item_func_truth(Item *a, bool a_value, bool a_affirmative) + : Item_bool_func(a), value(a_value), affirmative(a_affirmative) + {} + + ~Item_func_truth() + {} +private: + /** + True for <code>X IS [NOT] TRUE</code>, + false for <code>X IS [NOT] FALSE</code> predicates. + */ + const bool value; + /** + True for <code>X IS Y</code>, false for <code>X IS NOT Y</code> predicates. + */ + const bool affirmative; +}; + + +/** + This Item represents a <code>X IS TRUE</code> boolean predicate. +*/ + +class Item_func_istrue : public Item_func_truth +{ +public: + Item_func_istrue(Item *a) : Item_func_truth(a, true, true) {} + ~Item_func_istrue() {} + virtual const char* func_name() const { return "istrue"; } +}; + + +/** + This Item represents a <code>X IS NOT TRUE</code> boolean predicate. +*/ + +class Item_func_isnottrue : public Item_func_truth +{ +public: + Item_func_isnottrue(Item *a) : Item_func_truth(a, true, false) {} + ~Item_func_isnottrue() {} + virtual const char* func_name() const { return "isnottrue"; } +}; + + +/** + This Item represents a <code>X IS FALSE</code> boolean predicate. 
+*/ + +class Item_func_isfalse : public Item_func_truth +{ +public: + Item_func_isfalse(Item *a) : Item_func_truth(a, false, true) {} + ~Item_func_isfalse() {} + virtual const char* func_name() const { return "isfalse"; } +}; + + +/** + This Item represents a <code>X IS NOT FALSE</code> boolean predicate. +*/ + +class Item_func_isnotfalse : public Item_func_truth +{ +public: + Item_func_isnotfalse(Item *a) : Item_func_truth(a, false, false) {} + ~Item_func_isnotfalse() {} + virtual const char* func_name() const { return "isnotfalse"; } +}; + + class Item_cache; +#define UNKNOWN ((my_bool)-1) + + +/* + Item_in_optimizer(left_expr, Item_in_subselect(...)) + + Item_in_optimizer is used to wrap an instance of Item_in_subselect. This + class does the following: + - Evaluate the left expression and store it in Item_cache_* object (to + avoid re-evaluating it many times during subquery execution) + - Shortcut the evaluation of "NULL IN (...)" to NULL in the cases where we + don't care if the result is NULL or FALSE. + + NOTE + It is not quite clear why the above listed functionality should be + placed into a separate class called 'Item_in_optimizer'. 
+*/ + class Item_in_optimizer: public Item_bool_func { protected: Item_cache *cache; bool save_cache; + /* + Stores the value of "NULL IN (SELECT ...)" for uncorrelated subqueries: + UNKNOWN - "NULL in (SELECT ...)" has not yet been evaluated + FALSE - result is FALSE + TRUE - result is NULL + */ + my_bool result_for_null_param; public: Item_in_optimizer(Item *a, Item_in_subselect *b): - Item_bool_func(a, my_reinterpret_cast(Item *)(b)), cache(0), save_cache(0) + Item_bool_func(a, my_reinterpret_cast(Item *)(b)), cache(0), + save_cache(0), result_for_null_param(UNKNOWN) {} - bool fix_fields(THD *, struct st_table_list *, Item **); - bool fix_left(THD *thd, struct st_table_list *tables, Item **ref); + bool fix_fields(THD *, Item **); + bool fix_left(THD *thd, Item **ref); bool is_null(); - /* - Item_in_optimizer item is special boolean function. On value request - (one of val, val_int or val_str methods) it evaluate left expression - of IN by storing it value in cache item (one of Item_cache* items), - then it test cache is it NULL. If left expression (cache) is NULL then - Item_in_optimizer return NULL, else it evaluate Item_in_subselect. 
- */ longlong val_int(); void cleanup(); const char *func_name() const { return "<in_optimizer>"; } @@ -127,8 +236,8 @@ public: class Comp_creator { public: - Comp_creator() {} /* Remove gcc warning */ - virtual ~Comp_creator() {} /* Remove gcc warning */ + Comp_creator() {} /* Remove gcc warning */ + virtual ~Comp_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const = 0; virtual const char* symbol(bool invert) const = 0; virtual bool eqne_op() const = 0; @@ -138,8 +247,8 @@ public: class Eq_creator :public Comp_creator { public: - Eq_creator() {} /* Remove gcc warning */ - virtual ~Eq_creator() {} /* Remove gcc warning */ + Eq_creator() {} /* Remove gcc warning */ + virtual ~Eq_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? "<>" : "="; } virtual bool eqne_op() const { return 1; } @@ -149,8 +258,8 @@ public: class Ne_creator :public Comp_creator { public: - Ne_creator() {} /* Remove gcc warning */ - virtual ~Ne_creator() {} /* Remove gcc warning */ + Ne_creator() {} /* Remove gcc warning */ + virtual ~Ne_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? "=" : "<>"; } virtual bool eqne_op() const { return 1; } @@ -160,8 +269,8 @@ public: class Gt_creator :public Comp_creator { public: - Gt_creator() {} /* Remove gcc warning */ - virtual ~Gt_creator() {} /* Remove gcc warning */ + Gt_creator() {} /* Remove gcc warning */ + virtual ~Gt_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? 
"<=" : ">"; } virtual bool eqne_op() const { return 0; } @@ -171,8 +280,8 @@ public: class Lt_creator :public Comp_creator { public: - Lt_creator() {} /* Remove gcc warning */ - virtual ~Lt_creator() {} /* Remove gcc warning */ + Lt_creator() {} /* Remove gcc warning */ + virtual ~Lt_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? ">=" : "<"; } virtual bool eqne_op() const { return 0; } @@ -182,8 +291,8 @@ public: class Ge_creator :public Comp_creator { public: - Ge_creator() {} /* Remove gcc warning */ - virtual ~Ge_creator() {} /* Remove gcc warning */ + Ge_creator() {} /* Remove gcc warning */ + virtual ~Ge_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? "<" : ">="; } virtual bool eqne_op() const { return 0; } @@ -193,8 +302,8 @@ public: class Le_creator :public Comp_creator { public: - Le_creator() {} /* Remove gcc warning */ - virtual ~Le_creator() {} /* Remove gcc warning */ + Le_creator() {} /* Remove gcc warning */ + virtual ~Le_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? 
">" : "<="; } virtual bool eqne_op() const { return 0; } @@ -206,10 +315,11 @@ class Item_bool_func2 :public Item_int_func protected: Arg_comparator cmp; String tmp_value1,tmp_value2; + bool abort_on_null; public: Item_bool_func2(Item *a,Item *b) - :Item_int_func(a,b), cmp(tmp_arg, tmp_arg+1) {} + :Item_int_func(a,b), cmp(tmp_arg, tmp_arg+1), abort_on_null(FALSE) {} void fix_length_and_dec(); void set_cmp_func() { @@ -222,6 +332,8 @@ public: bool is_null() { return test(args[0]->is_null() || args[1]->is_null()); } bool is_bool_func() { return 1; } CHARSET_INFO *compare_collation() { return cmp.cmp_collation.collation; } + uint decimal_precision() const { return 1; } + void top_level_item() { abort_on_null=1; } friend class Arg_comparator; }; @@ -235,6 +347,7 @@ public: } Item *neg_transformer(THD *thd); virtual Item *negated_item(); + bool subst_argument_checker(byte **arg) { return TRUE; } }; class Item_func_not :public Item_bool_func @@ -245,12 +358,55 @@ public: enum Functype functype() const { return NOT_FUNC; } const char *func_name() const { return "not"; } Item *neg_transformer(THD *thd); + void print(String *str); }; class Item_maxmin_subselect; + +/* + trigcond<param>(arg) ::= param? arg : TRUE + + The class Item_func_trig_cond is used for guarded predicates + which are employed only for internal purposes. + A guarded predicate is an object consisting of an a regular or + a guarded predicate P and a pointer to a boolean guard variable g. + A guarded predicate P/g is evaluated to true if the value of the + guard g is false, otherwise it is evaluated to the same value that + the predicate P: val(P/g)= g ? val(P):true. + Guarded predicates allow us to include predicates into a conjunction + conditionally. Currently they are utilized for pushed down predicates + in queries with outer join operations. 
+ + In the future, probably, it makes sense to extend this class to + the objects consisting of three elements: a predicate P, a pointer + to a variable g and a firing value s with following evaluation + rule: val(P/g,s)= g==s? val(P) : true. It will allow us to build only + one item for the objects of the form P/g1/g2... + + Objects of this class are built only for query execution after + the execution plan has been already selected. That's why this + class needs only val_int out of generic methods. + + Current uses of Item_func_trig_cond objects: + - To wrap selection conditions when executing outer joins + - To wrap condition that is pushed down into subquery +*/ + +class Item_func_trig_cond: public Item_bool_func +{ + bool *trig_var; +public: + Item_func_trig_cond(Item *a, bool *f) : Item_bool_func(a) { trig_var= f; } + longlong val_int() { return *trig_var ? args[0]->val_int() : 1; } + enum Functype functype() const { return TRIG_COND_FUNC; }; + const char *func_name() const { return "trigcond"; }; + bool const_item() const { return FALSE; } + bool *get_trig_var() { return trig_var; } +}; + class Item_func_not_all :public Item_func_not { - /* allow to check presence od values in max/min optimisation */ + /* allow to check presence of values in max/min optimization */ Item_sum_hybrid *test_sum_item; Item_maxmin_subselect *test_sub_item; @@ -405,6 +561,7 @@ public: negated= !negated; return this; } + bool subst_argument_checker(byte **arg) { return TRUE; } }; @@ -420,10 +577,12 @@ public: optimize_type select_optimize() const { return OPTIMIZE_KEY; } enum Functype functype() const { return BETWEEN; } const char *func_name() const { return "between"; } - bool fix_fields(THD *, struct st_table_list *, Item **); + bool fix_fields(THD *, Item **); void fix_length_and_dec(); void print(String *str); + bool is_bool_func() { return 1; } CHARSET_INFO *compare_collation() { return cmp_collation.collation; } + uint decimal_precision() const { return 1; } }; @@ -434,13 
+593,22 @@ public: longlong val_int(); optimize_type select_optimize() const { return OPTIMIZE_NONE; } const char *func_name() const { return "strcmp"; } + void print(String *str) { Item_func::print(str); } }; +struct interval_range +{ + Item_result type; + double dbl; + my_decimal dec; +}; + class Item_func_interval :public Item_int_func { Item_row *row; - double *intervals; + my_bool use_decimal_comparison; + interval_range *intervals; public: Item_func_interval(Item_row *a) :Item_int_func(a),row(a),intervals(0) @@ -450,27 +618,44 @@ public: longlong val_int(); void fix_length_and_dec(); const char *func_name() const { return "interval"; } + uint decimal_precision() const { return 2; } }; -class Item_func_ifnull :public Item_func +class Item_func_coalesce :public Item_func_numhybrid { - enum Item_result cached_result_type; +protected: + Item_func_coalesce(Item *a, Item *b) :Item_func_numhybrid(a, b) {} +public: + Item_func_coalesce(List<Item> &list) :Item_func_numhybrid(list) {} + double real_op(); + longlong int_op(); + String *str_op(String *); + my_decimal *decimal_op(my_decimal *); + void fix_length_and_dec(); + void find_num_type() {} + enum Item_result result_type () const { return hybrid_type; } + const char *func_name() const { return "coalesce"; } + table_map not_null_tables() const { return 0; } +}; + + +class Item_func_ifnull :public Item_func_coalesce +{ +protected: enum_field_types cached_field_type; bool field_type_defined; public: - Item_func_ifnull(Item *a,Item *b) - :Item_func(a,b), cached_result_type(INT_RESULT) - {} - double val(); - longlong val_int(); - String *val_str(String *str); - enum Item_result result_type () const { return cached_result_type; } + Item_func_ifnull(Item *a, Item *b) :Item_func_coalesce(a,b) {} + double real_op(); + longlong int_op(); + String *str_op(String *str); + my_decimal *decimal_op(my_decimal *); enum_field_types field_type() const; void fix_length_and_dec(); const char *func_name() const { return "ifnull"; } 
Field *tmp_table_field(TABLE *table); - table_map not_null_tables() const { return 0; } + uint decimal_precision() const; }; @@ -481,12 +666,14 @@ public: Item_func_if(Item *a,Item *b,Item *c) :Item_func(a,b,c), cached_result_type(INT_RESULT) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *str); + my_decimal *val_decimal(my_decimal *); enum Item_result result_type () const { return cached_result_type; } - bool fix_fields(THD *, struct st_table_list *, Item **); + bool fix_fields(THD *, Item **); void fix_length_and_dec(); + uint decimal_precision() const; const char *func_name() const { return "if"; } }; @@ -498,11 +685,13 @@ public: Item_func_nullif(Item *a,Item *b) :Item_bool_func2(a,b), cached_result_type(INT_RESULT) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *str); + my_decimal *val_decimal(my_decimal *); enum Item_result result_type () const { return cached_result_type; } void fix_length_and_dec(); + uint decimal_precision() const { return args[0]->decimal_precision(); } const char *func_name() const { return "nullif"; } void print(String *str) { Item_func::print(str); } table_map not_null_tables() const { return 0; } @@ -510,23 +699,6 @@ public: }; -class Item_func_coalesce :public Item_func -{ - enum Item_result cached_result_type; -public: - Item_func_coalesce(List<Item> &list) - :Item_func(list),cached_result_type(INT_RESULT) - {} - double val(); - longlong val_int(); - String *val_str(String *); - void fix_length_and_dec(); - enum Item_result result_type () const { return cached_result_type; } - const char *func_name() const { return "coalesce"; } - table_map not_null_tables() const { return 0; } -}; - - class Item_func_case :public Item_func { int first_expr_num, else_expr_num; @@ -539,7 +711,7 @@ public: Item_func_case(List<Item> &list, Item *first_expr_arg, Item *else_expr_arg) :Item_func(), first_expr_num(-1), else_expr_num(-1), cached_result_type(INT_RESULT) - { + { ncases= 
list.elements; if (first_expr_arg) { @@ -553,10 +725,13 @@ public: } set_arguments(list); } - double val(); + double val_real(); longlong val_int(); String *val_str(String *); + my_decimal *val_decimal(my_decimal *); + bool fix_fields(THD *thd, Item **ref); void fix_length_and_dec(); + uint decimal_precision() const; table_map not_null_tables() const { return 0; } enum Item_result result_type () const { return cached_result_type; } const char *func_name() const { return "case"; } @@ -568,15 +743,17 @@ public: /* Functions to handle the optimized IN */ + +/* A vector of values of some type */ + class in_vector :public Sql_alloc { - protected: +public: char *base; uint size; qsort2_cmp compare; CHARSET_INFO *collation; uint count; -public: uint used_count; in_vector() {} in_vector(uint elements,uint element_length,qsort2_cmp cmp_func, @@ -592,26 +769,88 @@ public: qsort2(base,used_count,size,compare,collation); } int find(Item *item); + + /* + Create an instance of Item_{type} (e.g. Item_decimal) constant object + which type allows it to hold an element of this vector without any + conversions. + The purpose of this function is to be able to get elements of this + vector in form of Item_xxx constants without creating Item_xxx object + for every array element you get (i.e. this implements "FlyWeight" pattern) + */ + virtual Item* create_item() { return NULL; } + + /* + Store the value at position #pos into provided item object + SYNOPSIS + value_to_item() + pos Index of value to store + item Constant item to store value into. The item must be of the same + type that create_item() returns. 
+ */ + virtual void value_to_item(uint pos, Item *item) { } + + /* Compare values number pos1 and pos2 for equality */ + bool compare_elems(uint pos1, uint pos2) + { + return test(compare(collation, base + pos1*size, base + pos2*size)); + } }; class in_string :public in_vector { - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; String tmp; public: in_string(uint elements,qsort2_cmp cmp_func, CHARSET_INFO *cs); ~in_string(); void set(uint pos,Item *item); byte *get_value(Item *item); + Item* create_item() + { + return new Item_string(collation); + } + void value_to_item(uint pos, Item *item) + { + String *str=((String*) base)+pos; + Item_string *to= (Item_string*)item; + to->str_value= *str; + } }; class in_longlong :public in_vector { - longlong tmp; + /* + Here we declare a temporary variable (tmp) of the same type as the + elements of this vector. tmp is used in finding if a given value is in + the list. + */ + struct packed_longlong + { + longlong val; + longlong unsigned_flag; // Use longlong, not bool, to preserve alignment + } tmp; public: in_longlong(uint elements); void set(uint pos,Item *item); byte *get_value(Item *item); + + Item* create_item() + { + /* + We're created a signed INT, this may not be correct in + general case (see BUG#19342). 
+ */ + return new Item_int((longlong)0); + } + void value_to_item(uint pos, Item *item) + { + ((Item_int*) item)->value= ((packed_longlong*) base)[pos].val; + ((Item_int*) item)->unsigned_flag= (my_bool) + ((packed_longlong*) base)[pos].unsigned_flag; + } + + friend int cmp_longlong(void *cmp_arg, packed_longlong *a,packed_longlong *b); }; class in_double :public in_vector @@ -621,8 +860,37 @@ public: in_double(uint elements); void set(uint pos,Item *item); byte *get_value(Item *item); + Item *create_item() + { + return new Item_float(0.0); + } + void value_to_item(uint pos, Item *item) + { + ((Item_float*)item)->value= ((double*) base)[pos]; + } + +}; + +class in_decimal :public in_vector +{ + my_decimal val; +public: + in_decimal(uint elements); + void set(uint pos, Item *item); + byte *get_value(Item *item); + Item *create_item() + { + return new Item_decimal(0, FALSE); + } + void value_to_item(uint pos, Item *item) + { + my_decimal *dec= ((my_decimal *)base) + pos; + Item_decimal *item_dec= (Item_decimal*)item; + item_dec->set_decimal_value(dec); + } }; + /* ** Classes for easy comparing of non const items */ @@ -637,7 +905,7 @@ public: virtual int cmp(Item *item)= 0; // for optimized IN with row virtual int compare(cmp_item *item)= 0; - static cmp_item* get_comparator(Item *); + static cmp_item* get_comparator(Item_result type, CHARSET_INFO *cs); virtual cmp_item *make_same()= 0; virtual void store_value_by_template(cmp_item *tmpl, Item *item) { @@ -658,7 +926,7 @@ public: class cmp_item_sort_string :public cmp_item_string { protected: - char value_buff[80]; + char value_buff[STRING_BUFFER_USUAL_SIZE]; String value; public: cmp_item_sort_string(CHARSET_INFO *cs): @@ -670,16 +938,16 @@ public: } int cmp(Item *arg) { - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; String tmp(buff, sizeof(buff), cmp_charset), *res; - if (!(res= arg->val_str(&tmp))) - return 1; /* Can't be right */ - return sortcmp(value_res, res, cmp_charset); + res= arg->val_str(&tmp); + 
return (value_res ? (res ? sortcmp(value_res, res, cmp_charset) : 1) : + (res ? -1 : 0)); } - int compare(cmp_item *c) + int compare(cmp_item *ci) { - cmp_item_string *cmp= (cmp_item_string *)c; - return sortcmp(value_res, cmp->value_res, cmp_charset); + cmp_item_string *l_cmp= (cmp_item_string *) ci; + return sortcmp(value_res, l_cmp->value_res, cmp_charset); } cmp_item *make_same(); }; @@ -688,6 +956,7 @@ class cmp_item_int :public cmp_item { longlong value; public: + cmp_item_int() {} /* Remove gcc warning */ void store_value(Item *item) { value= item->val_int(); @@ -696,10 +965,10 @@ public: { return value != arg->val_int(); } - int compare(cmp_item *c) + int compare(cmp_item *ci) { - cmp_item_int *cmp= (cmp_item_int *)c; - return (value < cmp->value) ? -1 : ((value == cmp->value) ? 0 : 1); + cmp_item_int *l_cmp= (cmp_item_int *)ci; + return (value < l_cmp->value) ? -1 : ((value == l_cmp->value) ? 0 : 1); } cmp_item *make_same(); }; @@ -708,22 +977,36 @@ class cmp_item_real :public cmp_item { double value; public: + cmp_item_real() {} /* Remove gcc warning */ void store_value(Item *item) { - value= item->val(); + value= item->val_real(); } int cmp(Item *arg) { - return value != arg->val(); + return value != arg->val_real(); } - int compare(cmp_item *c) + int compare(cmp_item *ci) { - cmp_item_real *cmp= (cmp_item_real *)c; - return (value < cmp->value)? -1 : ((value == cmp->value) ? 0 : 1); + cmp_item_real *l_cmp= (cmp_item_real *) ci; + return (value < l_cmp->value)? -1 : ((value == l_cmp->value) ? 
0 : 1); } cmp_item *make_same(); }; + +class cmp_item_decimal :public cmp_item +{ + my_decimal value; +public: + cmp_item_decimal() {} /* Remove gcc warning */ + void store_value(Item *item); + int cmp(Item *arg); + int compare(cmp_item *c); + cmp_item *make_same(); +}; + + class cmp_item_row :public cmp_item { cmp_item **comparators; @@ -771,10 +1054,10 @@ public: DBUG_ASSERT(0); return 1; } - int compare(cmp_item *c) + int compare(cmp_item *ci) { - cmp_item_string *cmp= (cmp_item_string *)c; - return sortcmp(value_res, cmp->value_res, cmp_charset); + cmp_item_string *l_cmp= (cmp_item_string *) ci; + return sortcmp(value_res, l_cmp->value_res, cmp_charset); } cmp_item *make_same() { @@ -784,20 +1067,26 @@ public: class Item_func_in :public Item_func_opt_neg { +public: Item_result cmp_type; + /* + an array of values when the right hand arguments of IN + are all SQL constant and there are no nulls + */ in_vector *array; cmp_item *in_item; bool have_null; DTCollation cmp_collation; - public: + Item_func_in(List<Item> &list) :Item_func_opt_neg(list), array(0), in_item(0), have_null(0) { allowed_arg_cols= 0; // Fetch this value from first argument } longlong val_int(); - bool fix_fields(THD *, struct st_table_list *, Item **); + bool fix_fields(THD *, Item **); void fix_length_and_dec(); + uint decimal_precision() const { return 1; } void cleanup() { DBUG_ENTER("Item_func_in::cleanup"); @@ -809,7 +1098,7 @@ class Item_func_in :public Item_func_opt_neg DBUG_VOID_RETURN; } optimize_type select_optimize() const - { return array ? 
OPTIMIZE_KEY : OPTIMIZE_NONE; } + { return OPTIMIZE_KEY; } void print(String *str); enum Functype functype() const { return IN_FUNC; } const char *func_name() const { return " IN "; } @@ -847,7 +1136,7 @@ public: { args[0]->update_used_tables(); if ((const_item_cache= !(used_tables_cache= args[0]->used_tables())) && - !with_subselect) + !with_subselect) { /* Remember if the value is always NULL or never NULL */ cached_value= (longlong) args[0]->is_null(); @@ -863,6 +1152,11 @@ public: /* Functions used by HAVING for rewriting IN subquery */ class Item_in_subselect; + +/* + This is like IS NOT NULL but it also remembers if it ever has + encountered a NULL. +*/ class Item_is_not_null_test :public Item_func_isnull { Item_in_subselect* owner; @@ -884,8 +1178,9 @@ public: class Item_func_isnotnull :public Item_bool_func { + bool abort_on_null; public: - Item_func_isnotnull(Item *a) :Item_bool_func(a) {} + Item_func_isnotnull(Item *a) :Item_bool_func(a), abort_on_null(0) {} longlong val_int(); enum Functype functype() const { return ISNOTNULL_FUNC; } void fix_length_and_dec() @@ -894,10 +1189,12 @@ public: } const char *func_name() const { return "isnotnull"; } optimize_type select_optimize() const { return OPTIMIZE_NULL; } - table_map not_null_tables() const { return 0; } + table_map not_null_tables() const + { return abort_on_null ? 
not_null_tables_cache : 0; } Item *neg_transformer(THD *thd); void print(String *str); CHARSET_INFO *compare_collation() { return args[0]->collation.collation; } + void top_level_item() { abort_on_null=1; } }; @@ -919,19 +1216,22 @@ class Item_func_like :public Item_bool_func2 enum { alphabet_size = 256 }; Item *escape_item; + + bool escape_used_in_parsing; public: int escape; - Item_func_like(Item *a,Item *b, Item *escape_arg) + Item_func_like(Item *a,Item *b, Item *escape_arg, bool escape_used) :Item_bool_func2(a,b), canDoTurboBM(FALSE), pattern(0), pattern_len(0), - bmGs(0), bmBc(0), escape_item(escape_arg) {} + bmGs(0), bmBc(0), escape_item(escape_arg), + escape_used_in_parsing(escape_used) {} longlong val_int(); enum Functype functype() const { return LIKE_FUNC; } optimize_type select_optimize() const; cond_result eq_cmp_result() const { return COND_TRUE; } const char *func_name() const { return "like"; } - bool fix_fields(THD *thd, struct st_table_list *tlist, Item **ref); + bool fix_fields(THD *thd, Item **ref); void cleanup(); }; @@ -951,7 +1251,7 @@ public: regex_compiled(0),regex_is_const(0) {} void cleanup(); longlong val_int(); - bool fix_fields(THD *thd, struct st_table_list *tlist, Item **ref); + bool fix_fields(THD *thd, Item **ref); const char *func_name() const { return "regexp"; } void print(String *str) { print_op(str); } CHARSET_INFO *compare_collation() { return cmp_collation.collation; } @@ -984,7 +1284,7 @@ public: /* Item_cond() is only used to create top level items */ Item_cond(): Item_bool_func(), abort_on_null(1) { const_item_cache=0; } - Item_cond(Item *i1,Item *i2) + Item_cond(Item *i1,Item *i2) :Item_bool_func(), abort_on_null(0) { list.push_back(i1); @@ -994,7 +1294,8 @@ public: Item_cond(List<Item> &nlist) :Item_bool_func(), list(nlist), abort_on_null(0) {} bool add(Item *item) { return list.push_back(item); } - bool fix_fields(THD *, struct st_table_list *, Item **ref); + void add_at_head(List<Item> *nlist) { list.prepand(nlist); } 
+ bool fix_fields(THD *, Item **ref); enum Type type() const { return COND_ITEM; } List<Item>* argument_list() { return &list; } @@ -1002,21 +1303,175 @@ public: void update_used_tables(); void print(String *str); void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields); - friend int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds); + friend int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, + COND **conds); void top_level_item() { abort_on_null=1; } void copy_andor_arguments(THD *thd, Item_cond *item); bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); + void traverse_cond(Cond_traverser, void *arg, traverse_order order); void neg_arguments(THD *thd); + bool subst_argument_checker(byte **arg) { return TRUE; } + Item *compile(Item_analyzer analyzer, byte **arg_p, + Item_transformer transformer, byte *arg_t); }; +/* + The class Item_equal is used to represent conjunctions of equality + predicates of the form field1 = field2, and field=const in where + conditions and on expressions. + + All equality predicates of the form field1=field2 contained in a + conjunction are substituted for a sequence of items of this class. + An item of this class Item_equal(f1,f2,...fk) represents a + multiple equality f1=f2=...=fk. + + If a conjunction contains predicates f1=f2 and f2=f3, a new item of + this class is created Item_equal(f1,f2,f3) representing the multiple + equality f1=f2=f3 that substitutes the above equality predicates in + the conjunction. + A conjunction of the predicates f2=f1 and f3=f1 and f3=f2 will be + substituted for the item representing the same multiple equality + f1=f2=f3. + An item Item_equal(f1,f2) can appear instead of a conjunction of + f2=f1 and f1=f2, or instead of just the predicate f1=f2. + + An item of the class Item_equal inherits equalities from outer + conjunctive levels. 
+ + Suppose we have a where condition of the following form: + WHERE f1=f2 AND f3=f4 AND f3=f5 AND ... AND (...OR (f1=f3 AND ...)). + In this case: + f1=f2 will be substituted for Item_equal(f1,f2); + f3=f4 and f3=f5 will be substituted for Item_equal(f3,f4,f5); + f1=f3 will be substituted for Item_equal(f1,f2,f3,f4,f5); + + An object of the class Item_equal can contain an optional constant + item c. Then it represents a multiple equality of the form + c=f1=...=fk. + + Objects of the class Item_equal are used for the following: + + 1. An object Item_equal(t1.f1,...,tk.fk) allows us to consider any + pair of tables ti and tj as joined by an equi-condition. + Thus it provide us with additional access paths from table to table. + + 2. An object Item_equal(t1.f1,...,tk.fk) is applied to deduce new + SARGable predicates: + f1=...=fk AND P(fi) => f1=...=fk AND P(fi) AND P(fj). + It also can give us additional index scans and can allow us to + improve selectivity estimates. + + 3. An object Item_equal(t1.f1,...,tk.fk) is used to optimize the + selected execution plan for the query: if table ti is accessed + before the table tj then in any predicate P in the where condition + the occurrence of tj.fj is substituted for ti.fi. This can allow + an evaluation of the predicate at an earlier step. + + When feature 1 is supported they say that join transitive closure + is employed. + When feature 2 is supported they say that search argument transitive + closure is employed. + Both features are usually supported by preprocessing original query and + adding additional predicates. + We do not just add predicates, we rather dynamically replace some + predicates that can not be used to access tables in the investigated + plan for those, obtained by substitution of some fields for equal fields, + that can be used. + + Prepared Statements/Stored Procedures note: instances of class + Item_equal are created only at the time a PS/SP is executed and + are deleted in the end of execution. 
All changes made to these + objects need not be registered in the list of changes of the parse + tree and do not harm PS/SP re-execution. + + Item equal objects are employed only at the optimize phase. Usually they are + not supposed to be evaluated. Yet in some cases we call the method val_int() + for them. We have to take care of restricting the predicate such an + object represents f1=f2= ...=fn to the projection of known fields fi1=...=fik. +*/ + +class Item_equal: public Item_bool_func +{ + List<Item_field> fields; /* list of equal field items */ + Item *const_item; /* optional constant item equal to fields items */ + cmp_item *eval_item; + bool cond_false; +public: + inline Item_equal() + : Item_bool_func(), const_item(0), eval_item(0), cond_false(0) + { const_item_cache=0 ;} + Item_equal(Item_field *f1, Item_field *f2); + Item_equal(Item *c, Item_field *f); + Item_equal(Item_equal *item_equal); + inline Item* get_const() { return const_item; } + void add(Item *c); + void add(Item_field *f); + uint members(); + bool contains(Field *field); + Item_field* get_first() { return fields.head(); } + void merge(Item_equal *item); + void update_const(); + enum Functype functype() const { return MULT_EQUAL_FUNC; } + longlong val_int(); + const char *func_name() const { return "multiple equal"; } + optimize_type select_optimize() const { return OPTIMIZE_EQUAL; } + void sort(Item_field_cmpfunc cmp, void *arg); + friend class Item_equal_iterator; + void fix_length_and_dec(); + bool fix_fields(THD *thd, Item **ref); + void update_used_tables(); + bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); + void print(String *str); + CHARSET_INFO *compare_collation() + { return fields.head()->collation.collation; } +}; + +class COND_EQUAL: public Sql_alloc +{ +public: + uint max_members; /* max number of members the current level + list and all lower level lists */ + COND_EQUAL *upper_levels; /* multiple equalities of upper 
and levels */ + List<Item_equal> current_level; /* list of multiple equalities of + the current and level */ + COND_EQUAL() + { + max_members= 0; + upper_levels= 0; + } +}; + + +class Item_equal_iterator : public List_iterator_fast<Item_field> +{ +public: + inline Item_equal_iterator(Item_equal &item_equal) + :List_iterator_fast<Item_field> (item_equal.fields) + {} + inline Item_field* operator++(int) + { + Item_field *item= (*(List_iterator_fast<Item_field> *) this)++; + return item; + } + inline void rewind(void) + { + List_iterator_fast<Item_field>::rewind(); + } +}; + class Item_cond_and :public Item_cond { public: + COND_EQUAL cond_equal; /* contains list of Item_equal objects for + the current and level and reference + to multiple equalities of upper and levels */ Item_cond_and() :Item_cond() {} Item_cond_and(Item *i1,Item *i2) :Item_cond(i1,i2) {} Item_cond_and(THD *thd, Item_cond_and *item) :Item_cond(thd, item) {} - Item_cond_and(List<Item> &list): Item_cond(list) {} + Item_cond_and(List<Item> &list_arg): Item_cond(list_arg) {} enum Functype functype() const { return COND_AND_FUNC; } longlong val_int(); const char *func_name() const { return "and"; } @@ -1038,7 +1493,7 @@ public: Item_cond_or() :Item_cond() {} Item_cond_or(Item *i1,Item *i2) :Item_cond(i1,i2) {} Item_cond_or(THD *thd, Item_cond_or *item) :Item_cond(thd, item) {} - Item_cond_or(List<Item> &list): Item_cond(list) {} + Item_cond_or(List<Item> &list_arg): Item_cond(list_arg) {} enum Functype functype() const { return COND_OR_FUNC; } longlong val_int(); const char *func_name() const { return "or"; } @@ -1055,7 +1510,7 @@ public: /* - XOR is Item_cond, not an Item_int_func bevause we could like to + XOR is Item_cond, not an Item_int_func because we could like to optimize (a XOR b) later on. 
It's low prio, though */ @@ -1073,7 +1528,7 @@ public: }; -/* Some usefull inline functions */ +/* Some useful inline functions */ inline Item *and_conds(Item *a, Item *b) { diff --git a/sql/item_create.cc b/sql/item_create.cc index 2b12a1310b9..c1a81da0285 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -18,10 +17,6 @@ #include "mysql_priv.h" -#ifndef M_PI -#define M_PI 3.14159265358979323846 -#endif - Item *create_func_abs(Item* a) { return new Item_func_abs(a); @@ -75,14 +70,9 @@ Item *create_func_ceiling(Item* a) Item *create_func_connection_id(void) { - THD *thd=current_thd; - thd->lex->safe_to_cache_query= 0; - return new Item_int(NullS,(longlong) - ((thd->slave_thread) ? 
- thd->variables.pseudo_thread_id : - thd->thread_id), - 10); -} + current_thd->lex->safe_to_cache_query= 0; + return new Item_func_connection_id(); +} Item *create_func_conv(Item* a, Item *b, Item *c) { @@ -112,7 +102,7 @@ Item *create_func_dayofmonth(Item* a) Item *create_func_dayofweek(Item* a) { - return new Item_func_weekday(new Item_func_to_days(a),1); + return new Item_func_weekday(a, 1); } Item *create_func_dayofyear(Item* a) @@ -122,7 +112,7 @@ Item *create_func_dayofyear(Item* a) Item *create_func_dayname(Item* a) { - return new Item_func_dayname(new Item_func_to_days(a)); + return new Item_func_dayname(a); } Item *create_func_degrees(Item *a) @@ -264,6 +254,11 @@ Item *create_func_mod(Item* a, Item *b) return new Item_func_mod(a,b); } +Item *create_func_name_const(Item *a, Item *b) +{ + return new Item_name_const(a,b); +} + Item *create_func_monthname(Item* a) { return new Item_func_monthname(a); @@ -292,7 +287,7 @@ Item *create_func_period_diff(Item* a, Item *b) Item *create_func_pi(void) { - return new Item_real("pi()",M_PI,6,8); + return new Item_static_float_func("pi()", M_PI, 6, 8); } Item *create_func_pow(Item* a, Item *b) @@ -300,24 +295,6 @@ Item *create_func_pow(Item* a, Item *b) return new Item_func_pow(a,b); } -Item *create_func_current_user() -{ - THD *thd=current_thd; - char buff[HOSTNAME_LENGTH+USERNAME_LENGTH+2]; - uint length; - - thd->lex->safe_to_cache_query= 0; - length= (uint) (strxmov(buff, thd->priv_user, "@", thd->priv_host, NullS) - - buff); - return new Item_string(NullS, thd->memdup(buff, length), length, - system_charset_info); -} - -Item *create_func_quarter(Item* a) -{ - return new Item_func_quarter(a); -} - Item *create_func_radians(Item *a) { return new Item_func_units((char*) "radians",a,M_PI/180,0.0); @@ -369,6 +346,12 @@ Item *create_func_sha(Item* a) return new Item_func_sha(a); } +Item *create_func_sleep(Item* a) +{ + current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + return new Item_func_sleep(a); +} + Item 
*create_func_space(Item *a) { CHARSET_INFO *cs= current_thd->variables.collation_connection; @@ -440,14 +423,14 @@ Item *create_func_uuid(void) Item *create_func_version(void) { - return new Item_string(NullS,server_version, + return new Item_static_string_func("version()", server_version, (uint) strlen(server_version), system_charset_info, DERIVATION_SYSCONST); } Item *create_func_weekday(Item* a) { - return new Item_func_weekday(new Item_func_to_days(a),0); + return new Item_func_weekday(a, 0); } Item *create_func_year(Item* a) @@ -462,10 +445,11 @@ Item *create_load_file(Item* a) } -Item *create_func_cast(Item *a, Cast_target cast_type, int len, +Item *create_func_cast(Item *a, Cast_target cast_type, int len, int dec, CHARSET_INFO *cs) { Item *res; + int tmp_len; LINT_INIT(res); switch (cast_type) { @@ -475,6 +459,15 @@ Item *create_func_cast(Item *a, Cast_target cast_type, int len, case ITEM_CAST_DATE: res= new Item_date_typecast(a); break; case ITEM_CAST_TIME: res= new Item_time_typecast(a); break; case ITEM_CAST_DATETIME: res= new Item_datetime_typecast(a); break; + case ITEM_CAST_DECIMAL: + tmp_len= (len>0) ? len : 10; + if (tmp_len < dec) + { + my_error(ER_M_BIGGER_THAN_D, MYF(0), ""); + return 0; + } + res= new Item_decimal_typecast(a, tmp_len, dec ? dec : 2); + break; case ITEM_CAST_CHAR: res= new Item_char_typecast(a, len, cs ? cs : current_thd->variables.collation_connection); diff --git a/sql/item_create.h b/sql/item_create.h index faff6f45220..2ff849263c6 100644 --- a/sql/item_create.h +++ b/sql/item_create.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -28,7 +27,8 @@ Item *create_func_bit_length(Item* a); Item *create_func_coercibility(Item* a); Item *create_func_ceiling(Item* a); Item *create_func_char_length(Item* a); -Item *create_func_cast(Item *a, Cast_target cast_type, int len, CHARSET_INFO *cs); +Item *create_func_cast(Item *a, Cast_target cast_type, int len, int dec, + CHARSET_INFO *cs); Item *create_func_connection_id(void); Item *create_func_conv(Item* a, Item *b, Item *c); Item *create_func_cos(Item* a); @@ -64,6 +64,7 @@ Item *create_func_ltrim(Item* a); Item *create_func_md5(Item* a); Item *create_func_mod(Item* a, Item *b); Item *create_func_monthname(Item* a); +Item *create_func_name_const(Item *a, Item *b); Item *create_func_nullif(Item* a, Item *b); Item *create_func_oct(Item *); Item *create_func_ord(Item* a); @@ -71,8 +72,6 @@ Item *create_func_period_add(Item* a, Item *b); Item *create_func_period_diff(Item* a, Item *b); Item *create_func_pi(void); Item *create_func_pow(Item* a, Item *b); -Item *create_func_current_user(void); -Item *create_func_quarter(Item* a); Item *create_func_radians(Item *a); Item *create_func_release_lock(Item* a); Item *create_func_repeat(Item* a, Item *b); @@ -83,6 +82,7 @@ Item *create_func_sec_to_time(Item* a); Item *create_func_sign(Item* a); Item *create_func_sin(Item* a); Item *create_func_sha(Item* a); +Item *create_func_sleep(Item* a); Item *create_func_soundex(Item* a); Item *create_func_space(Item *); Item *create_func_sqrt(Item* a); diff --git a/sql/item_func.cc b/sql/item_func.cc index 88c3dfcdfc0..3d92be5e9d2 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
+ the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -28,6 +27,14 @@ #include <time.h> #include <ft_global.h> +#include "sp_head.h" +#include "sp_rcontext.h" +#include "sp.h" + +#ifdef NO_EMBEDDED_ACCESS_CHECKS +#define sp_restore_security_context(A,B) while (0) {} +#endif + bool check_reserved_words(LEX_STRING *name) { @@ -52,15 +59,16 @@ void Item_func::set_arguments(List<Item> &list) { allowed_arg_cols= 1; arg_count=list.elements; - if ((args=(Item**) sql_alloc(sizeof(Item*)*arg_count))) + args= tmp_arg; // If 2 arguments + if (arg_count <= 2 || (args=(Item**) sql_alloc(sizeof(Item*)*arg_count))) { - uint i=0; List_iterator_fast<Item> li(list); Item *item; + Item **save_args= args; while ((item=li++)) { - args[i++]= item; + *(save_args++)= item; with_sum_func|=item->with_sum_func; } } @@ -101,7 +109,6 @@ Item_func::Item_func(THD *thd, Item_func *item) SYNOPSIS: fix_fields() thd Thread object - tables List of all open tables involved in the query ref Pointer to where this object is used. This reference is used if we want to replace this object with another one (for example in the summary functions). @@ -125,12 +132,12 @@ Item_func::Item_func(THD *thd, Item_func *item) item. RETURN VALUES - 0 ok - 1 Got error. Stored with my_error(). + FALSE ok + TRUE Got error. Stored with my_error(). */ bool -Item_func::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +Item_func::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); Item **arg,**arg_end; @@ -141,8 +148,8 @@ Item_func::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) used_tables_cache= not_null_tables_cache= 0; const_item_cache=1; - if (check_stack_overrun(thd, buff)) - return 1; // Fatal error if flag is set! + if (check_stack_overrun(thd, STACK_MIN_SIZE, buff)) + return TRUE; // Fatal error if flag is set! 
if (arg_count) { // Print purify happy for (arg=args, arg_end=args+arg_count; arg != arg_end ; arg++) @@ -152,9 +159,8 @@ Item_func::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) We can't yet set item to *arg as fix_fields may change *arg We shouldn't call fix_fields() twice, so check 'fixed' field first */ - if ((!(*arg)->fixed && (*arg)->fix_fields(thd, tables, arg))) - return 1; /* purecov: inspected */ - + if ((!(*arg)->fixed && (*arg)->fix_fields(thd, arg))) + return TRUE; /* purecov: inspected */ item= *arg; if (allowed_arg_cols) @@ -181,10 +187,10 @@ Item_func::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) } } fix_length_and_dec(); - if (thd->net.last_errno) // An error inside fix_length_and_dec occured - return 1; + if (thd->net.report_error) // An error inside fix_length_and_dec occured + return TRUE; fixed= 1; - return 0; + return FALSE; } bool Item_func::walk (Item_processor processor, byte *argument) @@ -201,6 +207,128 @@ bool Item_func::walk (Item_processor processor, byte *argument) return (this->*processor)(argument); } +void Item_func::traverse_cond(Cond_traverser traverser, + void *argument, traverse_order order) +{ + if (arg_count) + { + Item **arg,**arg_end; + + switch (order) { + case(PREFIX): + (*traverser)(this, argument); + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) + { + (*arg)->traverse_cond(traverser, argument, order); + } + break; + case (POSTFIX): + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) + { + (*arg)->traverse_cond(traverser, argument, order); + } + (*traverser)(this, argument); + } + } +} + + +/* + Transform an Item_func object with a transformer callback function + + SYNOPSIS + transform() + transformer the transformer callback function to be applied to the nodes + of the tree of the object + argument parameter to be passed to the transformer + + DESCRIPTION + The function recursively applies the transform method to each + argument of the Item_func node. 
+ If the call of the method for an argument item returns a new item + the old item is substituted for a new one. + After this the transformer is applied to the root node + of the Item_func object. + + RETURN VALUES + Item returned as the result of transformation of the root node +*/ + +Item *Item_func::transform(Item_transformer transformer, byte *argument) +{ + DBUG_ASSERT(!current_thd->is_stmt_prepare()); + + if (arg_count) + { + Item **arg,**arg_end; + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) + { + Item *new_item= (*arg)->transform(transformer, argument); + if (!new_item) + return 0; + + /* + THD::change_item_tree() should be called only if the tree was + really transformed, i.e. when a new item has been created. + Otherwise we'll be allocating a lot of unnecessary memory for + change records at each execution. + */ + if (*arg != new_item) + current_thd->change_item_tree(arg, new_item); + } + } + return (this->*transformer)(argument); +} + + +/* + Compile Item_func object with a processor and a transformer callback functions + + SYNOPSIS + compile() + analyzer the analyzer callback function to be applied to the nodes + of the tree of the object + arg_p in/out parameter to be passed to the processor + transformer the transformer callback function to be applied to the nodes + of the tree of the object + arg_t parameter to be passed to the transformer + + DESCRIPTION + First the function applies the analyzer to the root node of + the Item_func object. Then if the analizer succeeeds (returns TRUE) + the function recursively applies the compile method to each argument + of the Item_func node. + If the call of the method for an argument item returns a new item + the old item is substituted for a new one. + After this the transformer is applied to the root node + of the Item_func object. 
+ + RETURN VALUES + Item returned as the result of transformation of the root node +*/ + +Item *Item_func::compile(Item_analyzer analyzer, byte **arg_p, + Item_transformer transformer, byte *arg_t) +{ + if (!(this->*analyzer)(arg_p)) + return 0; + if (arg_count) + { + Item **arg,**arg_end; + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) + { + /* + The same parameter value of arg_p must be passed + to analyze any argument of the condition formula. + */ + byte *arg_v= *arg_p; + Item *new_item= (*arg)->compile(analyzer, &arg_v, transformer, arg_t); + if (new_item && *arg != new_item) + current_thd->change_item_tree(arg, new_item); + } + } + return (this->*transformer)(arg_t); +} /* See comments in Item_cmp_func::split_sum_func() */ @@ -209,7 +337,7 @@ void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array, { Item **arg, **arg_end; for (arg= args, arg_end= args+arg_count; arg != arg_end ; arg++) - (*arg)->split_sum_func2(thd, ref_pointer_array, fields, arg); + (*arg)->split_sum_func2(thd, ref_pointer_array, fields, arg, TRUE); } @@ -281,8 +409,13 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const if (item->type() != FUNC_ITEM) return 0; Item_func *item_func=(Item_func*) item; - if (arg_count != item_func->arg_count || - func_name() != item_func->func_name()) + Item_func::Functype func_type; + if ((func_type= functype()) != item_func->functype() || + arg_count != item_func->arg_count || + (func_type != Item_func::FUNC_SP && + func_name() != item_func->func_name()) || + (func_type == Item_func::FUNC_SP && + my_strcasecmp(system_charset_info, func_name(), item_func->func_name()))) return 0; for (uint i=0; i < arg_count ; i++) if (!args[i]->eq(item_func->args[i], binary_cmp)) @@ -290,6 +423,7 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const return 1; } + Field *Item_func::tmp_table_field(TABLE *t_arg) { Field *res; @@ -297,7 +431,7 @@ Field *Item_func::tmp_table_field(TABLE *t_arg) switch (result_type()) { case 
INT_RESULT: - if (max_length > 11) + if (max_length > MY_INT32_NUM_DECIMAL_DIGITS) res= new Field_longlong(max_length, maybe_null, name, t_arg, unsigned_flag); else @@ -308,14 +442,17 @@ Field *Item_func::tmp_table_field(TABLE *t_arg) res= new Field_double(max_length, maybe_null, name, t_arg, decimals); break; case STRING_RESULT: - if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB) - res= new Field_blob(max_length, maybe_null, name, t_arg, collation.collation); - else - res= new Field_string(max_length, maybe_null, name, t_arg, collation.collation); + res= make_string_field(t_arg); + break; + case DECIMAL_RESULT: + res= new Field_new_decimal(my_decimal_precision_to_length(decimal_precision(), + decimals, + unsigned_flag), + maybe_null, name, t_arg, decimals, unsigned_flag); break; case ROW_RESULT: default: - // This case should never be choosen + // This case should never be chosen DBUG_ASSERT(0); break; } @@ -323,10 +460,24 @@ Field *Item_func::tmp_table_field(TABLE *t_arg) } +bool Item_func::is_expensive_processor(byte *arg) +{ + return is_expensive(); +} + + +my_decimal *Item_func::val_decimal(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed); + int2my_decimal(E_DEC_FATAL_ERROR, val_int(), unsigned_flag, decimal_value); + return decimal_value; +} + + String *Item_real_func::val_str(String *str) { DBUG_ASSERT(fixed == 1); - double nr=val(); + double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ str->set(nr,decimals, &my_charset_bin); @@ -334,27 +485,14 @@ String *Item_real_func::val_str(String *str) } -String *Item_num_func::val_str(String *str) +my_decimal *Item_real_func::val_decimal(my_decimal *decimal_value) { - DBUG_ASSERT(fixed == 1); - if (hybrid_type == INT_RESULT) - { - longlong nr=val_int(); - if (null_value) - return 0; /* purecov: inspected */ - if (!unsigned_flag) - str->set(nr,&my_charset_bin); - else - str->set((ulonglong) nr,&my_charset_bin); - } - else - { - double nr=val(); - if (null_value) - return 
0; /* purecov: inspected */ - str->set(nr,decimals,&my_charset_bin); - } - return str; + DBUG_ASSERT(fixed); + double nr= val_real(); + if (null_value) + return 0; /* purecov: inspected */ + double2my_decimal(E_DEC_FATAL_ERROR, nr, decimal_value); + return decimal_value; } @@ -375,9 +513,103 @@ void Item_func::fix_num_length_and_dec() } } + +void Item_func_numhybrid::fix_num_length_and_dec() +{} + + +/* + Set max_length/decimals of function if function is fixed point and + result length/precision depends on argument ones + + SYNOPSIS + Item_func::count_decimal_length() +*/ + +void Item_func::count_decimal_length() +{ + int max_int_part= 0; + decimals= 0; + unsigned_flag= 1; + for (uint i=0 ; i < arg_count ; i++) + { + set_if_bigger(decimals, args[i]->decimals); + set_if_bigger(max_int_part, args[i]->decimal_int_part()); + set_if_smaller(unsigned_flag, args[i]->unsigned_flag); + } + int precision= min(max_int_part + decimals, DECIMAL_MAX_PRECISION); + max_length= my_decimal_precision_to_length(precision, decimals, + unsigned_flag); +} + + +/* + Set max_length of if it is maximum length of its arguments + + SYNOPSIS + Item_func::count_only_length() +*/ + +void Item_func::count_only_length() +{ + max_length= 0; + unsigned_flag= 0; + for (uint i=0 ; i < arg_count ; i++) + { + set_if_bigger(max_length, args[i]->max_length); + set_if_bigger(unsigned_flag, args[i]->unsigned_flag); + } +} + + +/* + Set max_length/decimals of function if function is floating point and + result length/precision depends on argument ones + + SYNOPSIS + Item_func::count_real_length() +*/ + +void Item_func::count_real_length() +{ + uint32 length= 0; + decimals= 0; + max_length= 0; + for (uint i=0 ; i < arg_count ; i++) + { + if (decimals != NOT_FIXED_DEC) + { + set_if_bigger(decimals, args[i]->decimals); + set_if_bigger(length, (args[i]->max_length - args[i]->decimals)); + } + set_if_bigger(max_length, args[i]->max_length); + } + if (decimals != NOT_FIXED_DEC) + { + max_length= length; + 
length+= decimals; + if (length < max_length) // If previous operation gave overflow + max_length= UINT_MAX32; + else + max_length= length; + } +} + + + +void Item_func::signal_divide_by_null() +{ + THD *thd= current_thd; + if (thd->variables.sql_mode & MODE_ERROR_FOR_DIVISION_BY_ZERO) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DIVISION_BY_ZERO, + ER(ER_DIVISION_BY_ZERO)); + null_value= 1; +} + + Item *Item_func::get_tmp_table_item(THD *thd) { - if (!with_sum_func && !const_item()) + if (!with_sum_func && !const_item() && functype() != SUSERVAR_FUNC) return new Item_field(result_field); return copy_or_same(thd); } @@ -395,57 +627,285 @@ String *Item_int_func::val_str(String *str) return str; } + +void Item_func_connection_id::fix_length_and_dec() +{ + Item_int_func::fix_length_and_dec(); + max_length= 10; +} + + +bool Item_func_connection_id::fix_fields(THD *thd, Item **ref) +{ + if (Item_int_func::fix_fields(thd, ref)) + return TRUE; + + /* + To replicate CONNECTION_ID() properly we should use + pseudo_thread_id on slave, which contains the value of thread_id + on master. + */ + value= ((thd->slave_thread) ? + thd->variables.pseudo_thread_id : + thd->thread_id); + + return FALSE; +} + + /* - Change from REAL_RESULT (default) to INT_RESULT if both arguments are - integers + Check arguments here to determine result's type for a numeric + function of two arguments. 
+ + SYNOPSIS + Item_num_op::find_num_type() */ void Item_num_op::find_num_type(void) { - if (args[0]->result_type() == INT_RESULT && - args[1]->result_type() == INT_RESULT) + DBUG_ENTER("Item_num_op::find_num_type"); + DBUG_PRINT("info", ("name %s", func_name())); + DBUG_ASSERT(arg_count == 2); + Item_result r0= args[0]->result_type(); + Item_result r1= args[1]->result_type(); + + if (r0 == REAL_RESULT || r1 == REAL_RESULT || + r0 == STRING_RESULT || r1 ==STRING_RESULT) + { + count_real_length(); + max_length= float_length(decimals); + hybrid_type= REAL_RESULT; + } + else if (r0 == DECIMAL_RESULT || r1 == DECIMAL_RESULT) + { + hybrid_type= DECIMAL_RESULT; + result_precision(); + } + else { + DBUG_ASSERT(r0 == INT_RESULT && r1 == INT_RESULT); + decimals= 0; hybrid_type=INT_RESULT; - unsigned_flag=args[0]->unsigned_flag | args[1]->unsigned_flag; + result_precision(); } + DBUG_PRINT("info", ("Type: %s", + (hybrid_type == REAL_RESULT ? "REAL_RESULT" : + hybrid_type == DECIMAL_RESULT ? "DECIMAL_RESULT" : + hybrid_type == INT_RESULT ? "INT_RESULT" : + "--ILLEGAL!!!--"))); + DBUG_VOID_RETURN; } -String *Item_num_op::val_str(String *str) + +/* + Set result type for a numeric function of one argument + (can be also used by a numeric function of many arguments, if the result + type depends only on the first argument) + + SYNOPSIS + Item_func_num1::find_num_type() +*/ + +void Item_func_num1::find_num_type() +{ + DBUG_ENTER("Item_func_num1::find_num_type"); + DBUG_PRINT("info", ("name %s", func_name())); + switch (hybrid_type= args[0]->result_type()) { + case INT_RESULT: + unsigned_flag= args[0]->unsigned_flag; + break; + case STRING_RESULT: + case REAL_RESULT: + hybrid_type= REAL_RESULT; + max_length= float_length(decimals); + break; + case DECIMAL_RESULT: + break; + default: + DBUG_ASSERT(0); + } + DBUG_PRINT("info", ("Type: %s", + (hybrid_type == REAL_RESULT ? "REAL_RESULT" : + hybrid_type == DECIMAL_RESULT ? "DECIMAL_RESULT" : + hybrid_type == INT_RESULT ? 
"INT_RESULT" : + "--ILLEGAL!!!--"))); + DBUG_VOID_RETURN; +} + + +void Item_func_num1::fix_num_length_and_dec() +{ + decimals= args[0]->decimals; + max_length= args[0]->max_length; +} + + +void Item_func_numhybrid::fix_length_and_dec() +{ + fix_num_length_and_dec(); + find_num_type(); +} + + +String *Item_func_numhybrid::val_str(String *str) { DBUG_ASSERT(fixed == 1); - if (hybrid_type == INT_RESULT) + switch (hybrid_type) { + case DECIMAL_RESULT: { - longlong nr=val_int(); + my_decimal decimal_value, *val; + if (!(val= decimal_op(&decimal_value))) + return 0; // null is set + my_decimal_round(E_DEC_FATAL_ERROR, val, decimals, FALSE, val); + my_decimal2string(E_DEC_FATAL_ERROR, val, 0, 0, 0, str); + break; + } + case INT_RESULT: + { + longlong nr= int_op(); if (null_value) return 0; /* purecov: inspected */ if (!unsigned_flag) str->set(nr,&my_charset_bin); else str->set((ulonglong) nr,&my_charset_bin); + break; } - else + case REAL_RESULT: { - double nr=val(); + double nr= real_op(); if (null_value) return 0; /* purecov: inspected */ str->set(nr,decimals,&my_charset_bin); + break; + } + case STRING_RESULT: + return str_op(&str_value); + default: + DBUG_ASSERT(0); } return str; } +double Item_func_numhybrid::val_real() +{ + DBUG_ASSERT(fixed == 1); + switch (hybrid_type) { + case DECIMAL_RESULT: + { + my_decimal decimal_value, *val; + double result; + if (!(val= decimal_op(&decimal_value))) + return 0.0; // null is set + my_decimal2double(E_DEC_FATAL_ERROR, val, &result); + return result; + } + case INT_RESULT: + return (double)int_op(); + case REAL_RESULT: + return real_op(); + case STRING_RESULT: + { + char *end_not_used; + int err_not_used; + String *res= str_op(&str_value); + return (res ? 
my_strntod(res->charset(), (char*) res->ptr(), res->length(), + &end_not_used, &err_not_used) : 0.0); + } + default: + DBUG_ASSERT(0); + } + return 0.0; +} + + +longlong Item_func_numhybrid::val_int() +{ + DBUG_ASSERT(fixed == 1); + switch (hybrid_type) { + case DECIMAL_RESULT: + { + my_decimal decimal_value, *val; + if (!(val= decimal_op(&decimal_value))) + return 0; // null is set + longlong result; + my_decimal2int(E_DEC_FATAL_ERROR, val, unsigned_flag, &result); + return result; + } + case INT_RESULT: + return int_op(); + case REAL_RESULT: + return (longlong) rint(real_op()); + case STRING_RESULT: + { + int err_not_used; + String *res; + if (!(res= str_op(&str_value))) + return 0; + + char *end= (char*) res->ptr() + res->length(); + CHARSET_INFO *cs= str_value.charset(); + return (*(cs->cset->strtoll10))(cs, res->ptr(), &end, &err_not_used); + } + default: + DBUG_ASSERT(0); + } + return 0; +} + + +my_decimal *Item_func_numhybrid::val_decimal(my_decimal *decimal_value) +{ + my_decimal *val= decimal_value; + DBUG_ASSERT(fixed == 1); + switch (hybrid_type) { + case DECIMAL_RESULT: + val= decimal_op(decimal_value); + break; + case INT_RESULT: + { + longlong result= int_op(); + int2my_decimal(E_DEC_FATAL_ERROR, result, unsigned_flag, decimal_value); + break; + } + case REAL_RESULT: + { + double result= (double)real_op(); + double2my_decimal(E_DEC_FATAL_ERROR, result, decimal_value); + break; + } + case STRING_RESULT: + { + String *res; + if (!(res= str_op(&str_value))) + return NULL; + + str2my_decimal(E_DEC_FATAL_ERROR, (char*) res->ptr(), + res->length(), res->charset(), decimal_value); + break; + } + case ROW_RESULT: + default: + DBUG_ASSERT(0); + } + return val; +} + + void Item_func_signed::print(String *str) { - str->append("cast(", 5); + str->append(STRING_WITH_LEN("cast(")); args[0]->print(str); - str->append(" as signed)", 11); + str->append(STRING_WITH_LEN(" as signed)")); } longlong Item_func_signed::val_int_from_str(int *error) { - char 
buff[MAX_FIELD_WIDTH], *end; + char buff[MAX_FIELD_WIDTH], *end, *start; + uint32 length; String tmp(buff,sizeof(buff), &my_charset_bin), *res; longlong value; @@ -461,13 +921,21 @@ longlong Item_func_signed::val_int_from_str(int *error) return 0; } null_value= 0; - end= (char*) res->ptr()+ res->length(); - value= my_strtoll10(res->ptr(), &end, error); - if (*error > 0 || end != res->ptr()+ res->length()) + start= (char *)res->ptr(); + length= res->length(); + + end= start + length; + value= my_strtoll10(start, &end, error); + if (*error > 0 || end != start+ length) + { + char err_buff[128]; + String err_tmp(err_buff,(uint32) sizeof(err_buff), system_charset_info); + err_tmp.copy(start, length, system_charset_info); push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER", - res->c_ptr()); + err_tmp.c_ptr()); + } return value; } @@ -497,9 +965,9 @@ longlong Item_func_signed::val_int() void Item_func_unsigned::print(String *str) { - str->append("cast(", 5); + str->append(STRING_WITH_LEN("cast(")); args[0]->print(str); - str->append(" as unsigned)", 13); + str->append(STRING_WITH_LEN(" as unsigned)")); } @@ -509,27 +977,14 @@ longlong Item_func_unsigned::val_int() longlong value; int error; - if (args[0]->result_type() == REAL_RESULT) + if (args[0]->cast_to_int_type() == DECIMAL_RESULT) { - double dvalue= args[0]->val(); - if ((null_value= args[0]->null_value)) - return 0; - if (dvalue <= (double) LONGLONG_MIN) - { - return LONGLONG_MIN; - } - if (dvalue >= (double) (ulonglong) ULONGLONG_MAX) - { - return (longlong) ULONGLONG_MAX; - } - if (dvalue >= (double) (ulonglong) LONGLONG_MAX) - { - return (ulonglong) (dvalue + (dvalue > 0 ? 0.5 : -0.5)); - } - return (longlong) (dvalue + (dvalue > 0 ? 
0.5 : -0.5)); + my_decimal tmp, *dec= args[0]->val_decimal(&tmp); + if (!(null_value= args[0]->null_value)) + my_decimal2int(E_DEC_FATAL_ERROR, dec, 1, &value); + return value; } - - if (args[0]->cast_to_int_type() != STRING_RESULT) + else if (args[0]->cast_to_int_type() != STRING_RESULT) { value= args[0]->val_int(); null_value= args[0]->null_value; @@ -545,26 +1000,121 @@ longlong Item_func_unsigned::val_int() } -double Item_func_plus::val() +String *Item_decimal_typecast::val_str(String *str) { - DBUG_ASSERT(fixed == 1); - double value=args[0]->val()+args[1]->val(); + my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf); + if (null_value) + return NULL; + my_decimal2string(E_DEC_FATAL_ERROR, tmp, 0, 0, 0, str); + return str; +} + + +double Item_decimal_typecast::val_real() +{ + my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf); + double res; + if (null_value) + return 0.0; + my_decimal2double(E_DEC_FATAL_ERROR, tmp, &res); + return res; +} + + +longlong Item_decimal_typecast::val_int() +{ + my_decimal tmp_buf, *tmp= val_decimal(&tmp_buf); + longlong res; + if (null_value) + return 0; + my_decimal2int(E_DEC_FATAL_ERROR, tmp, unsigned_flag, &res); + return res; +} + + +my_decimal *Item_decimal_typecast::val_decimal(my_decimal *dec) +{ + my_decimal tmp_buf, *tmp= args[0]->val_decimal(&tmp_buf); + if ((null_value= args[0]->null_value)) + return NULL; + my_decimal_round(E_DEC_FATAL_ERROR, tmp, decimals, FALSE, dec); + return dec; +} + + +void Item_decimal_typecast::print(String *str) +{ + str->append(STRING_WITH_LEN("cast(")); + args[0]->print(str); + str->append(STRING_WITH_LEN(" as decimal)")); +} + + +double Item_func_plus::real_op() +{ + double value= args[0]->val_real() + args[1]->val_real(); if ((null_value=args[0]->null_value || args[1]->null_value)) return 0.0; return value; } -longlong Item_func_plus::val_int() + +longlong Item_func_plus::int_op() { - DBUG_ASSERT(fixed == 1); - if (hybrid_type == INT_RESULT) - { - longlong 
value=args[0]->val_int()+args[1]->val_int(); - if ((null_value=args[0]->null_value || args[1]->null_value)) - return 0; - return value; - } - return (longlong) Item_func_plus::val(); + longlong value=args[0]->val_int()+args[1]->val_int(); + if ((null_value=args[0]->null_value || args[1]->null_value)) + return 0; + return value; +} + + +/* + Calculate plus of two decimail's + + SYNOPSIS + decimal_op() + decimal_value Buffer that can be used to store result + + RETURN + 0 Value was NULL; In this case null_value is set + # Value of operation as a decimal +*/ + +my_decimal *Item_func_plus::decimal_op(my_decimal *decimal_value) +{ + my_decimal value1, *val1; + my_decimal value2, *val2; + val1= args[0]->val_decimal(&value1); + if ((null_value= args[0]->null_value)) + return 0; + val2= args[1]->val_decimal(&value2); + if (!(null_value= (args[1]->null_value || + (my_decimal_add(E_DEC_FATAL_ERROR, decimal_value, val1, + val2) > 3)))) + return decimal_value; + return 0; +} + +/* + Set precision of results for additive operations (+ and -) + + SYNOPSIS + Item_func_additive_op::result_precision() +*/ +void Item_func_additive_op::result_precision() +{ + decimals= max(args[0]->decimals, args[1]->decimals); + int max_int_part= max(args[0]->decimal_precision() - args[0]->decimals, + args[1]->decimal_precision() - args[1]->decimals); + int precision= min(max_int_part + 1 + decimals, DECIMAL_MAX_PRECISION); + + /* Integer operations keep unsigned_flag if one of arguments is unsigned */ + if (result_type() == INT_RESULT) + unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; + else + unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag; + max_length= my_decimal_precision_to_length(precision, decimals, + unsigned_flag); } @@ -582,84 +1132,178 @@ void Item_func_minus::fix_length_and_dec() } -double Item_func_minus::val() +double Item_func_minus::real_op() { - DBUG_ASSERT(fixed == 1); - double value=args[0]->val() - args[1]->val(); + double value= 
args[0]->val_real() - args[1]->val_real(); if ((null_value=args[0]->null_value || args[1]->null_value)) return 0.0; return value; } -longlong Item_func_minus::val_int() + +longlong Item_func_minus::int_op() { - DBUG_ASSERT(fixed == 1); - if (hybrid_type == INT_RESULT) - { - longlong value=args[0]->val_int() - args[1]->val_int(); - if ((null_value=args[0]->null_value || args[1]->null_value)) - return 0; - return value; - } - return (longlong) Item_func_minus::val(); + longlong value=args[0]->val_int() - args[1]->val_int(); + if ((null_value=args[0]->null_value || args[1]->null_value)) + return 0; + return value; } -double Item_func_mul::val() +/* See Item_func_plus::decimal_op for comments */ + +my_decimal *Item_func_minus::decimal_op(my_decimal *decimal_value) +{ + my_decimal value1, *val1; + my_decimal value2, *val2= + + val1= args[0]->val_decimal(&value1); + if ((null_value= args[0]->null_value)) + return 0; + val2= args[1]->val_decimal(&value2); + if (!(null_value= (args[1]->null_value || + (my_decimal_sub(E_DEC_FATAL_ERROR, decimal_value, val1, + val2) > 3)))) + return decimal_value; + return 0; +} + + +double Item_func_mul::real_op() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val()*args[1]->val(); + double value= args[0]->val_real() * args[1]->val_real(); if ((null_value=args[0]->null_value || args[1]->null_value)) - return 0.0; /* purecov: inspected */ + return 0.0; return value; } -longlong Item_func_mul::val_int() + +longlong Item_func_mul::int_op() { DBUG_ASSERT(fixed == 1); - if (hybrid_type == INT_RESULT) - { - longlong value=args[0]->val_int()*args[1]->val_int(); - if ((null_value=args[0]->null_value || args[1]->null_value)) - return 0; /* purecov: inspected */ - return value; - } - return (longlong) Item_func_mul::val(); + longlong value=args[0]->val_int()*args[1]->val_int(); + if ((null_value=args[0]->null_value || args[1]->null_value)) + return 0; + return value; } -double Item_func_div::val() +/* See Item_func_plus::decimal_op for comments 
*/ + +my_decimal *Item_func_mul::decimal_op(my_decimal *decimal_value) +{ + my_decimal value1, *val1; + my_decimal value2, *val2; + val1= args[0]->val_decimal(&value1); + if ((null_value= args[0]->null_value)) + return 0; + val2= args[1]->val_decimal(&value2); + if (!(null_value= (args[1]->null_value || + (my_decimal_mul(E_DEC_FATAL_ERROR, decimal_value, val1, + val2) > 3)))) + return decimal_value; + return 0; +} + + +void Item_func_mul::result_precision() +{ + /* Integer operations keep unsigned_flag if one of arguments is unsigned */ + if (result_type() == INT_RESULT) + unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; + else + unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag; + decimals= min(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE); + int precision= min(args[0]->decimal_precision() + args[1]->decimal_precision(), + DECIMAL_MAX_PRECISION); + max_length= my_decimal_precision_to_length(precision, decimals,unsigned_flag); +} + + +double Item_func_div::real_op() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - double val2=args[1]->val(); - if ((null_value= val2 == 0.0 || args[0]->null_value || args[1]->null_value)) + double value= args[0]->val_real(); + double val2= args[1]->val_real(); + if ((null_value= args[0]->null_value || args[1]->null_value)) + return 0.0; + if (val2 == 0.0) + { + signal_divide_by_null(); return 0.0; + } return value/val2; } -longlong Item_func_div::val_int() + +my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value) { - DBUG_ASSERT(fixed == 1); - if (hybrid_type == INT_RESULT) + my_decimal value1, *val1; + my_decimal value2, *val2; + int err; + + val1= args[0]->val_decimal(&value1); + if ((null_value= args[0]->null_value)) + return 0; + val2= args[1]->val_decimal(&value2); + if ((null_value= args[1]->null_value)) + return 0; + if ((err= my_decimal_div(E_DEC_FATAL_ERROR & ~E_DEC_DIV_ZERO, decimal_value, + val1, val2, prec_increment)) > 3) { - longlong value=args[0]->val_int(); - 
longlong val2=args[1]->val_int(); - if ((null_value= val2 == 0 || args[0]->null_value || args[1]->null_value)) - return 0; - return value/val2; + if (err == E_DEC_DIV_ZERO) + signal_divide_by_null(); + null_value= 1; + return 0; } - return (longlong) Item_func_div::val(); + return decimal_value; +} + + +void Item_func_div::result_precision() +{ + uint precision=min(args[0]->decimal_precision() + prec_increment, + DECIMAL_MAX_PRECISION); + /* Integer operations keep unsigned_flag if one of arguments is unsigned */ + if (result_type() == INT_RESULT) + unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag; + else + unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag; + decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); + max_length= my_decimal_precision_to_length(precision, decimals, + unsigned_flag); } + void Item_func_div::fix_length_and_dec() { - decimals=max(args[0]->decimals,args[1]->decimals)+2; - set_if_smaller(decimals, NOT_FIXED_DEC); - max_length=args[0]->max_length - args[0]->decimals + decimals; - uint tmp=float_length(decimals); - set_if_smaller(max_length,tmp); - maybe_null=1; + DBUG_ENTER("Item_func_div::fix_length_and_dec"); + prec_increment= current_thd->variables.div_precincrement; + Item_num_op::fix_length_and_dec(); + switch(hybrid_type) { + case REAL_RESULT: + { + decimals=max(args[0]->decimals,args[1]->decimals)+prec_increment; + set_if_smaller(decimals, NOT_FIXED_DEC); + max_length=args[0]->max_length - args[0]->decimals + decimals; + uint tmp=float_length(decimals); + set_if_smaller(max_length,tmp); + break; + } + case INT_RESULT: + hybrid_type= DECIMAL_RESULT; + DBUG_PRINT("info", ("Type changed: DECIMAL_RESULT")); + result_precision(); + break; + case DECIMAL_RESULT: + result_precision(); + break; + default: + DBUG_ASSERT(0); + } + maybe_null= 1; // devision by zero + DBUG_VOID_RETURN; } @@ -669,8 +1313,13 @@ longlong Item_func_int_div::val_int() DBUG_ASSERT(fixed == 1); longlong 
value=args[0]->val_int(); longlong val2=args[1]->val_int(); - if ((null_value= val2 == 0 || args[0]->null_value || args[1]->null_value)) + if ((null_value= (args[0]->null_value || args[1]->null_value))) + return 0; + if (val2 == 0) + { + signal_divide_by_null(); return 0; + } return (unsigned_flag ? (ulonglong) value / (ulonglong) val2 : value / val2); @@ -679,32 +1328,79 @@ longlong Item_func_int_div::val_int() void Item_func_int_div::fix_length_and_dec() { - find_num_type(); max_length=args[0]->max_length - args[0]->decimals; maybe_null=1; + unsigned_flag=args[0]->unsigned_flag | args[1]->unsigned_flag; } -double Item_func_mod::val() -{ - DBUG_ASSERT(fixed == 1); - double x= args[0]->val(); - double y= args[1]->val(); - if ((null_value= (y == 0.0) || args[0]->null_value || args[1]->null_value)) - return 0.0; /* purecov: inspected */ - return fmod(x, y); -} - -longlong Item_func_mod::val_int() +longlong Item_func_mod::int_op() { DBUG_ASSERT(fixed == 1); longlong value= args[0]->val_int(); longlong val2= args[1]->val_int(); - if ((null_value=val2 == 0 || args[0]->null_value || args[1]->null_value)) + if ((null_value= args[0]->null_value || args[1]->null_value)) return 0; /* purecov: inspected */ + if (val2 == 0) + { + signal_divide_by_null(); + return 0; + } + + if (args[0]->unsigned_flag) + return ((ulonglong) value) % val2; + return value % val2; } +double Item_func_mod::real_op() +{ + DBUG_ASSERT(fixed == 1); + double value= args[0]->val_real(); + double val2= args[1]->val_real(); + if ((null_value= args[0]->null_value || args[1]->null_value)) + return 0.0; /* purecov: inspected */ + if (val2 == 0.0) + { + signal_divide_by_null(); + return 0.0; + } + return fmod(value,val2); +} + + +my_decimal *Item_func_mod::decimal_op(my_decimal *decimal_value) +{ + my_decimal value1, *val1; + my_decimal value2, *val2; + + val1= args[0]->val_decimal(&value1); + if ((null_value= args[0]->null_value)) + return 0; + val2= args[1]->val_decimal(&value2); + if ((null_value= 
args[1]->null_value)) + return 0; + switch (my_decimal_mod(E_DEC_FATAL_ERROR & ~E_DEC_DIV_ZERO, decimal_value, + val1, val2)) { + case E_DEC_TRUNCATED: + case E_DEC_OK: + return decimal_value; + case E_DEC_DIV_ZERO: + signal_divide_by_null(); + default: + null_value= 1; + return 0; + } +} + + +void Item_func_mod::result_precision() +{ + decimals= max(args[0]->decimals, args[1]->decimals); + max_length= max(args[0]->max_length, args[1]->max_length); +} + + void Item_func_mod::fix_length_and_dec() { Item_num_op::fix_length_and_dec(); @@ -712,101 +1408,118 @@ void Item_func_mod::fix_length_and_dec() } -double Item_func_neg::val() +double Item_func_neg::real_op() { - DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - null_value=args[0]->null_value; + double value= args[0]->val_real(); + null_value= args[0]->null_value; return -value; } -longlong Item_func_neg::val_int() +longlong Item_func_neg::int_op() { - DBUG_ASSERT(fixed == 1); - longlong value=args[0]->val_int(); - null_value=args[0]->null_value; + longlong value= args[0]->val_int(); + null_value= args[0]->null_value; return -value; } +my_decimal *Item_func_neg::decimal_op(my_decimal *decimal_value) +{ + my_decimal val, *value= args[0]->val_decimal(&val); + if (!(null_value= args[0]->null_value)) + { + my_decimal2decimal(value, decimal_value); + my_decimal_neg(decimal_value); + return decimal_value; + } + return 0; +} + + +void Item_func_neg::fix_num_length_and_dec() +{ + decimals= args[0]->decimals; + /* 1 add because sign can appear */ + max_length= args[0]->max_length + 1; +} + + void Item_func_neg::fix_length_and_dec() { - enum Item_result arg_result= args[0]->result_type(); - enum Item::Type arg_type= args[0]->type(); - decimals=args[0]->decimals; - max_length=args[0]->max_length; - hybrid_type= REAL_RESULT; - + DBUG_ENTER("Item_func_neg::fix_length_and_dec"); + Item_func_num1::fix_length_and_dec(); + /* - We need to account for added '-' in the following cases: - A) argument is a real or integer 
positive constant - in this case - argument's max_length is set to actual number of bytes occupied, and not - maximum number of bytes real or integer may require. Note that all - constants are non negative so we don't need to account for removed '-'. - B) argument returns a string. + If this is in integer context keep the context as integer if possible + (This is how multiplication and other integer functions works) Use val() to get value as arg_type doesn't mean that item is Item_int or Item_real due to existence of Item_param. */ - if (arg_result == STRING_RESULT || - (arg_type == REAL_ITEM && args[0]->val() >= 0) || - (arg_type == INT_ITEM && args[0]->val_int() > 0)) - max_length++; - - if (args[0]->result_type() == INT_RESULT) + if (hybrid_type == INT_RESULT && + args[0]->type() == INT_ITEM && + ((ulonglong) args[0]->val_int() >= (ulonglong) LONGLONG_MIN)) { /* - If this is in integer context keep the context as integer - (This is how multiplication and other integer functions works) - - We must however do a special case in the case where the argument - is a unsigned bigint constant as in this case the only safe - number to convert in integer context is 9223372036854775808. 
- (This is needed because the lex parser doesn't anymore handle - signed integers) + Ensure that result is converted to DECIMAL, as longlong can't hold + the negated number */ - if (args[0]->type() != INT_ITEM || - (((ulonglong) args[0]->val_int()) <= (ulonglong) LONGLONG_MIN)) - hybrid_type= INT_RESULT; + hybrid_type= DECIMAL_RESULT; + DBUG_PRINT("info", ("Type changed: DECIMAL_RESULT")); } + unsigned_flag= 0; + DBUG_VOID_RETURN; } -double Item_func_abs::val() +double Item_func_abs::real_op() { - DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - null_value=args[0]->null_value; + double value= args[0]->val_real(); + null_value= args[0]->null_value; return fabs(value); } -longlong Item_func_abs::val_int() +longlong Item_func_abs::int_op() { - DBUG_ASSERT(fixed == 1); - longlong value=args[0]->val_int(); - null_value=args[0]->null_value; + longlong value= args[0]->val_int(); + null_value= args[0]->null_value; return value >= 0 ? value : -value; } +my_decimal *Item_func_abs::decimal_op(my_decimal *decimal_value) +{ + my_decimal val, *value= args[0]->val_decimal(&val); + if (!(null_value= args[0]->null_value)) + { + my_decimal2decimal(value, decimal_value); + if (decimal_value->sign()) + my_decimal_neg(decimal_value); + return decimal_value; + } + return 0; +} + + void Item_func_abs::fix_length_and_dec() { - decimals=args[0]->decimals; - max_length=args[0]->max_length; - hybrid_type= REAL_RESULT; - if (args[0]->result_type() == INT_RESULT) - hybrid_type= INT_RESULT; + Item_func_num1::fix_length_and_dec(); } /* Gateway to natural LOG function */ -double Item_func_ln::val() +double Item_func_ln::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - if ((null_value=(args[0]->null_value || value <= 0.0))) + double value= args[0]->val_real(); + if ((null_value= args[0]->null_value)) + return 0.0; + if (value <= 0.0) + { + signal_divide_by_null(); return 0.0; + } return log(value); } @@ -815,63 +1528,84 @@ double Item_func_ln::val() We have to check 
if all values are > zero and first one is not one as these are the cases then result is not a number. */ -double Item_func_log::val() +double Item_func_log::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - if ((null_value=(args[0]->null_value || value <= 0.0))) + double value= args[0]->val_real(); + if ((null_value= args[0]->null_value)) + return 0.0; + if (value <= 0.0) + { + signal_divide_by_null(); return 0.0; + } if (arg_count == 2) { - double value2= args[1]->val(); - if ((null_value=(args[1]->null_value || value2 <= 0.0 || value == 1.0))) + double value2= args[1]->val_real(); + if ((null_value= args[1]->null_value)) + return 0.0; + if (value2 <= 0.0 || value == 1.0) + { + signal_divide_by_null(); return 0.0; + } return log(value2) / log(value); } return log(value); } -double Item_func_log2::val() +double Item_func_log2::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - if ((null_value=(args[0]->null_value || value <= 0.0))) + double value= args[0]->val_real(); + + if ((null_value=args[0]->null_value)) + return 0.0; + if (value <= 0.0) + { + signal_divide_by_null(); return 0.0; - return log(value) / log(2.0); + } + return log(value) / M_LN2; } -double Item_func_log10::val() +double Item_func_log10::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - if ((null_value=(args[0]->null_value || value <= 0.0))) - return 0.0; /* purecov: inspected */ + double value= args[0]->val_real(); + if ((null_value= args[0]->null_value)) + return 0.0; + if (value <= 0.0) + { + signal_divide_by_null(); + return 0.0; + } return log10(value); } -double Item_func_exp::val() +double Item_func_exp::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; /* purecov: inspected */ return exp(value); } -double Item_func_sqrt::val() +double Item_func_sqrt::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + 
double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || value < 0))) return 0.0; /* purecov: inspected */ return sqrt(value); } -double Item_func_pow::val() +double Item_func_pow::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - double val2=args[1]->val(); + double value= args[0]->val_real(); + double val2= args[1]->val_real(); if ((null_value=(args[0]->null_value || args[1]->null_value))) return 0.0; /* purecov: inspected */ return pow(value,val2); @@ -879,35 +1613,35 @@ double Item_func_pow::val() // Trigonometric functions -double Item_func_acos::val() +double Item_func_acos::val_real() { DBUG_ASSERT(fixed == 1); // the volatile's for BUG #2338 to calm optimizer down (because of gcc's bug) - volatile double value=args[0]->val(); + volatile double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || (value < -1.0 || value > 1.0)))) return 0.0; return fix_result(acos(value)); } -double Item_func_asin::val() +double Item_func_asin::val_real() { DBUG_ASSERT(fixed == 1); // the volatile's for BUG #2338 to calm optimizer down (because of gcc's bug) - volatile double value=args[0]->val(); + volatile double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || (value < -1.0 || value > 1.0)))) return 0.0; return fix_result(asin(value)); } -double Item_func_atan::val() +double Item_func_atan::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; if (arg_count == 2) { - double val2= args[1]->val(); + double val2= args[1]->val_real(); if ((null_value=args[1]->null_value)) return 0.0; return fix_result(atan2(value,val2)); @@ -915,28 +1649,28 @@ double Item_func_atan::val() return fix_result(atan(value)); } -double Item_func_cos::val() +double Item_func_cos::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 
0.0; return fix_result(cos(value)); } -double Item_func_sin::val() +double Item_func_sin::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; return fix_result(sin(value)); } -double Item_func_tan::val() +double Item_func_tan::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; return fix_result(tan(value)); @@ -997,54 +1731,219 @@ void Item_func_integer::fix_length_and_dec() decimals=0; } -longlong Item_func_ceiling::val_int() +void Item_func_int_val::fix_num_length_and_dec() { - DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - null_value=args[0]->null_value; - return (longlong) ceil(value); + max_length= args[0]->max_length - (args[0]->decimals ? + args[0]->decimals + 1 : + 0) + 2; + uint tmp= float_length(decimals); + set_if_smaller(max_length,tmp); + decimals= 0; } -longlong Item_func_floor::val_int() + +void Item_func_int_val::find_num_type() { - DBUG_ASSERT(fixed == 1); - // the volatile's for BUG #3051 to calm optimizer down (because of gcc's bug) - volatile double value=args[0]->val(); - null_value=args[0]->null_value; - return (longlong) floor(value); + DBUG_ENTER("Item_func_int_val::find_num_type"); + DBUG_PRINT("info", ("name %s", func_name())); + switch(hybrid_type= args[0]->result_type()) + { + case STRING_RESULT: + case REAL_RESULT: + hybrid_type= REAL_RESULT; + max_length= float_length(decimals); + break; + case INT_RESULT: + case DECIMAL_RESULT: + /* + -2 because in most high position can't be used any digit for longlong + and one position for increasing value during operation + */ + if ((args[0]->max_length - args[0]->decimals) >= + (DECIMAL_LONGLONG_DIGITS - 2)) + { + hybrid_type= DECIMAL_RESULT; + } + else + { + unsigned_flag= args[0]->unsigned_flag; + hybrid_type= INT_RESULT; + } + break; + default: + DBUG_ASSERT(0); + } + 
DBUG_PRINT("info", ("Type: %s", + (hybrid_type == REAL_RESULT ? "REAL_RESULT" : + hybrid_type == DECIMAL_RESULT ? "DECIMAL_RESULT" : + hybrid_type == INT_RESULT ? "INT_RESULT" : + "--ILLEGAL!!!--"))); + + DBUG_VOID_RETURN; } -void Item_func_round::fix_length_and_dec() + +longlong Item_func_ceiling::int_op() { - max_length=args[0]->max_length; - decimals=args[0]->decimals; - if (args[1]->const_item()) + longlong result; + switch (args[0]->result_type()) { + case INT_RESULT: + result= args[0]->val_int(); + null_value= args[0]->null_value; + break; + case DECIMAL_RESULT: { - int tmp=(int) args[1]->val_int(); - if (tmp < 0) - decimals=0; + my_decimal dec_buf, *dec; + if ((dec= Item_func_ceiling::decimal_op(&dec_buf))) + my_decimal2int(E_DEC_FATAL_ERROR, dec, unsigned_flag, &result); else - decimals=min(tmp,NOT_FIXED_DEC); - if ((tmp= decimals - args[0]->decimals) > 0) - max_length+= tmp; + result= 0; + break; } + default: + result= (longlong)Item_func_ceiling::real_op(); + }; + return result; } -double Item_func_round::val() + +double Item_func_ceiling::real_op() +{ + /* + the volatile's for BUG #3051 to calm optimizer down (because of gcc's + bug) + */ + volatile double value= args[0]->val_real(); + null_value= args[0]->null_value; + return ceil(value); +} + + +my_decimal *Item_func_ceiling::decimal_op(my_decimal *decimal_value) +{ + my_decimal val, *value= args[0]->val_decimal(&val); + if (!(null_value= (args[0]->null_value || + my_decimal_ceiling(E_DEC_FATAL_ERROR, value, + decimal_value) > 1))) + return decimal_value; + return 0; +} + + +longlong Item_func_floor::int_op() +{ + longlong result; + switch (args[0]->result_type()) { + case INT_RESULT: + result= args[0]->val_int(); + null_value= args[0]->null_value; + break; + case DECIMAL_RESULT: + { + my_decimal dec_buf, *dec; + if ((dec= Item_func_floor::decimal_op(&dec_buf))) + my_decimal2int(E_DEC_FATAL_ERROR, dec, unsigned_flag, &result); + else + result= 0; + break; + } + default: + result= 
(longlong)Item_func_floor::real_op(); + }; + return result; +} + + +double Item_func_floor::real_op() +{ + /* + the volatile's for BUG #3051 to calm optimizer down (because of gcc's + bug) + */ + volatile double value= args[0]->val_real(); + null_value= args[0]->null_value; + return floor(value); +} + + +my_decimal *Item_func_floor::decimal_op(my_decimal *decimal_value) +{ + my_decimal val, *value= args[0]->val_decimal(&val); + if (!(null_value= (args[0]->null_value || + my_decimal_floor(E_DEC_FATAL_ERROR, value, + decimal_value) > 1))) + return decimal_value; + return 0; +} + + +void Item_func_round::fix_length_and_dec() +{ + unsigned_flag= args[0]->unsigned_flag; + if (!args[1]->const_item()) + { + max_length= args[0]->max_length; + decimals= args[0]->decimals; + hybrid_type= REAL_RESULT; + return; + } + + int decimals_to_set= max((int)args[1]->val_int(), 0); + if (args[0]->decimals == NOT_FIXED_DEC) + { + max_length= args[0]->max_length; + decimals= min(decimals_to_set, NOT_FIXED_DEC); + hybrid_type= REAL_RESULT; + return; + } + + switch (args[0]->result_type()) { + case REAL_RESULT: + case STRING_RESULT: + hybrid_type= REAL_RESULT; + decimals= min(decimals_to_set, NOT_FIXED_DEC); + max_length= float_length(decimals); + break; + case INT_RESULT: + if (!decimals_to_set && + (truncate || (args[0]->decimal_precision() < DECIMAL_LONGLONG_DIGITS))) + { + int length_can_increase= test(!truncate && (args[1]->val_int() < 0)); + max_length= args[0]->max_length + length_can_increase; + /* Here we can keep INT_RESULT */ + hybrid_type= INT_RESULT; + decimals= 0; + break; + } + /* fall through */ + case DECIMAL_RESULT: + { + hybrid_type= DECIMAL_RESULT; + int decimals_delta= args[0]->decimals - decimals_to_set; + int precision= args[0]->decimal_precision(); + int length_increase= ((decimals_delta <= 0) || truncate) ? 
0:1; + + precision-= decimals_delta - length_increase; + decimals= decimals_to_set; + max_length= my_decimal_precision_to_length(precision, decimals, + unsigned_flag); + break; + } + default: + DBUG_ASSERT(0); /* This result type isn't handled */ + } +} + +double my_double_round(double value, int dec, bool truncate) { - DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - int dec=(int) args[1]->val_int(); - uint abs_dec=abs(dec); double tmp; + uint abs_dec= abs(dec); /* tmp2 is here to avoid return the value with 80 bit precision This will fix that the test round(0.1,1) = round(0.1,1) is true */ volatile double tmp2; - if ((null_value=args[0]->null_value || args[1]->null_value)) - return 0.0; tmp=(abs_dec < array_elements(log_10) ? log_10[abs_dec] : pow(10.0,(double) abs_dec)); @@ -1061,10 +1960,76 @@ double Item_func_round::val() } -bool Item_func_rand::fix_fields(THD *thd, struct st_table_list *tables, - Item **ref) +double Item_func_round::real_op() +{ + double value= args[0]->val_real(); + int dec= (int) args[1]->val_int(); + + if (!(null_value= args[0]->null_value || args[1]->null_value)) + return my_double_round(value, dec, truncate); + + return 0.0; +} + + +longlong Item_func_round::int_op() { - if (Item_real_func::fix_fields(thd, tables, ref)) + longlong value= args[0]->val_int(); + int dec=(int) args[1]->val_int(); + decimals= 0; + uint abs_dec; + if ((null_value= args[0]->null_value || args[1]->null_value)) + return 0; + if (dec >= 0) + return value; // integer have not digits after point + + abs_dec= -dec; + longlong tmp; + + if(abs_dec >= array_elements(log_10_int)) + return 0; + + tmp= log_10_int[abs_dec]; + + if (truncate) + { + if (unsigned_flag) + value= (ulonglong(value)/tmp)*tmp; + else + value= (value/tmp)*tmp; + } + else + { + if (unsigned_flag) + value= ((ulonglong(value)+(tmp>>1))/tmp)*tmp; + else if ( value >= 0) + value= ((value+(tmp>>1))/tmp)*tmp; + else + value= ((value-(tmp>>1))/tmp)*tmp; + } + return value; +} + + +my_decimal 
*Item_func_round::decimal_op(my_decimal *decimal_value) +{ + my_decimal val, *value= args[0]->val_decimal(&val); + int dec=(int) args[1]->val_int(); + if (dec > 0) + { + decimals= min(dec, DECIMAL_MAX_SCALE); // to get correct output + } + if (!(null_value= (args[0]->null_value || args[1]->null_value || + my_decimal_round(E_DEC_FATAL_ERROR, value, dec, truncate, + decimal_value) > 1))) + return decimal_value; + return 0; +} + + +bool Item_func_rand::fix_fields(THD *thd,Item **ref) +{ + if (Item_real_func::fix_fields(thd, ref)) return TRUE; used_tables_cache|= RAND_TABLE_BIT; if (arg_count) @@ -1075,12 +2040,15 @@ bool Item_func_rand::fix_fields(THD *thd, struct st_table_list *tables, return TRUE; } /* - Allocate rand structure once: we must use thd->current_arena + Allocate rand structure once: we must use thd->stmt_arena to create rand in proper mem_root if it's a prepared statement or stored procedure. + + No need to send a Rand log event if seed was given eg: RAND(seed), + as it will be replicated in the query as such. */ if (!rand && !(rand= (struct rand_struct*) - thd->current_arena->alloc(sizeof(*rand)))) + thd->stmt_arena->alloc(sizeof(*rand)))) return TRUE; /* PARAM_ITEM is returned if we're in statement prepare and consequently @@ -1100,16 +2068,16 @@ bool Item_func_rand::fix_fields(THD *thd, struct st_table_list *tables, else { /* - No need to send a Rand log event if seed was given eg: RAND(seed), - as it will be replicated in the query as such. 
- Save the seed only the first time RAND() is used in the query Once events are forwarded rather than recreated, the following can be skipped if inside the slave thread */ - thd->rand_used=1; - thd->rand_saved_seed1=thd->rand.seed1; - thd->rand_saved_seed2=thd->rand.seed2; + if (!thd->rand_used) + { + thd->rand_used= 1; + thd->rand_saved_seed1= thd->rand.seed1; + thd->rand_saved_seed2= thd->rand.seed2; + } rand= &thd->rand; } return FALSE; @@ -1122,7 +2090,7 @@ void Item_func_rand::update_used_tables() } -double Item_func_rand::val() +double Item_func_rand::val_real() { DBUG_ASSERT(fixed == 1); return my_rnd(rand); @@ -1131,16 +2099,16 @@ double Item_func_rand::val() longlong Item_func_sign::val_int() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); null_value=args[0]->null_value; return value < 0.0 ? -1 : (value > 0 ? 1 : 0); } -double Item_func_units::val() +double Item_func_units::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0; return value*mul+add; @@ -1149,23 +2117,26 @@ double Item_func_units::val() void Item_func_min_max::fix_length_and_dec() { + int max_int_part=0; decimals=0; max_length=0; - maybe_null=1; + maybe_null=0; cmp_type=args[0]->result_type(); for (uint i=0 ; i < arg_count ; i++) { - if (max_length < args[i]->max_length) - max_length=args[i]->max_length; - if (decimals < args[i]->decimals) - decimals=args[i]->decimals; - if (!args[i]->maybe_null) - maybe_null=0; + set_if_bigger(max_length, args[i]->max_length); + set_if_bigger(decimals, args[i]->decimals); + set_if_bigger(max_int_part, args[i]->decimal_int_part()); + if (args[i]->maybe_null) + maybe_null=1; cmp_type=item_cmp_type(cmp_type,args[i]->result_type()); } if (cmp_type == STRING_RESULT) - agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV); + agg_arg_charsets(collation, args, arg_count, MY_COLL_CMP_CONV, 1); + else if 
((cmp_type == DECIMAL_RESULT) || (cmp_type == INT_RESULT)) + max_length= my_decimal_precision_to_length(max_int_part+decimals, decimals, + unsigned_flag); } @@ -1184,9 +2155,17 @@ String *Item_func_min_max::val_str(String *str) str->set((ulonglong) nr,&my_charset_bin); return str; } + case DECIMAL_RESULT: + { + my_decimal dec_buf, *dec_val= val_decimal(&dec_buf); + if (null_value) + return 0; + my_decimal2string(E_DEC_FATAL_ERROR, dec_val, 0, 0, 0, str); + return str; + } case REAL_RESULT: { - double nr=val(); + double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ str->set(nr,decimals,&my_charset_bin); @@ -1196,14 +2175,10 @@ String *Item_func_min_max::val_str(String *str) { String *res; LINT_INIT(res); - null_value=1; for (uint i=0; i < arg_count ; i++) { - if (null_value) - { + if (i == 0) res=args[i]->val_str(str); - null_value=args[i]->null_value; - } else { String *res2; @@ -1215,14 +2190,15 @@ String *Item_func_min_max::val_str(String *str) res=res2; } } + if ((null_value= args[i]->null_value)) + return 0; } - if (res) // If !NULL - res->set_charset(collation.collation); + res->set_charset(collation.collation); return res; } case ROW_RESULT: default: - // This case should never be choosen + // This case should never be chosen DBUG_ASSERT(0); return 0; } @@ -1230,24 +2206,22 @@ String *Item_func_min_max::val_str(String *str) } -double Item_func_min_max::val() +double Item_func_min_max::val_real() { DBUG_ASSERT(fixed == 1); double value=0.0; - null_value=1; for (uint i=0; i < arg_count ; i++) { - if (null_value) - { - value=args[i]->val(); - null_value=args[i]->null_value; - } + if (i == 0) + value= args[i]->val_real(); else { - double tmp=args[i]->val(); + double tmp= args[i]->val_real(); if (!args[i]->null_value && (tmp < value ? 
cmp_sign : -cmp_sign) > 0) value=tmp; } + if ((null_value= args[i]->null_value)) + break; } return value; } @@ -1257,24 +2231,58 @@ longlong Item_func_min_max::val_int() { DBUG_ASSERT(fixed == 1); longlong value=0; - null_value=1; for (uint i=0; i < arg_count ; i++) { - if (null_value) - { + if (i == 0) value=args[i]->val_int(); - null_value=args[i]->null_value; - } else { longlong tmp=args[i]->val_int(); if (!args[i]->null_value && (tmp < value ? cmp_sign : -cmp_sign) > 0) value=tmp; } + if ((null_value= args[i]->null_value)) + break; } return value; } + +my_decimal *Item_func_min_max::val_decimal(my_decimal *dec) +{ + DBUG_ASSERT(fixed == 1); + my_decimal tmp_buf, *tmp, *res; + LINT_INIT(res); + + for (uint i=0; i < arg_count ; i++) + { + if (i == 0) + res= args[i]->val_decimal(dec); + else + { + tmp= args[i]->val_decimal(&tmp_buf); // Zero if NULL + if (tmp && (my_decimal_cmp(tmp, res) * cmp_sign) < 0) + { + if (tmp == &tmp_buf) + { + /* Move value out of tmp_buf as this will be reused on next loop */ + my_decimal2decimal(tmp, dec); + res= dec; + } + else + res= tmp; + } + } + if ((null_value= args[i]->null_value)) + { + res= 0; + break; + } + } + return res; +} + + longlong Item_func_length::val_int() { DBUG_ASSERT(fixed == 1); @@ -1313,8 +2321,9 @@ longlong Item_func_coercibility::val_int() void Item_func_locate::fix_length_and_dec() { - maybe_null=0; max_length=11; - agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV); + maybe_null= 0; + max_length= MY_INT32_NUM_DECIMAL_DIGITS; + agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1); } @@ -1329,34 +2338,41 @@ longlong Item_func_locate::val_int() return 0; /* purecov: inspected */ } null_value=0; - uint start=0; - uint start0=0; + /* must be longlong to avoid truncation */ + longlong start= 0; + longlong start0= 0; my_match_t match; if (arg_count == 3) { - start0= start =(uint) args[2]->val_int()-1; - start=a->charpos(start); - - if (start > a->length() || start+b->length() > a->length()) + 
start0= start= args[2]->val_int() - 1; + + if ((start < 0) || (start > a->length())) + return 0; + + /* start is now sufficiently valid to pass to charpos function */ + start= a->charpos((int) start); + + if (start + b->length() > a->length()) return 0; } if (!b->length()) // Found empty string at start - return (longlong) (start+1); + return start + 1; if (!cmp_collation.collation->coll->instr(cmp_collation.collation, - a->ptr()+start, a->length()-start, + a->ptr()+start, + (uint) (a->length()-start), b->ptr(), b->length(), &match, 1)) return 0; - return (longlong) match.mblen + start0 + 1; + return (longlong) match.mb_len + start0 + 1; } void Item_func_locate::print(String *str) { - str->append("locate(", 7); + str->append(STRING_WITH_LEN("locate(")); args[1]->print(str); str->append(','); args[0]->print(str); @@ -1376,8 +2392,8 @@ longlong Item_func_field::val_int() if (cmp_type == STRING_RESULT) { String *field; - if (!(field=args[0]->val_str(&value))) - return 0; // -1 if null ? + if (!(field= args[0]->val_str(&value))) + return 0; for (uint i=1 ; i < arg_count ; i++) { String *tmp_value=args[i]->val_str(&tmp); @@ -1396,14 +2412,27 @@ longlong Item_func_field::val_int() return (longlong) (i); } } + else if (cmp_type == DECIMAL_RESULT) + { + my_decimal dec_arg_buf, *dec_arg, + dec_buf, *dec= args[0]->val_decimal(&dec_buf); + if (args[0]->null_value) + return 0; + for (uint i=1; i < arg_count; i++) + { + dec_arg= args[i]->val_decimal(&dec_arg_buf); + if (!args[i]->null_value && !my_decimal_cmp(dec_arg, dec)) + return (longlong) (i); + } + } else { - double val= args[0]->val(); + double val= args[0]->val_real(); if (args[0]->null_value) return 0; for (uint i=1; i < arg_count ; i++) { - if (val == args[i]->val() && !args[i]->null_value) + if (val == args[i]->val_real() && !args[i]->null_value) return (longlong) (i); } } @@ -1418,7 +2447,7 @@ void Item_func_field::fix_length_and_dec() for (uint i=1; i < arg_count ; i++) cmp_type= item_cmp_type(cmp_type, 
args[i]->result_type()); if (cmp_type == STRING_RESULT) - agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV); + agg_arg_charsets(cmp_collation, args, arg_count, MY_COLL_CMP_CONV, 1); } @@ -1485,7 +2514,7 @@ void Item_func_find_in_set::fix_length_and_dec() } } } - agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV); + agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV, 1); } static const char separator=','; @@ -1547,12 +2576,12 @@ longlong Item_func_find_in_set::val_int() } str_end= substr_end; } - else if (str_end - str_begin == 0 && - find_str_len == 0 && + else if (str_end - str_begin == 0 && + find_str_len == 0 && wc == (my_wc_t) separator) return (longlong) ++position; else - return (longlong) 0; + return LL(0); } } return 0; @@ -1576,13 +2605,6 @@ longlong Item_func_bit_count::val_int() #ifdef HAVE_DLOPEN -udf_handler::~udf_handler() -{ - /* Everything should be properly cleaned up by this moment. */ - DBUG_ASSERT(not_original || !(initialized || buffers)); -} - - void udf_handler::cleanup() { if (!not_original) @@ -1591,8 +2613,7 @@ void udf_handler::cleanup() { if (u_d->func_deinit != NULL) { - void (*deinit)(UDF_INIT *) = (void (*)(UDF_INIT*)) - u_d->func_deinit; + Udf_func_deinit deinit= u_d->func_deinit; (*deinit)(&initid); } free_udf(u_d); @@ -1606,7 +2627,7 @@ void udf_handler::cleanup() bool -udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, +udf_handler::fix_fields(THD *thd, Item_result_field *func, uint arg_count, Item **arguments) { #ifndef EMBEDDED_LIBRARY // Avoid compiler warning @@ -1614,16 +2635,15 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, #endif DBUG_ENTER("Item_udf_func::fix_fields"); - if (check_stack_overrun(thd, buff)) - DBUG_RETURN(1); // Fatal error flag is set! + if (check_stack_overrun(thd, STACK_MIN_SIZE, buff)) + DBUG_RETURN(TRUE); // Fatal error flag is set! 
udf_func *tmp_udf=find_udf(u_d->name.str,(uint) u_d->name.length,1); if (!tmp_udf) { - my_printf_error(ER_CANT_FIND_UDF,ER(ER_CANT_FIND_UDF),MYF(0),u_d->name.str, - errno); - DBUG_RETURN(1); + my_error(ER_CANT_FIND_UDF, MYF(0), u_d->name.str, errno); + DBUG_RETURN(TRUE); } u_d=tmp_udf; args=arguments; @@ -1640,7 +2660,7 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, { free_udf(u_d); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } uint i; Item **arg,**arg_end; @@ -1648,13 +2668,13 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, arg != arg_end ; arg++,i++) { - if (!(*arg)->fixed && - (*arg)->fix_fields(thd, tables, arg)) + if (!(*arg)->fixed && + (*arg)->fix_fields(thd, arg)) DBUG_RETURN(1); // we can't assign 'item' before, because fix_fields() can change arg Item *item= *arg; if (item->check_cols(1)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); /* TODO: We should think about this. It is not always right way just to set an UDF result to return my_charset_bin @@ -1664,7 +2684,7 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, Moreover, some arguments can represent a numeric input which doesn't effect the result character set and collation. There is no a general rule for UDF. Everything depends on - the particular user definted function. + the particular user defined function. */ if (item->collation.collation->state & MY_CS_BINSORT) func->collation.set(&my_charset_bin); @@ -1675,14 +2695,19 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, const_item_cache&=item->const_item(); f_args.arg_type[i]=item->result_type(); } + //TODO: why all following memory is not allocated with 1 call of sql_alloc? 
if (!(buffers=new String[arg_count]) || !(f_args.args= (char**) sql_alloc(arg_count * sizeof(char *))) || - !(f_args.lengths=(ulong*) sql_alloc(arg_count * sizeof(long))) || - !(f_args.maybe_null=(char*) sql_alloc(arg_count * sizeof(char))) || - !(num_buffer= (char*) sql_alloc(ALIGN_SIZE(sizeof(double))*arg_count))) + !(f_args.lengths= (ulong*) sql_alloc(arg_count * sizeof(long))) || + !(f_args.maybe_null= (char*) sql_alloc(arg_count * sizeof(char))) || + !(num_buffer= (char*) sql_alloc(arg_count * + ALIGN_SIZE(sizeof(double)))) || + !(f_args.attributes= (char**) sql_alloc(arg_count * sizeof(char *))) || + !(f_args.attribute_lengths= (ulong*) sql_alloc(arg_count * + sizeof(long)))) { free_udf(u_d); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } } func->fix_length_and_dec(); @@ -1697,49 +2722,60 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, char *to=num_buffer; for (uint i=0; i < arg_count; i++) { - f_args.args[i]=0; - f_args.lengths[i]=arguments[i]->max_length; - f_args.maybe_null[i]=(char) arguments[i]->maybe_null; + /* + For a constant argument i, args->args[i] points to the argument value. + For non-constant, args->args[i] is NULL. + */ + f_args.args[i]= NULL; /* Non-const unless updated below. */ + + f_args.lengths[i]= arguments[i]->max_length; + f_args.maybe_null[i]= (char) arguments[i]->maybe_null; + f_args.attributes[i]= arguments[i]->name; + f_args.attribute_lengths[i]= arguments[i]->name_length; - switch(arguments[i]->type()) { - case Item::STRING_ITEM: // Constant string ! 
+ if (arguments[i]->const_item()) { - String *res=arguments[i]->val_str((String *) 0); - if (arguments[i]->null_value) - continue; - f_args.args[i]= (char*) res->ptr(); - break; - } - case Item::INT_ITEM: - *((longlong*) to) = arguments[i]->val_int(); - if (!arguments[i]->null_value) - { - f_args.args[i]=to; - to+= ALIGN_SIZE(sizeof(longlong)); - } - break; - case Item::REAL_ITEM: - *((double*) to) = arguments[i]->val(); - if (!arguments[i]->null_value) - { - f_args.args[i]=to; - to+= ALIGN_SIZE(sizeof(double)); - } - break; - default: // Skip these - break; + switch (arguments[i]->result_type()) + { + case STRING_RESULT: + case DECIMAL_RESULT: + { + String *res= arguments[i]->val_str(&buffers[i]); + if (arguments[i]->null_value) + continue; + f_args.args[i]= (char*) res->ptr(); + break; + } + case INT_RESULT: + *((longlong*) to)= arguments[i]->val_int(); + if (arguments[i]->null_value) + continue; + f_args.args[i]= to; + to+= ALIGN_SIZE(sizeof(longlong)); + break; + case REAL_RESULT: + *((double*) to)= arguments[i]->val_real(); + if (arguments[i]->null_value) + continue; + f_args.args[i]= to; + to+= ALIGN_SIZE(sizeof(double)); + break; + case ROW_RESULT: + default: + // This case should never be chosen + DBUG_ASSERT(0); + break; + } } } thd->net.last_error[0]=0; - my_bool (*init)(UDF_INIT *, UDF_ARGS *, char *)= - (my_bool (*)(UDF_INIT *, UDF_ARGS *, char *)) - u_d->func_init; + Udf_func_init init= u_d->func_init; if ((error=(uchar) init(&initid, &f_args, thd->net.last_error))) { - my_printf_error(ER_CANT_INITIALIZE_UDF,ER(ER_CANT_INITIALIZE_UDF),MYF(0), - u_d->name.str, thd->net.last_error); + my_error(ER_CANT_INITIALIZE_UDF, MYF(0), + u_d->name.str, thd->net.last_error); free_udf(u_d); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } func->max_length=min(initid.max_length,MAX_BLOB_WIDTH); func->maybe_null=initid.maybe_null; @@ -1749,11 +2785,11 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, initialized=1; if (error) { - 
my_printf_error(ER_CANT_INITIALIZE_UDF,ER(ER_CANT_INITIALIZE_UDF),MYF(0), - u_d->name.str, ER(ER_UNKNOWN_ERROR)); - DBUG_RETURN(1); + my_error(ER_CANT_INITIALIZE_UDF, MYF(0), + u_d->name.str, ER(ER_UNKNOWN_ERROR)); + DBUG_RETURN(TRUE); } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -1768,6 +2804,7 @@ bool udf_handler::get_arguments() f_args.args[i]=0; switch (f_args.arg_type[i]) { case STRING_RESULT: + case DECIMAL_RESULT: { String *res=args[i]->val_str(&buffers[str_count++]); if (!(args[i]->null_value)) @@ -1786,7 +2823,7 @@ bool udf_handler::get_arguments() } break; case REAL_RESULT: - *((double*) to) = args[i]->val(); + *((double*) to)= args[i]->val_real(); if (!args[i]->null_value) { f_args.args[i]=to; @@ -1795,7 +2832,7 @@ bool udf_handler::get_arguments() break; case ROW_RESULT: default: - // This case should never be choosen + // This case should never be chosen DBUG_ASSERT(0); break; } @@ -1809,9 +2846,10 @@ String *udf_handler::val_str(String *str,String *save_str) { uchar is_null_tmp=0; ulong res_length; + DBUG_ENTER("udf_handler::val_str"); if (get_arguments()) - return 0; + DBUG_RETURN(0); char * (*func)(UDF_INIT *, UDF_ARGS *, char *, ulong *, uchar *, uchar *)= (char* (*)(UDF_INIT *, UDF_ARGS *, char *, ulong *, uchar *, uchar *)) u_d->func; @@ -1821,22 +2859,56 @@ String *udf_handler::val_str(String *str,String *save_str) if (str->alloc(MAX_FIELD_WIDTH)) { error=1; - return 0; + DBUG_RETURN(0); } } char *res=func(&initid, &f_args, (char*) str->ptr(), &res_length, &is_null_tmp, &error); + DBUG_PRINT("info", ("udf func returned, res_length: %lu", res_length)); if (is_null_tmp || !res || error) // The !res is for safety { - return 0; + DBUG_PRINT("info", ("Null or error")); + DBUG_RETURN(0); } if (res == str->ptr()) { str->length(res_length); - return str; + DBUG_PRINT("exit", ("str: %s", str->ptr())); + DBUG_RETURN(str); } save_str->set(res, res_length, str->charset()); - return save_str; + DBUG_PRINT("exit", ("save_str: %s", save_str->ptr())); + 
DBUG_RETURN(save_str); +} + + +/* + For the moment, UDF functions are returning DECIMAL values as strings +*/ + +my_decimal *udf_handler::val_decimal(my_bool *null_value, my_decimal *dec_buf) +{ + char buf[DECIMAL_MAX_STR_LENGTH+1], *end; + ulong res_length= DECIMAL_MAX_STR_LENGTH; + + if (get_arguments()) + { + *null_value=1; + return 0; + } + char *(*func)(UDF_INIT *, UDF_ARGS *, char *, ulong *, uchar *, uchar *)= + (char* (*)(UDF_INIT *, UDF_ARGS *, char *, ulong *, uchar *, uchar *)) + u_d->func; + + char *res= func(&initid, &f_args, buf, &res_length, &is_null, &error); + if (is_null || error) + { + *null_value= 1; + return 0; + } + end= res+ res_length; + str2my_decimal(E_DEC_FATAL_ERROR, res, dec_buf, &end); + return dec_buf; } @@ -1847,7 +2919,21 @@ void Item_udf_func::cleanup() } -double Item_func_udf_float::val() +void Item_udf_func::print(String *str) +{ + str->append(func_name()); + str->append('('); + for (uint i=0 ; i < arg_count ; i++) + { + if (i != 0) + str->append(','); + args[i]->print_item_w_name(str); + } + str->append(')'); +} + + +double Item_func_udf_float::val_real() { DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_func_udf_float::val"); @@ -1860,7 +2946,7 @@ double Item_func_udf_float::val() String *Item_func_udf_float::val_str(String *str) { DBUG_ASSERT(fixed == 1); - double nr=val(); + double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ str->set(nr,decimals,&my_charset_bin); @@ -1872,9 +2958,6 @@ longlong Item_func_udf_int::val_int() { DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_func_udf_int::val_int"); - DBUG_PRINT("info",("result_type: %d arg_count: %d", - args[0]->result_type(), arg_count)); - DBUG_RETURN(udf.val_int(&null_value)); } @@ -1892,6 +2975,59 @@ String *Item_func_udf_int::val_str(String *str) return str; } + +longlong Item_func_udf_decimal::val_int() +{ + my_decimal dec_buf, *dec= udf.val_decimal(&null_value, &dec_buf); + longlong result; + if (null_value) + return 0; + my_decimal2int(E_DEC_FATAL_ERROR, dec, 
unsigned_flag, &result); + return result; +} + + +double Item_func_udf_decimal::val_real() +{ + my_decimal dec_buf, *dec= udf.val_decimal(&null_value, &dec_buf); + double result; + if (null_value) + return 0.0; + my_decimal2double(E_DEC_FATAL_ERROR, dec, &result); + return result; +} + + +my_decimal *Item_func_udf_decimal::val_decimal(my_decimal *dec_buf) +{ + DBUG_ASSERT(fixed == 1); + DBUG_ENTER("Item_func_udf_decimal::val_decimal"); + DBUG_PRINT("info",("result_type: %d arg_count: %d", + args[0]->result_type(), arg_count)); + + DBUG_RETURN(udf.val_decimal(&null_value, dec_buf)); +} + + +String *Item_func_udf_decimal::val_str(String *str) +{ + my_decimal dec_buf, *dec= udf.val_decimal(&null_value, &dec_buf); + if (null_value) + return 0; + if (str->length() < DECIMAL_MAX_STR_LENGTH) + str->length(DECIMAL_MAX_STR_LENGTH); + my_decimal_round(E_DEC_FATAL_ERROR, dec, decimals, FALSE, &dec_buf); + my_decimal2string(E_DEC_FATAL_ERROR, &dec_buf, 0, 0, '0', str); + return str; +} + + +void Item_func_udf_decimal::fix_length_and_dec() +{ + fix_num_length_and_dec(); +} + + /* Default max_length is max argument length */ void Item_func_udf_str::fix_length_and_dec() @@ -1911,6 +3047,18 @@ String *Item_func_udf_str::val_str(String *str) return res; } + +/* + This has to come last in the udf_handler methods, or C for AIX + version 6.0.0.0 fails to compile with debugging enabled. (Yes, really.) + */ + +udf_handler::~udf_handler() +{ + /* Everything should be properly cleaned up by this moment. 
*/ + DBUG_ASSERT(not_original || !(initialized || buffers)); +} + #else bool udf_handler::get_arguments() { return 0; } #endif /* HAVE_DLOPEN */ @@ -1995,18 +3143,6 @@ void item_user_lock_release(User_level_lock *ull) { ull->locked=0; ull->thread_id= 0; - if (mysql_bin_log.is_open()) - { - char buf[256]; - const char *command="DO RELEASE_LOCK(\""; - String tmp(buf,sizeof(buf), system_charset_info); - tmp.copy(command, strlen(command), tmp.charset()); - tmp.append(ull->key,ull->key_length); - tmp.append("\")", 2); - Query_log_event qev(current_thd, tmp.ptr(), tmp.length(),0, FALSE); - qev.error_code=0; // this query is always safe to run on slave - mysql_bin_log.write(&qev); - } if (--ull->count) pthread_cond_signal(&ull->cond); else @@ -2031,9 +3167,9 @@ longlong Item_master_pos_wait::val_int() null_value = 1; return 0; } +#ifdef HAVE_REPLICATION longlong pos = (ulong)args[1]->val_int(); longlong timeout = (arg_count==3) ? args[2]->val_int() : 0 ; -#ifdef HAVE_REPLICATION if ((event_count = active_mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2) { null_value = 1; @@ -2049,7 +3185,7 @@ void debug_sync_point(const char* lock_name, uint lock_timeout) THD* thd=current_thd; User_level_lock* ull; struct timespec abstime; - int lock_name_len,error=0; + int lock_name_len; lock_name_len=strlen(lock_name); pthread_mutex_lock(&LOCK_user_locks); @@ -2082,9 +3218,13 @@ void debug_sync_point(const char* lock_name, uint lock_timeout) thd->mysys_var->current_cond= &ull->cond; set_timespec(abstime,lock_timeout); - while (!thd->killed && - (error=pthread_cond_timedwait(&ull->cond,&LOCK_user_locks,&abstime)) - != ETIME && error != ETIMEDOUT && ull->locked) ; + while (ull->locked && !thd->killed) + { + int error= pthread_cond_timedwait(&ull->cond, &LOCK_user_locks, &abstime); + if (error == ETIMEDOUT || error == ETIME) + break; + } + if (ull->locked) { if (!--ull->count) @@ -2128,7 +3268,17 @@ longlong Item_func_get_lock::val_int() struct timespec abstime; THD 
*thd=current_thd; User_level_lock *ull; - int error=0; + int error; + + /* + In slave thread no need to get locks, everything is serialized. Anyway + there is no way to make GET_LOCK() work on slave like it did on master + (i.e. make it return exactly the same value) because we don't have the + same other concurrent threads environment. No matter what we return here, + it's not guaranteed to be same as on master. + */ + if (thd->slave_thread) + return 1; pthread_mutex_lock(&LOCK_user_locks); @@ -2174,22 +3324,29 @@ longlong Item_func_get_lock::val_int() thd->mysys_var->current_cond= &ull->cond; set_timespec(abstime,timeout); - while (!thd->killed && - (error=pthread_cond_timedwait(&ull->cond,&LOCK_user_locks,&abstime)) - != ETIME && error != ETIMEDOUT && error != EINVAL && ull->locked) ; - if (thd->killed) - error=EINTR; // Return NULL + error= 0; + while (ull->locked && !thd->killed) + { + error= pthread_cond_timedwait(&ull->cond,&LOCK_user_locks,&abstime); + if (error == ETIMEDOUT || error == ETIME) + break; + error= 0; + } + if (ull->locked) { if (!--ull->count) + { + DBUG_ASSERT(0); delete ull; // Should never happen - if (error != ETIME && error != ETIMEDOUT) + } + if (!error) // Killed (thd->killed != 0) { error=1; null_value=1; // Return NULL } } - else + else // We got the lock { ull->locked=1; ull->thread=thd->real_id; @@ -2256,21 +3413,26 @@ longlong Item_func_release_lock::val_int() } -bool Item_func_last_insert_id::fix_fields(THD *thd, TABLE_LIST *tables, - Item **ref) +bool Item_func_last_insert_id::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); - if (Item_int_func::fix_fields(thd, tables, ref)) + if (Item_int_func::fix_fields(thd, ref)) return TRUE; if (arg_count == 0) { - /* - As this statement calls LAST_INSERT_ID(), set - THD::last_insert_id_used. 
- */ - thd->last_insert_id_used= TRUE; + if (!thd->last_insert_id_used) + { + /* + As this statement calls LAST_INSERT_ID(), set + THD::last_insert_id_used and remember first generated insert + id of the previous statement in THD::current_insert_id. + */ + thd->last_insert_id_used= TRUE; + thd->last_insert_id_used_bin_log= TRUE; + thd->current_insert_id= thd->last_insert_id; + } null_value= FALSE; } @@ -2282,13 +3444,13 @@ bool Item_func_last_insert_id::fix_fields(THD *thd, TABLE_LIST *tables, longlong Item_func_last_insert_id::val_int() { + THD *thd= current_thd; DBUG_ASSERT(fixed == 1); - THD* thd= current_thd; if (arg_count) { - longlong value=args[0]->val_int(); + longlong value= args[0]->val_int(); thd->insert_id(value); - null_value=args[0]->null_value; + null_value= args[0]->null_value; return value; } @@ -2303,13 +3465,14 @@ longlong Item_func_benchmark::val_int() DBUG_ASSERT(fixed == 1); char buff[MAX_FIELD_WIDTH]; String tmp(buff,sizeof(buff), &my_charset_bin); + my_decimal tmp_decimal; THD *thd=current_thd; for (ulong loop=0 ; loop < loop_count && !thd->killed; loop++) { switch (args[0]->result_type()) { case REAL_RESULT: - (void) args[0]->val(); + (void) args[0]->val_real(); break; case INT_RESULT: (void) args[0]->val_int(); @@ -2317,9 +3480,12 @@ longlong Item_func_benchmark::val_int() case STRING_RESULT: (void) args[0]->val_str(&tmp); break; + case DECIMAL_RESULT: + (void) args[0]->val_decimal(&tmp_decimal); + break; case ROW_RESULT: default: - // This case should never be choosen + // This case should never be chosen DBUG_ASSERT(0); return 0; } @@ -2330,7 +3496,7 @@ longlong Item_func_benchmark::val_int() void Item_func_benchmark::print(String *str) { - str->append("benchmark(", 10); + str->append(STRING_WITH_LEN("benchmark(")); char buffer[20]; // my_charset_bin is good enough for numbers String st(buffer, sizeof(buffer), &my_charset_bin); @@ -2341,6 +3507,48 @@ void Item_func_benchmark::print(String *str) str->append(')'); } + +/* This function is 
just used to create tests with time gaps */ + +longlong Item_func_sleep::val_int() +{ + THD *thd= current_thd; + struct timespec abstime; + pthread_cond_t cond; + int error; + + DBUG_ASSERT(fixed == 1); + + double time= args[0]->val_real(); + set_timespec_nsec(abstime, (ulonglong)(time * ULL(1000000000))); + + pthread_cond_init(&cond, NULL); + pthread_mutex_lock(&LOCK_user_locks); + + thd->mysys_var->current_mutex= &LOCK_user_locks; + thd->mysys_var->current_cond= &cond; + + error= 0; + while (!thd->killed) + { + error= pthread_cond_timedwait(&cond, &LOCK_user_locks, &abstime); + if (error == ETIMEDOUT || error == ETIME) + break; + error= 0; + } + + pthread_mutex_lock(&thd->mysys_var->mutex); + thd->mysys_var->current_mutex= 0; + thd->mysys_var->current_cond= 0; + pthread_mutex_unlock(&thd->mysys_var->mutex); + + pthread_mutex_unlock(&LOCK_user_locks); + pthread_cond_destroy(&cond); + + return test(!error); // Return 1 killed +} + + #define extra_size sizeof(double) static user_var_entry *get_variable(HASH *hash, LEX_STRING &name, @@ -2364,6 +3572,7 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name, entry->length=0; entry->update_query_id=0; entry->collation.set(NULL, DERIVATION_IMPLICIT); + entry->unsigned_flag= 0; /* If we are here, we were called from a SET or a query which sets a variable. Imagine it is this: @@ -2391,14 +3600,13 @@ static user_var_entry *get_variable(HASH *hash, LEX_STRING &name, SELECT @a:= ). 
*/ -bool Item_func_set_user_var::fix_fields(THD *thd, TABLE_LIST *tables, - Item **ref) +bool Item_func_set_user_var::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); /* fix_fields will call Item_func_set_user_var::fix_length_and_dec */ - if (Item_func::fix_fields(thd, tables, ref) || + if (Item_func::fix_fields(thd, ref) || !(entry= get_variable(&thd->user_vars, name, 1))) - return 1; + return TRUE; /* Remember the last query which updated it, this way a query can later know if this variable is a constant item in the query (it is if update_query_id @@ -2420,11 +3628,12 @@ bool Item_func_set_user_var::fix_fields(THD *thd, TABLE_LIST *tables, from the argument if the argument is NULL and the variable has previously been initialized. */ - if (!entry->collation.collation || !args[0]->null_value) + null_item= (args[0]->type() == NULL_ITEM); + if (!entry->collation.collation || !null_item) entry->collation.set(args[0]->collation.collation, DERIVATION_IMPLICIT); collation.set(entry->collation.collation, DERIVATION_IMPLICIT); cached_result_type= args[0]->result_type(); - return 0; + return FALSE; } @@ -2438,18 +3647,36 @@ Item_func_set_user_var::fix_length_and_dec() } -bool Item_func_set_user_var::update_hash(void *ptr, uint length, - Item_result type, - CHARSET_INFO *cs, - Derivation dv) +/* + Set value to user variable. + + SYNOPSYS + update_hash() + entry - pointer to structure representing variable + set_null - should we set NULL value ? 
+ ptr - pointer to buffer with new value + length - length of new value + type - type of new value + cs - charset info for new value + dv - derivation for new value + unsigned_arg - indiates if a value of type INT_RESULT is unsigned + + RETURN VALUE + False - success, True - failure +*/ + +static bool +update_hash(user_var_entry *entry, bool set_null, void *ptr, uint length, + Item_result type, CHARSET_INFO *cs, Derivation dv, + bool unsigned_arg) { - if ((null_value=args[0]->null_value)) + if (set_null) { char *pos= (char*) entry+ ALIGN_SIZE(sizeof(user_var_entry)); if (entry->value && entry->value != pos) my_free(entry->value,MYF(0)); - entry->value=0; - entry->length=0; + entry->value= 0; + entry->length= 0; } else { @@ -2477,7 +3704,7 @@ bool Item_func_set_user_var::update_hash(void *ptr, uint length, entry->value= (char*) my_realloc(entry->value, length, MYF(MY_ALLOW_ZERO_PTR | MY_WME)); if (!entry->value) - goto err; + return 1; } } if (type == STRING_RESULT) @@ -2486,22 +3713,43 @@ bool Item_func_set_user_var::update_hash(void *ptr, uint length, entry->value[length]= 0; // Store end \0 } memcpy(entry->value,ptr,length); + if (type == DECIMAL_RESULT) + ((my_decimal*)entry->value)->fix_buffer_pointer(); entry->length= length; - entry->type=type; entry->collation.set(cs, dv); + entry->unsigned_flag= unsigned_arg; } + entry->type=type; return 0; +} - err: - current_thd->fatal_error(); // Probably end of memory - null_value= 1; - return 1; + +bool +Item_func_set_user_var::update_hash(void *ptr, uint length, + Item_result res_type, + CHARSET_INFO *cs, Derivation dv, + bool unsigned_arg) +{ + /* + If we set a variable explicitely to NULL then keep the old + result type of the variable + */ + if ((null_value= args[0]->null_value) && null_item) + res_type= entry->type; // Don't change type of item + if (::update_hash(entry, (null_value= args[0]->null_value), + ptr, length, res_type, cs, dv, unsigned_arg)) + { + current_thd->fatal_error(); // Probably end of memory + 
null_value= 1; + return 1; + } + return 0; } /* Get the value of a variable as a double */ -double user_var_entry::val(my_bool *null_value) +double user_var_entry::val_real(my_bool *null_value) { if ((*null_value= (value == 0))) return 0.0; @@ -2511,6 +3759,12 @@ double user_var_entry::val(my_bool *null_value) return *(double*) value; case INT_RESULT: return (double) *(longlong*) value; + case DECIMAL_RESULT: + { + double result; + my_decimal2double(E_DEC_FATAL_ERROR, (my_decimal *)value, &result); + return result; + } case STRING_RESULT: return my_atof(value); // This is null terminated case ROW_RESULT: @@ -2533,6 +3787,12 @@ longlong user_var_entry::val_int(my_bool *null_value) return (longlong) *(double*) value; case INT_RESULT: return *(longlong*) value; + case DECIMAL_RESULT: + { + longlong result; + my_decimal2int(E_DEC_FATAL_ERROR, (my_decimal *)value, 0, &result); + return result; + } case STRING_RESULT: { int error; @@ -2559,7 +3819,13 @@ String *user_var_entry::val_str(my_bool *null_value, String *str, str->set(*(double*) value, decimals, &my_charset_bin); break; case INT_RESULT: - str->set(*(longlong*) value, &my_charset_bin); + if (!unsigned_flag) + str->set(*(longlong*) value, &my_charset_bin); + else + str->set(*(ulonglong*) value, &my_charset_bin); + break; + case DECIMAL_RESULT: + my_decimal2string(E_DEC_FATAL_ERROR, (my_decimal *)value, 0, 0, 0, str); break; case STRING_RESULT: if (str->copy(value, length, collation.collation)) @@ -2571,49 +3837,90 @@ String *user_var_entry::val_str(my_bool *null_value, String *str, return(str); } +/* Get the value of a variable as a decimal */ + +my_decimal *user_var_entry::val_decimal(my_bool *null_value, my_decimal *val) +{ + if ((*null_value= (value == 0))) + return 0; + + switch (type) { + case REAL_RESULT: + double2my_decimal(E_DEC_FATAL_ERROR, *(double*) value, val); + break; + case INT_RESULT: + int2my_decimal(E_DEC_FATAL_ERROR, *(longlong*) value, 0, val); + break; + case DECIMAL_RESULT: + val= (my_decimal 
*)value; + break; + case STRING_RESULT: + str2my_decimal(E_DEC_FATAL_ERROR, value, length, collation.collation, val); + break; + case ROW_RESULT: + DBUG_ASSERT(1); // Impossible + break; + } + return(val); +} + /* This functions is invoked on SET @variable or @variable:= expression. - Evaluete (and check expression), store results. + Evaluate (and check expression), store results. SYNOPSYS Item_func_set_user_var::check() NOTES - For now it always return OK. All problem with value evalueting - will be catched by thd->net.report_error check in sql_set_variables(). + For now it always return OK. All problem with value evaluating + will be caught by thd->net.report_error check in sql_set_variables(). RETURN - 0 - OK. + FALSE OK. */ bool -Item_func_set_user_var::check() +Item_func_set_user_var::check(bool use_result_field) { DBUG_ENTER("Item_func_set_user_var::check"); + if (use_result_field) + DBUG_ASSERT(result_field); switch (cached_result_type) { case REAL_RESULT: { - save_result.vreal= args[0]->val(); + save_result.vreal= use_result_field ? result_field->val_real() : + args[0]->val_real(); break; } case INT_RESULT: { - save_result.vint= args[0]->val_int(); + save_result.vint= use_result_field ? result_field->val_int() : + args[0]->val_int(); + unsigned_flag= use_result_field ? ((Field_num*)result_field)->unsigned_flag: + args[0]->unsigned_flag; break; } case STRING_RESULT: { - save_result.vstr= args[0]->val_str(&value); + save_result.vstr= use_result_field ? result_field->val_str(&value) : + args[0]->val_str(&value); + break; + } + case DECIMAL_RESULT: + { + save_result.vdec= use_result_field ? 
+ result_field->val_decimal(&decimal_buff) : + args[0]->val_decimal(&decimal_buff); break; } case ROW_RESULT: default: - // This case should never be choosen + // This case should never be chosen DBUG_ASSERT(0); break; } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -2628,7 +3935,7 @@ Item_func_set_user_var::check() the value method used by the user RETURN - 0 Ok + 0 OK 1 EOM Error */ @@ -2644,30 +3951,42 @@ Item_func_set_user_var::update() case REAL_RESULT: { res= update_hash((void*) &save_result.vreal,sizeof(save_result.vreal), - REAL_RESULT, &my_charset_bin, DERIVATION_IMPLICIT); + REAL_RESULT, &my_charset_bin, DERIVATION_IMPLICIT, 0); break; } case INT_RESULT: { res= update_hash((void*) &save_result.vint, sizeof(save_result.vint), - INT_RESULT, &my_charset_bin, DERIVATION_IMPLICIT); + INT_RESULT, &my_charset_bin, DERIVATION_IMPLICIT, + unsigned_flag); break; } case STRING_RESULT: { if (!save_result.vstr) // Null value res= update_hash((void*) 0, 0, STRING_RESULT, &my_charset_bin, - DERIVATION_IMPLICIT); + DERIVATION_IMPLICIT, 0); else res= update_hash((void*) save_result.vstr->ptr(), save_result.vstr->length(), STRING_RESULT, save_result.vstr->charset(), - DERIVATION_IMPLICIT); + DERIVATION_IMPLICIT, 0); + break; + } + case DECIMAL_RESULT: + { + if (!save_result.vdec) // Null value + res= update_hash((void*) 0, 0, DECIMAL_RESULT, &my_charset_bin, + DERIVATION_IMPLICIT, 0); + else + res= update_hash((void*) save_result.vdec, + sizeof(my_decimal), DECIMAL_RESULT, + &my_charset_bin, DERIVATION_IMPLICIT, 0); break; } case ROW_RESULT: default: - // This case should never be choosen + // This case should never be chosen DBUG_ASSERT(0); break; } @@ -2675,18 +3994,18 @@ Item_func_set_user_var::update() } -double Item_func_set_user_var::val() +double Item_func_set_user_var::val_real() { DBUG_ASSERT(fixed == 1); - check(); + check(0); update(); // Store expression - return entry->val(&null_value); + return entry->val_real(&null_value); } longlong 
Item_func_set_user_var::val_int() { DBUG_ASSERT(fixed == 1); - check(); + check(0); update(); // Store expression return entry->val_int(&null_value); } @@ -2694,22 +4013,163 @@ longlong Item_func_set_user_var::val_int() String *Item_func_set_user_var::val_str(String *str) { DBUG_ASSERT(fixed == 1); - check(); + check(0); update(); // Store expression return entry->val_str(&null_value, str, decimals); } +my_decimal *Item_func_set_user_var::val_decimal(my_decimal *val) +{ + DBUG_ASSERT(fixed == 1); + check(0); + update(); // Store expression + return entry->val_decimal(&null_value, val); +} + + void Item_func_set_user_var::print(String *str) { - str->append("(@", 2); + str->append(STRING_WITH_LEN("(@")); str->append(name.str, name.length); - str->append(":=", 2); + str->append(STRING_WITH_LEN(":=")); args[0]->print(str); str->append(')'); } +void Item_func_set_user_var::print_as_stmt(String *str) +{ + str->append(STRING_WITH_LEN("set @")); + str->append(name.str, name.length); + str->append(STRING_WITH_LEN(":=")); + args[0]->print(str); + str->append(')'); +} + +bool Item_func_set_user_var::send(Protocol *protocol, String *str_arg) +{ + if (result_field) + { + check(1); + update(); + return protocol->store(result_field); + } + return Item::send(protocol, str_arg); +} + +void Item_func_set_user_var::make_field(Send_field *tmp_field) +{ + if (result_field) + { + result_field->make_field(tmp_field); + DBUG_ASSERT(tmp_field->table_name != 0); + if (Item::name) + tmp_field->col_name=Item::name; // Use user supplied name + } + else + Item::make_field(tmp_field); +} + + +/* + Save the value of a user variable into a field + + SYNOPSIS + save_in_field() + field target field to save the value to + no_conversion flag indicating whether conversions are allowed + + DESCRIPTION + Save the function value into a field and update the user variable + accordingly. 
If a result field is defined and the target field doesn't + coincide with it then the value from the result field will be used as + the new value of the user variable. + + The reason to have this method rather than simply using the result + field in the val_xxx() methods is that the value from the result field + not always can be used when the result field is defined. + Let's consider the following cases: + 1) when filling a tmp table the result field is defined but the value of it + is undefined because it has to be produced yet. Thus we can't use it. + 2) on execution of an INSERT ... SELECT statement the save_in_field() + function will be called to fill the data in the new record. If the SELECT + part uses a tmp table then the result field is defined and should be + used in order to get the correct result. + + The difference between the SET_USER_VAR function and regular functions + like CONCAT is that the Item_func objects for the regular functions are + replaced by Item_field objects after the values of these functions have + been stored in a tmp table. Yet an object of the Item_field class cannot + be used to update a user variable. + Due to this we have to handle the result field in a special way here and + in the Item_func_set_user_var::send() function. 
+ + RETURN VALUES + FALSE Ok + TRUE Error +*/ + +int Item_func_set_user_var::save_in_field(Field *field, bool no_conversions) +{ + bool use_result_field= (result_field && result_field != field); + int error; + + /* Update the value of the user variable */ + check(use_result_field); + update(); + + if (result_type() == STRING_RESULT || + result_type() == REAL_RESULT && + field->result_type() == STRING_RESULT) + { + String *result; + CHARSET_INFO *cs= collation.collation; + char buff[MAX_FIELD_WIDTH]; // Alloc buffer for small columns + str_value.set_quick(buff, sizeof(buff), cs); + result= entry->val_str(&null_value, &str_value, decimals); + + if (null_value) + { + str_value.set_quick(0, 0, cs); + return set_field_to_null_with_conversions(field, no_conversions); + } + + /* NOTE: If null_value == FALSE, "result" must be not NULL. */ + + field->set_notnull(); + error=field->store(result->ptr(),result->length(),cs); + str_value.set_quick(0, 0, cs); + } + else if (result_type() == REAL_RESULT) + { + double nr= entry->val_real(&null_value); + if (null_value) + return set_field_to_null(field); + field->set_notnull(); + error=field->store(nr); + } + else if (result_type() == DECIMAL_RESULT) + { + my_decimal decimal_value; + my_decimal *value= entry->val_decimal(&null_value, &decimal_value); + if (null_value) + return set_field_to_null(field); + field->set_notnull(); + error=field->store_decimal(value); + } + else + { + longlong nr= entry->val_int(&null_value); + if (null_value) + return set_field_to_null_with_conversions(field, no_conversions); + field->set_notnull(); + error=field->store(nr, unsigned_flag); + } + return error; +} + + String * Item_func_get_user_var::val_str(String *str) { @@ -2721,12 +4181,21 @@ Item_func_get_user_var::val_str(String *str) } -double Item_func_get_user_var::val() +double Item_func_get_user_var::val_real() { DBUG_ASSERT(fixed == 1); if (!var_entry) return 0.0; // No such variable - return (var_entry->val(&null_value)); + return 
(var_entry->val_real(&null_value)); +} + + +my_decimal *Item_func_get_user_var::val_decimal(my_decimal *dec) +{ + DBUG_ASSERT(fixed == 1); + if (!var_entry) + return 0; + return var_entry->val_decimal(&null_value, dec); } @@ -2754,21 +4223,28 @@ longlong Item_func_get_user_var::val_int() stores this variable and its value in thd->user_var_events, so that it can be written to the binlog (will be written just before the query is written, see log.cc). - + RETURN - 0 OK - 1 Failed to put appropiate record into binary log - + 0 OK + 1 Failed to put appropriate record into binary log + */ -int get_var_with_binlog(THD *thd, LEX_STRING &name, - user_var_entry **out_entry) +int get_var_with_binlog(THD *thd, enum_sql_command sql_command, + LEX_STRING &name, user_var_entry **out_entry) { BINLOG_USER_VAR_EVENT *user_var_event; user_var_entry *var_entry; var_entry= get_variable(&thd->user_vars, name, 0); - - if (!(opt_bin_log && is_update_query(thd->lex->sql_command))) + + /* + Any reference to user-defined variable which is done from stored + function or trigger affects their execution and the execution of the + calling statement. We must log all such variables even if they are + not involved in table-updating statements. + */ + if (!(opt_bin_log && + (is_update_query(sql_command) || thd->in_sub_stmt))) { *out_entry= var_entry; return 0; @@ -2781,8 +4257,8 @@ int get_var_with_binlog(THD *thd, LEX_STRING &name, that it gets into the binlog (if it didn't, the slave could be influenced by a variable of the same name previously set by another thread). - We create it like if it had been explicitely set with SET before. - The 'new' mimicks what sql_yacc.yy does when 'SET @a=10;'. + We create it like if it had been explicitly set with SET before. + The 'new' mimics what sql_yacc.yy does when 'SET @a=10;'. sql_set_variables() is what is called from 'case SQLCOM_SET_OPTION' in dispatch_command()). 
Instead of building a one-element list to pass to sql_set_variables(), we could instead manually call check() and update(); @@ -2809,7 +4285,8 @@ int get_var_with_binlog(THD *thd, LEX_STRING &name, if (!(var_entry= get_variable(&thd->user_vars, name, 0))) goto err; } - else if (var_entry->used_query_id == thd->query_id) + else if (var_entry->used_query_id == thd->query_id || + mysql_bin_log.is_query_in_union(thd, var_entry->used_query_id)) { /* If this variable was already stored in user_var_events by this query @@ -2823,13 +4300,19 @@ int get_var_with_binlog(THD *thd, LEX_STRING &name, uint size; /* First we need to store value of var_entry, when the next situation - appers: + appears: > set @a:=1; > insert into t1 values (@a), (@a:=@a+1), (@a:=@a+1); - We have to write to binlog value @a= 1; + We have to write to binlog value @a= 1. + + We allocate the user_var_event on user_var_events_alloc pool, not on + the this-statement-execution pool because in SPs user_var_event objects + may need to be valid after current [SP] statement execution pool is + destroyed. 
*/ - size= ALIGN_SIZE(sizeof(BINLOG_USER_VAR_EVENT)) + var_entry->length; - if (!(user_var_event= (BINLOG_USER_VAR_EVENT *) thd->alloc(size))) + size= ALIGN_SIZE(sizeof(BINLOG_USER_VAR_EVENT)) + var_entry->length; + if (!(user_var_event= (BINLOG_USER_VAR_EVENT *) + alloc_root(thd->user_var_events_alloc, size))) goto err; user_var_event->value= (char*) user_var_event + @@ -2871,7 +4354,7 @@ void Item_func_get_user_var::fix_length_and_dec() decimals=NOT_FIXED_DEC; max_length=MAX_BLOB_WIDTH; - error= get_var_with_binlog(thd, name, &var_entry); + error= get_var_with_binlog(thd, thd->lex->sql_command, name, &var_entry); if (var_entry) { @@ -2879,13 +4362,21 @@ void Item_func_get_user_var::fix_length_and_dec() switch (var_entry->type) { case REAL_RESULT: max_length= DBL_DIG + 8; + break; case INT_RESULT: max_length= MAX_BIGINT_WIDTH; + decimals=0; break; case STRING_RESULT: max_length= MAX_BLOB_WIDTH; break; + case DECIMAL_RESULT: + max_length= DECIMAL_MAX_STR_LENGTH; + decimals= DECIMAL_MAX_SCALE; + break; case ROW_RESULT: // Keep compiler happy + default: + DBUG_ASSERT(0); break; } } @@ -2921,7 +4412,7 @@ enum Item_result Item_func_get_user_var::result_type() const void Item_func_get_user_var::print(String *str) { - str->append("(@", 2); + str->append(STRING_WITH_LEN("(@")); str->append(name.str,name.length); str->append(')'); } @@ -2934,7 +4425,7 @@ bool Item_func_get_user_var::eq(const Item *item, bool binary_cmp) const return 1; // Same item is same. 
/* Check if other type is also a get_user_var() object */ if (item->type() != FUNC_ITEM || - ((Item_func*) item)->func_name() != func_name()) + ((Item_func*) item)->functype() != functype()) return 0; Item_func_get_user_var *other=(Item_func_get_user_var*) item; return (name.length == other->name.length && @@ -2942,6 +4433,88 @@ bool Item_func_get_user_var::eq(const Item *item, bool binary_cmp) const } +bool Item_func_get_user_var::set_value(THD *thd, + sp_rcontext * /*ctx*/, Item **it) +{ + Item_func_set_user_var *suv= new Item_func_set_user_var(get_name(), *it); + /* + Item_func_set_user_var is not fixed after construction, call + fix_fields(). + */ + return (!suv || suv->fix_fields(thd, it) || suv->check(0) || suv->update()); +} + + +bool Item_user_var_as_out_param::fix_fields(THD *thd, Item **ref) +{ + DBUG_ASSERT(fixed == 0); + if (Item::fix_fields(thd, ref) || + !(entry= get_variable(&thd->user_vars, name, 1))) + return TRUE; + entry->type= STRING_RESULT; + /* + Let us set the same collation which is used for loading + of fields in LOAD DATA INFILE. + (Since Item_user_var_as_out_param is used only there). 
+ */ + entry->collation.set(thd->variables.collation_database); + entry->update_query_id= thd->query_id; + return FALSE; +} + + +void Item_user_var_as_out_param::set_null_value(CHARSET_INFO* cs) +{ + if (::update_hash(entry, TRUE, 0, 0, STRING_RESULT, cs, + DERIVATION_IMPLICIT, 0 /* unsigned_arg */)) + current_thd->fatal_error(); // Probably end of memory +} + + +void Item_user_var_as_out_param::set_value(const char *str, uint length, + CHARSET_INFO* cs) +{ + if (::update_hash(entry, FALSE, (void*)str, length, STRING_RESULT, cs, + DERIVATION_IMPLICIT, 0 /* unsigned_arg */)) + current_thd->fatal_error(); // Probably end of memory +} + + +double Item_user_var_as_out_param::val_real() +{ + DBUG_ASSERT(0); + return 0.0; +} + + +longlong Item_user_var_as_out_param::val_int() +{ + DBUG_ASSERT(0); + return 0; +} + + +String* Item_user_var_as_out_param::val_str(String *str) +{ + DBUG_ASSERT(0); + return 0; +} + + +my_decimal* Item_user_var_as_out_param::val_decimal(my_decimal *decimal_buffer) +{ + DBUG_ASSERT(0); + return 0; +} + + +void Item_user_var_as_out_param::print(String *str) +{ + str->append('@'); + str->append(name.str,name.length); +} + + Item_func_get_system_var:: Item_func_get_system_var(sys_var *var_arg, enum_var_type var_type_arg, LEX_STRING *component_arg, const char *name_arg, @@ -2954,7 +4527,7 @@ Item_func_get_system_var(sys_var *var_arg, enum_var_type var_type_arg, bool -Item_func_get_system_var::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +Item_func_get_system_var::fix_fields(THD *thd, Item **ref) { Item *item; DBUG_ENTER("Item_func_get_system_var::fix_fields"); @@ -3089,7 +4662,7 @@ void Item_func_match::init_search(bool no_order) } -bool Item_func_match::fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) +bool Item_func_match::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); Item *item; @@ -3104,11 +4677,11 @@ bool Item_func_match::fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) modifications to find_best and auto_close as 
complement to auto_init code above. */ - if (Item_func::fix_fields(thd, tlist, ref) || + if (Item_func::fix_fields(thd, ref) || !args[0]->const_during_execution()) { my_error(ER_WRONG_ARGUMENTS,MYF(0),"AGAINST"); - return 1; + return TRUE; } const_item_cache=0; @@ -3131,7 +4704,7 @@ bool Item_func_match::fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) if (key == NO_SUCH_KEY && !(flags & FT_BOOL)) { my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH"); - return 1; + return TRUE; } table=((Item_field *)item)->field->table; if (!(table->file->table_flags() & HA_CAN_FULLTEXT)) @@ -3140,7 +4713,8 @@ bool Item_func_match::fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) return 1; } table->fulltext_searched=1; - return agg_arg_collations_for_comparison(cmp_collation, args+1, arg_count-1); + return agg_arg_collations_for_comparison(cmp_collation, + args+1, arg_count-1, 0); } bool Item_func_match::fix_index() @@ -3155,7 +4729,7 @@ bool Item_func_match::fix_index() if (!table) goto err; - for (keynr=0 ; keynr < table->keys ; keynr++) + for (keynr=0 ; keynr < table->s->keys ; keynr++) { if ((table->key_info[keynr].flags & HA_FULLTEXT) && (table->keys_in_use_for_query.is_set(keynr))) @@ -3221,14 +4795,16 @@ err: key=NO_SUCH_KEY; return 0; } - my_error(ER_FT_MATCHING_KEY_NOT_FOUND,MYF(0)); + my_message(ER_FT_MATCHING_KEY_NOT_FOUND, + ER(ER_FT_MATCHING_KEY_NOT_FOUND), MYF(0)); return 1; } bool Item_func_match::eq(const Item *item, bool binary_cmp) const { - if (item->type() != FUNC_ITEM || ((Item_func*)item)->functype() != FT_FUNC || + if (item->type() != FUNC_ITEM || + ((Item_func*)item)->functype() != FT_FUNC || flags != ((Item_func_match*)item)->flags) return 0; @@ -3242,7 +4818,7 @@ bool Item_func_match::eq(const Item *item, bool binary_cmp) const } -double Item_func_match::val() +double Item_func_match::val_real() { DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_func_match::val"); @@ -3274,15 +4850,15 @@ double Item_func_match::val() void Item_func_match::print(String *str) { - 
str->append("(match ", 7); + str->append(STRING_WITH_LEN("(match ")); print_args(str, 1); - str->append(" against (", 10); + str->append(STRING_WITH_LEN(" against (")); args[0]->print(str); if (flags & FT_BOOL) - str->append(" in boolean mode", 16); + str->append(STRING_WITH_LEN(" in boolean mode")); else if (flags & FT_EXPAND) - str->append(" with query expansion", 21); - str->append("))", 2); + str->append(STRING_WITH_LEN(" with query expansion")); + str->append(STRING_WITH_LEN("))")); } longlong Item_func_bit_xor::val_int() @@ -3325,12 +4901,6 @@ Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name, sys_var *var; LEX_STRING *base_name, *component_name; - if (component.str == 0 && - !my_strcasecmp(system_charset_info, name.str, "VERSION")) - return new Item_string(NULL, server_version, - (uint) strlen(server_version), - system_charset_info, DERIVATION_SYSCONST); - if (component.str) { base_name= &component; @@ -3348,7 +4918,7 @@ Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name, { if (!var->is_struct()) { - net_printf(thd, ER_VARIABLE_IS_NOT_STRUCT, base_name->str); + my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), base_name->str); return 0; } } @@ -3417,10 +4987,371 @@ longlong Item_func_is_used_lock::val_int() } -longlong Item_func_found_rows::val_int() +longlong Item_func_row_count::val_int() { DBUG_ASSERT(fixed == 1); THD *thd= current_thd; - return thd->found_rows(); + return thd->row_count_func; +} + + +Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, + sp_name *name_arg) + :Item_func(), context(context_arg), m_name(name_arg), m_sp(NULL), + result_field(NULL) +{ + maybe_null= 1; + m_name->init_qname(current_thd); + dummy_table= (TABLE*) sql_calloc(sizeof(TABLE)); +} + + +Item_func_sp::Item_func_sp(Name_resolution_context *context_arg, + sp_name *name_arg, List<Item> &list) + :Item_func(list), context(context_arg), m_name(name_arg), m_sp(NULL), + result_field(NULL) +{ + maybe_null= 1; + 
m_name->init_qname(current_thd); + dummy_table= (TABLE*) sql_calloc(sizeof(TABLE)); +} + +void +Item_func_sp::cleanup() +{ + if (result_field) + { + delete result_field; + result_field= NULL; + } + m_sp= NULL; + dummy_table->s= NULL; + Item_func::cleanup(); +} + +const char * +Item_func_sp::func_name() const +{ + THD *thd= current_thd; + /* Calculate length to avoid reallocation of string for sure */ + uint len= ((m_name->m_db.length + + m_name->m_name.length)*2 + //characters*quoting + 2 + // ` and ` + 1 + // . + 1 + // end of string + ALIGN_SIZE(1)); // to avoid String reallocation + String qname((char *)alloc_root(thd->mem_root, len), len, + system_charset_info); + + qname.length(0); + append_identifier(thd, &qname, m_name->m_db.str, m_name->m_db.length); + qname.append('.'); + append_identifier(thd, &qname, m_name->m_name.str, m_name->m_name.length); + return qname.ptr(); +} + + +Field * +Item_func_sp::sp_result_field(void) const +{ + Field *field; + DBUG_ENTER("Item_func_sp::sp_result_field"); + DBUG_PRINT("info", ("sp: %s, flags: %x, level: %lu", + (m_sp ? "YES" : "NO"), + (m_sp ? m_sp->m_flags : (uint)0), + (m_sp ? 
m_sp->m_recursion_level : (ulong)0))); + + if (!m_sp) + { + THD *thd= current_thd; + if (!(m_sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, m_name, + &thd->sp_func_cache, TRUE))) + { + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); + DBUG_RETURN(0); + } + } + if (!dummy_table->s) + { + char *empty_name= (char *) ""; + TABLE_SHARE *share; + dummy_table->s= share= &dummy_table->share_not_to_be_used; + dummy_table->alias = empty_name; + dummy_table->maybe_null = maybe_null; + dummy_table->in_use= current_thd; + dummy_table->copy_blobs= TRUE; + share->table_cache_key = empty_name; + share->table_name = empty_name; + } + if (!(field= m_sp->create_result_field(max_length, name, dummy_table))) + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + + DBUG_RETURN(field); +} + + +/* + Execute function & store value in field + + RETURN + 0 value <> NULL + 1 value = NULL or error +*/ + +bool +Item_func_sp::execute(Field **flp) +{ + THD *thd= current_thd; + Field *f; + + /* + Get field in virtual tmp table to store result. Create the field if + invoked first time. + */ + + if (!(f= *flp)) + { + if (!(*flp= f= sp_result_field())) + { + /* Error set by sp_result_field() */ + null_value= 1; + return TRUE; + } + + f->move_field((f->pack_length() > sizeof(result_buf)) ? + sql_alloc(f->pack_length()) : result_buf); + f->null_ptr= (uchar *)&null_value; + f->null_bit= 1; + } + + /* Execute function and store the return value in the field. */ + + if (execute_impl(thd, f)) + { + null_value= 1; + context->process_error(thd); + return TRUE; + } + + /* Check that the field (the value) is not NULL. 
*/ + + null_value= f->is_null(); + + return null_value; +} + + +bool +Item_func_sp::execute_impl(THD *thd, Field *return_value_fld) +{ + bool err_status= TRUE; + Sub_statement_state statement_state; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *save_security_ctx= thd->security_ctx; +#endif + + DBUG_ENTER("Item_func_sp::execute_impl"); + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (context->security_ctx) + { + /* Set view definer security context */ + thd->security_ctx= context->security_ctx; + } +#endif + if (find_and_check_access(thd)) + goto error; + + /* + Disable the binlogging if this is not a SELECT statement. If this is a + SELECT, leave binlogging on, so execute_function() code writes the + function call into binlog. + */ + thd->reset_sub_statement_state(&statement_state, SUB_STMT_FUNCTION); + err_status= m_sp->execute_function(thd, args, arg_count, return_value_fld); + thd->restore_sub_statement_state(&statement_state); + +error: +#ifndef NO_EMBEDDED_ACCESS_CHECKS + thd->security_ctx= save_security_ctx; +#endif + + DBUG_RETURN(err_status); +} + + +void +Item_func_sp::make_field(Send_field *tmp_field) +{ + Field *field; + DBUG_ENTER("Item_func_sp::make_field"); + if ((field= sp_result_field())) + { + field->make_field(tmp_field); + delete field; + DBUG_VOID_RETURN; + } + init_make_field(tmp_field, MYSQL_TYPE_VARCHAR); + DBUG_VOID_RETURN; +} + + +enum enum_field_types +Item_func_sp::field_type() const +{ + Field *field; + DBUG_ENTER("Item_func_sp::field_type"); + + if (result_field) + DBUG_RETURN(result_field->type()); + if ((field= sp_result_field())) + { + enum_field_types result= field->type(); + delete field; + DBUG_RETURN(result); + } + DBUG_RETURN(MYSQL_TYPE_VARCHAR); +} + + +Item_result +Item_func_sp::result_type() const +{ + Field *field; + DBUG_ENTER("Item_func_sp::result_type"); + DBUG_PRINT("info", ("m_sp = %p", m_sp)); + + if (result_field) + DBUG_RETURN(result_field->result_type()); + if ((field= sp_result_field())) + { + Item_result 
result= field->result_type(); + delete field; + DBUG_RETURN(result); + } + DBUG_RETURN(STRING_RESULT); +} + +void +Item_func_sp::fix_length_and_dec() +{ + Field *field; + DBUG_ENTER("Item_func_sp::fix_length_and_dec"); + + if (result_field) + { + decimals= result_field->decimals(); + max_length= result_field->field_length; + collation.set(result_field->charset()); + DBUG_VOID_RETURN; + } + + if (!(field= sp_result_field())) + { + context->process_error(current_thd); + DBUG_VOID_RETURN; + } + decimals= field->decimals(); + max_length= field->field_length; + collation.set(field->charset()); + maybe_null= 1; + delete field; + DBUG_VOID_RETURN; +} + + +longlong Item_func_found_rows::val_int() +{ + DBUG_ASSERT(fixed == 1); + return current_thd->found_rows(); +} + + +Field * +Item_func_sp::tmp_table_field(TABLE *t_arg) +{ + Field *field= 0; + DBUG_ENTER("Item_func_sp::tmp_table_field"); + + if (m_sp) + field= m_sp->create_result_field(max_length, (const char*) name, t_arg); + + if (!field) + field= Item_func::tmp_table_field(t_arg); + + if (!field) + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + + DBUG_RETURN(field); +} + + +/* + Find the function and check access rights to the function + + SYNOPSIS + find_and_check_access() + thd thread handler + + RETURN + FALSE Access granted + TRUE Requested access can't be granted or function doesn't exists + + NOTES + Checks if requested access to function can be granted to user. + If function isn't found yet, it searches function first. + If function can't be found or user don't have requested access + error is raised. +*/ + +bool +Item_func_sp::find_and_check_access(THD *thd) +{ + if (! m_sp && ! 
(m_sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, m_name, + &thd->sp_func_cache, TRUE))) + { + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); + return TRUE; + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (check_routine_access(thd, EXECUTE_ACL, + m_sp->m_db.str, m_sp->m_name.str, 0, FALSE)) + return TRUE; +#endif + + return FALSE; +} + + +bool +Item_func_sp::fix_fields(THD *thd, Item **ref) +{ + bool res; + DBUG_ASSERT(fixed == 0); + res= Item_func::fix_fields(thd, ref); + if (!res && thd->lex->view_prepare_mode) + { + /* + Here we check privileges of the stored routine only during view + creation, in order to validate the view. A runtime check is + perfomed in Item_func_sp::execute(), and this method is not + called during context analysis. Notice, that during view + creation we do not infer into stored routine bodies and do not + check privileges of its statements, which would probably be a + good idea especially if the view has SQL SECURITY DEFINER and + the used stored procedure has SQL SECURITY DEFINER. + */ + res= find_and_check_access(thd); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *save_secutiry_ctx; + if (!res && !(res= set_routine_security_ctx(thd, m_sp, false, + &save_secutiry_ctx))) + { + sp_restore_security_context(thd, save_secutiry_ctx); + } +#endif /* ! NO_EMBEDDED_ACCESS_CHECKS */ + } + return res; } diff --git a/sql/item_func.h b/sql/item_func.h index 467b88eda76..68591f9c6f5 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -43,16 +42,21 @@ public: bool const_item_cache; enum Functype { UNKNOWN_FUNC,EQ_FUNC,EQUAL_FUNC,NE_FUNC,LT_FUNC,LE_FUNC, GE_FUNC,GT_FUNC,FT_FUNC, - LIKE_FUNC,NOTLIKE_FUNC,ISNULL_FUNC,ISNOTNULL_FUNC, - COND_AND_FUNC, COND_OR_FUNC, COND_XOR_FUNC, BETWEEN, IN_FUNC, + LIKE_FUNC,ISNULL_FUNC,ISNOTNULL_FUNC, + COND_AND_FUNC, COND_OR_FUNC, COND_XOR_FUNC, + BETWEEN, IN_FUNC, MULT_EQUAL_FUNC, INTERVAL_FUNC, ISNOTNULLTEST_FUNC, SP_EQUALS_FUNC, SP_DISJOINT_FUNC,SP_INTERSECTS_FUNC, SP_TOUCHES_FUNC,SP_CROSSES_FUNC,SP_WITHIN_FUNC, SP_CONTAINS_FUNC,SP_OVERLAPS_FUNC, SP_STARTPOINT,SP_ENDPOINT,SP_EXTERIORRING, SP_POINTN,SP_GEOMETRYN,SP_INTERIORRINGN, - NOT_FUNC, NOT_ALL_FUNC, NOW_FUNC, VAR_VALUE_FUNC}; - enum optimize_type { OPTIMIZE_NONE,OPTIMIZE_KEY,OPTIMIZE_OP, OPTIMIZE_NULL }; + NOT_FUNC, NOT_ALL_FUNC, + NOW_FUNC, TRIG_COND_FUNC, + SUSERVAR_FUNC, GUSERVAR_FUNC, COLLATE_FUNC, + EXTRACT_FUNC, CHAR_TYPECAST_FUNC, FUNC_SP, UDF_FUNC }; + enum optimize_type { OPTIMIZE_NONE,OPTIMIZE_KEY,OPTIMIZE_OP, OPTIMIZE_NULL, + OPTIMIZE_EQUAL }; enum Type type() const { return FUNC_ITEM; } virtual enum Functype functype() const { return UNKNOWN_FUNC; } Item_func(void): @@ -111,7 +115,7 @@ public: Item_func(List<Item> &list); // Constructor used for Item_cond_and/or (see Item comment) Item_func(THD *thd, Item_func *item); - bool fix_fields(THD *,struct st_table_list *, Item **ref); + bool fix_fields(THD *, Item **ref); table_map used_tables() const; table_map not_null_tables() const; void update_used_tables(); @@ -119,7 +123,17 @@ public: virtual optimize_type select_optimize() const { return OPTIMIZE_NONE; } virtual bool have_rev_func() const { return 0; } virtual Item *key_item() const { return args[0]; } - virtual const char *func_name() const { return "?"; } + /* + This method is used for debug purposes to print the name of an + item to the debug log. 
The second use of this method is as + a helper function of print(), where it is applicable. + To suit both goals it should return a meaningful, + distinguishable and sintactically correct string. This method + should not be used for runtime type identification, use enum + {Sum}Functype and Item_func::functype()/Item_sum::sum_func() + instead. + */ + virtual const char *func_name() const= 0; virtual bool const_item() const { return const_item_cache; } inline Item **arguments() const { return args; } void set_arguments(List<Item> &list); @@ -129,7 +143,10 @@ public: void print(String *str); void print_op(String *str); void print_args(String *str, uint from); - void fix_num_length_and_dec(); + virtual void fix_num_length_and_dec(); + void count_only_length(); + void count_real_length(); + void count_decimal_length(); inline bool get_arg0_date(TIME *ltime, uint fuzzy_date) { return (null_value=args[0]->get_date(ltime, fuzzy_date)); @@ -139,31 +156,42 @@ public: return (null_value=args[0]->get_time(ltime)); } bool is_null() { - (void) val_int(); /* Discard result. It sets null_value as side-effect. 
*/ + update_null_value(); return null_value; } + void signal_divide_by_null(); friend class udf_handler; Field *tmp_table_field() { return result_field; } Field *tmp_table_field(TABLE *t_arg); Item *get_tmp_table_item(THD *thd); - + + my_decimal *val_decimal(my_decimal *); + bool agg_arg_collations(DTCollation &c, Item **items, uint nitems, - uint flags= 0) + uint flags) { - return agg_item_collations(c, func_name(), items, nitems, flags); + return agg_item_collations(c, func_name(), items, nitems, flags, 1); } bool agg_arg_collations_for_comparison(DTCollation &c, Item **items, uint nitems, - uint flags= 0) + uint flags) { return agg_item_collations_for_comparison(c, func_name(), items, nitems, flags); } - bool agg_arg_charsets(DTCollation &c, Item **items, uint nitems, uint flags) + bool agg_arg_charsets(DTCollation &c, Item **items, uint nitems, + uint flags, int item_sep) { - return agg_item_charsets(c, func_name(), items, nitems, flags); + return agg_item_charsets(c, func_name(), items, nitems, flags, item_sep); } bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); + Item* compile(Item_analyzer analyzer, byte **arg_p, + Item_transformer transformer, byte *arg_t); + void traverse_cond(Cond_traverser traverser, + void * arg, traverse_order order); + bool is_expensive_processor(byte *arg); + virtual bool is_expensive() { return 0; } }; @@ -175,66 +203,109 @@ public: Item_real_func(Item *a,Item *b) :Item_func(a,b) {} Item_real_func(List<Item> &list) :Item_func(list) {} String *val_str(String*str); - longlong val_int() { DBUG_ASSERT(fixed == 1); return (longlong) val(); } + my_decimal *val_decimal(my_decimal *decimal_value); + longlong val_int() + { DBUG_ASSERT(fixed == 1); return (longlong) rint(val_real()); } enum Item_result result_type () const { return REAL_RESULT; } - void fix_length_and_dec() { decimals=NOT_FIXED_DEC; max_length=float_length(decimals); } + void fix_length_and_dec() + { decimals= 
NOT_FIXED_DEC; max_length= float_length(decimals); } }; -class Item_num_func :public Item_func +class Item_func_numhybrid: public Item_func { - protected: +protected: Item_result hybrid_type; public: - Item_num_func(Item *a) :Item_func(a),hybrid_type(REAL_RESULT) {} - Item_num_func(Item *a,Item *b) :Item_func(a,b),hybrid_type(REAL_RESULT) {} - String *val_str(String*str); - longlong val_int() { DBUG_ASSERT(fixed == 1); return (longlong) val(); } + Item_func_numhybrid(Item *a) :Item_func(a), hybrid_type(REAL_RESULT) + {} + Item_func_numhybrid(Item *a,Item *b) + :Item_func(a,b), hybrid_type(REAL_RESULT) + {} + Item_func_numhybrid(List<Item> &list) + :Item_func(list), hybrid_type(REAL_RESULT) + {} + enum Item_result result_type () const { return hybrid_type; } - void fix_length_and_dec() { fix_num_length_and_dec(); } - bool is_null() { (void) val(); return null_value; } + void fix_length_and_dec(); + void fix_num_length_and_dec(); + virtual void find_num_type()= 0; /* To be called from fix_length_and_dec */ + + double val_real(); + longlong val_int(); + my_decimal *val_decimal(my_decimal *); + String *val_str(String*str); + + virtual longlong int_op()= 0; + virtual double real_op()= 0; + virtual my_decimal *decimal_op(my_decimal *)= 0; + virtual String *str_op(String *)= 0; + bool is_null() { update_null_value(); return null_value; } +}; + +/* function where type of result detected by first argument */ +class Item_func_num1: public Item_func_numhybrid +{ +public: + Item_func_num1(Item *a) :Item_func_numhybrid(a) {} + Item_func_num1(Item *a, Item *b) :Item_func_numhybrid(a, b) {} + + void fix_num_length_and_dec(); + void find_num_type(); + String *str_op(String *str) { DBUG_ASSERT(0); return 0; } }; -class Item_num_op :public Item_func +/* Base class for operations like '+', '-', '*' */ +class Item_num_op :public Item_func_numhybrid { - protected: - Item_result hybrid_type; public: - Item_num_op(Item *a,Item *b) :Item_func(a,b),hybrid_type(REAL_RESULT) {} - String 
*val_str(String*str); + Item_num_op(Item *a,Item *b) :Item_func_numhybrid(a, b) {} + virtual void result_precision()= 0; void print(String *str) { print_op(str); } - enum Item_result result_type () const { return hybrid_type; } - void fix_length_and_dec() { fix_num_length_and_dec(); find_num_type(); } - void find_num_type(void); - bool is_null() { (void) val(); return null_value; } + void find_num_type(); + String *str_op(String *str) { DBUG_ASSERT(0); return 0; } }; class Item_int_func :public Item_func { public: - Item_int_func() :Item_func() { max_length=21; } - Item_int_func(Item *a) :Item_func(a) { max_length=21; } - Item_int_func(Item *a,Item *b) :Item_func(a,b) { max_length=21; } - Item_int_func(Item *a,Item *b,Item *c) :Item_func(a,b,c) { max_length=21; } - Item_int_func(List<Item> &list) :Item_func(list) { max_length=21; } + Item_int_func() :Item_func() { max_length= 21; } + Item_int_func(Item *a) :Item_func(a) { max_length= 21; } + Item_int_func(Item *a,Item *b) :Item_func(a,b) { max_length= 21; } + Item_int_func(Item *a,Item *b,Item *c) :Item_func(a,b,c) + { max_length= 21; } + Item_int_func(List<Item> &list) :Item_func(list) { max_length= 21; } Item_int_func(THD *thd, Item_int_func *item) :Item_func(thd, item) {} - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } String *val_str(String*str); enum Item_result result_type () const { return INT_RESULT; } void fix_length_and_dec() {} }; +class Item_func_connection_id :public Item_int_func +{ + longlong value; + +public: + Item_func_connection_id() {} + const char *func_name() const { return "connection_id"; } + void fix_length_and_dec(); + bool fix_fields(THD *thd, Item **ref); + longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } +}; + + class Item_func_signed :public Item_int_func { public: Item_func_signed(Item *a) :Item_int_func(a) {} const char *func_name() const { return "cast_as_signed"; } - 
double val() + double val_real() { - double tmp= args[0]->val(); + double tmp= args[0]->val_real(); null_value= args[0]->null_value; return tmp; } @@ -243,6 +314,8 @@ public: void fix_length_and_dec() { max_length=args[0]->max_length; unsigned_flag=0; } void print(String *str); + uint decimal_precision() const { return args[0]->decimal_precision(); } + }; @@ -258,22 +331,53 @@ public: }; -class Item_func_plus :public Item_num_op +class Item_decimal_typecast :public Item_func { + my_decimal decimal_value; public: - Item_func_plus(Item *a,Item *b) :Item_num_op(a,b) {} - const char *func_name() const { return "+"; } - double val(); + Item_decimal_typecast(Item *a, int len, int dec) :Item_func(a) + { + max_length= len + 2; + decimals= dec; + } + String *val_str(String *str); + double val_real(); longlong val_int(); + my_decimal *val_decimal(my_decimal*); + enum Item_result result_type () const { return DECIMAL_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_NEWDECIMAL; } + void fix_length_and_dec() {}; + const char *func_name() const { return "decimal_typecast"; } + void print(String *); +}; + + +class Item_func_additive_op :public Item_num_op +{ +public: + Item_func_additive_op(Item *a,Item *b) :Item_num_op(a,b) {} + void result_precision(); }; -class Item_func_minus :public Item_num_op + +class Item_func_plus :public Item_func_additive_op { public: - Item_func_minus(Item *a,Item *b) :Item_num_op(a,b) {} + Item_func_plus(Item *a,Item *b) :Item_func_additive_op(a,b) {} + const char *func_name() const { return "+"; } + longlong int_op(); + double real_op(); + my_decimal *decimal_op(my_decimal *); +}; + +class Item_func_minus :public Item_func_additive_op +{ +public: + Item_func_minus(Item *a,Item *b) :Item_func_additive_op(a,b) {} const char *func_name() const { return "-"; } - double val(); - longlong val_int(); + longlong int_op(); + double real_op(); + my_decimal *decimal_op(my_decimal *); void fix_length_and_dec(); }; @@ -283,31 +387,36 @@ class 
Item_func_mul :public Item_num_op public: Item_func_mul(Item *a,Item *b) :Item_num_op(a,b) {} const char *func_name() const { return "*"; } - double val(); - longlong val_int(); + longlong int_op(); + double real_op(); + my_decimal *decimal_op(my_decimal *); + void result_precision(); }; class Item_func_div :public Item_num_op { public: + uint prec_increment; Item_func_div(Item *a,Item *b) :Item_num_op(a,b) {} - double val(); - longlong val_int(); + longlong int_op() { DBUG_ASSERT(0); return 0; } + double real_op(); + my_decimal *decimal_op(my_decimal *); const char *func_name() const { return "/"; } void fix_length_and_dec(); + void result_precision(); }; -class Item_func_int_div :public Item_num_op +class Item_func_int_div :public Item_int_func { public: - Item_func_int_div(Item *a,Item *b) :Item_num_op(a,b) - { hybrid_type=INT_RESULT; } - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + Item_func_int_div(Item *a,Item *b) :Item_int_func(a,b) + {} longlong val_int(); const char *func_name() const { return "DIV"; } void fix_length_and_dec(); + void print(String *str) { print_op(str); } }; @@ -315,37 +424,41 @@ class Item_func_mod :public Item_num_op { public: Item_func_mod(Item *a,Item *b) :Item_num_op(a,b) {} - double val(); - longlong val_int(); + longlong int_op(); + double real_op(); + my_decimal *decimal_op(my_decimal *); const char *func_name() const { return "%"; } + void result_precision(); void fix_length_and_dec(); }; -class Item_func_neg :public Item_num_func +class Item_func_neg :public Item_func_num1 { public: - Item_func_neg(Item *a) :Item_num_func(a) {} - double val(); - longlong val_int(); + Item_func_neg(Item *a) :Item_func_num1(a) {} + double real_op(); + longlong int_op(); + my_decimal *decimal_op(my_decimal *); const char *func_name() const { return "-"; } void fix_length_and_dec(); + void fix_num_length_and_dec(); + uint decimal_precision() const { return args[0]->decimal_precision(); } }; -class Item_func_abs :public 
Item_num_func +class Item_func_abs :public Item_func_num1 { public: - Item_func_abs(Item *a) :Item_num_func(a) {} + Item_func_abs(Item *a) :Item_func_num1(a) {} + double real_op(); + longlong int_op(); + my_decimal *decimal_op(my_decimal *); const char *func_name() const { return "abs"; } - double val(); - longlong val_int(); - enum Item_result result_type () const - { return args[0]->result_type() == INT_RESULT ? INT_RESULT : REAL_RESULT; } void fix_length_and_dec(); }; -// A class to handle logaritmic and trigometric functions +// A class to handle logarithmic and trigonometric functions class Item_dec_func :public Item_real_func { @@ -375,7 +488,7 @@ class Item_func_exp :public Item_dec_func { public: Item_func_exp(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "exp"; } }; @@ -384,7 +497,7 @@ class Item_func_ln :public Item_dec_func { public: Item_func_ln(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "ln"; } }; @@ -394,7 +507,7 @@ class Item_func_log :public Item_dec_func public: Item_func_log(Item *a) :Item_dec_func(a) {} Item_func_log(Item *a,Item *b) :Item_dec_func(a,b) {} - double val(); + double val_real(); const char *func_name() const { return "log"; } }; @@ -403,7 +516,7 @@ class Item_func_log2 :public Item_dec_func { public: Item_func_log2(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "log2"; } }; @@ -412,7 +525,7 @@ class Item_func_log10 :public Item_dec_func { public: Item_func_log10(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "log10"; } }; @@ -421,7 +534,7 @@ class Item_func_sqrt :public Item_dec_func { public: Item_func_sqrt(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "sqrt"; } }; @@ -430,57 +543,57 @@ class Item_func_pow :public Item_dec_func { 
public: Item_func_pow(Item *a,Item *b) :Item_dec_func(a,b) {} - double val(); + double val_real(); const char *func_name() const { return "pow"; } }; class Item_func_acos :public Item_dec_func { - public: +public: Item_func_acos(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "acos"; } }; class Item_func_asin :public Item_dec_func { - public: +public: Item_func_asin(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "asin"; } }; class Item_func_atan :public Item_dec_func { - public: +public: Item_func_atan(Item *a) :Item_dec_func(a) {} Item_func_atan(Item *a,Item *b) :Item_dec_func(a,b) {} - double val(); + double val_real(); const char *func_name() const { return "atan"; } }; class Item_func_cos :public Item_dec_func { - public: +public: Item_func_cos(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "cos"; } }; class Item_func_sin :public Item_dec_func { - public: +public: Item_func_sin(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "sin"; } }; class Item_func_tan :public Item_dec_func { - public: +public: Item_func_tan(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "tan"; } }; @@ -492,33 +605,48 @@ public: }; -class Item_func_ceiling :public Item_func_integer +class Item_func_int_val :public Item_func_num1 +{ +public: + Item_func_int_val(Item *a) :Item_func_num1(a) {} + void fix_num_length_and_dec(); + void find_num_type(); +}; + + +class Item_func_ceiling :public Item_func_int_val { - Item_func_ceiling(); /* Never called */ public: - Item_func_ceiling(Item *a) :Item_func_integer(a) {} + Item_func_ceiling(Item *a) :Item_func_int_val(a) {} const char *func_name() const { return "ceiling"; } - longlong val_int(); + longlong int_op(); + double real_op(); + my_decimal 
*decimal_op(my_decimal *); }; -class Item_func_floor :public Item_func_integer + +class Item_func_floor :public Item_func_int_val { public: - Item_func_floor(Item *a) :Item_func_integer(a) {} + Item_func_floor(Item *a) :Item_func_int_val(a) {} const char *func_name() const { return "floor"; } - longlong val_int(); + longlong int_op(); + double real_op(); + my_decimal *decimal_op(my_decimal *); }; /* This handles round and truncate */ -class Item_func_round :public Item_real_func +class Item_func_round :public Item_func_num1 { bool truncate; public: - Item_func_round(Item *a,Item *b,bool trunc_arg) - :Item_real_func(a,b),truncate(trunc_arg) {} + Item_func_round(Item *a, Item *b, bool trunc_arg) + :Item_func_num1(a,b), truncate(trunc_arg) {} const char *func_name() const { return truncate ? "truncate" : "round"; } - double val(); + double real_op(); + longlong int_op(); + my_decimal *decimal_op(my_decimal *); void fix_length_and_dec(); }; @@ -529,11 +657,11 @@ class Item_func_rand :public Item_real_func public: Item_func_rand(Item *a) :Item_real_func(a), rand(0) {} Item_func_rand() :Item_real_func() {} - double val(); + double val_real(); const char *func_name() const { return "rand"; } bool const_item() const { return 0; } void update_used_tables(); - bool fix_fields(THD *thd, struct st_table_list *tables, Item **ref); + bool fix_fields(THD *thd, Item **ref); }; @@ -550,12 +678,13 @@ class Item_func_units :public Item_real_func { char *name; double mul,add; - public: +public: Item_func_units(char *name_arg,Item *a,double mul_arg,double add_arg) :Item_real_func(a),name(name_arg),mul(mul_arg),add(add_arg) {} - double val(); + double val_real(); const char *func_name() const { return name; } - void fix_length_and_dec() { decimals=NOT_FIXED_DEC; max_length=float_length(decimals); } + void fix_length_and_dec() + { decimals= NOT_FIXED_DEC; max_length= float_length(decimals); } }; @@ -567,12 +696,12 @@ class Item_func_min_max :public Item_func public: 
Item_func_min_max(List<Item> &list,int cmp_sign_arg) :Item_func(list), cmp_type(INT_RESULT), cmp_sign(cmp_sign_arg) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *); + my_decimal *val_decimal(my_decimal *); void fix_length_and_dec(); enum Item_result result_type () const { return cmp_type; } - table_map not_null_tables() const { return 0; } }; class Item_func_min :public Item_func_min_max @@ -757,14 +886,19 @@ public: Item_func_last_insert_id(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "last_insert_id"; } - void fix_length_and_dec() { if (arg_count) max_length= args[0]->max_length; } - bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref); + void fix_length_and_dec() + { + if (arg_count) + max_length= args[0]->max_length; + } + bool fix_fields(THD *thd, Item **ref); }; + class Item_func_benchmark :public Item_int_func { ulong loop_count; - public: +public: Item_func_benchmark(ulong loop_count_arg,Item *expr) :Item_int_func(expr), loop_count(loop_count_arg) {} @@ -775,22 +909,40 @@ class Item_func_benchmark :public Item_int_func }; +class Item_func_sleep :public Item_int_func +{ +public: + Item_func_sleep(Item *a) :Item_int_func(a) {} + bool const_item() const { return 0; } + const char *func_name() const { return "sleep"; } + void update_used_tables() + { + Item_int_func::update_used_tables(); + used_tables_cache|= RAND_TABLE_BIT; + } + longlong val_int(); +}; + + + #ifdef HAVE_DLOPEN class Item_udf_func :public Item_func { - protected: +protected: udf_handler udf; public: - Item_udf_func(udf_func *udf_arg) :Item_func(), udf(udf_arg) {} + Item_udf_func(udf_func *udf_arg) + :Item_func(), udf(udf_arg) {} Item_udf_func(udf_func *udf_arg, List<Item> &list) :Item_func(list), udf(udf_arg) {} const char *func_name() const { return udf.name(); } - bool fix_fields(THD *thd, struct st_table_list *tables, Item **ref) + enum Functype functype() const { return UDF_FUNC; } + bool fix_fields(THD 
*thd, Item **ref) { DBUG_ASSERT(fixed == 0); - bool res= udf.fix_fields(thd, tables, this, arg_count, args); + bool res= udf.fix_fields(thd, this, arg_count, args); used_tables_cache= udf.used_tables_cache; const_item_cache= udf.const_item_cache; fixed= 1; @@ -799,18 +951,33 @@ public: void cleanup(); Item_result result_type () const { return udf.result_type(); } table_map not_null_tables() const { return 0; } + bool is_expensive() { return 1; } + void print(String *str); }; class Item_func_udf_float :public Item_udf_func { public: - Item_func_udf_float(udf_func *udf_arg) :Item_udf_func(udf_arg) {} - Item_func_udf_float(udf_func *udf_arg, List<Item> &list) - :Item_udf_func(udf_arg,list) {} + Item_func_udf_float(udf_func *udf_arg) + :Item_udf_func(udf_arg) {} + Item_func_udf_float(udf_func *udf_arg, + List<Item> &list) + :Item_udf_func(udf_arg, list) {} longlong val_int() - { DBUG_ASSERT(fixed == 1); return (longlong) Item_func_udf_float::val(); } - double val(); + { + DBUG_ASSERT(fixed == 1); + return (longlong) rint(Item_func_udf_float::val_real()); + } + my_decimal *val_decimal(my_decimal *dec_buf) + { + double res=val_real(); + if (null_value) + return NULL; + double2my_decimal(E_DEC_FATAL_ERROR, res, dec_buf); + return dec_buf; + } + double val_real(); String *val_str(String *str); void fix_length_and_dec() { fix_num_length_and_dec(); } }; @@ -819,38 +986,66 @@ class Item_func_udf_float :public Item_udf_func class Item_func_udf_int :public Item_udf_func { public: - Item_func_udf_int(udf_func *udf_arg) :Item_udf_func(udf_arg) {} - Item_func_udf_int(udf_func *udf_arg, List<Item> &list) - :Item_udf_func(udf_arg,list) {} + Item_func_udf_int(udf_func *udf_arg) + :Item_udf_func(udf_arg) {} + Item_func_udf_int(udf_func *udf_arg, + List<Item> &list) + :Item_udf_func(udf_arg, list) {} longlong val_int(); - double val() { return (double) Item_func_udf_int::val_int(); } + double val_real() { return (double) Item_func_udf_int::val_int(); } String *val_str(String *str); 
enum Item_result result_type () const { return INT_RESULT; } - void fix_length_and_dec() { decimals=0; max_length=21; } + void fix_length_and_dec() { decimals= 0; max_length= 21; } +}; + + +class Item_func_udf_decimal :public Item_udf_func +{ +public: + Item_func_udf_decimal(udf_func *udf_arg) + :Item_udf_func(udf_arg) {} + Item_func_udf_decimal(udf_func *udf_arg, List<Item> &list) + :Item_udf_func(udf_arg, list) {} + longlong val_int(); + double val_real(); + my_decimal *val_decimal(my_decimal *); + String *val_str(String *str); + enum Item_result result_type () const { return DECIMAL_RESULT; } + void fix_length_and_dec(); }; class Item_func_udf_str :public Item_udf_func { public: - Item_func_udf_str(udf_func *udf_arg) :Item_udf_func(udf_arg) {} + Item_func_udf_str(udf_func *udf_arg) + :Item_udf_func(udf_arg) {} Item_func_udf_str(udf_func *udf_arg, List<Item> &list) - :Item_udf_func(udf_arg,list) {} + :Item_udf_func(udf_arg, list) {} String *val_str(String *); - double val() + double val_real() { - int err; - String *res; + int err_not_used; char *end_not_used; - res=val_str(&str_value); - return res ? my_strntod(res->charset(), (char*) res->ptr(), res->length(), - &end_not_used, &err) : 0.0; + String *res; + res= val_str(&str_value); + return res ? my_strntod(res->charset(),(char*) res->ptr(), + res->length(), &end_not_used, &err_not_used) : 0.0; } longlong val_int() { - int err; + int err_not_used; String *res; res=val_str(&str_value); - return res ? my_strntoll(res->charset(),res->ptr(),res->length(),10,(char**) 0,&err) : (longlong) 0; + return res ? 
my_strntoll(res->charset(),res->ptr(),res->length(),10, + (char**) 0, &err_not_used) : (longlong) 0; + } + my_decimal *val_decimal(my_decimal *dec_buf) + { + String *res=val_str(&str_value); + if (!res) + return NULL; + string2my_decimal(E_DEC_FATAL_ERROR, res, dec_buf); + return dec_buf; } enum Item_result result_type () const { return STRING_RESULT; } void fix_length_and_dec(); @@ -861,29 +1056,46 @@ public: class Item_func_udf_float :public Item_real_func { public: - Item_func_udf_float(udf_func *udf_arg) :Item_real_func() {} - Item_func_udf_float(udf_func *udf_arg, List<Item> &list) :Item_real_func(list) {} - double val() { DBUG_ASSERT(fixed == 1); return 0.0; } + Item_func_udf_float(udf_func *udf_arg) + :Item_real_func() {} + Item_func_udf_float(udf_func *udf_arg, List<Item> &list) + :Item_real_func(list) {} + double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } }; class Item_func_udf_int :public Item_int_func { public: - Item_func_udf_int(udf_func *udf_arg) :Item_int_func() {} - Item_func_udf_int(udf_func *udf_arg, List<Item> &list) :Item_int_func(list) {} + Item_func_udf_int(udf_func *udf_arg) + :Item_int_func() {} + Item_func_udf_int(udf_func *udf_arg, List<Item> &list) + :Item_int_func(list) {} longlong val_int() { DBUG_ASSERT(fixed == 1); return 0; } }; +class Item_func_udf_decimal :public Item_int_func +{ +public: + Item_func_udf_decimal(udf_func *udf_arg) + :Item_int_func() {} + Item_func_udf_decimal(udf_func *udf_arg, List<Item> &list) + :Item_int_func(list) {} + my_decimal *val_decimal(my_decimal *) { DBUG_ASSERT(fixed == 1); return 0; } +}; + + class Item_func_udf_str :public Item_func { public: - Item_func_udf_str(udf_func *udf_arg) :Item_func() {} - Item_func_udf_str(udf_func *udf_arg, List<Item> &list) :Item_func(list) {} + Item_func_udf_str(udf_func *udf_arg) + :Item_func() {} + Item_func_udf_str(udf_func *udf_arg, List<Item> &list) + :Item_func(list) {} String *val_str(String *) { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } - double 
val() { DBUG_ASSERT(fixed == 1); null_value=1; return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); null_value= 1; return 0.0; } longlong val_int() { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } enum Item_result result_type () const { return STRING_RESULT; } void fix_length_and_dec() { maybe_null=1; max_length=0; } @@ -913,7 +1125,7 @@ class Item_func_get_lock :public Item_int_func class Item_func_release_lock :public Item_int_func { String value; - public: +public: Item_func_release_lock(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "release_lock"; } @@ -925,7 +1137,7 @@ class Item_func_release_lock :public Item_int_func class Item_master_pos_wait :public Item_int_func { String value; - public: +public: Item_master_pos_wait(Item *a,Item *b) :Item_int_func(a,b) {} Item_master_pos_wait(Item *a,Item *b,Item *c) :Item_int_func(a,b,c) {} longlong val_int(); @@ -934,7 +1146,7 @@ class Item_master_pos_wait :public Item_int_func }; -/* Handling of user definiable variables */ +/* Handling of user definable variables */ class user_var_entry; @@ -944,36 +1156,44 @@ class Item_func_set_user_var :public Item_func user_var_entry *entry; char buffer[MAX_FIELD_WIDTH]; String value; + my_decimal decimal_buff; + bool null_item; union { longlong vint; double vreal; String *vstr; + my_decimal *vdec; } save_result; - String save_buff; - public: LEX_STRING name; // keep it public Item_func_set_user_var(LEX_STRING a,Item *b) :Item_func(b), cached_result_type(INT_RESULT), name(a) {} - double val(); + enum Functype functype() const { return SUSERVAR_FUNC; } + double val_real(); longlong val_int(); String *val_str(String *str); - bool update_hash(void *ptr, uint length, enum Item_result type, - CHARSET_INFO *cs, Derivation dv); - bool check(); + my_decimal *val_decimal(my_decimal *); + bool update_hash(void *ptr, uint length, enum Item_result type, + CHARSET_INFO *cs, Derivation dv, bool unsigned_arg); + bool send(Protocol 
*protocol, String *str_arg); + void make_field(Send_field *tmp_field); + bool check(bool use_result_field); bool update(); enum Item_result result_type () const { return cached_result_type; } - bool fix_fields(THD *thd, struct st_table_list *tables, Item **ref); + bool fix_fields(THD *thd, Item **ref); void fix_length_and_dec(); void print(String *str); + void print_as_stmt(String *str); const char *func_name() const { return "set_user_var"; } + int save_in_field(Field *field, bool no_conversions); }; -class Item_func_get_user_var :public Item_func +class Item_func_get_user_var :public Item_func, + private Settable_routine_parameter { user_var_entry *var_entry; @@ -981,8 +1201,11 @@ public: LEX_STRING name; // keep it public Item_func_get_user_var(LEX_STRING a): Item_func(), name(a) {} - double val(); + enum Functype functype() const { return GUSERVAR_FUNC; } + LEX_STRING get_name() { return name; } + double val_real(); longlong val_int(); + my_decimal *val_decimal(my_decimal*); String *val_str(String* str); void fix_length_and_dec(); void print(String *str); @@ -991,13 +1214,50 @@ public: We must always return variables as strings to guard against selects of type select @t1:=1,@t1,@t:="hello",@t from foo where (@t1:= t2.b) */ - enum_field_types field_type() const { return MYSQL_TYPE_STRING; } - enum Functype functype() const { return VAR_VALUE_FUNC; } + enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } const char *func_name() const { return "get_user_var"; } bool const_item() const; table_map used_tables() const { return const_item() ? 0 : RAND_TABLE_BIT; } bool eq(const Item *item, bool binary_cmp) const; + +private: + bool set_value(THD *thd, sp_rcontext *ctx, Item **it); + +public: + Settable_routine_parameter *get_settable_routine_parameter() + { + return this; + } +}; + + +/* + This item represents user variable used as out parameter (e.g in LOAD DATA), + and it is supposed to be used only for this purprose. So it is simplified + a lot. 
Actually you should never obtain its value. + + The only two reasons for this thing being an Item is possibility to store it + in List<Item> and desire to place this code somewhere near other functions + working with user variables. +*/ +class Item_user_var_as_out_param :public Item +{ + LEX_STRING name; + user_var_entry *entry; +public: + Item_user_var_as_out_param(LEX_STRING a) : name(a) {} + /* We should return something different from FIELD_ITEM here */ + enum Type type() const { return STRING_ITEM;} + double val_real(); + longlong val_int(); + String *val_str(String *str); + my_decimal *val_decimal(my_decimal *decimal_buffer); + /* fix_fields() binds variable name with its entry structure */ + bool fix_fields(THD *thd, Item **ref); + void print(String *str); + void set_null_value(CHARSET_INFO* cs); + void set_value(const char *str, uint length, CHARSET_INFO* cs); }; @@ -1012,15 +1272,17 @@ public: Item_func_get_system_var(sys_var *var_arg, enum_var_type var_type_arg, LEX_STRING *component_arg, const char *name_arg, size_t name_len_arg); - bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref); + bool fix_fields(THD *thd, Item **ref); /* Stubs for pure virtual methods. Should never be called: this item is always substituted with a constant in fix_fields(). 
*/ - double val() { DBUG_ASSERT(0); return 0.0; } + double val_real() { DBUG_ASSERT(0); return 0.0; } longlong val_int() { DBUG_ASSERT(0); return 0; } String* val_str(String*) { DBUG_ASSERT(0); return 0; } void fix_length_and_dec() { DBUG_ASSERT(0); } + /* TODO: fix to support views */ + const char *func_name() const { return "get_system_var"; } }; @@ -1030,7 +1292,7 @@ public: Item_func_inet_aton(Item *a) :Item_int_func(a) {} longlong val_int(); const char *func_name() const { return "inet_aton"; } - void fix_length_and_dec() { decimals = 0; max_length = 21; maybe_null=1;} + void fix_length_and_dec() { decimals = 0; max_length = 21; maybe_null=1;unsigned_flag=1;} }; @@ -1057,26 +1319,20 @@ public: DBUG_ENTER("Item_func_match"); Item_real_func::cleanup(); if (!master && ft_handler) - { ft_handler->please->close_search(ft_handler); - } - if (concat) - { - delete concat; - concat= 0; - } ft_handler= 0; + concat= 0; DBUG_VOID_RETURN; } enum Functype functype() const { return FT_FUNC; } const char *func_name() const { return "match"; } void update_used_tables() {} table_map not_null_tables() const { return 0; } - bool fix_fields(THD *thd, struct st_table_list *tlist, Item **ref); + bool fix_fields(THD *thd, Item **ref); bool eq(const Item *, bool binary_cmp) const; /* The following should be safe, even if we compare doubles */ - longlong val_int() { DBUG_ASSERT(fixed == 1); return val()!=0.0; } - double val(); + longlong val_int() { DBUG_ASSERT(fixed == 1); return val_real() != 0.0; } + double val_real(); void print(String *str); bool fix_index(); @@ -1117,7 +1373,116 @@ public: enum Cast_target { ITEM_CAST_BINARY, ITEM_CAST_SIGNED_INT, ITEM_CAST_UNSIGNED_INT, - ITEM_CAST_DATE, ITEM_CAST_TIME, ITEM_CAST_DATETIME, ITEM_CAST_CHAR + ITEM_CAST_DATE, ITEM_CAST_TIME, ITEM_CAST_DATETIME, ITEM_CAST_CHAR, + ITEM_CAST_DECIMAL +}; + + +class Item_func_row_count :public Item_int_func +{ +public: + Item_func_row_count() :Item_int_func() {} + longlong val_int(); + const char 
*func_name() const { return "row_count"; } + void fix_length_and_dec() { decimals= 0; maybe_null=0; } +}; + + +/* + * + * Stored FUNCTIONs + * + */ + +class sp_head; +class sp_name; +struct st_sp_security_context; + +class Item_func_sp :public Item_func +{ +private: + Name_resolution_context *context; + sp_name *m_name; + mutable sp_head *m_sp; + TABLE *dummy_table; + Field *result_field; + char result_buf[64]; + + bool execute(Field **flp); + bool execute_impl(THD *thd, Field *return_value_fld); + Field *sp_result_field(void) const; + +public: + + Item_func_sp(Name_resolution_context *context_arg, sp_name *name); + + Item_func_sp(Name_resolution_context *context_arg, + sp_name *name, List<Item> &list); + + virtual ~Item_func_sp() + {} + + void cleanup(); + + const char *func_name() const; + + enum enum_field_types field_type() const; + + Field *tmp_table_field(TABLE *t_arg); + + void make_field(Send_field *tmp_field); + + Item_result result_type() const; + + longlong val_int() + { + if (execute(&result_field)) + return (longlong) 0; + return result_field->val_int(); + } + + double val_real() + { + if (execute(&result_field)) + return 0.0; + return result_field->val_real(); + } + + my_decimal *val_decimal(my_decimal *dec_buf) + { + if (execute(&result_field)) + return NULL; + return result_field->val_decimal(dec_buf); + } + + String *val_str(String *str) + { + String buf; + char buff[20]; + buf.set(buff, 20, str->charset()); + buf.length(0); + if (execute(&result_field)) + return NULL; + /* + result_field will set buf pointing to internal buffer + of the resul_field. Due to this it will change any time + when SP is executed. In order to prevent occasional + corruption of returned value, we make here a copy. 
+ */ + result_field->val_str(&buf); + str->copy(buf); + return str; + } + + virtual bool change_context_processor(byte *cntx) + { context= (Name_resolution_context *)cntx; return FALSE; } + + void fix_length_and_dec(); + bool find_and_check_access(THD * thd); + virtual enum Functype functype() const { return FUNC_SP; } + + bool fix_fields(THD *thd, Item **ref); + bool is_expensive() { return 1; } }; diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 6cb8c790319..6c012277888 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2003-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -25,6 +24,12 @@ #ifdef HAVE_SPATIAL #include <m_ctype.h> +Field *Item_geometry_func::tmp_table_field(TABLE *t_arg) +{ + return new Field_geom(max_length, maybe_null, name, t_arg, + (Field::geometry_type) get_geometry_type()); +} + void Item_geometry_func::fix_length_and_dec() { collation.set(&my_charset_bin); @@ -33,6 +38,10 @@ void Item_geometry_func::fix_length_and_dec() maybe_null= 1; } +int Item_geometry_func::get_geometry_type() const +{ + return (int)Field::GEOM_GEOMETRY; +} String *Item_func_geometry_from_text::val_str(String *str) { @@ -151,6 +160,12 @@ String *Item_func_geometry_type::val_str(String *str) } +int Item_func_envelope::get_geometry_type() const +{ + return (int) Field::GEOM_POLYGON; +} + + String *Item_func_envelope::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -175,6 +190,12 @@ String *Item_func_envelope::val_str(String *str) } +int 
Item_func_centroid::get_geometry_type() const +{ + return (int) Field::GEOM_POINT; +} + + String *Item_func_centroid::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -309,11 +330,17 @@ err: */ +int Item_func_point::get_geometry_type() const +{ + return (int) Field::GEOM_POINT; +} + + String *Item_func_point::val_str(String *str) { DBUG_ASSERT(fixed == 1); - double x= args[0]->val(); - double y= args[1]->val(); + double x= args[0]->val_real(); + double y= args[1]->val_real(); if ((null_value= (args[0]->null_value || args[1]->null_value || @@ -615,7 +642,7 @@ longlong Item_func_numpoints::val_int() } -double Item_func_x::val() +double Item_func_x::val_real() { DBUG_ASSERT(fixed == 1); double res= 0.0; // In case of errors @@ -631,7 +658,7 @@ double Item_func_x::val() } -double Item_func_y::val() +double Item_func_y::val_real() { DBUG_ASSERT(fixed == 1); double res= 0; // In case of errors @@ -647,7 +674,7 @@ double Item_func_y::val() } -double Item_func_area::val() +double Item_func_area::val_real() { DBUG_ASSERT(fixed == 1); double res= 0; // In case of errors @@ -663,7 +690,7 @@ double Item_func_area::val() return res; } -double Item_func_glength::val() +double Item_func_glength::val_real() { DBUG_ASSERT(fixed == 1); double res= 0; // In case of errors @@ -673,8 +700,9 @@ double Item_func_glength::val() null_value= (!swkb || !(geom= Geometry::construct(&buffer, - swkb->ptr(), swkb->length())) || - geom->length(&res)); + swkb->ptr(), + swkb->length())) || + geom->geom_length(&res)); return res; } diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index 2719cbb0bab..9c7970f9e53 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -33,6 +32,8 @@ public: Item_geometry_func(List<Item> &list) :Item_str_func(list) {} void fix_length_and_dec(); enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; } + Field *tmp_table_field(TABLE *t_arg); + virtual int get_geometry_type() const; bool is_null() { (void) val_int(); return null_value; } }; @@ -91,6 +92,7 @@ public: Item_func_centroid(Item *a): Item_geometry_func(a) {} const char *func_name() const { return "centroid"; } String *val_str(String *); + int get_geometry_type() const; }; class Item_func_envelope: public Item_geometry_func @@ -99,6 +101,7 @@ public: Item_func_envelope(Item *a): Item_geometry_func(a) {} const char *func_name() const { return "envelope"; } String *val_str(String *); + int get_geometry_type() const; }; class Item_func_point: public Item_geometry_func @@ -108,6 +111,7 @@ public: Item_func_point(Item *a, Item *b, Item *srid): Item_geometry_func(a, b, srid) {} const char *func_name() const { return "point"; } String *val_str(String *); + int get_geometry_type() const; }; class Item_func_spatial_decomp: public Item_geometry_func @@ -272,7 +276,7 @@ class Item_func_x: public Item_real_func String value; public: Item_func_x(Item *a): Item_real_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "x"; } void fix_length_and_dec() { @@ -287,7 +291,7 @@ class Item_func_y: public Item_real_func String value; public: Item_func_y(Item *a): Item_real_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "y"; } void fix_length_and_dec() { @@ -335,7 +339,7 @@ class Item_func_area: public Item_real_func String value; public: Item_func_area(Item *a): Item_real_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "area"; } void fix_length_and_dec() { @@ -350,7 +354,7 @@ class Item_func_glength: public 
Item_real_func String value; public: Item_func_glength(Item *a): Item_real_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "glength"; } void fix_length_and_dec() { diff --git a/sql/item_row.cc b/sql/item_row.cc index 493eefc9ff0..c037c092d89 100644 --- a/sql/item_row.cc +++ b/sql/item_row.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -53,7 +52,7 @@ void Item_row::illegal_method_call(const char *method) DBUG_VOID_RETURN; } -bool Item_row::fix_fields(THD *thd, TABLE_LIST *tabl, Item **ref) +bool Item_row::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); null_value= 0; @@ -61,8 +60,8 @@ bool Item_row::fix_fields(THD *thd, TABLE_LIST *tabl, Item **ref) Item **arg, **arg_end; for (arg= items, arg_end= items+arg_count; arg != arg_end ; arg++) { - if ((*arg)->fix_fields(thd, tabl, arg)) - return 1; + if ((*arg)->fix_fields(thd, arg)) + return TRUE; // we can't assign 'item' before, because fix_fields() can change arg Item *item= *arg; used_tables_cache |= item->used_tables(); @@ -73,15 +72,15 @@ bool Item_row::fix_fields(THD *thd, TABLE_LIST *tabl, Item **ref) with_null|= item->null_inside(); else { - item->val_int(); - with_null|= item->null_value; + if (item->is_null()) + with_null|= 1; } } maybe_null|= item->maybe_null; with_sum_func= with_sum_func || item->with_sum_func; } fixed= 1; - return 0; + return FALSE; } @@ -104,7 +103,7 @@ void Item_row::split_sum_func(THD *thd, Item **ref_pointer_array, { Item **arg, **arg_end; for (arg= items, arg_end= items+arg_count; arg != arg_end ; arg++) - (*arg)->split_sum_func2(thd, 
ref_pointer_array, fields, arg); + (*arg)->split_sum_func2(thd, ref_pointer_array, fields, arg, TRUE); } @@ -152,6 +151,28 @@ bool Item_row::walk(Item_processor processor, byte *arg) return (this->*processor)(arg); } +Item *Item_row::transform(Item_transformer transformer, byte *arg) +{ + DBUG_ASSERT(!current_thd->is_stmt_prepare()); + + for (uint i= 0; i < arg_count; i++) + { + Item *new_item= items[i]->transform(transformer, arg); + if (!new_item) + return 0; + + /* + THD::change_item_tree() should be called only if the tree was + really transformed, i.e. when a new item has been created. + Otherwise we'll be allocating a lot of unnecessary memory for + change records at each execution. + */ + if (items[i] != new_item) + current_thd->change_item_tree(&items[i], new_item); + } + return (this->*transformer)(arg); +} + void Item_row::bring_value() { for (uint i= 0; i < arg_count; i++) diff --git a/sql/item_row.h b/sql/item_row.h index 28cb47b6815..8623b579e33 100644 --- a/sql/item_row.h +++ b/sql/item_row.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -39,7 +38,7 @@ public: { illegal_method_call((const char*)"make_field"); }; - double val() + double val_real() { illegal_method_call((const char*)"val"); return 0; @@ -54,7 +53,12 @@ public: illegal_method_call((const char*)"val_str"); return 0; }; - bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref); + my_decimal *val_decimal(my_decimal *) + { + illegal_method_call((const char*)"val_decimal"); + return 0; + }; + bool fix_fields(THD *thd, Item **ref); void cleanup(); void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields); table_map used_tables() const { return used_tables_cache; }; @@ -64,9 +68,10 @@ public: void print(String *str); bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); uint cols() { return arg_count; } - Item* el(uint i) { return items[i]; } + Item* element_index(uint i) { return items[i]; } Item** addr(uint i) { return items + i; } bool check_cols(uint c); bool null_inside() { return with_null; }; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 7c98f0c6380..6b1921e5bc8 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -38,38 +37,79 @@ C_MODE_END String my_empty_string("",default_charset_info); -static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, - const char *fname) -{ - my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0), - c1.collation->name,c1.derivation_name(), - c2.collation->name,c2.derivation_name(), - fname); + +String *Item_str_func::check_well_formed_result(String *str) +{ + /* Check whether we got a well-formed string */ + CHARSET_INFO *cs= str->charset(); + int well_formed_error; + uint wlen= cs->cset->well_formed_len(cs, + str->ptr(), str->ptr() + str->length(), + str->length(), &well_formed_error); + if (wlen < str->length()) + { + THD *thd= current_thd; + char hexbuf[7]; + enum MYSQL_ERROR::enum_warning_level level; + uint diff= str->length() - wlen; + set_if_smaller(diff, 3); + octet2hex(hexbuf, str->ptr() + wlen, diff); + if (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)) + { + level= MYSQL_ERROR::WARN_LEVEL_ERROR; + null_value= 1; + str= 0; + } + else + level= MYSQL_ERROR::WARN_LEVEL_WARN; + push_warning_printf(thd, level, ER_INVALID_CHARACTER_STRING, + ER(ER_INVALID_CHARACTER_STRING), cs->csname, hexbuf); + } + return str; } -uint nr_of_decimals(const char *str) + +bool Item_str_func::fix_fields(THD *thd, Item **ref) { - if ((str=strchr(str,'.'))) - { - const char *start= ++str; - for (; my_isdigit(system_charset_info,*str) ; str++) ; - return (uint) (str-start); - } - return 0; + bool res= Item_func::fix_fields(thd, ref); + /* + In Item_str_func::check_well_formed_result() we may set null_value + flag on the same condition as in test() below. 
+ */ + maybe_null= (maybe_null || + test(thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); + return res; } -double Item_str_func::val() + +my_decimal *Item_str_func::val_decimal(my_decimal *decimal_value) { DBUG_ASSERT(fixed == 1); - int err; char buff[64]; - char *end_not_used; String *res, tmp(buff,sizeof(buff), &my_charset_bin); res= val_str(&tmp); - return res ? my_strntod(res->charset(), (char*) res->ptr(),res->length(), - &end_not_used, &err) : 0.0; + if (!res) + return 0; + (void)str2my_decimal(E_DEC_FATAL_ERROR, (char*) res->ptr(), + res->length(), res->charset(), decimal_value); + return decimal_value; +} + + +double Item_str_func::val_real() +{ + DBUG_ASSERT(fixed == 1); + int err_not_used; + char *end_not_used, buff[64]; + String *res, tmp(buff,sizeof(buff), &my_charset_bin); + res= val_str(&tmp); + return res ? my_strntod(res->charset(), (char*) res->ptr(), res->length(), + &end_not_used, &err_not_used) : 0.0; } + longlong Item_str_func::val_int() { DBUG_ASSERT(fixed == 1); @@ -371,11 +411,18 @@ void Item_func_concat::fix_length_and_dec() { ulonglong max_result_length= 0; - if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV)) + if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1)) return; for (uint i=0 ; i < arg_count ; i++) - max_result_length+= args[i]->max_length; + { + if (args[i]->collation.collation->mbmaxlen != collation.collation->mbmaxlen) + max_result_length+= (args[i]->max_length / + args[i]->collation.collation->mbmaxlen) * + collation.collation->mbmaxlen; + else + max_result_length+= args[i]->max_length; + } if (max_result_length >= MAX_BLOB_WIDTH) { @@ -491,7 +538,6 @@ String *Item_func_des_decrypt::val_str(String *str) DBUG_ASSERT(fixed == 1); #ifdef HAVE_OPENSSL uint code= ER_WRONG_PARAMETERS_TO_PROCEDURE; - DES_key_schedule ks1, ks2, ks3; DES_cblock ivec; struct st_des_keyblock keyblock; struct st_des_keyschedule keyschedule; @@ -508,7 +554,8 @@ String 
*Item_func_des_decrypt::val_str(String *str) { uint key_number=(uint) (*res)[0] & 127; // Check if automatic key and that we have privilege to uncompress using it - if (!(current_thd->master_access & SUPER_ACL) || key_number > 9) + if (!(current_thd->security_ctx->master_access & SUPER_ACL) || + key_number > 9) goto error; VOID(pthread_mutex_lock(&LOCK_des_key_file)); @@ -686,7 +733,7 @@ void Item_func_concat_ws::fix_length_and_dec() { ulonglong max_result_length; - if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV)) + if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1)) return; /* @@ -899,7 +946,7 @@ void Item_func_replace::fix_length_and_dec() } max_length= (ulong) max_result_length; - if (agg_arg_charsets(collation, args, 3, MY_COLL_CMP_CONV)) + if (agg_arg_charsets(collation, args, 3, MY_COLL_CMP_CONV, 1)) return; } @@ -908,24 +955,35 @@ String *Item_func_insert::val_str(String *str) { DBUG_ASSERT(fixed == 1); String *res,*res2; - uint start,length; + longlong start, length; /* must be longlong to avoid truncation */ null_value=0; res=args[0]->val_str(str); res2=args[3]->val_str(&tmp_value); - start=(uint) args[1]->val_int()-1; - length=(uint) args[2]->val_int(); + start= args[1]->val_int() - 1; + length= args[2]->val_int(); + if (args[0]->null_value || args[1]->null_value || args[2]->null_value || args[3]->null_value) goto null; /* purecov: inspected */ - start=res->charpos(start); - length=res->charpos(length,start); - if (start > res->length()+1) - return res; // Wrong param; skip insert - if (length > res->length()-start) - length=res->length()-start; - if (res->length() - length + res2->length() > - current_thd->variables.max_allowed_packet) + + if ((start < 0) || (start > res->length())) + return res; // Wrong param; skip insert + if ((length < 0) || (length > res->length())) + length= res->length(); + + /* start and length are now sufficiently valid to pass to charpos function */ + start= res->charpos((int) start); + 
length= res->charpos((int) length, (uint32) start); + + /* Re-testing with corrected params */ + if (start > res->length()) + return res; /* purecov: inspected */ // Wrong param; skip insert + if (length > res->length() - start) + length= res->length() - start; + + if ((ulonglong) (res->length() - length + res2->length()) > + (ulonglong) current_thd->variables.max_allowed_packet) { push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, @@ -934,7 +992,7 @@ String *Item_func_insert::val_str(String *str) goto null; } res=copy_if_not_alloced(str,res,res->length()); - res->replace(start,length,*res2); + res->replace((uint32) start,(uint32) length,*res2); return res; null: null_value=1; @@ -944,15 +1002,11 @@ null: void Item_func_insert::fix_length_and_dec() { - Item *cargs[2]; ulonglong max_result_length; - cargs[0]= args[0]; - cargs[1]= args[3]; - if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV)) + // Handle character set for args[0] and args[3]. 
+ if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 3)) return; - args[0]= cargs[0]; - args[3]= cargs[1]; max_result_length= ((ulonglong) args[0]->max_length+ (ulonglong) args[3]->max_length); if (max_result_length >= MAX_BLOB_WIDTH) @@ -964,7 +1018,7 @@ void Item_func_insert::fix_length_and_dec() } -String *Item_func_lcase::val_str(String *str) +String *Item_str_conv::val_str(String *str) { DBUG_ASSERT(fixed == 1); String *res; @@ -974,24 +1028,25 @@ String *Item_func_lcase::val_str(String *str) return 0; /* purecov: inspected */ } null_value=0; - res=copy_if_not_alloced(str,res,res->length()); - res->casedn(); - return res; -} - - -String *Item_func_ucase::val_str(String *str) -{ - DBUG_ASSERT(fixed == 1); - String *res; - if (!(res=args[0]->val_str(str))) + if (multiply == 1) { - null_value=1; /* purecov: inspected */ - return 0; /* purecov: inspected */ + uint len; + res= copy_if_not_alloced(str,res,res->length()); + len= converter(collation.collation, (char*) res->ptr(), res->length(), + (char*) res->ptr(), res->length()); + DBUG_ASSERT(len <= res->length()); + res->length(len); + } + else + { + uint len= res->length() * multiply; + tmp_value.alloc(len); + tmp_value.set_charset(collation.collation); + len= converter(collation.collation, (char*) res->ptr(), res->length(), + (char*) tmp_value.ptr(), len); + tmp_value.length(len); + res= &tmp_value; } - null_value=0; - res=copy_if_not_alloced(str,res,res->length()); - res->caseup(); return res; } @@ -999,16 +1054,21 @@ String *Item_func_ucase::val_str(String *str) String *Item_func_left::val_str(String *str) { DBUG_ASSERT(fixed == 1); - String *res =args[0]->val_str(str); - long length =(long) args[1]->val_int(); + String *res= args[0]->val_str(str); + + /* must be longlong to avoid truncation */ + longlong length= args[1]->val_int(); uint char_pos; - if ((null_value=args[0]->null_value)) + if ((null_value=(args[0]->null_value || args[1]->null_value))) return 0; - if (length <= 0) + + /* if 
"unsigned_flag" is set, we have a *huge* positive number. */ + if ((length <= 0) && (!args[1]->unsigned_flag)) return &my_empty_string; - if (res->length() <= (uint) length || - res->length() <= (char_pos= res->charpos(length))) + + if ((res->length() <= (ulonglong) length) || + (res->length() <= (char_pos= res->charpos((int) length)))) return res; tmp_value.set(*res, 0, char_pos); @@ -1040,14 +1100,18 @@ void Item_func_left::fix_length_and_dec() String *Item_func_right::val_str(String *str) { DBUG_ASSERT(fixed == 1); - String *res =args[0]->val_str(str); - long length =(long) args[1]->val_int(); + String *res= args[0]->val_str(str); + /* must be longlong to avoid truncation */ + longlong length= args[1]->val_int(); - if ((null_value=args[0]->null_value)) + if ((null_value=(args[0]->null_value || args[1]->null_value))) return 0; /* purecov: inspected */ - if (length <= 0) + + /* if "unsigned_flag" is set, we have a *huge* positive number. */ + if ((length <= 0) && (!args[1]->unsigned_flag)) return &my_empty_string; /* purecov: inspected */ - if (res->length() <= (uint) length) + + if (res->length() <= (ulonglong) length) return res; /* purecov: inspected */ uint start=res->numchars(); @@ -1070,25 +1134,44 @@ String *Item_func_substr::val_str(String *str) { DBUG_ASSERT(fixed == 1); String *res = args[0]->val_str(str); - int32 start = (int32) args[1]->val_int(); - int32 length = arg_count == 3 ? (int32) args[2]->val_int() : INT_MAX32; - int32 tmp_length; + /* must be longlong to avoid truncation */ + longlong start= args[1]->val_int(); + /* Assumes that the maximum length of a String is < INT_MAX32. */ + /* Limit so that code sees out-of-bound value properly. */ + longlong length= arg_count == 3 ? args[2]->val_int() : INT_MAX32; + longlong tmp_length; if ((null_value=(args[0]->null_value || args[1]->null_value || (arg_count == 3 && args[2]->null_value)))) return 0; /* purecov: inspected */ - start= (int32)((start < 0) ? 
res->numchars() + start : start -1); - start=res->charpos(start); - length=res->charpos(length,start); - if (start < 0 || (uint) start+1 > res->length() || length <= 0) + + /* Negative length, will return empty string. */ + if ((arg_count == 3) && (length <= 0) && !args[2]->unsigned_flag) + return &my_empty_string; + + /* Assumes that the maximum length of a String is < INT_MAX32. */ + /* Set here so that rest of code sees out-of-bound value as such. */ + if ((length <= 0) || (length > INT_MAX32)) + length= INT_MAX32; + + /* if "unsigned_flag" is set, we have a *huge* positive number. */ + /* Assumes that the maximum length of a String is < INT_MAX32. */ + if ((!args[1]->unsigned_flag && (start < INT_MIN32 || start > INT_MAX32)) || + (args[1]->unsigned_flag && ((ulonglong) start > INT_MAX32))) + return &my_empty_string; + + start= ((start < 0) ? res->numchars() + start : start - 1); + start= res->charpos((int) start); + if ((start < 0) || ((uint) start + 1 > res->length())) return &my_empty_string; - tmp_length=(int32) res->length()-start; - length=min(length,tmp_length); + length= res->charpos((int) length, (uint32) start); + tmp_length= res->length() - start; + length= min(length, tmp_length); - if (!start && res->length() == (uint) length) + if (!start && (longlong) res->length() == length) return res; - tmp_value.set(*res,(uint) start,(uint) length); + tmp_value.set(*res, (uint32) start, (uint32) length); return &tmp_value; } @@ -1101,11 +1184,10 @@ void Item_func_substr::fix_length_and_dec() if (args[1]->const_item()) { int32 start= (int32) args[1]->val_int(); - start= (int32)((start < 0) ? max_length + start : start - 1); - if (start < 0 || start >= (int32) max_length) - max_length=0; /* purecov: inspected */ + if (start < 0) + max_length= ((uint)(-start) > max_length) ? 
0 : (uint)(-start); else - max_length-= (uint) start; + max_length-= min((uint)(start - 1), max_length); } if (arg_count == 3 && args[2]->const_item()) { @@ -1123,7 +1205,7 @@ void Item_func_substr_index::fix_length_and_dec() { max_length= args[0]->max_length; - if (agg_arg_charsets(collation, args, 2, MY_COLL_CMP_CONV)) + if (agg_arg_charsets(collation, args, 2, MY_COLL_CMP_CONV, 1)) return; } @@ -1131,9 +1213,9 @@ void Item_func_substr_index::fix_length_and_dec() String *Item_func_substr_index::val_str(String *str) { DBUG_ASSERT(fixed == 1); - String *res =args[0]->val_str(str); - String *delimeter =args[1]->val_str(&tmp_value); - int32 count = (int32) args[2]->val_int(); + String *res= args[0]->val_str(str); + String *delimiter= args[1]->val_str(&tmp_value); + int32 count= (int32) args[2]->val_int(); uint offset; if (args[0]->null_value || args[1]->null_value || args[2]->null_value) @@ -1142,8 +1224,8 @@ String *Item_func_substr_index::val_str(String *str) return 0; } null_value=0; - uint delimeter_length=delimeter->length(); - if (!res->length() || !delimeter_length || !count) + uint delimiter_length= delimiter->length(); + if (!res->length() || !delimiter_length || !count) return &my_empty_string; // Wrong parameters res->set_charset(collation.collation); @@ -1151,11 +1233,11 @@ String *Item_func_substr_index::val_str(String *str) #ifdef USE_MB if (use_mb(res->charset())) { - const char *ptr=res->ptr(); - const char *strend = ptr+res->length(); - const char *end=strend-delimeter_length+1; - const char *search=delimeter->ptr(); - const char *search_end=search+delimeter_length; + const char *ptr= res->ptr(); + const char *strend= ptr+res->length(); + const char *end= strend-delimiter_length+1; + const char *search= delimiter->ptr(); + const char *search_end= search+delimiter_length; int32 n=0,c=count,pass; register uint32 l; for (pass=(count>0);pass<2;++pass) @@ -1170,7 +1252,7 @@ String *Item_func_substr_index::val_str(String *str) if (*i++ != *j++) goto skip; 
if (pass==0) ++n; else if (!--c) break; - ptr+=delimeter_length; + ptr+= delimiter_length; continue; } skip: @@ -1192,7 +1274,7 @@ String *Item_func_substr_index::val_str(String *str) } else /* return right part */ { - ptr+=delimeter_length; + ptr+= delimiter_length; tmp_value.set(*res,(ulong) (ptr-res->ptr()), (ulong) (strend-ptr)); } } @@ -1203,9 +1285,9 @@ String *Item_func_substr_index::val_str(String *str) { if (count > 0) { // start counting from the beginning - for (offset=0 ;; offset+=delimeter_length) + for (offset=0; ; offset+= delimiter_length) { - if ((int) (offset=res->strstr(*delimeter,offset)) < 0) + if ((int) (offset= res->strstr(*delimiter, offset)) < 0) return res; // Didn't find, return org string if (!--count) { @@ -1226,7 +1308,7 @@ String *Item_func_substr_index::val_str(String *str) address space less than where the found substring is located in res */ - if ((int) (offset=res->strrstr(*delimeter,offset)) < 0) + if ((int) (offset= res->strrstr(*delimiter, offset)) < 0) return res; // Didn't find, return org string /* At this point, we've searched for the substring @@ -1234,13 +1316,19 @@ String *Item_func_substr_index::val_str(String *str) */ if (!++count) { - offset+=delimeter_length; + offset+= delimiter_length; tmp_value.set(*res,offset,res->length()- offset); break; } } } } + /* + We always mark tmp_value as const so that if val_str() is called again + on this object, we don't disrupt the contents of tmp_value when it was + derived from another String. + */ + tmp_value.mark_as_const(); return (&tmp_value); } @@ -1255,21 +1343,29 @@ String *Item_func_substr_index::val_str(String *str) String *Item_func_ltrim::val_str(String *str) { DBUG_ASSERT(fixed == 1); - String *res =args[0]->val_str(str); - if ((null_value=args[0]->null_value)) - return 0; /* purecov: inspected */ - char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff),res->charset()); - String *remove_str= (arg_count==2) ? 
args[1]->val_str(&tmp) : &remove; + char buff[MAX_FIELD_WIDTH], *ptr, *end; + String tmp(buff,sizeof(buff),system_charset_info); + String *res, *remove_str; uint remove_length; LINT_INIT(remove_length); - if (!remove_str || (remove_length=remove_str->length()) == 0 || + res= args[0]->val_str(str); + if ((null_value=args[0]->null_value)) + return 0; + remove_str= &remove; /* Default value. */ + if (arg_count == 2) + { + remove_str= args[1]->val_str(&tmp); + if ((null_value= args[1]->null_value)) + return 0; + } + + if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) return res; - char *ptr=(char*) res->ptr(); - char *end=ptr+res->length(); + ptr= (char*) res->ptr(); + end= ptr+res->length(); if (remove_length == 1) { char chr=(*remove_str)[0]; @@ -1294,21 +1390,29 @@ String *Item_func_ltrim::val_str(String *str) String *Item_func_rtrim::val_str(String *str) { DBUG_ASSERT(fixed == 1); - String *res =args[0]->val_str(str); - if ((null_value=args[0]->null_value)) - return 0; /* purecov: inspected */ - char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff),res->charset()); - String *remove_str= (arg_count==2) ? args[1]->val_str(&tmp) : &remove; + char buff[MAX_FIELD_WIDTH], *ptr, *end; + String tmp(buff, sizeof(buff), system_charset_info); + String *res, *remove_str; uint remove_length; LINT_INIT(remove_length); - if (!remove_str || (remove_length=remove_str->length()) == 0 || + res= args[0]->val_str(str); + if ((null_value=args[0]->null_value)) + return 0; + remove_str= &remove; /* Default value. 
*/ + if (arg_count == 2) + { + remove_str= args[1]->val_str(&tmp); + if ((null_value= args[1]->null_value)) + return 0; + } + + if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) return res; - char *ptr=(char*) res->ptr(); - char *end=ptr+res->length(); + ptr= (char*) res->ptr(); + end= ptr+res->length(); #ifdef USE_MB char *p=ptr; register uint32 l; @@ -1367,31 +1471,31 @@ String *Item_func_rtrim::val_str(String *str) String *Item_func_trim::val_str(String *str) { DBUG_ASSERT(fixed == 1); - String *res =args[0]->val_str(str); - if ((null_value=args[0]->null_value)) - return 0; /* purecov: inspected */ - char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff),res->charset()); + char buff[MAX_FIELD_WIDTH], *ptr, *end; + const char *r_ptr; + String tmp(buff, sizeof(buff), system_charset_info); + String *res, *remove_str; uint remove_length; LINT_INIT(remove_length); - String *remove_str; /* The string to remove from res. */ + res= args[0]->val_str(str); + if ((null_value=args[0]->null_value)) + return 0; + remove_str= &remove; /* Default value. */ if (arg_count == 2) { remove_str= args[1]->val_str(&tmp); if ((null_value= args[1]->null_value)) return 0; } - else - remove_str= &remove; /* Default value. */ - if (!remove_str || (remove_length=remove_str->length()) == 0 || + if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) return res; - char *ptr=(char*) res->ptr(); - char *end=ptr+res->length(); - const char *r_ptr=remove_str->ptr(); + ptr= (char*) res->ptr(); + end= ptr+res->length(); + r_ptr= remove_str->ptr(); while (ptr+remove_length <= end && !memcmp(ptr,r_ptr,remove_length)) ptr+=remove_length; #ifdef USE_MB @@ -1437,13 +1541,10 @@ void Item_func_trim::fix_length_and_dec() } else { - Item *cargs[2]; - cargs[0]= args[1]; - cargs[1]= args[0]; - if (agg_arg_charsets(collation, cargs, 2, MY_COLL_CMP_CONV)) + // Handle character set for args[1] and args[0]. 
+ // Note that we pass args[1] as the first item, and args[0] as the second. + if (agg_arg_charsets(collation, &args[1], 2, MY_COLL_CMP_CONV, -1)) return; - args[0]= cargs[1]; - args[1]= cargs[0]; } } @@ -1459,7 +1560,7 @@ void Item_func_trim::print(String *str) str->append(mode_name()); str->append(' '); args[1]->print(str); - str->append(" from ",6); + str->append(STRING_WITH_LEN(" from ")); args[0]->print(str); str->append(')'); } @@ -1619,14 +1720,16 @@ Item *Item_func_sysconst::safe_charset_converter(CHARSET_INFO *tocs) uint conv_errors; String tmp, cstr, *ostr= val_str(&tmp); cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors); - if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(), - cstr.charset(), - collation.derivation))) + if (conv_errors || + !(conv= new Item_static_string_func(fully_qualified_func_name(), + cstr.ptr(), cstr.length(), + cstr.charset(), + collation.derivation))) { return NULL; } conv->str_value.copy(); - conv->str_value.shrink_to_length(); + conv->str_value.mark_as_const(); return conv; } @@ -1635,43 +1738,69 @@ String *Item_func_database::val_str(String *str) { DBUG_ASSERT(fixed == 1); THD *thd= current_thd; - if (!thd->db) + if (thd->db == NULL) { null_value= 1; return 0; } else - str->copy((const char*) thd->db,(uint) strlen(thd->db),system_charset_info); + str->copy(thd->db, thd->db_length, system_charset_info); return str; } -// TODO: make USER() replicate properly (currently it is replicated to "") -String *Item_func_user::val_str(String *str) +/* + TODO: make USER() replicate properly (currently it is replicated to "") +*/ +bool Item_func_user::init(const char *user, const char *host) { DBUG_ASSERT(fixed == 1); - THD *thd=current_thd; - CHARSET_INFO *cs= system_charset_info; - const char *host= thd->host_or_ip; - uint res_length; // For system threads (e.g. 
replication SQL thread) user may be empty - if (!thd->user) - return &my_empty_string; - res_length= (strlen(thd->user)+strlen(host)+2) * cs->mbmaxlen; - - if (str->alloc(res_length)) + if (user) { - null_value=1; - return 0; + CHARSET_INFO *cs= str_value.charset(); + uint res_length= (strlen(user)+strlen(host)+2) * cs->mbmaxlen; + + if (str_value.alloc(res_length)) + { + null_value=1; + return TRUE; + } + + res_length=cs->cset->snprintf(cs, (char*)str_value.ptr(), res_length, + "%s@%s", user, host); + str_value.length(res_length); + str_value.mark_as_const(); } - res_length=cs->cset->snprintf(cs, (char*)str->ptr(), res_length, "%s@%s", - thd->user, host); - str->length(res_length); - str->set_charset(cs); - return str; + return FALSE; +} + + +bool Item_func_user::fix_fields(THD *thd, Item **ref) +{ + return (Item_func_sysconst::fix_fields(thd, ref) || + init(thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip)); +} + + +bool Item_func_current_user::fix_fields(THD *thd, Item **ref) +{ + if (Item_func_sysconst::fix_fields(thd, ref)) + return TRUE; + + Security_context *ctx= +#ifndef NO_EMBEDDED_ACCESS_CHECKS + (context->security_ctx + ? context->security_ctx : thd->security_ctx); +#else + thd->security_ctx; +#endif /*NO_EMBEDDED_ACCESS_CHECKS*/ + return init(ctx->priv_user, ctx->priv_host); } + void Item_func_soundex::fix_length_and_dec() { collation.set(args[0]->collation); @@ -1766,27 +1895,43 @@ Item_func_format::Item_func_format(Item *org,int dec) :Item_str_func(org) String *Item_func_format::val_str(String *str) { - DBUG_ASSERT(fixed == 1); - double nr =args[0]->val(); + uint32 length, str_length ,dec; int diff; - uint32 length, str_length; - uint dec; - if ((null_value=args[0]->null_value)) - return 0; /* purecov: inspected */ + DBUG_ASSERT(fixed == 1); dec= decimals ? 
decimals+1 : 0; - /* Here default_charset() is right as this is not an automatic conversion */ - str->set(nr,decimals, default_charset()); - if (isnan(nr)) - return str; - str_length=str->length(); - if (nr < 0) - str_length--; // Don't count sign + if (args[0]->result_type() == DECIMAL_RESULT || + args[0]->result_type() == INT_RESULT) + { + my_decimal dec_val, rnd_dec, *res; + res= args[0]->val_decimal(&dec_val); + if ((null_value=args[0]->null_value)) + return 0; /* purecov: inspected */ + my_decimal_round(E_DEC_FATAL_ERROR, res, decimals, false, &rnd_dec); + my_decimal2string(E_DEC_FATAL_ERROR, &rnd_dec, 0, 0, 0, str); + str_length= str->length(); + if (rnd_dec.sign()) + str_length--; + } + else + { + double nr= args[0]->val_real(); + if ((null_value=args[0]->null_value)) + return 0; /* purecov: inspected */ + nr= my_double_round(nr, decimals, FALSE); + /* Here default_charset() is right as this is not an automatic conversion */ + str->set(nr,decimals, default_charset()); + if (isnan(nr)) + return str; + str_length=str->length(); + if (nr < 0) + str_length--; // Don't count sign + } /* We need this test to handle 'nan' values */ if (str_length >= dec+4) { char *tmp,*pos; - length= str->length()+(diff= (int)(str_length- dec-1)/3); + length= str->length()+(diff=((int)(str_length- dec-1))/3); str= copy_if_not_alloced(&tmp_str,str,length); str->length(length); tmp= (char*) str->ptr()+length - dec-1; @@ -1811,7 +1956,7 @@ String *Item_func_format::val_str(String *str) void Item_func_format::print(String *str) { - str->append("format(", 7); + str->append(STRING_WITH_LEN("format(")); args[0]->print(str); str->append(','); // my_charset_bin is good enough for numbers @@ -1827,7 +1972,7 @@ void Item_func_elt::fix_length_and_dec() max_length=0; decimals=0; - if (agg_arg_charsets(collation, args+1, arg_count-1, MY_COLL_ALLOW_CONV)) + if (agg_arg_charsets(collation, args+1, arg_count-1, MY_COLL_ALLOW_CONV, 1)) return; for (uint i= 1 ; i < arg_count ; i++) @@ -1839,14 
+1984,14 @@ void Item_func_elt::fix_length_and_dec() } -double Item_func_elt::val() +double Item_func_elt::val_real() { DBUG_ASSERT(fixed == 1); uint tmp; null_value=1; if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count) return 0.0; - double result= args[tmp]->val(); + double result= args[tmp]->val_real(); null_value= args[tmp]->null_value; return result; } @@ -1885,7 +2030,7 @@ String *Item_func_elt::val_str(String *str) void Item_func_make_set::split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields) { - item->split_sum_func2(thd, ref_pointer_array, fields, &item); + item->split_sum_func2(thd, ref_pointer_array, fields, &item, TRUE); Item_str_func::split_sum_func(thd, ref_pointer_array, fields); } @@ -1894,12 +2039,12 @@ void Item_func_make_set::fix_length_and_dec() { max_length=arg_count-1; - if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV)) + if (agg_arg_charsets(collation, args, arg_count, MY_COLL_ALLOW_CONV, 1)) return; for (uint i=0 ; i < arg_count ; i++) max_length+=args[i]->max_length; - + used_tables_cache|= item->used_tables(); not_null_tables_cache&= item->not_null_tables(); const_item_cache&= item->const_item(); @@ -1959,7 +2104,7 @@ String *Item_func_make_set::val_str(String *str) return &my_empty_string; result= &tmp_str; } - if (tmp_str.append(",", 1, &my_charset_bin) || tmp_str.append(*res)) + if (tmp_str.append(STRING_WITH_LEN(","), &my_charset_bin) || tmp_str.append(*res)) return &my_empty_string; } } @@ -1969,9 +2114,29 @@ String *Item_func_make_set::val_str(String *str) } +Item *Item_func_make_set::transform(Item_transformer transformer, byte *arg) +{ + DBUG_ASSERT(!current_thd->is_stmt_prepare()); + + Item *new_item= item->transform(transformer, arg); + if (!new_item) + return 0; + + /* + THD::change_item_tree() should be called only if the tree was + really transformed, i.e. when a new item has been created. 
+ Otherwise we'll be allocating a lot of unnecessary memory for + change records at each execution. + */ + if (item != new_item) + current_thd->change_item_tree(&item, new_item); + return Item_str_func::transform(transformer, arg); +} + + void Item_func_make_set::print(String *str) { - str->append("make_set(", 9); + str->append(STRING_WITH_LEN("make_set(")); item->print(str); if (arg_count) { @@ -1991,26 +2156,21 @@ String *Item_func_char::val_str(String *str) int32 num=(int32) args[i]->val_int(); if (!args[i]->null_value) { -#ifdef USE_MB - if (use_mb(collation.collation)) - { - if (num&0xFF000000L) { - str->append((char)(num>>24)); - goto b2; - } else if (num&0xFF0000L) { -b2: str->append((char)(num>>16)); - goto b1; - } else if (num&0xFF00L) { -b1: str->append((char)(num>>8)); - } + if (num&0xFF000000L) { + str->append((char)(num>>24)); + goto b2; + } else if (num&0xFF0000L) { + b2: str->append((char)(num>>16)); + goto b1; + } else if (num&0xFF00L) { + b1: str->append((char)(num>>8)); } -#endif - str->append((char)num); + str->append((char) num); } } str->set_charset(collation.collation); str->realloc(str->length()); // Add end 0 (for Purify) - return str; + return check_well_formed_result(str); } @@ -2041,8 +2201,15 @@ void Item_func_repeat::fix_length_and_dec() collation.set(args[0]->collation); if (args[1]->const_item()) { - ulonglong max_result_length= ((ulonglong) args[0]->max_length * - args[1]->val_int()); + /* must be longlong to avoid truncation */ + longlong count= args[1]->val_int(); + + /* Assumes that the maximum length of a String is < INT_MAX32. */ + /* Set here so that rest of code sees out-of-bound value as such. 
*/ + if (count > INT_MAX32) + count= INT_MAX32; + + ulonglong max_result_length= (ulonglong) args[0]->max_length * count; if (max_result_length >= MAX_BLOB_WIDTH) { max_result_length= MAX_BLOB_WIDTH; @@ -2067,19 +2234,26 @@ String *Item_func_repeat::val_str(String *str) DBUG_ASSERT(fixed == 1); uint length,tot_length; char *to; - long count= (long) args[1]->val_int(); - String *res =args[0]->val_str(str); + /* must be longlong to avoid truncation */ + longlong count= args[1]->val_int(); + String *res= args[0]->val_str(str); if (args[0]->null_value || args[1]->null_value) goto err; // string and/or delim are null - null_value=0; - if (count <= 0) // For nicer SQL code + null_value= 0; + + if (count == 0 || count < 0 && !args[1]->unsigned_flag) return &my_empty_string; + + /* Assumes that the maximum length of a String is < INT_MAX32. */ + /* Bounds check on count: If this is triggered, we will error. */ + if ((ulonglong) count > INT_MAX32) + count= INT_MAX32; if (count == 1) // To avoid reallocs return res; length=res->length(); // Safe length check - if (length > current_thd->variables.max_allowed_packet/count) + if (length > current_thd->variables.max_allowed_packet / (uint) count) { push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, @@ -2107,18 +2281,25 @@ err: void Item_func_rpad::fix_length_and_dec() { - Item *cargs[2]; - - cargs[0]= args[0]; - cargs[1]= args[2]; - if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV)) + // Handle character set for args[0] and args[2]. + if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 2)) return; - args[0]= cargs[0]; - args[2]= cargs[1]; if (args[1]->const_item()) { - ulonglong length= ((ulonglong) args[1]->val_int() * - collation.collation->mbmaxlen); + ulonglong length= 0; + + if (collation.collation->mbmaxlen > 0) + { + ulonglong temp= (ulonglong) args[1]->val_int(); + + /* Assumes that the maximum length of a String is < INT_MAX32. 
*/ + /* Set here so that rest of code sees out-of-bound value as such. */ + if (temp > INT_MAX32) + temp = INT_MAX32; + + length= temp * collation.collation->mbmaxlen; + } + if (length >= MAX_BLOB_WIDTH) { length= MAX_BLOB_WIDTH; @@ -2140,21 +2321,29 @@ String *Item_func_rpad::val_str(String *str) uint32 res_byte_length,res_char_length,pad_char_length,pad_byte_length; char *to; const char *ptr_pad; - int32 count= (int32) args[1]->val_int(); - int32 byte_count= count * collation.collation->mbmaxlen; - String *res =args[0]->val_str(str); - String *rpad = args[2]->val_str(&rpad_str); + /* must be longlong to avoid truncation */ + longlong count= args[1]->val_int(); + longlong byte_count; + String *res= args[0]->val_str(str); + String *rpad= args[2]->val_str(&rpad_str); - if (!res || args[1]->null_value || !rpad || count < 0) + if (!res || args[1]->null_value || !rpad || + ((count < 0) && !args[1]->unsigned_flag)) goto err; null_value=0; - if (count <= (int32) (res_char_length=res->numchars())) + /* Assumes that the maximum length of a String is < INT_MAX32. */ + /* Set here so that rest of code sees out-of-bound value as such. 
*/ + if ((ulonglong) count > INT_MAX32) + count= INT_MAX32; + if (count <= (res_char_length= res->numchars())) { // String to pad is big enough - res->length(res->charpos(count)); // Shorten result if longer + res->length(res->charpos((int) count)); // Shorten result if longer return (res); } pad_char_length= rpad->numchars(); - if ((ulong) byte_count > current_thd->variables.max_allowed_packet) + + byte_count= count * collation.collation->mbmaxlen; + if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet) { push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, @@ -2162,10 +2351,10 @@ String *Item_func_rpad::val_str(String *str) func_name(), current_thd->variables.max_allowed_packet); goto err; } - if(args[2]->null_value || !pad_char_length) + if (args[2]->null_value || !pad_char_length) goto err; res_byte_length= res->length(); /* Must be done before alloc_buffer */ - if (!(res= alloc_buffer(res,str,&tmp_value,byte_count))) + if (!(res= alloc_buffer(res,str,&tmp_value, (ulong) byte_count))) goto err; to= (char*) res->ptr()+res_byte_length; @@ -2179,7 +2368,7 @@ String *Item_func_rpad::val_str(String *str) } if (count) { - pad_byte_length= rpad->charpos(count); + pad_byte_length= rpad->charpos((int) count); memcpy(to,ptr_pad,(size_t) pad_byte_length); to+= pad_byte_length; } @@ -2194,18 +2383,26 @@ String *Item_func_rpad::val_str(String *str) void Item_func_lpad::fix_length_and_dec() { - Item *cargs[2]; - cargs[0]= args[0]; - cargs[1]= args[2]; - if (agg_arg_charsets(collation, cargs, 2, MY_COLL_ALLOW_CONV)) + // Handle character set for args[0] and args[2]. 
+ if (agg_arg_charsets(collation, &args[0], 2, MY_COLL_ALLOW_CONV, 2)) return; - args[0]= cargs[0]; - args[2]= cargs[1]; if (args[1]->const_item()) { - ulonglong length= ((ulonglong) args[1]->val_int() * - collation.collation->mbmaxlen); + ulonglong length= 0; + + if (collation.collation->mbmaxlen > 0) + { + ulonglong temp= (ulonglong) args[1]->val_int(); + + /* Assumes that the maximum length of a String is < INT_MAX32. */ + /* Set here so that rest of code sees out-of-bound value as such. */ + if (temp > INT_MAX32) + temp= INT_MAX32; + + length= temp * collation.collation->mbmaxlen; + } + if (length >= MAX_BLOB_WIDTH) { length= MAX_BLOB_WIDTH; @@ -2225,26 +2422,33 @@ String *Item_func_lpad::val_str(String *str) { DBUG_ASSERT(fixed == 1); uint32 res_char_length,pad_char_length; - ulong count= (long) args[1]->val_int(), byte_count; + /* must be longlong to avoid truncation */ + longlong count= args[1]->val_int(); + longlong byte_count; String *res= args[0]->val_str(&tmp_value); String *pad= args[2]->val_str(&lpad_str); - if (!res || args[1]->null_value || !pad) - goto err; - + if (!res || args[1]->null_value || !pad || + ((count < 0) && !args[1]->unsigned_flag)) + goto err; null_value=0; + /* Assumes that the maximum length of a String is < INT_MAX32. */ + /* Set here so that rest of code sees out-of-bound value as such. 
*/ + if ((ulonglong) count > INT_MAX32) + count= INT_MAX32; + res_char_length= res->numchars(); if (count <= res_char_length) { - res->length(res->charpos(count)); + res->length(res->charpos((int) count)); return res; } pad_char_length= pad->numchars(); byte_count= count * collation.collation->mbmaxlen; - if (byte_count > current_thd->variables.max_allowed_packet) + if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet) { push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_ALLOWED_PACKET_OVERFLOWED, @@ -2253,7 +2457,8 @@ String *Item_func_lpad::val_str(String *str) goto err; } - if (args[2]->null_value || !pad_char_length || str->alloc(byte_count)) + if (args[2]->null_value || !pad_char_length || + str->alloc((uint32) byte_count)) goto err; str->length(0); @@ -2265,7 +2470,7 @@ String *Item_func_lpad::val_str(String *str) count-= pad_char_length; } if (count > 0) - str->append(pad->ptr(), pad->charpos(count), collation.collation); + str->append(pad->ptr(), pad->charpos((int) count), collation.collation); str->append(*res); null_value= 0; @@ -2291,17 +2496,33 @@ String *Item_func_conv::val_str(String *str) abs(to_base) > 36 || abs(to_base) < 2 || abs(from_base) > 36 || abs(from_base) < 2 || !(res->length())) { - null_value=1; - return 0; + null_value= 1; + return NULL; } - null_value=0; + null_value= 0; unsigned_flag= !(from_base < 0); - if (from_base < 0) - dec= my_strntoll(res->charset(),res->ptr(),res->length(),-from_base,&endptr,&err); + + if (args[0]->field_type() == MYSQL_TYPE_BIT) + { + /* + Special case: The string representation of BIT doesn't resemble the + decimal representation, so we shouldn't change it to string and then to + decimal. 
+ */ + dec= args[0]->val_int(); + } else - dec= (longlong) my_strntoull(res->charset(),res->ptr(),res->length(),from_base,&endptr,&err); - ptr= longlong2str(dec,ans,to_base); - if (str->copy(ans,(uint32) (ptr-ans), default_charset())) + { + if (from_base < 0) + dec= my_strntoll(res->charset(), res->ptr(), res->length(), + -from_base, &endptr, &err); + else + dec= (longlong) my_strntoull(res->charset(), res->ptr(), res->length(), + from_base, &endptr, &err); + } + + ptr= longlong2str(dec, ans, to_base); + if (str->copy(ans, (uint32) (ptr-ans), default_charset())) return &my_empty_string; return str; } @@ -2321,7 +2542,7 @@ String *Item_func_conv_charset::val_str(String *str) } null_value= str_value.copy(arg->ptr(),arg->length(),arg->charset(), conv_charset, &dummy_errors); - return null_value ? 0 : &str_value; + return null_value ? 0 : check_well_formed_result(&str_value); } void Item_func_conv_charset::fix_length_and_dec() @@ -2332,9 +2553,9 @@ void Item_func_conv_charset::fix_length_and_dec() void Item_func_conv_charset::print(String *str) { - str->append("convert(", 8); + str->append(STRING_WITH_LEN("convert(")); args[0]->print(str); - str->append(" using ", 7); + str->append(STRING_WITH_LEN(" using ")); str->append(conv_charset->csname); str->append(')'); } @@ -2370,8 +2591,8 @@ void Item_func_set_collation::fix_length_and_dec() if (!set_collation || !my_charset_same(args[0]->collation.collation,set_collation)) { - my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), - colname,args[0]->collation.collation->csname); + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + colname, args[0]->collation.collation->csname); return; } collation.set(set_collation, DERIVATION_EXPLICIT); @@ -2388,7 +2609,7 @@ bool Item_func_set_collation::eq(const Item *item, bool binary_cmp) const return 0; Item_func *item_func=(Item_func*) item; if (arg_count != item_func->arg_count || - func_name() != item_func->func_name()) + functype() != item_func->functype()) return 0; 
Item_func_set_collation *item_func_sc=(Item_func_set_collation*) item; if (collation.collation != item_func_sc->collation.collation) @@ -2399,6 +2620,18 @@ bool Item_func_set_collation::eq(const Item *item, bool binary_cmp) const return 1; } + +void Item_func_set_collation::print(String *str) +{ + str->append('('); + args[0]->print(str); + str->append(STRING_WITH_LEN(" collate ")); + DBUG_ASSERT(args[1]->basic_const_item() && + args[1]->type() == Item::STRING_ITEM); + args[1]->str_value.print(str); + str->append(')'); +} + String *Item_func_charset::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -2426,15 +2659,17 @@ String *Item_func_collation::val_str(String *str) String *Item_func_hex::val_str(String *str) { + String *res; DBUG_ASSERT(fixed == 1); if (args[0]->result_type() != STRING_RESULT) { ulonglong dec; char ans[65],*ptr; /* Return hex of unsigned longlong value */ - if (args[0]->result_type() == REAL_RESULT) + if (args[0]->result_type() == REAL_RESULT || + args[0]->result_type() == DECIMAL_RESULT) { - double val= args[0]->val(); + double val= args[0]->val_real(); if ((val <= (double) LONGLONG_MIN) || (val >= (double) (ulonglong) ULONGLONG_MAX)) dec= ~(longlong) 0; @@ -2453,24 +2688,16 @@ String *Item_func_hex::val_str(String *str) } /* Convert given string to a hex string, character by character */ - String *res= args[0]->val_str(str); - const char *from, *end; - char *to; - if (!res || tmp_value.alloc(res->length()*2)) + res= args[0]->val_str(str); + if (!res || tmp_value.alloc(res->length()*2+1)) { null_value=1; return 0; } null_value=0; tmp_value.length(res->length()*2); - for (from=res->ptr(), end=from+res->length(), to= (char*) tmp_value.ptr(); - from < end ; - from++, to+=2) - { - uint tmp=(uint) (uchar) *from; - to[0]=_dig_vec_upper[tmp >> 4]; - to[1]=_dig_vec_upper[tmp & 15]; - } + + octet2hex((char*) tmp_value.ptr(), res->ptr(), res->length()); return &tmp_value; } @@ -2518,9 +2745,9 @@ String *Item_func_unhex::val_str(String *str) void 
Item_func_binary::print(String *str) { - str->append("cast(", 5); + str->append(STRING_WITH_LEN("cast(")); args[0]->print(str); - str->append(" as binary)", 11); + str->append(STRING_WITH_LEN(" as binary)")); } @@ -2537,7 +2764,7 @@ String *Item_load_file::val_str(String *str) if (!(file_name= args[0]->val_str(str)) #ifndef NO_EMBEDDED_ACCESS_CHECKS - || !(current_thd->master_access & FILE_ACL) + || !(current_thd->security_ctx->master_access & FILE_ACL) #endif ) goto err; @@ -2545,6 +2772,11 @@ String *Item_load_file::val_str(String *str) (void) fn_format(path, file_name->c_ptr(), mysql_real_data_home, "", MY_RELATIVE_PATH | MY_UNPACK_FILENAME); + /* Read only allowed from within dir specified by secure_file_priv */ + if (opt_secure_file_priv && + strncmp(opt_secure_file_priv, path, strlen(opt_secure_file_priv))) + goto err; + if (!my_stat(path, &stat_info, MYF(0))) goto err; @@ -2573,7 +2805,7 @@ String *Item_load_file::val_str(String *str) tmp_value.length(stat_info.st_size); my_close(file, MYF(0)); null_value = 0; - return &tmp_value; + DBUG_RETURN(&tmp_value); err: null_value = 1; @@ -2628,7 +2860,7 @@ String* Item_func_export_set::val_str(String* str) { /* errors is not checked - assume "," can always be converted */ uint errors; - sep_buf.copy(",", 1, &my_charset_bin, collation.collation, &errors); + sep_buf.copy(STRING_WITH_LEN(","), &my_charset_bin, collation.collation, &errors); sep = &sep_buf; } break; @@ -2656,7 +2888,7 @@ void Item_func_export_set::fix_length_and_dec() max_length=length*64+sep_length*63; if (agg_arg_charsets(collation, args+1, min(4,arg_count)-1, - MY_COLL_ALLOW_CONV)) + MY_COLL_ALLOW_CONV, 1)) return; } @@ -2744,7 +2976,8 @@ String *Item_func_quote::val_str(String *str) uint arg_length, new_length; if (!arg) // Null argument { - str->copy("NULL", 4, collation.collation); // Return the string 'NULL' + /* Return the string 'NULL' */ + str->copy(STRING_WITH_LEN("NULL"), collation.collation); null_value= 0; return str; } @@ -2915,6 
+3148,16 @@ String *Item_func_uncompress::val_str(String *str) if (res->is_empty()) return res; + /* If length is less than 4 bytes, data is corrupt */ + if (res->length() <= 4) + { + push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_ZLIB_Z_DATA_ERROR, + ER(ER_ZLIB_Z_DATA_ERROR)); + goto err; + } + + /* Size of uncompressed data is stored as first 4 bytes of field */ new_size= uint4korr(res->ptr()) & 0x3FFFFFFF; if (new_size > current_thd->variables.max_allowed_packet) { @@ -2985,6 +3228,8 @@ String *Item_func_uuid::val_str(String *str) { DBUG_ASSERT(fixed == 1); char *s; + THD *thd= current_thd; + pthread_mutex_lock(&LOCK_uuid_generator); if (! uuid_time) /* first UUID() call. initializing data */ { @@ -2993,15 +3238,17 @@ String *Item_func_uuid::val_str(String *str) int i; if (my_gethwaddr(mac)) { + /* purecov: begin inspected */ /* generating random "hardware addr" and because specs explicitly specify that it should NOT correlate with a clock_seq value (initialized random below), we use a separate randominit() here */ - randominit(&uuid_rand, tmp + (ulong)current_thd, tmp + query_id); + randominit(&uuid_rand, tmp + (ulong) thd, tmp + (ulong)global_query_id); for (i=0; i < (int)sizeof(mac); i++) mac[i]=(uchar)(my_rnd(&uuid_rand)*255); + /* purecov: end */ } s=clock_seq_and_node_str+sizeof(clock_seq_and_node_str)-1; for (i=sizeof(mac)-1 ; i>=0 ; i--) @@ -3009,16 +3256,17 @@ String *Item_func_uuid::val_str(String *str) *--s=_dig_vec_lower[mac[i] & 15]; *--s=_dig_vec_lower[mac[i] >> 4]; } - randominit(&uuid_rand, tmp + (ulong)start_time, tmp + bytes_sent); + randominit(&uuid_rand, tmp + (ulong) server_start_time, + tmp + thd->status_var.bytes_sent); set_clock_seq_str(); } ulonglong tv=my_getsystime() + UUID_TIME_OFFSET + nanoseq; if (unlikely(tv < uuid_time)) set_clock_seq_str(); - else - if (unlikely(tv == uuid_time)) - { /* special protection from low-res system clocks */ + else if (unlikely(tv == uuid_time)) + { + /* special protection from 
low-res system clocks */ nanoseq++; tv++; } @@ -3049,4 +3297,3 @@ String *Item_func_uuid::val_str(String *str) strmov(s+18, clock_seq_and_node_str); return str; } - diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 4bd8574ff04..778ea6e9496 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -32,9 +31,12 @@ public: Item_str_func(Item *a,Item *b,Item *c,Item *d, Item* e) :Item_func(a,b,c,d,e) {decimals=NOT_FIXED_DEC; } Item_str_func(List<Item> &list) :Item_func(list) {decimals=NOT_FIXED_DEC; } longlong val_int(); - double val(); + double val_real(); + my_decimal *val_decimal(my_decimal *); enum Item_result result_type () const { return STRING_RESULT; } void left_right_max_length(); + String *check_well_formed_result(String *str); + bool fix_fields(THD *thd, Item **ref); }; class Item_func_md5 :public Item_str_func @@ -141,13 +143,14 @@ public: class Item_str_conv :public Item_str_func { +protected: + uint multiply; + uint (*converter)(CHARSET_INFO *cs, char *src, uint srclen, + char *dst, uint dstlen); + String tmp_value; public: Item_str_conv(Item *item) :Item_str_func(item) {} - void fix_length_and_dec() - { - collation.set(args[0]->collation); - max_length = args[0]->max_length; - } + String *val_str(String *); }; @@ -155,16 +158,28 @@ class Item_func_lcase :public Item_str_conv { public: Item_func_lcase(Item *item) :Item_str_conv(item) {} - String *val_str(String *); const char *func_name() const { return "lcase"; } + void fix_length_and_dec() + { + collation.set(args[0]->collation); + multiply= 
collation.collation->casedn_multiply; + converter= collation.collation->cset->casedn; + max_length= args[0]->max_length * multiply; + } }; class Item_func_ucase :public Item_str_conv { public: Item_func_ucase(Item *item) :Item_str_conv(item) {} - String *val_str(String *); const char *func_name() const { return "ucase"; } + void fix_length_and_dec() + { + collation.set(args[0]->collation); + multiply= collation.collation->caseup_multiply; + converter= collation.collation->cset->caseup; + max_length= args[0]->max_length * multiply; + } }; @@ -209,7 +224,7 @@ public: Item_func_substr_index(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {} String *val_str(String *); void fix_length_and_dec(); - const char *func_name() const { return "substr_index"; } + const char *func_name() const { return "substring_index"; } }; @@ -256,7 +271,7 @@ public: Returns strcat('*', octet2hex(sha1(sha1(password)))). '*' stands for new password format, sha1(sha1(password) is so-called hash_stage2 value. Length of returned string is always 41 byte. To find out how entire - authentification procedure works, see comments in password.c. + authentication procedure works, see comments in password.c. 
*/ class Item_func_password :public Item_str_func @@ -333,7 +348,7 @@ public: } String *val_str(String *); void fix_length_and_dec() { maybe_null=1; max_length = 13; } - const char *func_name() const { return "ecrypt"; } + const char *func_name() const { return "encrypt"; } }; #include "sql_crypt.h" @@ -372,8 +387,15 @@ public: Item_func_sysconst() { collation.set(system_charset_info,DERIVATION_SYSCONST); } Item *safe_charset_converter(CHARSET_INFO *tocs); + /* + Used to create correct Item name in new converted item in + safe_charset_converter, return string representation of this function + call + */ + virtual const char *fully_qualified_func_name() const = 0; }; + class Item_func_database :public Item_func_sysconst { public: @@ -385,18 +407,46 @@ public: maybe_null=1; } const char *func_name() const { return "database"; } + const char *fully_qualified_func_name() const { return "database()"; } }; + class Item_func_user :public Item_func_sysconst { +protected: + bool init (const char *user, const char *host); + public: - Item_func_user() :Item_func_sysconst() {} - String *val_str(String *); - void fix_length_and_dec() - { - max_length= (USERNAME_LENGTH+HOSTNAME_LENGTH+1)*system_charset_info->mbmaxlen; + Item_func_user() + { + str_value.set("", 0, system_charset_info); + } + String *val_str(String *) + { + DBUG_ASSERT(fixed == 1); + return (null_value ? 
0 : &str_value); + } + bool fix_fields(THD *thd, Item **ref); + void fix_length_and_dec() + { + max_length= ((USERNAME_LENGTH + HOSTNAME_LENGTH + 1) * + system_charset_info->mbmaxlen); } const char *func_name() const { return "user"; } + const char *fully_qualified_func_name() const { return "user()"; } +}; + + +class Item_func_current_user :public Item_func_user +{ + Name_resolution_context *context; + +public: + Item_func_current_user(Name_resolution_context *context_arg) + : context(context_arg) {} + bool fix_fields(THD *thd, Item **ref); + const char *func_name() const { return "current_user"; } + const char *fully_qualified_func_name() const { return "current_user()"; } }; @@ -415,7 +465,7 @@ class Item_func_elt :public Item_str_func { public: Item_func_elt(List<Item> &list) :Item_str_func(list) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *str); void fix_length_and_dec(); @@ -431,13 +481,12 @@ class Item_func_make_set :public Item_str_func public: Item_func_make_set(Item *a,List<Item> &list) :Item_str_func(list),item(a) {} String *val_str(String *str); - bool fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) + bool fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); - return (!item->fixed && - item->fix_fields(thd, tlist, &item) || + return ((!item->fixed && item->fix_fields(thd, &item)) || item->check_cols(1) || - Item_func::fix_fields(thd, tlist, ref)); + Item_func::fix_fields(thd, ref)); } void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields); void fix_length_and_dec(); @@ -449,6 +498,7 @@ public: return item->walk(processor, arg) || Item_str_func::walk(processor, arg); } + Item *transform(Item_transformer transformer, byte *arg); void print(String *str); }; @@ -462,7 +512,9 @@ public: void fix_length_and_dec() { collation.set(default_charset()); - max_length=args[0]->max_length+(args[0]->max_length-args[0]->decimals)/3; + uint char_length= 
args[0]->max_length/args[0]->collation.collation->mbmaxlen; + max_length= ((char_length + (char_length-args[0]->decimals)/3) * + collation.collation->mbmaxlen); } const char *func_name() const { return "format"; } void print(String *); @@ -473,14 +525,13 @@ class Item_func_char :public Item_str_func { public: Item_func_char(List<Item> &list) :Item_str_func(list) - { collation.set(default_charset()); } + { collation.set(&my_charset_bin); } Item_func_char(List<Item> &list, CHARSET_INFO *cs) :Item_str_func(list) - { collation.set(cs); } + { collation.set(cs); } String *val_str(String *); void fix_length_and_dec() - { - maybe_null=0; - max_length=arg_count * collation.collation->mbmaxlen; + { + max_length= arg_count * collation.collation->mbmaxlen; } const char *func_name() const { return "char"; } }; @@ -530,7 +581,7 @@ public: void fix_length_and_dec() { collation.set(default_charset()); - decimals=0; max_length=64; + max_length= 64; } }; @@ -554,7 +605,11 @@ class Item_func_unhex :public Item_str_func { String tmp_value; public: - Item_func_unhex(Item *a) :Item_str_func(a) {} + Item_func_unhex(Item *a) :Item_str_func(a) + { + /* there can be bad hex strings */ + maybe_null= 1; + } const char *func_name() const { return "unhex"; } String *val_str(String *); void fix_length_and_dec() @@ -585,6 +640,7 @@ public: max_length=args[0]->max_length; } void print(String *str); + const char *func_name() const { return "cast_as_binary"; } }; @@ -661,6 +717,7 @@ public: str->charset(), conv_charset, &errors)) null_value= 1; use_cached_value= 1; + str_value.mark_as_const(); safe= (errors == 0); } else @@ -690,7 +747,13 @@ public: void fix_length_and_dec(); bool eq(const Item *item, bool binary_cmp) const; const char *func_name() const { return "collate"; } - void print(String *str) { print_op(str); } + enum Functype functype() const { return COLLATE_FUNC; } + void print(String *str); + Item_field *filed_for_view_update() + { + /* this function is transparent for view updating */ 
+ return args[0]->filed_for_view_update(); + } }; class Item_func_charset :public Item_str_func @@ -764,7 +827,7 @@ class Item_func_uncompress: public Item_str_func String buffer; public: Item_func_uncompress(Item *a): Item_str_func(a){} - void fix_length_and_dec(){max_length= MAX_BLOB_WIDTH;} + void fix_length_and_dec(){ maybe_null= 1; max_length= MAX_BLOB_WIDTH; } const char *func_name() const{return "uncompress";} String *val_str(String *) ZLIB_DEPENDED_FUNCTION }; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index cdbcde8b56b..b3744d6eb96 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -37,7 +36,7 @@ inline Item * and_items(Item* cond, Item *item) Item_subselect::Item_subselect(): Item_result_field(), value_assigned(0), thd(0), substitution(0), engine(0), old_engine(0), used_tables_cache(0), have_to_be_excluded(0), - const_item_cache(1), engine_changed(0), changed(0) + const_item_cache(1), engine_changed(0), changed(0), is_correlated(FALSE) { with_subselect= 1; reset(); @@ -52,9 +51,13 @@ Item_subselect::Item_subselect(): void Item_subselect::init(st_select_lex *select_lex, select_subselect *result) { + /* + Please see Item_singlerow_subselect::invalidate_and_restore_select_lex(), + which depends on alterations to the parse tree implemented here. 
+ */ DBUG_ENTER("Item_subselect::init"); - DBUG_PRINT("subs", ("select_lex 0x%xl", (ulong) select_lex)); + DBUG_PRINT("enter", ("select_lex: 0x%lx", (long) select_lex)); unit= select_lex->master_unit(); if (unit->item) @@ -67,7 +70,7 @@ void Item_subselect::init(st_select_lex *select_lex, parsing_place= unit->item->parsing_place; unit->item->engine= 0; unit->item= this; - engine->change_item(this, result); + engine->change_result(this, result); } else { @@ -92,6 +95,12 @@ void Item_subselect::init(st_select_lex *select_lex, DBUG_VOID_RETURN; } +st_select_lex * +Item_subselect::get_select_lex() +{ + return unit->first_select(); +} + void Item_subselect::cleanup() { DBUG_ENTER("Item_subselect::cleanup"); @@ -131,20 +140,20 @@ Item_subselect::select_transformer(JOIN *join) } -bool Item_subselect::fix_fields(THD *thd_param, TABLE_LIST *tables, Item **ref) +bool Item_subselect::fix_fields(THD *thd_param, Item **ref) { + char const *save_where= thd_param->where; + bool res; + DBUG_ASSERT(fixed == 0); engine->set_thd((thd= thd_param)); - char const *save_where= thd->where; - int res; - - if (check_stack_overrun(thd, (gptr)&res)) - return 1; + if (check_stack_overrun(thd, STACK_MIN_SIZE, (gptr)&res)) + return TRUE; res= engine->prepare(); - // all transformetion is done (used by prepared statements) + // all transformation is done (used by prepared statements) changed= 1; if (!res) @@ -166,7 +175,7 @@ bool Item_subselect::fix_fields(THD *thd_param, TABLE_LIST *tables, Item **ref) substitution= 0; thd->where= "checking transformed subquery"; if (!(*ref)->fixed) - ret= (*ref)->fix_fields(thd, tables, ref); + ret= (*ref)->fix_fields(thd, ref); thd->where= save_where; return ret; } @@ -174,7 +183,7 @@ bool Item_subselect::fix_fields(THD *thd_param, TABLE_LIST *tables, Item **ref) if (engine->cols() > max_columns) { my_error(ER_OPERAND_COLUMNS, MYF(0), 1); - return 1; + return TRUE; } fix_length_and_dec(); } @@ -195,18 +204,12 @@ bool Item_subselect::fix_fields(THD *thd_param, 
TABLE_LIST *tables, Item **ref) bool Item_subselect::exec() { int res; - MEM_ROOT *old_root= thd->mem_root; - /* - As this is execution, all objects should be allocated through the main - mem root - */ - thd->mem_root= &thd->main_mem_root; if (thd->net.report_error) /* Do not execute subselect in case of a fatal error */ return 1; + res= engine->exec(); - thd->mem_root= old_root; if (engine_changed) { @@ -239,18 +242,18 @@ bool Item_subselect::const_item() const return const_item_cache; } -Item *Item_subselect::get_tmp_table_item(THD *thd) +Item *Item_subselect::get_tmp_table_item(THD *thd_arg) { if (!with_sum_func && !const_item()) return new Item_field(result_field); - return copy_or_same(thd); + return copy_or_same(thd_arg); } void Item_subselect::update_used_tables() { if (!engine->uncacheable()) { - // did all used tables become ststic? + // did all used tables become static? if (!(used_tables_cache & ~engine->upper_select_const_tables())) const_item_cache= 1; } @@ -275,7 +278,28 @@ Item_singlerow_subselect::Item_singlerow_subselect(st_select_lex *select_lex) DBUG_VOID_RETURN; } -Item_maxmin_subselect::Item_maxmin_subselect(Item_subselect *parent, +st_select_lex * +Item_singlerow_subselect::invalidate_and_restore_select_lex() +{ + DBUG_ENTER("Item_singlerow_subselect::invalidate_and_restore_select_lex"); + st_select_lex *result= get_select_lex(); + + DBUG_ASSERT(result); + + /* + This code restore the parse tree in it's state before the execution of + Item_singlerow_subselect::Item_singlerow_subselect(), + and in particular decouples this object from the SELECT_LEX, + so that the SELECT_LEX can be used with a different flavor + or Item_subselect instead, as part of query rewriting. 
+ */ + unit->item= NULL; + + DBUG_RETURN(result); +} + +Item_maxmin_subselect::Item_maxmin_subselect(THD *thd_param, + Item_subselect *parent, st_select_lex *select_lex, bool max_arg) :Item_singlerow_subselect(), was_values(TRUE) @@ -294,6 +318,12 @@ Item_maxmin_subselect::Item_maxmin_subselect(Item_subselect *parent, used_tables_cache= parent->get_used_tables_cache(); const_item_cache= parent->get_const_item_cache(); + /* + this subquery always creates during preparation, so we can assign + thd here + */ + thd= thd_param; + DBUG_VOID_RETURN; } @@ -337,10 +367,8 @@ Item_singlerow_subselect::select_transformer(JOIN *join) return RES_OK; SELECT_LEX *select_lex= join->select_lex; - - /* Juggle with current arena only if we're in prepared statement prepare */ - Item_arena *arena= join->thd->current_arena; - + Query_arena *arena= thd->stmt_arena; + if (!select_lex->master_unit()->first_select()->next_select() && !select_lex->table_list.elements && select_lex->item_list.elements == 1 && @@ -356,20 +384,20 @@ Item_singlerow_subselect::select_transformer(JOIN *join) select_lex->item_list.head()->type() == REF_ITEM) && !join->conds && !join->having && /* - switch off this optimisation for prepare statement, + switch off this optimization for prepare statement, because we do not rollback this changes TODO: make rollback for it, or special name resolving mode in 5.0. 
*/ - !arena->is_stmt_prepare() + !arena->is_stmt_prepare_or_first_sp_execute() ) { have_to_be_excluded= 1; - if (join->thd->lex->describe) + if (thd->lex->describe) { char warn_buff[MYSQL_ERRMSG_SIZE]; sprintf(warn_buff, ER(ER_SELECT_REDUCED), select_lex->select_number); - push_warning(join->thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SELECT_REDUCED, warn_buff); } substitution= select_lex->item_list.head(); @@ -384,6 +412,7 @@ Item_singlerow_subselect::select_transformer(JOIN *join) return RES_OK; } + void Item_singlerow_subselect::store(uint i, Item *item) { row[i]->store(item); @@ -416,6 +445,7 @@ void Item_singlerow_subselect::fix_length_and_dec() engine->fix_length_and_dec(row); value= *row; } + unsigned_flag= value->unsigned_flag; /* If there are not tables in subquery then ability to have NULL value depends on SELECT list (if single row subquery have tables then it @@ -455,13 +485,13 @@ void Item_singlerow_subselect::bring_value() exec(); } -double Item_singlerow_subselect::val() +double Item_singlerow_subselect::val_real() { DBUG_ASSERT(fixed == 1); if (!exec() && !value->null_value) { null_value= 0; - return value->val(); + return value->val_real(); } else { @@ -485,7 +515,7 @@ longlong Item_singlerow_subselect::val_int() } } -String *Item_singlerow_subselect::val_str (String *str) +String *Item_singlerow_subselect::val_str(String *str) { if (!exec() && !value->null_value) { @@ -500,35 +530,64 @@ String *Item_singlerow_subselect::val_str (String *str) } +my_decimal *Item_singlerow_subselect::val_decimal(my_decimal *decimal_value) +{ + if (!exec() && !value->null_value) + { + null_value= 0; + return value->val_decimal(decimal_value); + } + else + { + reset(); + return 0; + } +} + + +bool Item_singlerow_subselect::val_bool() +{ + if (!exec() && !value->null_value) + { + null_value= 0; + return value->val_bool(); + } + else + { + reset(); + return 0; + } +} + + 
Item_exists_subselect::Item_exists_subselect(st_select_lex *select_lex): Item_subselect() { DBUG_ENTER("Item_exists_subselect::Item_exists_subselect"); + bool val_bool(); init(select_lex, new select_exists_subselect(this)); max_columns= UINT_MAX; null_value= 0; //can't be NULL maybe_null= 0; //can't be NULL value= 0; - // We need only 1 row to determinate existence - select_lex->master_unit()->global_parameters->select_limit= 1; DBUG_VOID_RETURN; } void Item_exists_subselect::print(String *str) { - str->append("exists", 6); + str->append(STRING_WITH_LEN("exists")); Item_subselect::print(str); } -bool Item_in_subselect::test_limit(SELECT_LEX_UNIT *unit) +bool Item_in_subselect::test_limit(SELECT_LEX_UNIT *unit_arg) { - if (unit->fake_select_lex && - unit->fake_select_lex->test_limit()) + if (unit_arg->fake_select_lex && + unit_arg->fake_select_lex->test_limit()) return(1); - SELECT_LEX *sl= unit->first_select(); + SELECT_LEX *sl= unit_arg->first_select(); for (; sl; sl= sl->next_select()) { if (sl->test_limit()) @@ -539,7 +598,8 @@ bool Item_in_subselect::test_limit(SELECT_LEX_UNIT *unit) Item_in_subselect::Item_in_subselect(Item * left_exp, st_select_lex *select_lex): - Item_exists_subselect(), optimizer(0), transformed(0), upper_item(0) + Item_exists_subselect(), optimizer(0), transformed(0), + pushed_cond_guards(NULL), upper_item(0) { DBUG_ENTER("Item_in_subselect::Item_in_subselect"); left_expr= left_exp; @@ -577,9 +637,11 @@ void Item_exists_subselect::fix_length_and_dec() decimals= 0; max_length= 1; max_columns= engine->cols(); + /* We need only 1 row to determine existence */ + unit->global_parameters->select_limit= new Item_int((int32) 1); } -double Item_exists_subselect::val() +double Item_exists_subselect::val_real() { DBUG_ASSERT(fixed == 1); if (exec()) @@ -609,12 +671,37 @@ String *Item_exists_subselect::val_str(String *str) reset(); return 0; } - str->set(value,&my_charset_bin); + str->set((ulonglong)value,&my_charset_bin); return str; } -double 
Item_in_subselect::val() +my_decimal *Item_exists_subselect::val_decimal(my_decimal *decimal_value) +{ + DBUG_ASSERT(fixed == 1); + if (exec()) + { + reset(); + return 0; + } + int2my_decimal(E_DEC_FATAL_ERROR, value, 0, decimal_value); + return decimal_value; +} + + +bool Item_exists_subselect::val_bool() +{ + DBUG_ASSERT(fixed == 1); + if (exec()) + { + reset(); + return 0; + } + return value != 0; +} + + +double Item_in_subselect::val_real() { /* As far as Item_in_subselect called only from Item_in_optimizer this @@ -637,6 +724,11 @@ double Item_in_subselect::val() longlong Item_in_subselect::val_int() { + /* + As far as Item_in_subselect called only from Item_in_optimizer this + method should not be used + */ + DBUG_ASSERT(0); DBUG_ASSERT(fixed == 1); null_value= 0; if (exec()) @@ -671,20 +763,104 @@ String *Item_in_subselect::val_str(String *str) null_value= 1; return 0; } - str->set(value, &my_charset_bin); + str->set((ulonglong)value, &my_charset_bin); return str; } -/* Rewrite a single-column IN/ALL/ANY subselect. */ +bool Item_in_subselect::val_bool() +{ + DBUG_ASSERT(fixed == 1); + null_value= 0; + if (exec()) + { + reset(); + null_value= 1; + return 0; + } + if (was_null && !value) + null_value= 1; + return value; +} + +my_decimal *Item_in_subselect::val_decimal(my_decimal *decimal_value) +{ + /* + As far as Item_in_subselect called only from Item_in_optimizer this + method should not be used + */ + DBUG_ASSERT(0); + null_value= 0; + DBUG_ASSERT(fixed == 1); + if (exec()) + { + reset(); + null_value= 1; + return 0; + } + if (was_null && !value) + null_value= 1; + int2my_decimal(E_DEC_FATAL_ERROR, value, 0, decimal_value); + return decimal_value; +} + + +/* + Rewrite a single-column IN/ALL/ANY subselect + + SYNOPSIS + Item_in_subselect::single_value_transformer() + join Join object of the subquery (i.e. 'child' join). + func Subquery comparison creator + + DESCRIPTION + Rewrite a single-column subquery using rule-based approach. 
The subquery + + oe $cmp$ (SELECT ie FROM ... WHERE subq_where ... HAVING subq_having) + + First, try to convert the subquery to scalar-result subquery in one of + the forms: + + - oe $cmp$ (SELECT MAX(...) ) // handled by Item_singlerow_subselect + - oe $cmp$ <max>(SELECT ...) // handled by Item_maxmin_subselect + + If that fails, the subquery will be handled with class Item_in_optimizer, + Inject the predicates into subquery, i.e. convert it to: + + - If the subquery has aggregates, GROUP BY, or HAVING, convert to + + SELECT ie FROM ... HAVING subq_having AND + trigcond(oe $cmp$ ref_or_null_helper<ie>) + + the addition is wrapped into trigger only when we want to distinguish + between NULL and FALSE results. + + - Otherwise (no aggregates/GROUP BY/HAVING) convert it to one of the + following: + + = If we don't need to distinguish between NULL and FALSE subquery: + + SELECT 1 FROM ... WHERE (oe $cmp$ ie) AND subq_where + + = If we need to distinguish between those: + + SELECT 1 FROM ... + WHERE subq_where AND trigcond((oe $cmp$ ie) OR (ie IS NULL)) + HAVING trigcond(<is_not_null_test>(ie)) + + RETURN + RES_OK - OK, either subquery was transformed, or appopriate + predicates where injected into it. 
+ RES_REDUCE - The subquery was reduced to non-subquery + RES_ERROR - Error +*/ Item_subselect::trans_res Item_in_subselect::single_value_transformer(JOIN *join, Comp_creator *func) { - DBUG_ENTER("Item_in_subselect::single_value_transformer"); - SELECT_LEX *select_lex= join->select_lex; + DBUG_ENTER("Item_in_subselect::single_value_transformer"); /* Check that the right part of the subselect contains no more than one @@ -724,6 +900,7 @@ Item_in_subselect::single_value_transformer(JOIN *join, select_lex->table_list.elements) { Item_sum_hybrid *item; + nesting_map save_allow_sum_func; if (func->l_op()) { /* @@ -749,12 +926,16 @@ Item_in_subselect::single_value_transformer(JOIN *join, it.replace(item); } + save_allow_sum_func= thd->lex->allow_sum_func; + thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; /* Item_sum_(max|min) can't substitute other item => we can use 0 as - reference + reference, also Item_sum_(max|min) can't be fixed after creation, so + we do not check item->fixed */ - if (item->fix_fields(thd, join->tables_list, 0)) + if (item->fix_fields(thd, 0)) DBUG_RETURN(RES_ERROR); + thd->lex->allow_sum_func= save_allow_sum_func; /* we added aggregate function => we have to change statistic */ count_field_types(&join->tmp_table_param, join->all_fields, 0); @@ -763,10 +944,7 @@ Item_in_subselect::single_value_transformer(JOIN *join, else { Item_maxmin_subselect *item; - // remove LIMIT placed by ALL/ANY subquery - select_lex->master_unit()->global_parameters->select_limit= - HA_POS_ERROR; - subs= item= new Item_maxmin_subselect(this, select_lex, func->l_op()); + subs= item= new Item_maxmin_subselect(thd, this, select_lex, func->l_op()); if (upper_item) upper_item->set_sub_test(item); } @@ -777,15 +955,15 @@ Item_in_subselect::single_value_transformer(JOIN *join, if (!substitution) { - //first call for this unit - SELECT_LEX_UNIT *unit= select_lex->master_unit(); + /* We're invoked for the 1st (or the only) SELECT in the subquery UNION */ + 
SELECT_LEX_UNIT *master_unit= select_lex->master_unit(); substitution= optimizer; SELECT_LEX *current= thd->lex->current_select, *up; thd->lex->current_select= up= current->return_after_parsing(); //optimizer never use Item **ref => we can pass 0 as parameter - if (!optimizer || optimizer->fix_left(thd, up->get_table_list(), 0)) + if (!optimizer || optimizer->fix_left(thd, 0)) { thd->lex->current_select= current; DBUG_RETURN(RES_ERROR); @@ -793,82 +971,122 @@ Item_in_subselect::single_value_transformer(JOIN *join, thd->lex->current_select= current; /* - As far as Item_ref_in_optimizer do not substitude itself on fix_fields + As far as Item_ref_in_optimizer do not substitute itself on fix_fields we can use same item for all selects. */ - expr= new Item_direct_ref((Item**)optimizer->get_cache(), + expr= new Item_direct_ref(&select_lex->context, + (Item**)optimizer->get_cache(), (char *)"<no matter>", (char *)in_left_expr_name); - unit->uncacheable|= UNCACHEABLE_DEPENDENT; + master_unit->uncacheable|= UNCACHEABLE_DEPENDENT; + } + if (!abort_on_null && left_expr->maybe_null && !pushed_cond_guards) + { + if (!(pushed_cond_guards= (bool*)join->thd->alloc(sizeof(bool)))) + DBUG_RETURN(RES_ERROR); + pushed_cond_guards[0]= TRUE; } select_lex->uncacheable|= UNCACHEABLE_DEPENDENT; - Item *item; - - item= (Item*) select_lex->item_list.head(); - /* - Add the left part of a subselect to a WHERE or HAVING clause of - the right part, e.g. SELECT 1 IN (SELECT a FROM t1) => - SELECT Item_in_optimizer(1, SELECT a FROM t1 WHERE a=1) - HAVING is used only if the right part contains a SUM function, a GROUP - BY or a HAVING clause. 
- */ if (join->having || select_lex->with_sum_func || select_lex->group_list.elements) { - item= func->create(expr, - new Item_ref_null_helper(this, - select_lex->ref_pointer_array, - (char *)"<ref>", - this->full_name())); + bool tmp; + Item *item= func->create(expr, + new Item_ref_null_helper(&select_lex->context, + this, + select_lex-> + ref_pointer_array, + (char *)"<ref>", + this->full_name())); + if (!abort_on_null && left_expr->maybe_null) + { + /* + We can encounter "NULL IN (SELECT ...)". Wrap the added condition + within a trig_cond. + */ + item= new Item_func_trig_cond(item, get_cond_guard(0)); + } + /* AND and comparison functions can't be changed during fix_fields() we can assign select_lex->having here, and pass 0 as last argument (reference) to fix_fields() */ select_lex->having= join->having= and_items(join->having, item); + if (join->having == item) + item->name= (char*)in_having_cond; select_lex->having_fix_field= 1; - if (join->having->fix_fields(thd, join->tables_list, 0)) - { - select_lex->having_fix_field= 0; - DBUG_RETURN(RES_ERROR); - } + /* + we do not check join->having->fixed, because Item_and (from and_items) + or comparison function (from func->create) can't be fixed after creation + */ + tmp= join->having->fix_fields(thd, 0); select_lex->having_fix_field= 0; + if (tmp) + DBUG_RETURN(RES_ERROR); } else { + Item *item= (Item*) select_lex->item_list.head(); + if (select_lex->table_list.elements) { + bool tmp; Item *having= item, *orig_item= item; select_lex->item_list.empty(); select_lex->item_list.push_back(new Item_int("Not_used", - (longlong) 1, 21)); + (longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); select_lex->ref_pointer_array[0]= select_lex->item_list.head(); + item= func->create(expr, item); if (!abort_on_null && orig_item->maybe_null) { having= new Item_is_not_null_test(this, having); + if (left_expr->maybe_null) + { + if (!(having= new Item_func_trig_cond(having, + get_cond_guard(0)))) + DBUG_RETURN(RES_ERROR); + } /* 
Item_is_not_null_test can't be changed during fix_fields() we can assign select_lex->having here, and pass 0 as last argument (reference) to fix_fields() */ - select_lex->having= - join->having= (join->having ? - new Item_cond_and(having, join->having) : - having); + having->name= (char*)in_having_cond; + select_lex->having= join->having= having; select_lex->having_fix_field= 1; - if (join->having->fix_fields(thd, join->tables_list, 0)) - { - select_lex->having_fix_field= 0; + /* + we do not check join->having->fixed, because Item_and (from + and_items) or comparison function (from func->create) can't be + fixed after creation + */ + tmp= join->having->fix_fields(thd, 0); + select_lex->having_fix_field= 0; + if (tmp) DBUG_RETURN(RES_ERROR); - } - select_lex->having_fix_field= 0; item= new Item_cond_or(item, new Item_func_isnull(orig_item)); } + /* + If we may encounter NULL IN (SELECT ...) and care whether subquery + result is NULL or FALSE, wrap condition in a trig_cond. + */ + if (!abort_on_null && left_expr->maybe_null) + { + if (!(item= new Item_func_trig_cond(item, get_cond_guard(0)))) + DBUG_RETURN(RES_ERROR); + } + /* + TODO: figure out why the following is done here in + single_value_transformer but there is no corresponding action in + row_value_transformer? 
+ */ item->name= (char *)in_additional_cond; + /* AND can't be changed during fix_fields() we can assign select_lex->having here, and pass 0 as last @@ -876,11 +1094,16 @@ Item_in_subselect::single_value_transformer(JOIN *join, */ select_lex->where= join->conds= and_items(join->conds, item); select_lex->where->top_level_item(); - if (join->conds->fix_fields(thd, join->tables_list, 0)) + /* + we do not check join->conds->fixed, because Item_and can't be fixed + after creation + */ + if (join->conds->fix_fields(thd, 0)) DBUG_RETURN(RES_ERROR); } else { + bool tmp; if (select_lex->master_unit()->first_select()->next_select()) { /* @@ -888,28 +1111,36 @@ Item_in_subselect::single_value_transformer(JOIN *join, we can assign select_lex->having here, and pass 0 as last argument (reference) to fix_fields() */ - select_lex->having= - join->having= - func->create(expr, - new Item_ref_null_helper(this, + Item *new_having= + func->create(expr, + new Item_ref_null_helper(&select_lex->context, this, select_lex->ref_pointer_array, (char *)"<no matter>", (char *)"<result>")); - + if (!abort_on_null && left_expr->maybe_null) + { + if (!(new_having= new Item_func_trig_cond(new_having, + get_cond_guard(0)))) + DBUG_RETURN(RES_ERROR); + } + new_having->name= (char*)in_having_cond; + select_lex->having= join->having= new_having; select_lex->having_fix_field= 1; - if (join->having->fix_fields(thd, join->tables_list, - 0)) - { - select_lex->having_fix_field= 0; + + /* + we do not check join->having->fixed, because comparison function + (from func->create) can't be fixed after creation + */ + tmp= join->having->fix_fields(thd, 0); + select_lex->having_fix_field= 0; + if (tmp) DBUG_RETURN(RES_ERROR); - } - select_lex->having_fix_field= 0; } else { // it is single select without tables => possible optimization item= func->create(left_expr, item); - // fix_field of item will be done in time of substituting + // fix_field of item will be done in time of substituting substitution= item; 
have_to_be_excluded= 1; if (thd->lex->describe) @@ -948,23 +1179,32 @@ Item_in_subselect::row_value_transformer(JOIN *join) if (!substitution) { //first call for this unit - SELECT_LEX_UNIT *unit= select_lex->master_unit(); + SELECT_LEX_UNIT *master_unit= select_lex->master_unit(); substitution= optimizer; SELECT_LEX *current= thd->lex->current_select, *up; thd->lex->current_select= up= current->return_after_parsing(); //optimizer never use Item **ref => we can pass 0 as parameter - if (!optimizer || optimizer->fix_left(thd, up->get_table_list(), 0)) + if (!optimizer || optimizer->fix_left(thd, 0)) { thd->lex->current_select= current; DBUG_RETURN(RES_ERROR); } - // we will refer to apper level cache array => we have to save it in PS + // we will refer to upper level cache array => we have to save it in PS optimizer->keep_top_level_cache(); thd->lex->current_select= current; - unit->uncacheable|= UNCACHEABLE_DEPENDENT; + master_unit->uncacheable|= UNCACHEABLE_DEPENDENT; + + if (!abort_on_null && left_expr->maybe_null && !pushed_cond_guards) + { + if (!(pushed_cond_guards= (bool*)join->thd->alloc(sizeof(bool) * + left_expr->cols()))) + DBUG_RETURN(RES_ERROR); + for (uint i= 0; i < cols_num; i++) + pushed_cond_guards[i]= TRUE; + } } select_lex->uncacheable|= UNCACHEABLE_DEPENDENT; @@ -981,46 +1221,57 @@ Item_in_subselect::row_value_transformer(JOIN *join) is_not_null_test(v3)) where is_not_null_test used to register nulls in case if we have not found matching to return correct NULL value + TODO: say here explicitly if the order of AND parts matters or not. 
*/ Item *item_having_part2= 0; for (uint i= 0; i < cols_num; i++) { DBUG_ASSERT(left_expr->fixed && select_lex->ref_pointer_array[i]->fixed); if (select_lex->ref_pointer_array[i]-> - check_cols(left_expr->el(i)->cols())) + check_cols(left_expr->element_index(i)->cols())) DBUG_RETURN(RES_ERROR); Item *item_eq= new Item_func_eq(new - Item_ref((*optimizer->get_cache())-> - addr(i), - (char *)"<no matter>", - (char *)in_left_expr_name), + Item_ref(&select_lex->context, + (*optimizer->get_cache())-> + addr(i), + (char *)"<no matter>", + (char *)in_left_expr_name), new - Item_ref(select_lex->ref_pointer_array + i, - (char *)"<no matter>", - (char *)"<list ref>") + Item_ref(&select_lex->context, + select_lex->ref_pointer_array + i, + (char *)"<no matter>", + (char *)"<list ref>") ); Item *item_isnull= new Item_func_isnull(new - Item_ref( select_lex-> - ref_pointer_array+i, - (char *)"<no matter>", - (char *)"<list ref>") + Item_ref(&select_lex->context, + select_lex->ref_pointer_array+i, + (char *)"<no matter>", + (char *)"<list ref>") ); - having_item= - and_items(having_item, - new Item_cond_or(item_eq, item_isnull)); - item_having_part2= - and_items(item_having_part2, - new - Item_is_not_null_test(this, - new - Item_ref(select_lex-> - ref_pointer_array + i, - (char *)"<no matter>", - (char *)"<list ref>") - ) - ); + Item *col_item= new Item_cond_or(item_eq, item_isnull); + if (!abort_on_null && left_expr->element_index(i)->maybe_null) + { + if (!(col_item= new Item_func_trig_cond(col_item, get_cond_guard(i)))) + DBUG_RETURN(RES_ERROR); + } + having_item= and_items(having_item, col_item); + + Item *item_nnull_test= + new Item_is_not_null_test(this, + new Item_ref(&select_lex->context, + select_lex-> + ref_pointer_array + i, + (char *)"<no matter>", + (char *)"<list ref>")); + if (!abort_on_null && left_expr->element_index(i)->maybe_null) + { + if (!(item_nnull_test= + new Item_func_trig_cond(item_nnull_test, get_cond_guard(i)))) + DBUG_RETURN(RES_ERROR); + } + 
item_having_part2= and_items(item_having_part2, item_nnull_test); item_having_part2->top_level_item(); } having_item= and_items(having_item, item_having_part2); @@ -1051,44 +1302,56 @@ Item_in_subselect::row_value_transformer(JOIN *join) Item *item, *item_isnull; DBUG_ASSERT(left_expr->fixed && select_lex->ref_pointer_array[i]->fixed); if (select_lex->ref_pointer_array[i]-> - check_cols(left_expr->el(i)->cols())) + check_cols(left_expr->element_index(i)->cols())) DBUG_RETURN(RES_ERROR); item= new Item_func_eq(new - Item_direct_ref((*optimizer->get_cache())-> + Item_direct_ref(&select_lex->context, + (*optimizer->get_cache())-> addr(i), (char *)"<no matter>", (char *)in_left_expr_name), new - Item_direct_ref( select_lex-> - ref_pointer_array+i, - (char *)"<no matter>", - (char *)"<list ref>") + Item_direct_ref(&select_lex->context, + select_lex-> + ref_pointer_array+i, + (char *)"<no matter>", + (char *)"<list ref>") ); if (!abort_on_null) { - having_item= - and_items(having_item, - new - Item_is_not_null_test(this, - new - Item_ref(select_lex-> - ref_pointer_array + i, - (char *)"<no matter>", - (char *)"<list ref>") - ) - ); + Item *having_col_item= + new Item_is_not_null_test(this, + new + Item_ref(&select_lex->context, + select_lex->ref_pointer_array + i, + (char *)"<no matter>", + (char *)"<list ref>")); + + item_isnull= new Item_func_isnull(new - Item_direct_ref( select_lex-> - ref_pointer_array+i, - (char *)"<no matter>", - (char *)"<list ref>") + Item_direct_ref(&select_lex->context, + select_lex-> + ref_pointer_array+i, + (char *)"<no matter>", + (char *)"<list ref>") ); - item= new Item_cond_or(item, item_isnull); + /* + TODO: why we create the above for cases where the right part + cant be NULL? 
+ */ + if (left_expr->element_index(i)->maybe_null) + { + if (!(item= new Item_func_trig_cond(item, get_cond_guard(i)))) + DBUG_RETURN(RES_ERROR); + if (!(having_col_item= + new Item_func_trig_cond(having_col_item, get_cond_guard(i)))) + DBUG_RETURN(RES_ERROR); + } + having_item= and_items(having_item, having_col_item); } - where_item= and_items(where_item, item); } /* @@ -1098,13 +1361,15 @@ Item_in_subselect::row_value_transformer(JOIN *join) */ select_lex->where= join->conds= and_items(join->conds, where_item); select_lex->where->top_level_item(); - if (join->conds->fix_fields(thd, join->tables_list, 0)) + if (join->conds->fix_fields(thd, 0)) DBUG_RETURN(RES_ERROR); } if (having_item) { bool res; select_lex->having= join->having= and_items(join->having, having_item); + if (having_item == select_lex->having) + having_item->name= (char*)in_having_cond; select_lex->having->top_level_item(); /* AND can't be changed during fix_fields() @@ -1112,13 +1377,14 @@ Item_in_subselect::row_value_transformer(JOIN *join) argument (reference) to fix_fields() */ select_lex->having_fix_field= 1; - res= join->having->fix_fields(thd, join->tables_list, 0); + res= join->having->fix_fields(thd, 0); select_lex->having_fix_field= 0; if (res) { DBUG_RETURN(RES_ERROR); } } + DBUG_RETURN(RES_OK); } @@ -1154,7 +1420,7 @@ Item_in_subselect::select_transformer(JOIN *join) Item_subselect::trans_res Item_in_subselect::select_in_like_transformer(JOIN *join, Comp_creator *func) { - Item_arena *arena, backup; + Query_arena *arena, backup; SELECT_LEX *current= thd->lex->current_select, *up; const char *save_where= thd->where; Item_subselect::trans_res res= RES_ERROR; @@ -1176,18 +1442,17 @@ Item_in_subselect::select_in_like_transformer(JOIN *join, Comp_creator *func) */ if (!optimizer) { - arena= thd->change_arena_if_needed(&backup); + arena= thd->activate_stmt_arena_if_needed(&backup); result= (!(optimizer= new Item_in_optimizer(left_expr, this))); if (arena) - 
thd->restore_backup_item_arena(arena, &backup); + thd->restore_active_arena(arena, &backup); if (result) goto err; } thd->lex->current_select= up= current->return_after_parsing(); result= (!left_expr->fixed && - left_expr->fix_fields(thd, up->get_table_list(), - optimizer->arguments())); + left_expr->fix_fields(thd, optimizer->arguments())); /* fix_fields can change reference to left_expr, we need reassign it */ left_expr= optimizer->arguments()[0]; @@ -1196,7 +1461,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join, Comp_creator *func) goto err; transformed= 1; - arena= thd->change_arena_if_needed(&backup); + arena= thd->activate_stmt_arena_if_needed(&backup); /* Both transformers call fix_fields() only for Items created inside them, and all that items do not make permanent changes in current item arena @@ -1212,14 +1477,14 @@ Item_in_subselect::select_in_like_transformer(JOIN *join, Comp_creator *func) if (func != &eq_creator) { if (arena) - thd->restore_backup_item_arena(arena, &backup); + thd->restore_active_arena(arena, &backup); my_error(ER_OPERAND_COLUMNS, MYF(0), 1); DBUG_RETURN(RES_ERROR); } res= row_value_transformer(join); } if (arena) - thd->restore_backup_item_arena(arena, &backup); + thd->restore_active_arena(arena, &backup); err: thd->where= save_where; DBUG_RETURN(res); @@ -1229,16 +1494,27 @@ err: void Item_in_subselect::print(String *str) { if (transformed) - str->append("<exists>", 8); + str->append(STRING_WITH_LEN("<exists>")); else { left_expr->print(str); - str->append(" in ", 4); + str->append(STRING_WITH_LEN(" in ")); } Item_subselect::print(str); } +bool Item_in_subselect::fix_fields(THD *thd_arg, Item **ref) +{ + bool result = 0; + + if (thd_arg->lex->view_prepare_mode && left_expr && !left_expr->fixed) + result = left_expr->fix_fields(thd_arg, &left_expr); + + return result || Item_subselect::fix_fields(thd_arg, ref); +} + + Item_subselect::trans_res Item_allany_subselect::select_transformer(JOIN *join) { @@ -1252,7 +1528,7 @@ 
Item_allany_subselect::select_transformer(JOIN *join) void Item_allany_subselect::print(String *str) { if (transformed) - str->append("<exists>", 8); + str->append(STRING_WITH_LEN("<exists>")); else { left_expr->print(str); @@ -1264,24 +1540,23 @@ void Item_allany_subselect::print(String *str) } +void subselect_engine::set_thd(THD *thd_arg) +{ + thd= thd_arg; + if (result) + result->set_thd(thd_arg); +} + + subselect_single_select_engine:: subselect_single_select_engine(st_select_lex *select, - select_subselect *result, - Item_subselect *item) - :subselect_engine(item, result), - prepared(0), optimized(0), executed(0), join(0) + select_subselect *result_arg, + Item_subselect *item_arg) + :subselect_engine(item_arg, result_arg), + prepared(0), optimized(0), executed(0), + select_lex(select), join(0) { - select_lex= select; - SELECT_LEX_UNIT *unit= select_lex->master_unit(); - unit->offset_limit_cnt= unit->global_parameters->offset_limit; - unit->select_limit_cnt= unit->global_parameters->select_limit+ - unit->global_parameters ->offset_limit; - if (unit->select_limit_cnt < unit->global_parameters->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // no limit - if (unit->select_limit_cnt == HA_POS_ERROR) - select_lex->options&= ~OPTION_FOUND_ROWS; - unit->item= item; - this->select_lex= select_lex; + select_lex->master_unit()->item= item_arg; } @@ -1304,6 +1579,33 @@ void subselect_union_engine::cleanup() } +bool subselect_union_engine::is_executed() const +{ + return unit->executed; +} + + +/* + Check if last execution of the subquery engine produced any rows + + SYNOPSIS + subselect_union_engine::no_rows() + + DESCRIPTION + Check if last execution of the subquery engine produced any rows. The + return value is undefined if last execution ended in an error. + + RETURN + TRUE - Last subselect execution has produced no rows + FALSE - Otherwise +*/ + +bool subselect_union_engine::no_rows() +{ + /* Check if we got any rows when reading UNION result from temp. 
table: */ + return test(!unit->fake_select_lex->join->send_records); +} + void subselect_uniquesubquery_engine::cleanup() { DBUG_ENTER("subselect_uniquesubquery_engine::cleanup"); @@ -1359,7 +1661,7 @@ int subselect_single_select_engine::prepare() int subselect_union_engine::prepare() { - return unit->prepare(thd, result, SELECT_NO_UNLOCK, ""); + return unit->prepare(thd, result, SELECT_NO_UNLOCK); } int subselect_uniquesubquery_engine::prepare() @@ -1369,6 +1671,28 @@ int subselect_uniquesubquery_engine::prepare() return 1; } + +/* + Check if last execution of the subquery engine produced any rows + + SYNOPSIS + subselect_single_select_engine::no_rows() + + DESCRIPTION + Check if last execution of the subquery engine produced any rows. The + return value is undefined if last execution ended in an error. + + RETURN + TRUE - Last subselect execution has produced no rows + FALSE - Otherwise +*/ + +bool subselect_single_select_engine::no_rows() +{ + return !item->assigned(); +} + + /* makes storage for the output values for the subquery and calcuates their data and column types and their nullability. 
@@ -1385,6 +1709,7 @@ void subselect_engine::set_row(List<Item> &item_list, Item_cache **row) res_type= sel_item->result_type(); res_field_type= sel_item->field_type(); item->decimals= sel_item->decimals; + item->unsigned_flag= sel_item->unsigned_flag; maybe_null= sel_item->maybe_null; if (!(row[i]= Item_cache::get_cache(res_type))) return; @@ -1426,20 +1751,27 @@ void subselect_uniquesubquery_engine::fix_length_and_dec(Item_cache **row) DBUG_ASSERT(0); } +int init_read_record_seq(JOIN_TAB *tab); +int join_read_always_key_or_null(JOIN_TAB *tab); +int join_read_next_same_or_null(READ_RECORD *info); + int subselect_single_select_engine::exec() { DBUG_ENTER("subselect_single_select_engine::exec"); - char const *save_where= join->thd->where; - SELECT_LEX *save_select= join->thd->lex->current_select; - join->thd->lex->current_select= select_lex; + char const *save_where= thd->where; + SELECT_LEX *save_select= thd->lex->current_select; + thd->lex->current_select= select_lex; if (!optimized) { - optimized=1; + SELECT_LEX_UNIT *unit= select_lex->master_unit(); + + optimized= 1; + unit->set_limit(unit->global_parameters); if (join->optimize()) { - join->thd->where= save_where; + thd->where= save_where; executed= 1; - join->thd->lex->current_select= save_select; + thd->lex->current_select= save_select; DBUG_RETURN(join->error ? 
join->error : 1); } if (item->engine_changed) @@ -1451,8 +1783,8 @@ int subselect_single_select_engine::exec() { if (join->reinit()) { - join->thd->where= save_where; - join->thd->lex->current_select= save_select; + thd->where= save_where; + thd->lex->current_select= save_select; DBUG_RETURN(1); } item->reset(); @@ -1461,40 +1793,212 @@ int subselect_single_select_engine::exec() if (!executed) { item->reset_value_registration(); + JOIN_TAB *changed_tabs[MAX_TABLES]; + JOIN_TAB **last_changed_tab= changed_tabs; + if (item->have_guarded_conds()) + { + /* + For at least one of the pushed predicates the following is true: + We should not apply optimizations based on the condition that was + pushed down into the subquery. Those optimizations are ref[_or_null] + acceses. Change them to be full table scans. + */ + for (uint i=join->const_tables ; i < join->tables ; i++) + { + JOIN_TAB *tab=join->join_tab+i; + if (tab && tab->keyuse) + { + for (uint i= 0; i < tab->ref.key_parts; i++) + { + bool *cond_guard= tab->ref.cond_guards[i]; + if (cond_guard && !*cond_guard) + { + /* Change the access method to full table scan */ + tab->read_first_record= init_read_record_seq; + tab->read_record.record= tab->table->record[0]; + tab->read_record.thd= join->thd; + tab->read_record.ref_length= tab->table->file->ref_length; + *(last_changed_tab++)= tab; + break; + } + } + } + } + } + join->exec(); + + /* Enable the optimizations back */ + for (JOIN_TAB **ptab= changed_tabs; ptab != last_changed_tab; ptab++) + { + JOIN_TAB *tab= *ptab; + tab->read_record.record= 0; + tab->read_record.ref_length= 0; + tab->read_first_record= join_read_always_key_or_null; + tab->read_record.read_record= join_read_next_same_or_null; + } executed= 1; - join->thd->where= save_where; - join->thd->lex->current_select= save_select; + thd->where= save_where; + thd->lex->current_select= save_select; DBUG_RETURN(join->error||thd->is_fatal_error); } - join->thd->where= save_where; - join->thd->lex->current_select= 
save_select; + thd->where= save_where; + thd->lex->current_select= save_select; DBUG_RETURN(0); } int subselect_union_engine::exec() { - char const *save_where= unit->thd->where; + char const *save_where= thd->where; int res= unit->exec(); - unit->thd->where= save_where; + thd->where= save_where; return res; } -int subselect_uniquesubquery_engine::exec() +/* + Search for at least one row satisfying select condition + + SYNOPSIS + subselect_uniquesubquery_engine::scan_table() + + DESCRIPTION + Scan the table using sequential access until we find at least one row + satisfying select condition. + + The caller must set this->empty_result_set=FALSE before calling this + function. This function will set it to TRUE if it finds a matching row. + + RETURN + FALSE - OK + TRUE - Error +*/ + +int subselect_uniquesubquery_engine::scan_table() { - DBUG_ENTER("subselect_uniquesubquery_engine::exec"); int error; TABLE *table= tab->table; - for (store_key **copy=tab->ref.key_copy ; *copy ; copy++) + DBUG_ENTER("subselect_uniquesubquery_engine::scan_table"); + + if (table->file->inited) + table->file->ha_index_end(); + + table->file->ha_rnd_init(1); + table->file->extra_opt(HA_EXTRA_CACHE, + current_thd->variables.read_buff_size); + table->null_row= 0; + for (;;) + { + error=table->file->rnd_next(table->record[0]); + if (error && error != HA_ERR_END_OF_FILE) + { + error= report_error(table, error); + break; + } + /* No more rows */ + if (table->status) + break; + + if (!cond || cond->val_int()) + { + empty_result_set= FALSE; + break; + } + } + + table->file->ha_rnd_end(); + DBUG_RETURN(error != 0); +} + + +/* + Copy ref key and check for null parts in it + + SYNOPSIS + subselect_uniquesubquery_engine::copy_ref_key() + + DESCRIPTION + Copy ref key and check for null parts in it. + + RETURN + FALSE - ok, index lookup key without keys copied. 
+ TRUE - an error occured while copying the key +*/ + +bool subselect_uniquesubquery_engine::copy_ref_key() +{ + DBUG_ENTER("subselect_uniquesubquery_engine::copy_ref_key"); + + for (store_key **copy= tab->ref.key_copy ; *copy ; copy++) { - if ((tab->ref.key_err= (*copy)->copy()) & 1) + tab->ref.key_err= (*copy)->copy(); + + /* + When there is a NULL part in the key we don't need to make index + lookup for such key thus we don't need to copy whole key. + If we later should do a sequential scan return OK. Fail otherwise. + + See also the comment for the subselect_uniquesubquery_engine::exec() + function. + */ + null_keypart= (*copy)->null_key; + bool top_level= ((Item_in_subselect *) item)->is_top_level_item(); + if (null_keypart && !top_level) + break; + if ((tab->ref.key_err) & 1 || (null_keypart && top_level)) { - table->status= STATUS_NOT_FOUND; + tab->table->status= STATUS_NOT_FOUND; DBUG_RETURN(1); } } + DBUG_RETURN(0); +} + +/* + Execute subselect + + SYNOPSIS + subselect_uniquesubquery_engine::exec() + + DESCRIPTION + Find rows corresponding to the ref key using index access. + If some part of the lookup key is NULL, then we're evaluating + NULL IN (SELECT ... ) + This is a special case, we don't need to search for NULL in the table, + instead, the result value is + - NULL if select produces empty row set + - FALSE otherwise. + + In some cases (IN subselect is a top level item, i.e. abort_on_null==TRUE) + the caller doesn't distinguish between NULL and FALSE result and we just + return FALSE. + Otherwise we make a full table scan to see if there is at least one + matching row. + + The result of this function (info about whether a row was found) is + stored in this->empty_result_set. 
+ NOTE + + RETURN + FALSE - ok + TRUE - an error occured while scanning +*/ + +int subselect_uniquesubquery_engine::exec() +{ + DBUG_ENTER("subselect_uniquesubquery_engine::exec"); + int error; + TABLE *table= tab->table; + empty_result_set= TRUE; + + /* TODO: change to use of 'full_scan' here? */ + if (copy_ref_key()) + DBUG_RETURN(1); + + if (null_keypart) + DBUG_RETURN(scan_table()); + if (!table->file->inited) table->file->ha_index_init(tab->ref.key); error= table->file->index_read(table->record[0], @@ -1507,9 +2011,13 @@ int subselect_uniquesubquery_engine::exec() { error= 0; table->null_row= 0; - ((Item_in_subselect *) item)->value= (!table->status && - (!cond || cond->val_int()) ? 1 : - 0); + if (!table->status && (!cond || cond->val_int())) + { + ((Item_in_subselect *) item)->value= 1; + empty_result_set= FALSE; + } + else + ((Item_in_subselect *) item)->value= 0; } DBUG_RETURN(error != 0); @@ -1523,14 +2031,68 @@ subselect_uniquesubquery_engine::~subselect_uniquesubquery_engine() } +/* + Index-lookup subselect 'engine' - run the subquery + + SYNOPSIS + subselect_uniquesubquery_engine:exec() + full_scan + + DESCRIPTION + The engine is used to resolve subqueries in form + + oe IN (SELECT key FROM tbl WHERE subq_where) + + The value of the predicate is calculated as follows: + 1. If oe IS NULL, this is a special case, do a full table scan on + table tbl and search for row that satisfies subq_where. If such + row is found, return NULL, otherwise return FALSE. + 2. Make an index lookup via key=oe, search for a row that satisfies + subq_where. If found, return TRUE. + 3. If check_null==TRUE, make another lookup via key=NULL, search for a + row that satisfies subq_where. If found, return NULL, otherwise + return FALSE. + + TODO + The step #1 can be optimized further when the index has several key + parts. 
Consider a subquery: + + (oe1, oe2) IN (SELECT keypart1, keypart2 FROM tbl WHERE subq_where) + + and suppose we need to evaluate it for {oe1, oe2}=={const1, NULL}. + Current code will do a full table scan and obtain correct result. There + is a better option: instead of evaluating + + SELECT keypart1, keypart2 FROM tbl WHERE subq_where (1) + + and checking if it has produced any matching rows, evaluate + + SELECT keypart2 FROM tbl WHERE subq_where AND keypart1=const1 (2) + + If this query produces a row, the result is NULL (as we're evaluating + "(const1, NULL) IN { (const1, X), ... }", which has a value of UNKNOWN, + i.e. NULL). If the query produces no rows, the result is FALSE. + + We currently evaluate (1) by doing a full table scan. (2) can be + evaluated by doing a "ref" scan on "keypart1=const1", which can be much + cheaper. We can use index statistics to quickly check whether "ref" scan + will be cheaper than full table scan. + + RETURN + 0 + 1 +*/ + int subselect_indexsubquery_engine::exec() { - DBUG_ENTER("subselect_indexsubselect_engine::exec"); + DBUG_ENTER("subselect_indexsubquery_engine::exec"); int error; bool null_finding= 0; TABLE *table= tab->table; ((Item_in_subselect *) item)->value= 0; + empty_result_set= TRUE; + null_keypart= 0; if (check_null) { @@ -1539,14 +2101,12 @@ int subselect_indexsubquery_engine::exec() ((Item_in_subselect *) item)->was_null= 0; } - for (store_key **copy=tab->ref.key_copy ; *copy ; copy++) - { - if ((tab->ref.key_err= (*copy)->copy()) & 1) - { - table->status= STATUS_NOT_FOUND; - DBUG_RETURN(1); - } - } + /* Copy the ref key and check for nulls... 
*/ + if (copy_ref_key()) + DBUG_RETURN(1); + + if (null_keypart) + DBUG_RETURN(scan_table()); if (!table->file->inited) table->file->ha_index_init(tab->ref.key); @@ -1564,8 +2124,9 @@ int subselect_indexsubquery_engine::exec() table->null_row= 0; if (!table->status) { - if (!cond || cond->val_int()) + if ((!cond || cond->val_int()) && (!having || having->val_int())) { + empty_result_set= FALSE; if (null_finding) ((Item_in_subselect *) item)->was_null= 1; else @@ -1599,7 +2160,7 @@ int subselect_indexsubquery_engine::exec() uint subselect_single_select_engine::cols() { - DBUG_ASSERT(select_lex->join); // should be called after fix_fields() + DBUG_ASSERT(select_lex->join != 0); // should be called after fix_fields() return select_lex->join->fields_list.elements; } @@ -1644,7 +2205,7 @@ void subselect_uniquesubquery_engine::exclude() table_map subselect_engine::calc_const_tables(TABLE_LIST *table) { table_map map= 0; - for (; table; table= table->next) + for (; table; table= table->next_leaf) { TABLE *tbl= table->table; if (tbl && tbl->const_table) @@ -1657,14 +2218,13 @@ table_map subselect_engine::calc_const_tables(TABLE_LIST *table) table_map subselect_single_select_engine::upper_select_const_tables() { return calc_const_tables((TABLE_LIST *) select_lex->outer_select()-> - table_list.first); + leaf_tables); } table_map subselect_union_engine::upper_select_const_tables() { - return calc_const_tables((TABLE_LIST *) unit->outer_select()-> - table_list.first); + return calc_const_tables((TABLE_LIST *) unit->outer_select()->leaf_tables); } @@ -1682,16 +2242,16 @@ void subselect_union_engine::print(String *str) void subselect_uniquesubquery_engine::print(String *str) { - str->append("<primary_index_lookup>(", 23); + str->append(STRING_WITH_LEN("<primary_index_lookup>(")); tab->ref.items[0]->print(str); - str->append(" in ", 4); - str->append(tab->table->real_name); + str->append(STRING_WITH_LEN(" in ")); + str->append(tab->table->s->table_name); KEY *key_info= 
tab->table->key_info+ tab->ref.key; - str->append(" on ", 4); + str->append(STRING_WITH_LEN(" on ")); str->append(key_info->name); if (cond) { - str->append(" where ", 7); + str->append(STRING_WITH_LEN(" where ")); cond->print(str); } str->append(')'); @@ -1700,38 +2260,43 @@ void subselect_uniquesubquery_engine::print(String *str) void subselect_indexsubquery_engine::print(String *str) { - str->append("<index_lookup>(", 15); + str->append(STRING_WITH_LEN("<index_lookup>(")); tab->ref.items[0]->print(str); - str->append(" in ", 4); - str->append(tab->table->real_name); + str->append(STRING_WITH_LEN(" in ")); + str->append(tab->table->s->table_name); KEY *key_info= tab->table->key_info+ tab->ref.key; - str->append(" on ", 4); + str->append(STRING_WITH_LEN(" on ")); str->append(key_info->name); if (check_null) - str->append(" chicking NULL", 14); - if (cond) + str->append(STRING_WITH_LEN(" checking NULL")); + if (cond) { - str->append(" where ", 7); + str->append(STRING_WITH_LEN(" where ")); cond->print(str); } + if (having) + { + str->append(STRING_WITH_LEN(" having ")); + having->print(str); + } str->append(')'); } /* change select_result object of engine - SINOPSYS + SYNOPSIS subselect_single_select_engine::change_result() si new subselect Item res new select_result object RETURN - 0 OK - -1 error + FALSE OK + TRUE error */ -int subselect_single_select_engine::change_item(Item_subselect *si, - select_subselect *res) +bool subselect_single_select_engine::change_result(Item_subselect *si, + select_subselect *res) { item= si; result= res; @@ -1742,18 +2307,18 @@ int subselect_single_select_engine::change_item(Item_subselect *si, /* change select_result object of engine - SINOPSYS + SYNOPSIS subselect_single_select_engine::change_result() si new subselect Item res new select_result object RETURN - 0 OK - -1 error + FALSE OK + TRUE error */ -int subselect_union_engine::change_item(Item_subselect *si, - select_subselect *res) +bool 
subselect_union_engine::change_result(Item_subselect *si, + select_subselect *res) { item= si; int rc= unit->change_result(res, result); @@ -1765,27 +2330,28 @@ int subselect_union_engine::change_item(Item_subselect *si, /* change select_result emulation, never should be called - SINOPSYS + SYNOPSIS subselect_single_select_engine::change_result() si new subselect Item res new select_result object RETURN - -1 error + FALSE OK + TRUE error */ -int subselect_uniquesubquery_engine::change_item(Item_subselect *si, - select_subselect *res) +bool subselect_uniquesubquery_engine::change_result(Item_subselect *si, + select_subselect *res) { DBUG_ASSERT(0); - return -1; + return TRUE; } /* Report about presence of tables in subquery - SINOPSYS + SYNOPSIS subselect_single_select_engine::no_tables() RETURN @@ -1817,7 +2383,7 @@ bool subselect_single_select_engine::may_be_null() /* Report about presence of tables in subquery - SINOPSYS + SYNOPSIS subselect_union_engine::no_tables() RETURN @@ -1838,7 +2404,7 @@ bool subselect_union_engine::no_tables() /* Report about presence of tables in subquery - SINOPSYS + SYNOPSIS subselect_uniquesubquery_engine::no_tables() RETURN diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 539dcc5676a..6b605e96432 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -26,7 +25,6 @@ class JOIN; class select_subselect; class subselect_engine; class Item_bool_func2; -class Statement; /* base class for subselects */ @@ -61,6 +59,9 @@ public: /* subquery is transformed */ bool changed; + /* TRUE <=> The underlying SELECT is correlated w.r.t some ancestor select */ + bool is_correlated; + enum trans_res {RES_OK, RES_REDUCE, RES_ERROR}; enum subs_type {UNKNOWN_SUBS, SINGLEROW_SUBS, EXISTS_SUBS, IN_SUBS, ALL_SUBS, ANY_SUBS}; @@ -89,19 +90,21 @@ public: enum Type type() const; bool is_null() { - val_int(); + update_null_value(); return null_value; } - bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref); + bool fix_fields(THD *thd, Item **ref); virtual bool exec(); virtual void fix_length_and_dec(); table_map used_tables() const; + table_map not_null_tables() const { return 0; } bool const_item() const; inline table_map get_used_tables_cache() { return used_tables_cache; } inline bool get_const_item_cache() { return const_item_cache; } Item *get_tmp_table_item(THD *thd); void update_used_tables(); void print(String *str); + virtual bool have_guarded_conds() { return FALSE; } bool change_engine(subselect_engine *eng) { old_engine= engine; @@ -110,15 +113,33 @@ public: return eng == 0; } /* + True if this subquery has been already evaluated. Implemented only for + single select and union subqueries only. + */ + bool is_evaluated() const; + bool is_uncacheable() const; + + /* Used by max/min subquery to initialize value presence registration mechanism. Engine call this method before rexecution query. */ virtual void reset_value_registration() {} + enum_parsing_place place() { return parsing_place; } + + /** + Get the SELECT_LEX structure associated with this Item. 
+ @return the SELECT_LEX structure associated with this Item + */ + st_select_lex* get_select_lex(); friend class select_subselect; friend class Item_in_optimizer; - friend bool Item_field::fix_fields(THD *, TABLE_LIST *, Item **); - friend bool Item_ref::fix_fields(THD *, TABLE_LIST *, Item **); + friend bool Item_field::fix_fields(THD *, Item **); + friend int Item_field::fix_outer_field(THD *, Field **, Item **); + friend bool Item_ref::fix_fields(THD *, Item **); + friend void mark_select_range_as_dependent(THD*, + st_select_lex*, st_select_lex*, + Field*, Item*, Item_ident*); }; /* single value subselect */ @@ -138,24 +159,40 @@ public: void reset(); trans_res select_transformer(JOIN *join); void store(uint i, Item* item); - double val(); + double val_real(); longlong val_int (); String *val_str (String *); + my_decimal *val_decimal(my_decimal *); + bool val_bool(); enum Item_result result_type() const; enum_field_types field_type() const; void fix_length_and_dec(); uint cols(); - Item* el(uint i) { return my_reinterpret_cast(Item*)(row[i]); } + Item* element_index(uint i) { return my_reinterpret_cast(Item*)(row[i]); } Item** addr(uint i) { return (Item**)row + i; } bool check_cols(uint c); bool null_inside(); void bring_value(); + /** + This method is used to implement a special case of semantic tree + rewriting, mandated by a SQL:2003 exception in the specification. + The only caller of this method is handle_sql2003_note184_exception(), + see the code there for more details. + Note that this method breaks the object internal integrity, by + removing it's association with the corresponding SELECT_LEX, + making this object orphan from the parse tree. + No other method, beside the destructor, should be called on this + object, as it is now invalid. + @return the SELECT_LEX structure that was given in the constructor. 
+ */ + st_select_lex* invalidate_and_restore_select_lex(); + friend class select_singlerow_subselect; }; -/* used in static ALL/ANY optimisation */ +/* used in static ALL/ANY optimization */ class select_max_min_finder_subselect; class Item_maxmin_subselect :public Item_singlerow_subselect { @@ -163,7 +200,7 @@ protected: bool max; bool was_values; // Set if we have found at least one row public: - Item_maxmin_subselect(Item_subselect *parent, + Item_maxmin_subselect(THD *thd, Item_subselect *parent, st_select_lex *select_lex, bool max); void print(String *str); void cleanup(); @@ -177,7 +214,7 @@ public: class Item_exists_subselect :public Item_subselect { protected: - longlong value; /* value of this item (boolean: exists/not-exists) */ + bool value; /* value of this item (boolean: exists/not-exists) */ public: Item_exists_subselect(st_select_lex *select_lex); @@ -191,8 +228,10 @@ public: enum Item_result result_type() const { return INT_RESULT;} longlong val_int(); - double val(); + double val_real(); String *val_str(String*); + my_decimal *val_decimal(my_decimal *); + bool val_bool(); void fix_length_and_dec(); void print(String *str); @@ -201,7 +240,20 @@ public: friend class subselect_indexsubquery_engine; }; -/* IN subselect */ + +/* + IN subselect: this represents "left_exr IN (SELECT ...)" + + This class has: + - (as a descendant of Item_subselect) a "subquery execution engine" which + allows it to evaluate subqueries. (and this class participates in + execution by having was_null variable where part of execution result + is stored. + - Transformation methods (todo: more on this). + + This class is not used directly, it is "wrapped" into Item_in_optimizer + which provides some small bits of subquery evaluation. 
+*/ class Item_in_subselect :public Item_exists_subselect { @@ -217,12 +269,22 @@ protected: bool abort_on_null; bool transformed; public: + /* Used to trigger on/off conditions that were pushed down to subselect */ + bool *pushed_cond_guards; + + bool *get_cond_guard(int i) + { + return pushed_cond_guards ? pushed_cond_guards + i : NULL; + } + void set_cond_guard_var(int i, bool v) { pushed_cond_guards[i]= v; } + bool have_guarded_conds() { return test(pushed_cond_guards); } + Item_func_not_all *upper_item; // point on NOT/NOP before ALL/SOME subquery Item_in_subselect(Item * left_expr, st_select_lex *select_lex); Item_in_subselect() :Item_exists_subselect(), optimizer(0), abort_on_null(0), transformed(0), - upper_item(0) + pushed_cond_guards(NULL), upper_item(0) {} subs_type substype() { return IN_SUBS; } @@ -237,11 +299,15 @@ public: trans_res single_value_transformer(JOIN *join, Comp_creator *func); trans_res row_value_transformer(JOIN * join); longlong val_int(); - double val(); + double val_real(); String *val_str(String*); + my_decimal *val_decimal(my_decimal *); + bool val_bool(); void top_level_item() { abort_on_null=1; } + inline bool is_top_level_item() { return abort_on_null; } bool test_limit(st_select_lex_unit *unit); void print(String *str); + bool fix_fields(THD *thd, Item **ref); friend class Item_ref_null_helper; friend class Item_is_not_null_test; @@ -290,13 +356,36 @@ public: virtual ~subselect_engine() {}; // to satisfy compiler virtual void cleanup()= 0; - // set_thd should be called before prepare() - void set_thd(THD *thd_arg) { thd= thd_arg; } + /* + Also sets "thd" for subselect_engine::result. + Should be called before prepare(). + */ + void set_thd(THD *thd_arg); THD * get_thd() { return thd; } virtual int prepare()= 0; virtual void fix_length_and_dec(Item_cache** row)= 0; + /* + Execute the engine + + SYNOPSIS + exec() + + DESCRIPTION + Execute the engine. 
The result of execution is subquery value that is + either captured by previously set up select_result-based 'sink' or + stored somewhere by the exec() method itself. + + A required side effect: If at least one pushed-down predicate is + disabled, subselect_engine->no_rows() must return correct result after + the exec() call. + + RETURN + 0 - OK + 1 - Either an execution error, or the engine was "changed", and the + caller should call exec() again for the new engine. + */ virtual int exec()= 0; - virtual uint cols()= 0; /* return number of columnss in select */ + virtual uint cols()= 0; /* return number of columns in select */ virtual uint8 uncacheable()= 0; /* query is uncacheable */ enum Item_result type() { return res_type; } enum_field_types field_type() { return res_field_type; } @@ -305,8 +394,11 @@ public: virtual table_map upper_select_const_tables()= 0; static table_map calc_const_tables(TABLE_LIST *); virtual void print(String *str)= 0; - virtual int change_item(Item_subselect *si, select_subselect *result)= 0; + virtual bool change_result(Item_subselect *si, select_subselect *result)= 0; virtual bool no_tables()= 0; + virtual bool is_executed() const { return FALSE; } + /* Check if subquery produced any rows during last query execution */ + virtual bool no_rows() = 0; protected: void set_row(List<Item> &item_list, Item_cache **row); @@ -333,9 +425,11 @@ public: void exclude(); table_map upper_select_const_tables(); void print (String *str); - int change_item(Item_subselect *si, select_subselect *result); + bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); bool may_be_null(); + bool is_executed() const { return executed; } + bool no_rows(); }; @@ -355,17 +449,44 @@ public: void exclude(); table_map upper_select_const_tables(); void print (String *str); - int change_item(Item_subselect *si, select_subselect *result); + bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); + bool is_executed() 
const; + bool no_rows(); }; struct st_join_table; + + +/* + A subquery execution engine that evaluates the subquery by doing one index + lookup in a unique index. + + This engine is used to resolve subqueries in forms + + outer_expr IN (SELECT tbl.unique_key FROM tbl WHERE subq_where) + + or, tuple-based: + + (oe1, .. oeN) IN (SELECT uniq_key_part1, ... uniq_key_partK + FROM tbl WHERE subqwhere) + + i.e. the subquery is a single table SELECT without GROUP BY, aggregate + functions, etc. +*/ + class subselect_uniquesubquery_engine: public subselect_engine { protected: st_join_table *tab; - Item *cond; + Item *cond; /* The WHERE condition of subselect */ + /* + TRUE<=> last execution produced empty set. Valid only when left + expression is NULL. + */ + bool empty_result_set; + bool null_keypart; /* TRUE <=> constructed search tuple has a NULL */ public: // constructor can assign THD because it will be called after JOIN::prepare @@ -385,23 +506,71 @@ public: void exclude(); table_map upper_select_const_tables() { return 0; } void print (String *str); - int change_item(Item_subselect *si, select_subselect *result); + bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); + int scan_table(); + bool copy_ref_key(); + bool no_rows() { return empty_result_set; } }; class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine { + /* FALSE for 'ref', TRUE for 'ref-or-null'. */ bool check_null; + /* + The "having" clause. This clause (further reffered to as "artificial + having") was inserted by subquery transformation code. It contains + Item(s) that have a side-effect: they record whether the subquery has + produced a row with NULL certain components. We need to use it for cases + like + (oe1, oe2) IN (SELECT t.key, t.no_key FROM t1) + where we do index lookup on t.key=oe1 but need also to check if there + was a row such that t.no_key IS NULL. + + NOTE: This is currently here and not in the uniquesubquery_engine. 
Ideally + it should have been in uniquesubquery_engine in order to allow execution of + subqueries like + + (oe1, oe2) IN (SELECT primary_key, non_key_maybe_null_field FROM tbl) + + We could use uniquesubquery_engine for the first component and let + Item_is_not_null_test( non_key_maybe_null_field) to handle the second. + + However, subqueries like the above are currently not handled by index + lookup-based subquery engines, the engine applicability check misses + them: it doesn't switch the engine for case of artificial having and + [eq_]ref access (only for artifical having + ref_or_null or no having). + The above example subquery is handled as a full-blown SELECT with eq_ref + access to one table. + + Due to this limitation, the "artificial having" currently needs to be + checked by only in indexsubquery_engine. + */ + Item *having; public: // constructor can assign THD because it will be called after JOIN::prepare - subselect_indexsubquery_engine(THD *thd, st_join_table *tab_arg, + subselect_indexsubquery_engine(THD *thd_arg, st_join_table *tab_arg, Item_subselect *subs, Item *where, - bool chk_null) - :subselect_uniquesubquery_engine(thd, tab_arg, subs, where), - check_null(chk_null) + Item *having_arg, bool chk_null) + :subselect_uniquesubquery_engine(thd_arg, tab_arg, subs, where), + check_null(chk_null), + having(having_arg) {} int exec(); void print (String *str); }; + + +inline bool Item_subselect::is_evaluated() const +{ + return engine->is_executed(); +} + +inline bool Item_subselect::is_uncacheable() const +{ + return engine->uncacheable(); +} + + diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 419efd07615..359b4516c3c 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
+ the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -22,9 +21,267 @@ #endif #include "mysql_priv.h" +#include "sql_select.h" -Item_sum::Item_sum(List<Item> &list) - :arg_count(list.elements) +/* + Prepare an aggregate function item for checking context conditions + + SYNOPSIS + init_sum_func_check() + thd reference to the thread context info + + DESCRIPTION + The function initializes the members of the Item_sum object created + for a set function that are used to check validity of the set function + occurrence. + If the set function is not allowed in any subquery where it occurs + an error is reported immediately. + + NOTES + This function is to be called for any item created for a set function + object when the traversal of trees built for expressions used in the query + is performed at the phase of context analysis. This function is to + be invoked at the descent of this traversal. 
+ + RETURN + TRUE if an error is reported + FALSE otherwise +*/ + +bool Item_sum::init_sum_func_check(THD *thd) +{ + if (!thd->lex->allow_sum_func) + { + my_message(ER_INVALID_GROUP_FUNC_USE, ER(ER_INVALID_GROUP_FUNC_USE), + MYF(0)); + return TRUE; + } + /* Set a reference to the nesting set function if there is any */ + in_sum_func= thd->lex->in_sum_func; + /* Save a pointer to object to be used in items for nested set functions */ + thd->lex->in_sum_func= this; + nest_level= thd->lex->current_select->nest_level; + ref_by= 0; + aggr_level= -1; + aggr_sel= NULL; + max_arg_level= -1; + max_sum_func_level= -1; + return FALSE; +} + +/* + Check constraints imposed on a usage of a set function + + SYNOPSIS + check_sum_func() + thd reference to the thread context info + ref location of the pointer to this item in the embedding expression + + DESCRIPTION + The method verifies whether context conditions imposed on a usage + of any set function are met for this occurrence. + It checks whether the set function occurs in the position where it + can be aggregated and, when it happens to occur in argument of another + set function, the method checks that these two functions are aggregated in + different subqueries. + If the context conditions are not met the method reports an error. + If the set function is aggregated in some outer subquery the method + adds it to the chain of items for such set functions that is attached + to the the st_select_lex structure for this subquery. + + NOTES + This function is to be called for any item created for a set function + object when the traversal of trees built for expressions used in the query + is performed at the phase of context analysis. This function is to + be invoked at the ascent of this traversal. + + IMPLEMENTATION + A number of designated members of the object are used to check the + conditions. They are specified in the comment before the Item_sum + class declaration. 
+ Additionally a bitmap variable called allow_sum_func is employed. + It is included into the thd->lex structure. + The bitmap contains 1 at n-th position if the set function happens + to occur under a construct of the n-th level subquery where usage + of set functions are allowed (i.e either in the SELECT list or + in the HAVING clause of the corresponding subquery) + Consider the query: + SELECT SUM(t1.b) FROM t1 GROUP BY t1.a + HAVING t1.a IN (SELECT t2.c FROM t2 WHERE AVG(t1.b) > 20) AND + t1.a > (SELECT MIN(t2.d) FROM t2); + allow_sum_func will contain: + for SUM(t1.b) - 1 at the first position + for AVG(t1.b) - 1 at the first position, 0 at the second position + for MIN(t2.d) - 1 at the first position, 1 at the second position. + + RETURN + TRUE if an error is reported + FALSE otherwise +*/ + +bool Item_sum::check_sum_func(THD *thd, Item **ref) +{ + bool invalid= FALSE; + nesting_map allow_sum_func= thd->lex->allow_sum_func; + /* + The value of max_arg_level is updated if an argument of the set function + contains a column reference resolved against a subquery whose level is + greater than the current value of max_arg_level. + max_arg_level cannot be greater than nest level. + nest level is always >= 0 + */ + if (nest_level == max_arg_level) + { + /* + The function must be aggregated in the current subquery, + If it is there under a construct where it is not allowed + we report an error. + */ + invalid= !(allow_sum_func & (1 << max_arg_level)); + } + else if (max_arg_level >= 0 || !(allow_sum_func & (1 << nest_level))) + { + /* + The set function can be aggregated only in outer subqueries. + Try to find a subquery where it can be aggregated; + If we fail to find such a subquery report an error. 
+ */ + if (register_sum_func(thd, ref)) + return TRUE; + invalid= aggr_level < 0 && !(allow_sum_func & (1 << nest_level)); + if (!invalid && thd->variables.sql_mode & MODE_ANSI) + invalid= aggr_level < 0 && max_arg_level < nest_level; + } + if (!invalid && aggr_level < 0) + { + aggr_level= nest_level; + aggr_sel= thd->lex->current_select; + } + /* + By this moment we either found a subquery where the set function is + to be aggregated and assigned a value that is >= 0 to aggr_level, + or set the value of 'invalid' to TRUE to report later an error. + */ + /* + Additionally we have to check whether possible nested set functions + are acceptable here: they are not, if the level of aggregation of + some of them is less than aggr_level. + */ + if (!invalid) + invalid= aggr_level <= max_sum_func_level; + if (invalid) + { + my_message(ER_INVALID_GROUP_FUNC_USE, ER(ER_INVALID_GROUP_FUNC_USE), + MYF(0)); + return TRUE; + } + if (in_sum_func && in_sum_func->nest_level == nest_level) + { + /* + If the set function is nested adjust the value of + max_sum_func_level for the nesting set function. + */ + set_if_bigger(in_sum_func->max_sum_func_level, aggr_level); + } + update_used_tables(); + thd->lex->in_sum_func= in_sum_func; + return FALSE; +} + +/* + Attach a set function to the subquery where it must be aggregated + + SYNOPSIS + register_sum_func() + thd reference to the thread context info + ref location of the pointer to this item in the embedding expression + + DESCRIPTION + The function looks for an outer subquery where the set function must be + aggregated. If it finds such a subquery then aggr_level is set to + the nest level of this subquery and the item for the set function + is added to the list of set functions used in nested subqueries + inner_sum_func_list defined for each subquery. When the item is placed + there the field 'ref_by' is set to ref. + + NOTES. + Now we 'register' only set functions that are aggregated in outer + subqueries. 
Actually it makes sense to link all set function for + a subquery in one chain. It would simplify the process of 'splitting' + for set functions. + + RETURN + FALSE if the executes without failures (currently always) + TRUE otherwise +*/ + +bool Item_sum::register_sum_func(THD *thd, Item **ref) +{ + SELECT_LEX *sl; + nesting_map allow_sum_func= thd->lex->allow_sum_func; + for (sl= thd->lex->current_select->master_unit()->outer_select() ; + sl && sl->nest_level > max_arg_level; + sl= sl->master_unit()->outer_select() ) + { + if (aggr_level < 0 && (allow_sum_func & (1 << sl->nest_level))) + { + /* Found the most nested subquery where the function can be aggregated */ + aggr_level= sl->nest_level; + aggr_sel= sl; + } + } + if (sl && (allow_sum_func & (1 << sl->nest_level))) + { + /* + We reached the subquery of level max_arg_level and checked + that the function can be aggregated here. + The set function will be aggregated in this subquery. + */ + aggr_level= sl->nest_level; + aggr_sel= sl; + + } + if (aggr_level >= 0) + { + ref_by= ref; + /* Add the object to the list of registered objects assigned to aggr_sel */ + if (!aggr_sel->inner_sum_func_list) + next= this; + else + { + next= aggr_sel->inner_sum_func_list->next; + aggr_sel->inner_sum_func_list->next= this; + } + aggr_sel->inner_sum_func_list= this; + aggr_sel->with_sum_func= 1; + + /* + Mark Item_subselect(s) as containing aggregate function all the way up + to aggregate function's calculation context. + Note that we must not mark the Item of calculation context itself + because with_sum_func on the calculation context st_select_lex is + already set above. + + with_sum_func being set for an Item means that this Item refers + (somewhere in it, e.g. one of its arguments if it's a function) directly + or through intermediate items to an aggregate function that is calculated + in a context "outside" of the Item (e.g. in the current or outer select). 
+ + with_sum_func being set for an st_select_lex means that this st_select_lex + has aggregate functions directly referenced (i.e. not through a sub-select). + */ + for (sl= thd->lex->current_select; + sl && sl != aggr_sel && sl->master_unit()->item; + sl= sl->master_unit()->outer_select() ) + sl->master_unit()->item->with_sum_func= 1; + } + thd->lex->current_select->mark_as_dependent(aggr_sel); + return FALSE; +} + + +Item_sum::Item_sum(List<Item> &list) :arg_count(list.elements), + forced_const(FALSE) { if ((args=(Item**) sql_alloc(sizeof(Item*)*arg_count))) { @@ -48,7 +305,10 @@ Item_sum::Item_sum(List<Item> &list) Item_sum::Item_sum(THD *thd, Item_sum *item): Item_result_field(thd, item), arg_count(item->arg_count), - quick_group(item->quick_group) + aggr_sel(item->aggr_sel), + nest_level(item->nest_level), aggr_level(item->aggr_level), + quick_group(item->quick_group), used_tables_cache(item->used_tables_cache), + forced_const(item->forced_const) { if (arg_count <= 2) args=tmp_args; @@ -61,7 +321,9 @@ Item_sum::Item_sum(THD *thd, Item_sum *item): void Item_sum::mark_as_sum_func() { - current_thd->lex->current_select->with_sum_func= 1; + SELECT_LEX *cur_select= current_thd->lex->current_select; + cur_select->n_sum_items++; + cur_select->with_sum_func= 1; with_sum_func= 1; } @@ -89,7 +351,6 @@ void Item_sum::make_field(Send_field *tmp_field) void Item_sum::print(String *str) { str->append(func_name()); - str->append('('); for (uint i=0 ; i < arg_count ; i++) { if (i) @@ -128,6 +389,7 @@ Item *Item_sum::get_tmp_table_item(THD *thd) return sum_item; } + bool Item_sum::walk (Item_processor processor, byte *argument) { if (arg_count) @@ -142,96 +404,180 @@ bool Item_sum::walk (Item_processor processor, byte *argument) return (this->*processor)(argument); } + +Field *Item_sum::create_tmp_field(bool group, TABLE *table, + uint convert_blob_length) +{ + switch (result_type()) { + case REAL_RESULT: + return new Field_double(max_length, maybe_null, name, table, decimals, 
+ TRUE); + case INT_RESULT: + return new Field_longlong(max_length,maybe_null,name,table,unsigned_flag); + case STRING_RESULT: + /* + Make sure that the blob fits into a Field_varstring which has + 2-byte lenght. + */ + if (max_length/collation.collation->mbmaxlen > 255 && + convert_blob_length < UINT_MAX16 && convert_blob_length) + return new Field_varstring(convert_blob_length, maybe_null, + name, table, + collation.collation); + return make_string_field(table); +case DECIMAL_RESULT: + return new Field_new_decimal(max_length, maybe_null, name, table, + decimals, unsigned_flag); + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + return 0; + } +} + + +void Item_sum::update_used_tables () +{ + if (!forced_const) + { + used_tables_cache= 0; + for (uint i=0 ; i < arg_count ; i++) + { + args[i]->update_used_tables(); + used_tables_cache|= args[i]->used_tables(); + } + + used_tables_cache&= PSEUDO_TABLE_BITS; + + /* the aggregate function is aggregated into its local context */ + used_tables_cache |= (1 << aggr_sel->join->tables) - 1; + } +} + + String * Item_sum_num::val_str(String *str) { - DBUG_ASSERT(fixed == 1); - double nr=val(); - if (null_value) - return 0; - str->set(nr,decimals, &my_charset_bin); - return str; + return val_string_from_real(str); +} + + +my_decimal *Item_sum_num::val_decimal(my_decimal *decimal_value) +{ + return val_decimal_from_real(decimal_value); } String * Item_sum_int::val_str(String *str) { - DBUG_ASSERT(fixed == 1); - longlong nr= val_int(); - if (null_value) - return 0; - if (unsigned_flag) - str->set((ulonglong) nr, &my_charset_bin); - else - str->set(nr, &my_charset_bin); - return str; + return val_string_from_int(str); +} + + +my_decimal *Item_sum_int::val_decimal(my_decimal *decimal_value) +{ + return val_decimal_from_int(decimal_value); } bool -Item_sum_num::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +Item_sum_num::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); - if 
(!thd->allow_sum_func) - { - my_error(ER_INVALID_GROUP_FUNC_USE,MYF(0)); - return 1; - } - thd->allow_sum_func=0; // No included group funcs + if (init_sum_func_check(thd)) + return TRUE; + decimals=0; maybe_null=0; for (uint i=0 ; i < arg_count ; i++) { - if (args[i]->fix_fields(thd, tables, args + i) || args[i]->check_cols(1)) - return 1; - if (decimals < args[i]->decimals) - decimals=args[i]->decimals; + if (args[i]->fix_fields(thd, args + i) || args[i]->check_cols(1)) + return TRUE; + set_if_bigger(decimals, args[i]->decimals); maybe_null |= args[i]->maybe_null; } result_field=0; max_length=float_length(decimals); null_value=1; fix_length_and_dec(); - thd->allow_sum_func=1; // Allow group functions + + if (check_sum_func(thd, ref)) + return TRUE; + fixed= 1; - return 0; + return FALSE; } +Item_sum_hybrid::Item_sum_hybrid(THD *thd, Item_sum_hybrid *item) + :Item_sum(thd, item), value(item->value), hybrid_type(item->hybrid_type), + hybrid_field_type(item->hybrid_field_type), cmp_sign(item->cmp_sign), + was_values(item->was_values) +{ + /* copy results from old value */ + switch (hybrid_type) { + case INT_RESULT: + sum_int= item->sum_int; + break; + case DECIMAL_RESULT: + my_decimal2decimal(&item->sum_dec, &sum_dec); + break; + case REAL_RESULT: + sum= item->sum; + break; + case STRING_RESULT: + /* + This can happen with ROLLUP. Note that the value is already + copied at function call. 
+ */ + break; + case ROW_RESULT: + default: + DBUG_ASSERT(0); + } + collation.set(item->collation); +} + bool -Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +Item_sum_hybrid::fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); Item *item= args[0]; - if (!thd->allow_sum_func) - { - my_error(ER_INVALID_GROUP_FUNC_USE,MYF(0)); - return 1; - } - thd->allow_sum_func=0; // No included group funcs + + if (init_sum_func_check(thd)) + return TRUE; // 'item' can be changed during fix_fields if (!item->fixed && - item->fix_fields(thd, tables, args) || + item->fix_fields(thd, args) || (item= args[0])->check_cols(1)) - return 1; - - hybrid_type= item->result_type(); - if (hybrid_type == INT_RESULT) - { - max_length=20; - } - else if (hybrid_type == REAL_RESULT) - { - max_length=float_length(decimals); - }else - { - max_length=item->max_length; - } + return TRUE; decimals=item->decimals; + + switch (hybrid_type= item->result_type()) { + case INT_RESULT: + max_length= 20; + sum_int= 0; + break; + case DECIMAL_RESULT: + max_length= item->max_length; + my_decimal_set_zero(&sum_dec); + break; + case REAL_RESULT: + max_length= float_length(decimals); + sum= 0.0; + break; + case STRING_RESULT: + max_length= item->max_length; + break; + case ROW_RESULT: + default: + DBUG_ASSERT(0); + }; /* MIN/MAX can return NULL for empty set indepedent of the used column */ maybe_null= 1; unsigned_flag=item->unsigned_flag; @@ -239,13 +585,47 @@ Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) result_field=0; null_value=1; fix_length_and_dec(); - thd->allow_sum_func=1; // Allow group functions if (item->type() == Item::FIELD_ITEM) hybrid_field_type= ((Item_field*) item)->field->type(); else hybrid_field_type= Item::field_type(); + + if (check_sum_func(thd, ref)) + return TRUE; + fixed= 1; - return 0; + return FALSE; +} + +Field *Item_sum_hybrid::create_tmp_field(bool group, TABLE *table, + uint convert_blob_length) +{ + if (args[0]->type() == 
Item::FIELD_ITEM) + { + Field *field= ((Item_field*) args[0])->field; + + if ((field= create_tmp_field_from_field(current_thd, field, name, table, + NULL, convert_blob_length))) + field->flags&= ~NOT_NULL_FLAG; + return field; + } + /* + DATE/TIME fields have STRING_RESULT result types. + In order to preserve field type, it's needed to handle DATE/TIME + fields creations separately. + */ + switch (args[0]->field_type()) { + case MYSQL_TYPE_DATE: + return new Field_date(maybe_null, name, table, collation.collation); + case MYSQL_TYPE_TIME: + return new Field_time(maybe_null, name, table, collation.collation); + case MYSQL_TYPE_TIMESTAMP: + case MYSQL_TYPE_DATETIME: + return new Field_datetime(maybe_null, name, table, collation.collation); + default: + break; + } + return Item_sum::create_tmp_field(group, table, convert_blob_length); } @@ -253,6 +633,20 @@ Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) ** reset and add of sum_func ***********************************************************************/ +Item_sum_sum::Item_sum_sum(THD *thd, Item_sum_sum *item) + :Item_sum_num(thd, item), hybrid_type(item->hybrid_type), + curr_dec_buff(item->curr_dec_buff) +{ + /* TODO: check if the following assignments are really needed */ + if (hybrid_type == DECIMAL_RESULT) + { + my_decimal2decimal(item->dec_buffs, dec_buffs); + my_decimal2decimal(item->dec_buffs + 1, dec_buffs + 1); + } + else + sum= item->sum; +} + Item *Item_sum_sum::copy_or_same(THD* thd) { return new (thd->mem_root) Item_sum_sum(thd, this); @@ -261,26 +655,417 @@ Item *Item_sum_sum::copy_or_same(THD* thd) void Item_sum_sum::clear() { - null_value=1; sum=0.0; + DBUG_ENTER("Item_sum_sum::clear"); + null_value=1; + if (hybrid_type == DECIMAL_RESULT) + { + curr_dec_buff= 0; + my_decimal_set_zero(dec_buffs); + } + else + sum= 0.0; + DBUG_VOID_RETURN; +} + + +void Item_sum_sum::fix_length_and_dec() +{ + DBUG_ENTER("Item_sum_sum::fix_length_and_dec"); + maybe_null=null_value=1; + decimals= 
args[0]->decimals; + switch (args[0]->result_type()) { + case REAL_RESULT: + case STRING_RESULT: + hybrid_type= REAL_RESULT; + sum= 0.0; + break; + case INT_RESULT: + case DECIMAL_RESULT: + { + /* SUM result can't be longer than length(arg) + length(MAX_ROWS) */ + int precision= args[0]->decimal_precision() + DECIMAL_LONGLONG_DIGITS; + max_length= my_decimal_precision_to_length(precision, decimals, + unsigned_flag); + curr_dec_buff= 0; + hybrid_type= DECIMAL_RESULT; + my_decimal_set_zero(dec_buffs); + break; + } + case ROW_RESULT: + default: + DBUG_ASSERT(0); + } + DBUG_PRINT("info", ("Type: %s (%d, %d)", + (hybrid_type == REAL_RESULT ? "REAL_RESULT" : + hybrid_type == DECIMAL_RESULT ? "DECIMAL_RESULT" : + hybrid_type == INT_RESULT ? "INT_RESULT" : + "--ILLEGAL!!!--"), + max_length, + (int)decimals)); + DBUG_VOID_RETURN; } bool Item_sum_sum::add() { - sum+=args[0]->val(); - if (!args[0]->null_value) - null_value= 0; - return 0; + DBUG_ENTER("Item_sum_sum::add"); + if (hybrid_type == DECIMAL_RESULT) + { + my_decimal value, *val= args[0]->val_decimal(&value); + if (!args[0]->null_value) + { + my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs + (curr_dec_buff^1), + val, dec_buffs + curr_dec_buff); + curr_dec_buff^= 1; + null_value= 0; + } + } + else + { + sum+= args[0]->val_real(); + if (!args[0]->null_value) + null_value= 0; + } + DBUG_RETURN(0); } -double Item_sum_sum::val() +longlong Item_sum_sum::val_int() { DBUG_ASSERT(fixed == 1); + if (hybrid_type == DECIMAL_RESULT) + { + longlong result; + my_decimal2int(E_DEC_FATAL_ERROR, dec_buffs + curr_dec_buff, unsigned_flag, + &result); + return result; + } + return (longlong) rint(val_real()); +} + + +double Item_sum_sum::val_real() +{ + DBUG_ASSERT(fixed == 1); + if (hybrid_type == DECIMAL_RESULT) + my_decimal2double(E_DEC_FATAL_ERROR, dec_buffs + curr_dec_buff, &sum); return sum; } +String *Item_sum_sum::val_str(String *str) +{ + if (hybrid_type == DECIMAL_RESULT) + return val_string_from_decimal(str); + return 
val_string_from_real(str); +} + + +my_decimal *Item_sum_sum::val_decimal(my_decimal *val) +{ + if (hybrid_type == DECIMAL_RESULT) + return (dec_buffs + curr_dec_buff); + return val_decimal_from_real(val); +} + +/***************************************************************************/ + +C_MODE_START + +/* Declarations for auxilary C-callbacks */ + +static int simple_raw_key_cmp(void* arg, const void* key1, const void* key2) +{ + return memcmp(key1, key2, *(uint *) arg); +} + + +static int item_sum_distinct_walk(void *element, element_count num_of_dups, + void *item) +{ + return ((Item_sum_distinct*) (item))->unique_walk_function(element); +} + +C_MODE_END + +/* Item_sum_distinct */ + +Item_sum_distinct::Item_sum_distinct(Item *item_arg) + :Item_sum_num(item_arg), tree(0) +{ + /* + quick_group is an optimizer hint, which means that GROUP BY can be + handled with help of index on grouped columns. + By setting quick_group to zero we force creation of temporary table + to perform GROUP BY. + */ + quick_group= 0; +} + + +Item_sum_distinct::Item_sum_distinct(THD *thd, Item_sum_distinct *original) + :Item_sum_num(thd, original), val(original->val), tree(0), + table_field_type(original->table_field_type) +{ + quick_group= 0; +} + + +/* + Behaves like an Integer except to fix_length_and_dec(). + Additionally div() converts val with this traits to a val with true + decimal traits along with conversion of integer value to decimal value. + This is to speedup SUM/AVG(DISTINCT) evaluation for 8-32 bit integer + values. 
+*/ +struct Hybrid_type_traits_fast_decimal: public + Hybrid_type_traits_integer +{ + virtual Item_result type() const { return DECIMAL_RESULT; } + virtual void fix_length_and_dec(Item *item, Item *arg) const + { Hybrid_type_traits_decimal::instance()->fix_length_and_dec(item, arg); } + + virtual void div(Hybrid_type *val, ulonglong u) const + { + int2my_decimal(E_DEC_FATAL_ERROR, val->integer, 0, val->dec_buf); + val->used_dec_buf_no= 0; + val->traits= Hybrid_type_traits_decimal::instance(); + val->traits->div(val, u); + } + static const Hybrid_type_traits_fast_decimal *instance(); + Hybrid_type_traits_fast_decimal() {}; +}; + +static const Hybrid_type_traits_fast_decimal fast_decimal_traits_instance; + +const Hybrid_type_traits_fast_decimal + *Hybrid_type_traits_fast_decimal::instance() +{ + return &fast_decimal_traits_instance; +} + +void Item_sum_distinct::fix_length_and_dec() +{ + DBUG_ASSERT(args[0]->fixed); + + table_field_type= args[0]->field_type(); + + /* Adjust tmp table type according to the chosen aggregation type */ + switch (args[0]->result_type()) { + case STRING_RESULT: + case REAL_RESULT: + val.traits= Hybrid_type_traits::instance(); + if (table_field_type != MYSQL_TYPE_FLOAT) + table_field_type= MYSQL_TYPE_DOUBLE; + break; + case INT_RESULT: + /* + Preserving int8, int16, int32 field types gives ~10% performance boost + as the size of result tree becomes significantly smaller. + Another speed up we gain by using longlong for intermediate + calculations. The range of int64 is enough to hold sum 2^32 distinct + integers each <= 2^32. 
+ */ + if (table_field_type == MYSQL_TYPE_INT24 || + table_field_type >= MYSQL_TYPE_TINY && + table_field_type <= MYSQL_TYPE_LONG) + { + val.traits= Hybrid_type_traits_fast_decimal::instance(); + break; + } + table_field_type= MYSQL_TYPE_LONGLONG; + /* fallthrough */ + case DECIMAL_RESULT: + val.traits= Hybrid_type_traits_decimal::instance(); + if (table_field_type != MYSQL_TYPE_LONGLONG) + table_field_type= MYSQL_TYPE_NEWDECIMAL; + break; + case ROW_RESULT: + default: + DBUG_ASSERT(0); + } + val.traits->fix_length_and_dec(this, args[0]); +} + + +bool Item_sum_distinct::setup(THD *thd) +{ + List<create_field> field_list; + create_field field_def; /* field definition */ + DBUG_ENTER("Item_sum_distinct::setup"); + DBUG_ASSERT(tree == 0); + + /* + Virtual table and the tree are created anew on each re-execution of + PS/SP. Hence all further allocations are performed in the runtime + mem_root. + */ + if (field_list.push_back(&field_def)) + return TRUE; + + null_value= maybe_null= 1; + quick_group= 0; + + DBUG_ASSERT(args[0]->fixed); + + field_def.init_for_tmp_table(table_field_type, args[0]->max_length, + args[0]->decimals, args[0]->maybe_null, + args[0]->unsigned_flag); + + if (! (table= create_virtual_tmp_table(thd, field_list))) + return TRUE; + + /* XXX: check that the case of CHAR(0) works OK */ + tree_key_length= table->s->reclength - table->s->null_bytes; + + /* + Unique handles all unique elements in a tree until they can't fit + in. Then the tree is dumped to the temporary file. We can use + simple_raw_key_cmp because the table contains numbers only; decimals + are converted to binary representation as well. 
+ */ + tree= new Unique(simple_raw_key_cmp, &tree_key_length, tree_key_length, + thd->variables.max_heap_table_size); + + is_evaluated= FALSE; + DBUG_RETURN(tree == 0); +} + + +bool Item_sum_distinct::add() +{ + args[0]->save_in_field(table->field[0], FALSE); + is_evaluated= FALSE; + if (!table->field[0]->is_null()) + { + DBUG_ASSERT(tree); + null_value= 0; + /* + '0' values are also stored in the tree. This doesn't matter + for SUM(DISTINCT), but is important for AVG(DISTINCT) + */ + return tree->unique_add(table->field[0]->ptr); + } + return 0; +} + + +bool Item_sum_distinct::unique_walk_function(void *element) +{ + memcpy(table->field[0]->ptr, element, tree_key_length); + ++count; + val.traits->add(&val, table->field[0]); + return 0; +} + + +void Item_sum_distinct::clear() +{ + DBUG_ENTER("Item_sum_distinct::clear"); + DBUG_ASSERT(tree != 0); /* we always have a tree */ + null_value= 1; + tree->reset(); + is_evaluated= FALSE; + DBUG_VOID_RETURN; +} + +void Item_sum_distinct::cleanup() +{ + Item_sum_num::cleanup(); + delete tree; + tree= 0; + table= 0; + is_evaluated= FALSE; +} + +Item_sum_distinct::~Item_sum_distinct() +{ + delete tree; + /* no need to free the table */ +} + + +void Item_sum_distinct::calculate_val_and_count() +{ + if (!is_evaluated) + { + count= 0; + val.traits->set_zero(&val); + /* + We don't have a tree only if 'setup()' hasn't been called; + this is the case of sql_select.cc:return_zero_rows. 
+ */ + if (tree) + { + table->field[0]->set_notnull(); + tree->walk(item_sum_distinct_walk, (void*) this); + } + is_evaluated= TRUE; + } +} + + +double Item_sum_distinct::val_real() +{ + calculate_val_and_count(); + return val.traits->val_real(&val); +} + + +my_decimal *Item_sum_distinct::val_decimal(my_decimal *to) +{ + calculate_val_and_count(); + if (null_value) + return 0; + return val.traits->val_decimal(&val, to); +} + + +longlong Item_sum_distinct::val_int() +{ + calculate_val_and_count(); + return val.traits->val_int(&val, unsigned_flag); +} + + +String *Item_sum_distinct::val_str(String *str) +{ + calculate_val_and_count(); + if (null_value) + return 0; + return val.traits->val_str(&val, str, decimals); +} + +/* end of Item_sum_distinct */ + +/* Item_sum_avg_distinct */ + +void +Item_sum_avg_distinct::fix_length_and_dec() +{ + Item_sum_distinct::fix_length_and_dec(); + prec_increment= current_thd->variables.div_precincrement; + /* + AVG() will divide val by count. We need to reserve digits + after decimal point as the result can be fractional. 
+ */ + decimals= min(decimals + prec_increment, NOT_FIXED_DEC); +} + + +void +Item_sum_avg_distinct::calculate_val_and_count() +{ + if (!is_evaluated) + { + Item_sum_distinct::calculate_val_and_count(); + if (count) + val.traits->div(&val, count); + is_evaluated= TRUE; + } +} + + Item *Item_sum_count::copy_or_same(THD* thd) { return new (thd->mem_root) Item_sum_count(thd, this); @@ -299,7 +1084,7 @@ bool Item_sum_count::add() count++; else { - (void) args[0]->val_int(); + args[0]->update_null_value(); if (!args[0]->null_value) count++; } @@ -316,9 +1101,8 @@ longlong Item_sum_count::val_int() void Item_sum_count::cleanup() { DBUG_ENTER("Item_sum_count::cleanup"); - clear(); + count= 0; Item_sum_int::cleanup(); - used_table_cache= ~(table_map) 0; DBUG_VOID_RETURN; } @@ -326,6 +1110,27 @@ void Item_sum_count::cleanup() /* Avgerage */ +void Item_sum_avg::fix_length_and_dec() +{ + Item_sum_sum::fix_length_and_dec(); + maybe_null=null_value=1; + prec_increment= current_thd->variables.div_precincrement; + if (hybrid_type == DECIMAL_RESULT) + { + int precision= args[0]->decimal_precision() + prec_increment; + decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); + max_length= my_decimal_precision_to_length(precision, decimals, + unsigned_flag); + f_precision= min(precision+DECIMAL_LONGLONG_DIGITS, DECIMAL_MAX_PRECISION); + f_scale= args[0]->decimals; + dec_bin_size= my_decimal_get_binary_size(f_precision, f_scale); + } + else { + decimals= min(args[0]->decimals + prec_increment, NOT_FIXED_DEC); + max_length= args[0]->max_length + prec_increment; + } +} + Item *Item_sum_avg::copy_or_same(THD* thd) { @@ -333,24 +1138,44 @@ Item *Item_sum_avg::copy_or_same(THD* thd) } +Field *Item_sum_avg::create_tmp_field(bool group, TABLE *table, + uint convert_blob_len) +{ + if (group) + { + /* + We must store both value and counter in the temporary table in one field. + The easiest way is to do this is to store both value in a string + and unpack on access. 
+ */ + return new Field_string(((hybrid_type == DECIMAL_RESULT) ? + dec_bin_size : sizeof(double)) + sizeof(longlong), + 0, name, table, &my_charset_bin); + } + if (hybrid_type == DECIMAL_RESULT) + return new Field_new_decimal(max_length, maybe_null, name, table, + decimals, unsigned_flag); + return new Field_double(max_length, maybe_null, name, table, decimals, TRUE); +} + + void Item_sum_avg::clear() { - sum=0.0; count=0; + Item_sum_sum::clear(); + count=0; } bool Item_sum_avg::add() { - double nr=args[0]->val(); + if (Item_sum_sum::add()) + return TRUE; if (!args[0]->null_value) - { - sum+=nr; count++; - } - return 0; + return FALSE; } -double Item_sum_avg::val() +double Item_sum_avg::val_real() { DBUG_ASSERT(fixed == 1); if (!count) @@ -358,8 +1183,32 @@ double Item_sum_avg::val() null_value=1; return 0.0; } - null_value=0; - return sum/ulonglong2double(count); + return Item_sum_sum::val_real() / ulonglong2double(count); +} + + +my_decimal *Item_sum_avg::val_decimal(my_decimal *val) +{ + my_decimal sum_buff, cnt; + const my_decimal *sum_dec; + DBUG_ASSERT(fixed == 1); + if (!count) + { + null_value=1; + return NULL; + } + sum_dec= Item_sum_sum::val_decimal(&sum_buff); + int2my_decimal(E_DEC_FATAL_ERROR, count, 0, &cnt); + my_decimal_div(E_DEC_FATAL_ERROR, val, sum_dec, &cnt, prec_increment); + return val; +} + + +String *Item_sum_avg::val_str(String *str) +{ + if (hybrid_type == DECIMAL_RESULT) + return val_string_from_decimal(str); + return val_string_from_real(str); } @@ -367,11 +1216,12 @@ double Item_sum_avg::val() Standard deviation */ -double Item_sum_std::val() +double Item_sum_std::val_real() { DBUG_ASSERT(fixed == 1); - double tmp= Item_sum_variance::val(); - return tmp <= 0.0 ? 
0.0 : sqrt(tmp); + double nr= Item_sum_variance::val_real(); + DBUG_ASSERT(nr >= 0.0); + return sqrt(nr); } Item *Item_sum_std::copy_or_same(THD* thd) @@ -384,110 +1234,264 @@ Item *Item_sum_std::copy_or_same(THD* thd) Variance */ + +/** + Variance implementation for floating-point implementations, without + catastrophic cancellation, from Knuth's _TAoCP_, 3rd ed, volume 2, pg232. + This alters the value at m, s, and increments count. +*/ + +/* + These two functions are used by the Item_sum_variance and the + Item_variance_field classes, which are unrelated, and each need to calculate + variance. The difference between the two classes is that the first is used + for a mundane SELECT, while the latter is used in a GROUPing SELECT. +*/ +static void variance_fp_recurrence_next(double *m, double *s, ulonglong *count, double nr) +{ + *count += 1; + + if (*count == 1) + { + *m= nr; + *s= 0; + } + else + { + double m_kminusone= *m; + *m= m_kminusone + (nr - m_kminusone) / (double) *count; + *s= *s + (nr - m_kminusone) * (nr - *m); + } +} + + +static double variance_fp_recurrence_result(double s, ulonglong count, bool is_sample_variance) +{ + if (count == 1) + return 0.0; + + if (is_sample_variance) + return s / (count - 1); + + /* else, is a population variance */ + return s / count; +} + + +Item_sum_variance::Item_sum_variance(THD *thd, Item_sum_variance *item): + Item_sum_num(thd, item), hybrid_type(item->hybrid_type), + count(item->count), sample(item->sample), + prec_increment(item->prec_increment) +{ + recurrence_m= item->recurrence_m; + recurrence_s= item->recurrence_s; +} + + +void Item_sum_variance::fix_length_and_dec() +{ + DBUG_ENTER("Item_sum_variance::fix_length_and_dec"); + maybe_null= null_value= 1; + prec_increment= current_thd->variables.div_precincrement; + + /* + According to the SQL2003 standard (Part 2, Foundations; sec 10.9, + aggregate function; paragraph 7h of Syntax Rules), "the declared + type of the result is an implementation-defined aproximate 
numeric + type. + */ + hybrid_type= REAL_RESULT; + + switch (args[0]->result_type()) { + case REAL_RESULT: + case STRING_RESULT: + decimals= min(args[0]->decimals + 4, NOT_FIXED_DEC); + break; + case INT_RESULT: + case DECIMAL_RESULT: + { + int precision= args[0]->decimal_precision()*2 + prec_increment; + decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE); + max_length= my_decimal_precision_to_length(precision, decimals, + unsigned_flag); + + break; + } + case ROW_RESULT: + default: + DBUG_ASSERT(0); + } + DBUG_PRINT("info", ("Type: REAL_RESULT (%d, %d)", max_length, (int)decimals)); + DBUG_VOID_RETURN; +} + + Item *Item_sum_variance::copy_or_same(THD* thd) { return new (thd->mem_root) Item_sum_variance(thd, this); } +/** + Create a new field to match the type of value we're expected to yield. + If we're grouping, then we need some space to serialize variables into, to + pass around. +*/ +Field *Item_sum_variance::create_tmp_field(bool group, TABLE *table, + uint convert_blob_len) +{ + if (group) + { + /* + We must store both value and counter in the temporary table in one field. + The easiest way is to do this is to store both value in a string + and unpack on access. + */ + return new Field_string(sizeof(double)*2 + sizeof(longlong), 0, name, table, &my_charset_bin); + } + return new Field_double(max_length, maybe_null, name, table, decimals, TRUE); +} + + void Item_sum_variance::clear() { - sum=sum_sqr=0.0; - count=0; + count= 0; } bool Item_sum_variance::add() { - double nr=args[0]->val(); + /* + Why use a temporary variable? We don't know if it is null until we + evaluate it, which has the side-effect of setting null_value . 
+ */ + double nr= args[0]->val_real(); + if (!args[0]->null_value) - { - sum+=nr; - sum_sqr+=nr*nr; - count++; - } + variance_fp_recurrence_next(&recurrence_m, &recurrence_s, &count, nr); return 0; } -double Item_sum_variance::val() +double Item_sum_variance::val_real() { DBUG_ASSERT(fixed == 1); - if (!count) + + /* + 'sample' is a 1/0 boolean value. If it is 1/true, id est this is a sample + variance call, then we should set nullness when the count of the items + is one or zero. If it's zero, i.e. a population variance, then we only + set nullness when the count is zero. + + Another way to read it is that 'sample' is the numerical threshhold, at and + below which a 'count' number of items is called NULL. + */ + DBUG_ASSERT((sample == 0) || (sample == 1)); + if (count <= sample) { null_value=1; return 0.0; } + null_value=0; - /* Avoid problems when the precision isn't good enough */ - double tmp=ulonglong2double(count); - double tmp2=(sum_sqr - sum*sum/tmp)/tmp; - return tmp2 <= 0.0 ? 0.0 : tmp2; + return variance_fp_recurrence_result(recurrence_s, count, sample); +} + + +my_decimal *Item_sum_variance::val_decimal(my_decimal *dec_buf) +{ + DBUG_ASSERT(fixed == 1); + return val_decimal_from_real(dec_buf); } + void Item_sum_variance::reset_field() { - double nr=args[0]->val(); - char *res=result_field->ptr; + double nr; + char *res= result_field->ptr; + + nr= args[0]->val_real(); /* sets null_value as side-effect */ if (args[0]->null_value) bzero(res,sizeof(double)*2+sizeof(longlong)); else { - float8store(res,nr); - nr*=nr; - float8store(res+sizeof(double),nr); - longlong tmp=1; - int8store(res+sizeof(double)*2,tmp); + /* Serialize format is (double)m, (double)s, (longlong)count */ + ulonglong tmp_count; + double tmp_s; + float8store(res, nr); /* recurrence variable m */ + tmp_s= 0.0; + float8store(res + sizeof(double), tmp_s); + tmp_count= 1; + int8store(res + sizeof(double)*2, tmp_count); } } + void Item_sum_variance::update_field() { - double nr,old_nr,old_sqr; 
- longlong field_count; + ulonglong field_count; char *res=result_field->ptr; - float8get(old_nr, res); - float8get(old_sqr, res+sizeof(double)); + double nr= args[0]->val_real(); /* sets null_value as side-effect */ + + if (args[0]->null_value) + return; + + /* Serialize format is (double)m, (double)s, (longlong)count */ + double field_recurrence_m, field_recurrence_s; + float8get(field_recurrence_m, res); + float8get(field_recurrence_s, res + sizeof(double)); field_count=sint8korr(res+sizeof(double)*2); - nr=args[0]->val(); - if (!args[0]->null_value) - { - old_nr+=nr; - old_sqr+=nr*nr; - field_count++; - } - float8store(res,old_nr); - float8store(res+sizeof(double),old_sqr); - int8store(res+sizeof(double)*2,field_count); + variance_fp_recurrence_next(&field_recurrence_m, &field_recurrence_s, &field_count, nr); + + float8store(res, field_recurrence_m); + float8store(res + sizeof(double), field_recurrence_s); + res+= sizeof(double)*2; + int8store(res,field_count); } + /* min & max */ void Item_sum_hybrid::clear() { - sum= 0.0; - sum_int= 0; - value.length(0); + switch (hybrid_type) { + case INT_RESULT: + sum_int= 0; + break; + case DECIMAL_RESULT: + my_decimal_set_zero(&sum_dec); + break; + case REAL_RESULT: + sum= 0.0; + break; + default: + value.length(0); + } null_value= 1; } -double Item_sum_hybrid::val() +double Item_sum_hybrid::val_real() { DBUG_ASSERT(fixed == 1); - int err; - char *end_not_used; if (null_value) return 0.0; switch (hybrid_type) { case STRING_RESULT: + { + char *end_not_used; + int err_not_used; String *res; res=val_str(&str_value); - return (res ? my_strntod(res->charset(), (char*) res->ptr(),res->length(), - &end_not_used, &err) : 0.0); + return (res ? 
my_strntod(res->charset(), (char*) res->ptr(), res->length(), + &end_not_used, &err_not_used) : 0.0); + } case INT_RESULT: if (unsigned_flag) return ulonglong2double(sum_int); return (double) sum_int; + case DECIMAL_RESULT: + my_decimal2double(E_DEC_FATAL_ERROR, &sum_dec, &sum); + return sum; case REAL_RESULT: return sum; case ROW_RESULT: @@ -496,7 +1500,6 @@ double Item_sum_hybrid::val() DBUG_ASSERT(0); return 0; } - return 0; // Keep compiler happy } longlong Item_sum_hybrid::val_int() @@ -504,9 +1507,46 @@ longlong Item_sum_hybrid::val_int() DBUG_ASSERT(fixed == 1); if (null_value) return 0; - if (hybrid_type == INT_RESULT) + switch (hybrid_type) { + case INT_RESULT: + return sum_int; + case DECIMAL_RESULT: + { + longlong result; + my_decimal2int(E_DEC_FATAL_ERROR, &sum_dec, unsigned_flag, &result); return sum_int; - return (longlong) Item_sum_hybrid::val(); + } + default: + return (longlong) rint(Item_sum_hybrid::val_real()); + } +} + + +my_decimal *Item_sum_hybrid::val_decimal(my_decimal *val) +{ + DBUG_ASSERT(fixed == 1); + if (null_value) + return 0; + switch (hybrid_type) { + case STRING_RESULT: + string2my_decimal(E_DEC_FATAL_ERROR, &value, val); + break; + case REAL_RESULT: + double2my_decimal(E_DEC_FATAL_ERROR, sum, val); + break; + case DECIMAL_RESULT: + val= &sum_dec; + break; + case INT_RESULT: + int2my_decimal(E_DEC_FATAL_ERROR, sum_int, unsigned_flag, val); + break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; + } + return val; // Keep compiler happy } @@ -522,6 +1562,9 @@ Item_sum_hybrid::val_str(String *str) case REAL_RESULT: str->set(sum,decimals, &my_charset_bin); break; + case DECIMAL_RESULT: + my_decimal2string(E_DEC_FATAL_ERROR, &sum_dec, 0, 0, 0, str); + return str; case INT_RESULT: if (unsigned_flag) str->set((ulonglong) sum_int, &my_charset_bin); @@ -542,7 +1585,7 @@ void Item_sum_hybrid::cleanup() { DBUG_ENTER("Item_sum_hybrid::cleanup"); Item_sum::cleanup(); - used_table_cache= 
~(table_map) 0; + forced_const= FALSE; /* by default it is TRUE to avoid TRUE reporting by @@ -595,9 +1638,20 @@ bool Item_sum_min::add() } } break; + case DECIMAL_RESULT: + { + my_decimal value_buff, *val= args[0]->val_decimal(&value_buff); + if (!args[0]->null_value && + (null_value || (my_decimal_cmp(&sum_dec, val) > 0))) + { + my_decimal2decimal(val, &sum_dec); + null_value= 0; + } + } + break; case REAL_RESULT: { - double nr=args[0]->val(); + double nr= args[0]->val_real(); if (!args[0]->null_value && (null_value || nr < sum)) { sum=nr; @@ -648,9 +1702,20 @@ bool Item_sum_max::add() } } break; + case DECIMAL_RESULT: + { + my_decimal value_buff, *val= args[0]->val_decimal(&value_buff); + if (!args[0]->null_value && + (null_value || (my_decimal_cmp(val, &sum_dec) > 0))) + { + my_decimal2decimal(val, &sum_dec); + null_value= 0; + } + } + break; case REAL_RESULT: { - double nr=args[0]->val(); + double nr= args[0]->val_real(); if (!args[0]->null_value && (null_value || nr > sum)) { sum=nr; @@ -730,7 +1795,7 @@ bool Item_sum_and::add() void Item_sum_num::reset_field() { - double nr=args[0]->val(); + double nr= args[0]->val_real(); char *res=result_field->ptr; if (maybe_null) @@ -749,7 +1814,8 @@ void Item_sum_num::reset_field() void Item_sum_hybrid::reset_field() { - if (hybrid_type == STRING_RESULT) + switch(hybrid_type) { + case STRING_RESULT: { char buff[MAX_FIELD_WIDTH]; String tmp(buff,sizeof(buff),result_field->charset()),*res; @@ -765,8 +1831,9 @@ void Item_sum_hybrid::reset_field() result_field->set_notnull(); result_field->store(res->ptr(),res->length(),tmp.charset()); } + break; } - else if (hybrid_type == INT_RESULT) + case INT_RESULT: { longlong nr=args[0]->val_int(); @@ -780,11 +1847,12 @@ void Item_sum_hybrid::reset_field() else result_field->set_notnull(); } - result_field->store(nr); + result_field->store(nr, unsigned_flag); + break; } - else // REAL_RESULT + case REAL_RESULT: { - double nr=args[0]->val(); + double nr= args[0]->val_real(); if 
(maybe_null) { @@ -797,14 +1865,50 @@ void Item_sum_hybrid::reset_field() result_field->set_notnull(); } result_field->store(nr); + break; + } + case DECIMAL_RESULT: + { + my_decimal value_buff, *arg_dec= args[0]->val_decimal(&value_buff); + + if (maybe_null) + { + if (args[0]->null_value) + result_field->set_null(); + else + result_field->set_notnull(); + } + /* + We must store zero in the field as we will use the field value in + add() + */ + if (!arg_dec) // Null + arg_dec= &decimal_zero; + result_field->store_decimal(arg_dec); + break; + } + case ROW_RESULT: + default: + DBUG_ASSERT(0); } } void Item_sum_sum::reset_field() { - double nr=args[0]->val(); // Nulls also return 0 - float8store(result_field->ptr,nr); + if (hybrid_type == DECIMAL_RESULT) + { + my_decimal value, *arg_val= args[0]->val_decimal(&value); + if (!arg_val) // Null + arg_val= &decimal_zero; + result_field->store_decimal(arg_val); + } + else + { + DBUG_ASSERT(hybrid_type == REAL_RESULT); + double nr= args[0]->val_real(); // Nulls also return 0 + float8store(result_field->ptr, nr); + } if (args[0]->null_value) result_field->set_null(); else @@ -821,7 +1925,7 @@ void Item_sum_count::reset_field() nr=1; else { - (void) args[0]->val_int(); + args[0]->update_null_value(); if (!args[0]->null_value) nr=1; } @@ -831,20 +1935,39 @@ void Item_sum_count::reset_field() void Item_sum_avg::reset_field() { - double nr=args[0]->val(); char *res=result_field->ptr; - - if (args[0]->null_value) - bzero(res,sizeof(double)+sizeof(longlong)); + if (hybrid_type == DECIMAL_RESULT) + { + longlong tmp; + my_decimal value, *arg_dec= args[0]->val_decimal(&value); + if (args[0]->null_value) + { + arg_dec= &decimal_zero; + tmp= 0; + } + else + tmp= 1; + my_decimal2binary(E_DEC_FATAL_ERROR, arg_dec, res, f_precision, f_scale); + res+= dec_bin_size; + int8store(res, tmp); + } else { - float8store(res,nr); - res+=sizeof(double); - longlong tmp=1; - int8store(res,tmp); + double nr= args[0]->val_real(); + + if 
(args[0]->null_value) + bzero(res,sizeof(double)+sizeof(longlong)); + else + { + longlong tmp= 1; + float8store(res,nr); + res+=sizeof(double); + int8store(res,tmp); + } } } + void Item_sum_bit::reset_field() { reset(); @@ -859,23 +1982,46 @@ void Item_sum_bit::update_field() int8store(res, bits); } + /* ** calc next value and merge it with field_value */ void Item_sum_sum::update_field() { - double old_nr,nr; - char *res=result_field->ptr; - - float8get(old_nr,res); - nr=args[0]->val(); - if (!args[0]->null_value) + if (hybrid_type == DECIMAL_RESULT) { - old_nr+=nr; - result_field->set_notnull(); + my_decimal value, *arg_val= args[0]->val_decimal(&value); + if (!args[0]->null_value) + { + if (!result_field->is_null()) + { + my_decimal field_value, + *field_val= result_field->val_decimal(&field_value); + my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs, arg_val, field_val); + result_field->store_decimal(dec_buffs); + } + else + { + result_field->store_decimal(arg_val); + result_field->set_notnull(); + } + } + } + else + { + double old_nr,nr; + char *res=result_field->ptr; + + float8get(old_nr,res); + nr= args[0]->val_real(); + if (!args[0]->null_value) + { + old_nr+=nr; + result_field->set_notnull(); + } + float8store(res,old_nr); } - float8store(res,old_nr); } @@ -889,7 +2035,7 @@ void Item_sum_count::update_field() nr++; else { - (void) args[0]->val_int(); + args[0]->update_null_value(); if (!args[0]->null_value) nr++; } @@ -899,32 +2045,59 @@ void Item_sum_count::update_field() void Item_sum_avg::update_field() { - double nr,old_nr; longlong field_count; char *res=result_field->ptr; - - float8get(old_nr,res); - field_count=sint8korr(res+sizeof(double)); - - nr=args[0]->val(); - if (!args[0]->null_value) + if (hybrid_type == DECIMAL_RESULT) { - old_nr+=nr; - field_count++; + my_decimal value, *arg_val= args[0]->val_decimal(&value); + if (!args[0]->null_value) + { + binary2my_decimal(E_DEC_FATAL_ERROR, res, + dec_buffs + 1, f_precision, f_scale); + field_count= 
sint8korr(res + dec_bin_size); + my_decimal_add(E_DEC_FATAL_ERROR, dec_buffs, arg_val, dec_buffs + 1); + my_decimal2binary(E_DEC_FATAL_ERROR, dec_buffs, + res, f_precision, f_scale); + res+= dec_bin_size; + field_count++; + int8store(res, field_count); + } + } + else + { + double nr; + + nr= args[0]->val_real(); + if (!args[0]->null_value) + { + double old_nr; + float8get(old_nr, res); + field_count= sint8korr(res + sizeof(double)); + old_nr+= nr; + float8store(res,old_nr); + res+= sizeof(double); + field_count++; + int8store(res, field_count); + } } - float8store(res,old_nr); - res+=sizeof(double); - int8store(res,field_count); } + void Item_sum_hybrid::update_field() { - if (hybrid_type == STRING_RESULT) + switch (hybrid_type) { + case STRING_RESULT: min_max_update_str_field(); - else if (hybrid_type == INT_RESULT) + break; + case INT_RESULT: min_max_update_int_field(); - else + break; + case DECIMAL_RESULT: + min_max_update_decimal_field(); + break; + default: min_max_update_real_field(); + } } @@ -951,7 +2124,7 @@ Item_sum_hybrid::min_max_update_real_field() double nr,old_nr; old_nr=result_field->val_real(); - nr=args[0]->val(); + nr= args[0]->val_real(); if (!args[0]->null_value) { if (result_field->is_null(0) || @@ -989,118 +2162,196 @@ Item_sum_hybrid::min_max_update_int_field() } else if (result_field->is_null(0)) result_field->set_null(); - result_field->store(old_nr); + result_field->store(old_nr, unsigned_flag); +} + + +void +Item_sum_hybrid::min_max_update_decimal_field() +{ + /* TODO: optimize: do not get result_field in case of args[0] is NULL */ + my_decimal old_val, nr_val; + const my_decimal *old_nr= result_field->val_decimal(&old_val); + const my_decimal *nr= args[0]->val_decimal(&nr_val); + if (!args[0]->null_value) + { + if (result_field->is_null(0)) + old_nr=nr; + else + { + bool res= my_decimal_cmp(old_nr, nr) > 0; + /* (cmp_sign > 0 && res) || (!(cmp_sign > 0) && !res) */ + if ((cmp_sign > 0) ^ (!res)) + old_nr=nr; + } + 
result_field->set_notnull(); + } + else if (result_field->is_null(0)) + result_field->set_null(); + result_field->store_decimal(old_nr); } -Item_avg_field::Item_avg_field(Item_sum_avg *item) +Item_avg_field::Item_avg_field(Item_result res_type, Item_sum_avg *item) { name=item->name; decimals=item->decimals; - max_length=item->max_length; + max_length= item->max_length; + unsigned_flag= item->unsigned_flag; field=item->result_field; maybe_null=1; + hybrid_type= res_type; + prec_increment= item->prec_increment; + if (hybrid_type == DECIMAL_RESULT) + { + f_scale= item->f_scale; + f_precision= item->f_precision; + dec_bin_size= item->dec_bin_size; + } } - -double Item_avg_field::val() +double Item_avg_field::val_real() { // fix_fields() never calls for this Item double nr; longlong count; + char *res; + + if (hybrid_type == DECIMAL_RESULT) + return val_real_from_decimal(); + float8get(nr,field->ptr); - char *res=(field->ptr+sizeof(double)); - count=sint8korr(res); + res= (field->ptr+sizeof(double)); + count= sint8korr(res); - if (!count) - { - null_value=1; + if ((null_value= !count)) return 0.0; - } - null_value=0; return nr/(double) count; } -String *Item_avg_field::val_str(String *str) + +longlong Item_avg_field::val_int() +{ + return (longlong) rint(val_real()); +} + + +my_decimal *Item_avg_field::val_decimal(my_decimal *dec_buf) { // fix_fields() never calls for this Item - double nr=Item_avg_field::val(); - if (null_value) + if (hybrid_type == REAL_RESULT) + return val_decimal_from_real(dec_buf); + + longlong count= sint8korr(field->ptr + dec_bin_size); + if ((null_value= !count)) return 0; - str->set(nr,decimals, &my_charset_bin); - return str; + + my_decimal dec_count, dec_field; + binary2my_decimal(E_DEC_FATAL_ERROR, + field->ptr, &dec_field, f_precision, f_scale); + int2my_decimal(E_DEC_FATAL_ERROR, count, 0, &dec_count); + my_decimal_div(E_DEC_FATAL_ERROR, dec_buf, + &dec_field, &dec_count, prec_increment); + return dec_buf; +} + + +String 
*Item_avg_field::val_str(String *str) +{ + // fix_fields() never calls for this Item + if (hybrid_type == DECIMAL_RESULT) + return val_string_from_decimal(str); + return val_string_from_real(str); } + Item_std_field::Item_std_field(Item_sum_std *item) : Item_variance_field(item) { } -double Item_std_field::val() + +double Item_std_field::val_real() { + double nr; // fix_fields() never calls for this Item - double tmp= Item_variance_field::val(); - return tmp <= 0.0 ? 0.0 : sqrt(tmp); + nr= Item_variance_field::val_real(); + DBUG_ASSERT(nr >= 0.0); + return sqrt(nr); } + +my_decimal *Item_std_field::val_decimal(my_decimal *dec_buf) +{ + /* + We can't call val_decimal_from_real() for DECIMAL_RESULT as + Item_variance_field::val_real() would cause an infinite loop + */ + my_decimal tmp_dec, *dec; + double nr; + if (hybrid_type == REAL_RESULT) + return val_decimal_from_real(dec_buf); + + dec= Item_variance_field::val_decimal(dec_buf); + if (!dec) + return 0; + my_decimal2double(E_DEC_FATAL_ERROR, dec, &nr); + DBUG_ASSERT(nr >= 0.0); + nr= sqrt(nr); + double2my_decimal(E_DEC_FATAL_ERROR, nr, &tmp_dec); + my_decimal_round(E_DEC_FATAL_ERROR, &tmp_dec, decimals, FALSE, dec_buf); + return dec_buf; +} + + Item_variance_field::Item_variance_field(Item_sum_variance *item) { name=item->name; decimals=item->decimals; max_length=item->max_length; + unsigned_flag= item->unsigned_flag; field=item->result_field; maybe_null=1; + sample= item->sample; + prec_increment= item->prec_increment; + if ((hybrid_type= item->hybrid_type) == DECIMAL_RESULT) + { + f_scale0= item->f_scale0; + f_precision0= item->f_precision0; + dec_bin_size0= item->dec_bin_size0; + f_scale1= item->f_scale1; + f_precision1= item->f_precision1; + dec_bin_size1= item->dec_bin_size1; + } } -double Item_variance_field::val() + +double Item_variance_field::val_real() { // fix_fields() never calls for this Item - double sum,sum_sqr; - longlong count; - float8get(sum,field->ptr); - 
float8get(sum_sqr,(field->ptr+sizeof(double))); + if (hybrid_type == DECIMAL_RESULT) + return val_real_from_decimal(); + + double recurrence_s; + ulonglong count; + float8get(recurrence_s, (field->ptr + sizeof(double))); count=sint8korr(field->ptr+sizeof(double)*2); - if (!count) - { - null_value=1; + if ((null_value= (count <= sample))) return 0.0; - } - null_value=0; - double tmp= (double) count; - double tmp2=(sum_sqr - sum*sum/tmp)/tmp; - return tmp2 <= 0.0 ? 0.0 : tmp2; -} -String *Item_variance_field::val_str(String *str) -{ - // fix_fields() never calls for this Item - double nr=val(); - if (null_value) - return 0; - str->set(nr,decimals, &my_charset_bin); - return str; + return variance_fp_recurrence_result(recurrence_s, count, sample); } + /**************************************************************************** ** COUNT(DISTINCT ...) ****************************************************************************/ -#include "sql_select.h" - -int simple_raw_key_cmp(void* arg, byte* key1, byte* key2) -{ - return memcmp(key1, key2, *(uint*) arg); -} - int simple_str_key_cmp(void* arg, byte* key1, byte* key2) { - Item_sum_count_distinct* item = (Item_sum_count_distinct*)arg; - CHARSET_INFO *cs=item->key_charset; - uint len=item->key_length; - return cs->coll->strnncollsp(cs, - (const uchar*) key1, len, - (const uchar*) key2, len); + Field *f= (Field*) arg; + return f->cmp((const char*)key1, (const char*)key2); } /* @@ -1114,13 +2365,13 @@ int composite_key_cmp(void* arg, byte* key1, byte* key2) { Item_sum_count_distinct* item = (Item_sum_count_distinct*)arg; Field **field = item->table->field; - Field **field_end= field + item->table->fields; + Field **field_end= field + item->table->s->fields; uint32 *lengths=item->field_lengths; for (; field < field_end; ++field) { Field* f = *field; int len = *lengths++; - int res = f->key_cmp(key1, key2); + int res = f->cmp((char *) key1, (char *) key2); if (res) return res; key1 += len; @@ -1129,54 +2380,43 @@ int 
composite_key_cmp(void* arg, byte* key1, byte* key2) return 0; } -/* - helper function for walking the tree when we dump it to MyISAM - - tree_walk will call it for each leaf -*/ -int dump_leaf(byte* key, uint32 count __attribute__((unused)), - Item_sum_count_distinct* item) +C_MODE_START + +static int count_distinct_walk(void *elem, element_count count, void *arg) { - byte* buf = item->table->record[0]; - int error; - /* - The first item->rec_offset bytes are taken care of with - restore_record(table,default_values) in setup() - */ - memcpy(buf + item->rec_offset, key, item->tree->size_of_element); - if ((error = item->table->file->write_row(buf))) - { - if (error != HA_ERR_FOUND_DUPP_KEY && - error != HA_ERR_FOUND_DUPP_UNIQUE) - return 1; - } + (*((ulonglong*)arg))++; return 0; } +C_MODE_END + void Item_sum_count_distinct::cleanup() { DBUG_ENTER("Item_sum_count_distinct::cleanup"); Item_sum_int::cleanup(); - /* - Free table and tree if they belong to this item (if item have not pointer - to original item from which was made copy => it own its objects ) - */ + + /* Free objects only if we own them. */ if (!original) { + /* + We need to delete the table and the tree in cleanup() as + they were allocated in the runtime memroot. Using the runtime + memroot reduces memory footprint for PS/SP and simplifies setup(). 
+ */ + delete tree; + tree= 0; + is_evaluated= FALSE; if (table) { - free_tmp_table(current_thd, table); + free_tmp_table(table->in_use, table); table= 0; } delete tmp_table_param; tmp_table_param= 0; - if (use_tree) - { - delete_tree(tree); - use_tree= 0; - } } + always_null= FALSE; DBUG_VOID_RETURN; } @@ -1187,9 +2427,17 @@ void Item_sum_count_distinct::make_unique() { table=0; original= 0; - use_tree= 0; // to prevent delete_tree call on uninitialized tree - tree= &tree_base; force_copy_fields= 1; + tree= 0; + is_evaluated= FALSE; + tmp_table_param= 0; + always_null= FALSE; +} + + +Item_sum_count_distinct::~Item_sum_count_distinct() +{ + cleanup(); } @@ -1197,160 +2445,115 @@ bool Item_sum_count_distinct::setup(THD *thd) { List<Item> list; SELECT_LEX *select_lex= thd->lex->current_select; - if (select_lex->linkage == GLOBAL_OPTIONS_TYPE) - return 1; - + + /* + Setup can be called twice for ROLLUP items. This is a bug. + Please add DBUG_ASSERT(tree == 0) here when it's fixed. + */ + if (tree || table || tmp_table_param) + return FALSE; + if (!(tmp_table_param= new TMP_TABLE_PARAM)) - return 1; + return TRUE; /* Create a table with an unique key over all parameters */ for (uint i=0; i < arg_count ; i++) { Item *item=args[i]; if (list.push_back(item)) - return 1; // End of memory + return TRUE; // End of memory if (item->const_item()) { - (void) item->val_int(); + item->update_null_value(); if (item->null_value) always_null=1; } } if (always_null) - return 0; + return FALSE; count_field_types(tmp_table_param,list,0); - if (table) - { - free_tmp_table(thd, table); - tmp_table_param->cleanup(); - } tmp_table_param->force_copy_fields= force_copy_fields; + DBUG_ASSERT(table == 0); if (!(table= create_tmp_table(thd, tmp_table_param, list, (ORDER*) 0, 1, 0, - select_lex->options | thd->options, + (select_lex->options | thd->options), HA_POS_ERROR, (char*)""))) - return 1; + return TRUE; table->file->extra(HA_EXTRA_NO_ROWS); // Don't update rows table->no_rows=1; - - // 
no blobs, otherwise it would be MyISAM - if (table->db_type == DB_TYPE_HEAP) + if (table->s->db_type == DB_TYPE_HEAP) { + /* + No blobs, otherwise it would have been MyISAM: set up a compare + function and its arguments to use with Unique. + */ qsort_cmp2 compare_key; void* cmp_arg; + Field **field= table->field; + Field **field_end= field + table->s->fields; + bool all_binary= TRUE; - // to make things easier for dump_leaf if we ever have to dump to MyISAM - restore_record(table,default_values); - - if (table->fields == 1) + for (tree_key_length= 0; field < field_end; ++field) { - /* - If we have only one field, which is the most common use of - count(distinct), it is much faster to use a simpler key - compare method that can take advantage of not having to worry - about other fields - */ - Field* field = table->field[0]; - switch(field->type()) + Field *f= *field; + enum enum_field_types f_type= f->type(); + tree_key_length+= f->pack_length(); + if ((f_type == MYSQL_TYPE_VARCHAR) || + !f->binary() && (f_type == MYSQL_TYPE_STRING || + f_type == MYSQL_TYPE_VAR_STRING)) { - case FIELD_TYPE_STRING: - case FIELD_TYPE_VAR_STRING: - if (field->binary()) - { - compare_key = (qsort_cmp2)simple_raw_key_cmp; - cmp_arg = (void*) &key_length; - } - else - { - /* - If we have a string, we must take care of charsets and case - sensitivity - */ - compare_key = (qsort_cmp2)simple_str_key_cmp; - cmp_arg = (void*) this; - } - break; - default: - /* - Since at this point we cannot have blobs anything else can - be compared with memcmp - */ - compare_key = (qsort_cmp2)simple_raw_key_cmp; - cmp_arg = (void*) &key_length; - break; + all_binary= FALSE; + break; } - key_charset = field->charset(); - key_length = field->pack_length(); - rec_offset = 1; } - else // too bad, cannot cheat - there is more than one field + if (all_binary) { - bool all_binary = 1; - Field** field, **field_end; - field_end = (field = table->field) + table->fields; - uint32 *lengths; - if (!(field_lengths= - 
(uint32*) thd->alloc(sizeof(uint32) * table->fields))) - return 1; - - for (key_length = 0, lengths=field_lengths; field < field_end; ++field) - { - uint32 length= (*field)->pack_length(); - key_length += length; - *lengths++ = length; - if (!(*field)->binary()) - all_binary = 0; // Can't break loop here - } - rec_offset = table->reclength - key_length; - if (all_binary) + cmp_arg= (void*) &tree_key_length; + compare_key= (qsort_cmp2) simple_raw_key_cmp; + } + else + { + if (table->s->fields == 1) { - compare_key = (qsort_cmp2)simple_raw_key_cmp; - cmp_arg = (void*) &key_length; + /* + If we have only one field, which is the most common use of + count(distinct), it is much faster to use a simpler key + compare method that can take advantage of not having to worry + about other fields. + */ + compare_key= (qsort_cmp2) simple_str_key_cmp; + cmp_arg= (void*) table->field[0]; + /* tree_key_length has been set already */ } else { - compare_key = (qsort_cmp2) composite_key_cmp ; - cmp_arg = (void*) this; + uint32 *length; + compare_key= (qsort_cmp2) composite_key_cmp; + cmp_arg= (void*) this; + field_lengths= (uint32*) thd->alloc(table->s->fields * sizeof(uint32)); + for (tree_key_length= 0, length= field_lengths, field= table->field; + field < field_end; ++field, ++length) + { + *length= (*field)->pack_length(); + tree_key_length+= *length; + } } } - - if (use_tree) - delete_tree(tree); - init_tree(tree, min(thd->variables.max_heap_table_size, - thd->variables.sortbuff_size/16), 0, - key_length, compare_key, 0, NULL, cmp_arg); - use_tree = 1; - + DBUG_ASSERT(tree == 0); + tree= new Unique(compare_key, cmp_arg, tree_key_length, + thd->variables.max_heap_table_size); /* - The only time key_length could be 0 is if someone does + The only time tree_key_length could be 0 is if someone does count(distinct) on a char(0) field - stupid thing to do, but this has to be handled - otherwise someone can crash the server with a DoS attack */ - max_elements_in_tree = ((key_length) ? 
- thd->variables.max_heap_table_size/key_length : 1); - + is_evaluated= FALSE; + if (! tree) + return TRUE; } - if (original) - { - original->table= table; - original->use_tree= use_tree; - } - return 0; -} - - -int Item_sum_count_distinct::tree_to_myisam() -{ - if (create_myisam_from_heap(current_thd, table, tmp_table_param, - HA_ERR_RECORD_FILE_FULL, 1) || - tree_walk(tree, (tree_walk_action)&dump_leaf, (void*)this, - left_root_right)) - return 1; - delete_tree(tree); - use_tree = 0; - return 0; + return FALSE; } @@ -1362,8 +2565,12 @@ Item *Item_sum_count_distinct::copy_or_same(THD* thd) void Item_sum_count_distinct::clear() { - if (use_tree) - reset_tree(tree); + /* tree and table can be both null only if always_null */ + is_evaluated= FALSE; + if (tree) + { + tree->reset(); + } else if (table) { table->file->extra(HA_EXTRA_NO_CACHE); @@ -1384,32 +2591,22 @@ bool Item_sum_count_distinct::add() if ((*field)->is_real_null(0)) return 0; // Don't count NULL - if (use_tree) + is_evaluated= FALSE; + if (tree) { /* - If the tree got too big, convert to MyISAM, otherwise insert into the - tree. + The first few bytes of record (at least one) are just markers + for deleted and NULLs. We want to skip them since they will + bloat the tree without providing any valuable info. Besides, + key_length used to initialize the tree didn't include space for them. 
*/ - if (tree->elements_in_tree > max_elements_in_tree) - { - if (tree_to_myisam()) - return 1; - } - else if (!tree_insert(tree, table->record[0] + rec_offset, 0, - tree->custom_arg)) - return 1; - } - else if ((error=table->file->write_row(table->record[0]))) - { - if (error != HA_ERR_FOUND_DUPP_KEY && - error != HA_ERR_FOUND_DUPP_UNIQUE) - { - if (create_myisam_from_heap(current_thd, table, tmp_table_param, error, - 1)) - return 1; // Not a table_is_full error - } + return tree->unique_add(table->record[0] + table->s->null_bytes); } - return 0; + if ((error= table->file->write_row(table->record[0])) && + error != HA_ERR_FOUND_DUPP_KEY && + error != HA_ERR_FOUND_DUPP_UNIQUE) + return TRUE; + return FALSE; } @@ -1419,8 +2616,18 @@ longlong Item_sum_count_distinct::val_int() DBUG_ASSERT(fixed == 1); if (!table) // Empty query return LL(0); - if (use_tree) - return tree->elements_in_tree; + if (tree) + { + if (is_evaluated) + return count; + + if (tree->elements == 0) + return (longlong) tree->elements_in_tree(); // everything fits in memory + count= 0; + tree->walk(count_distinct_walk, (void*) &count); + is_evaluated= TRUE; + return (longlong) count; + } error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); @@ -1433,13 +2640,6 @@ longlong Item_sum_count_distinct::val_int() } -void Item_sum_count_distinct::print(String *str) -{ - str->append("count(distinct ", 15); - args[0]->print(str); - str->append(')'); -} - /**************************************************************************** ** Functions to handle dynamic loadable aggregates ** Original source by: Alexis Mikhailov <root@medinf.chuvashia.su> @@ -1474,12 +2674,26 @@ void Item_udf_sum::cleanup() } +void Item_udf_sum::print(String *str) +{ + str->append(func_name()); + str->append('('); + for (uint i=0 ; i < arg_count ; i++) + { + if (i) + str->append(','); + args[i]->print(str); + } + str->append(')'); +} + + Item *Item_sum_udf_float::copy_or_same(THD* thd) { return new (thd->mem_root) 
Item_sum_udf_float(thd, this); } -double Item_sum_udf_float::val() +double Item_sum_udf_float::val_real() { DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_sum_udf_float::val"); @@ -1488,14 +2702,51 @@ double Item_sum_udf_float::val() DBUG_RETURN(udf.val(&null_value)); } + String *Item_sum_udf_float::val_str(String *str) { + return val_string_from_real(str); +} + + +my_decimal *Item_sum_udf_float::val_decimal(my_decimal *dec) +{ + return val_decimal_from_real(dec); +} + + +String *Item_sum_udf_decimal::val_str(String *str) +{ + return val_string_from_decimal(str); +} + + +double Item_sum_udf_decimal::val_real() +{ + return val_real_from_decimal(); +} + + +longlong Item_sum_udf_decimal::val_int() +{ + return val_int_from_decimal(); +} + + +my_decimal *Item_sum_udf_decimal::val_decimal(my_decimal *dec_buf) +{ DBUG_ASSERT(fixed == 1); - double nr=val(); - if (null_value) - return 0; /* purecov: inspected */ - str->set(nr,decimals, &my_charset_bin); - return str; + DBUG_ENTER("Item_func_udf_decimal::val_decimal"); + DBUG_PRINT("info",("result_type: %d arg_count: %d", + args[0]->result_type(), arg_count)); + + DBUG_RETURN(udf.val_decimal(&null_value, dec_buf)); +} + + +Item *Item_sum_udf_decimal::copy_or_same(THD* thd) +{ + return new (thd->mem_root) Item_sum_udf_decimal(thd, this); } @@ -1504,7 +2755,6 @@ Item *Item_sum_udf_int::copy_or_same(THD* thd) return new (thd->mem_root) Item_sum_udf_int(thd, this); } - longlong Item_sum_udf_int::val_int() { DBUG_ASSERT(fixed == 1); @@ -1517,14 +2767,15 @@ longlong Item_sum_udf_int::val_int() String *Item_sum_udf_int::val_str(String *str) { - DBUG_ASSERT(fixed == 1); - longlong nr=val_int(); - if (null_value) - return 0; - str->set(nr, &my_charset_bin); - return str; + return val_string_from_int(str); +} + +my_decimal *Item_sum_udf_int::val_decimal(my_decimal *dec) +{ + return val_decimal_from_int(dec); } + /* Default max_length is max argument length */ void Item_sum_udf_str::fix_length_and_dec() @@ -1543,6 +2794,11 @@ Item 
*Item_sum_udf_str::copy_or_same(THD* thd) } +my_decimal *Item_sum_udf_str::val_decimal(my_decimal *dec) +{ + return val_decimal_from_string(dec); +} + String *Item_sum_udf_str::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -1559,7 +2815,7 @@ String *Item_sum_udf_str::val_str(String *str) GROUP_CONCAT function SQL SYNTAX: - GROUP_CONCAT([DISTINCT] expr,... [ORDER BY col [ASC|DESC],...] + GROUP_CONCAT([DISTINCT] expr,... [ORDER BY col [ASC|DESC],...] [SEPARATOR str_const]) concat of values from "group by" operation @@ -1579,6 +2835,7 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1, byte* key2) { Item_func_group_concat* grp_item= (Item_func_group_concat*)arg; + TABLE *table= grp_item->table; Item **field_item, **end; for (field_item= grp_item->args, end= field_item + grp_item->arg_count_field; @@ -1598,8 +2855,8 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1, if (field && !(*field_item)->const_item()) { int res; - uint offset= field->offset(); - if ((res= field->key_cmp(key1 + offset, key2 + offset))) + uint offset= field->offset() - table->s->null_bytes; + if ((res= field->cmp((char *) key1 + offset, (char *) key2 + offset))) return res; } } @@ -1616,6 +2873,7 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2) { Item_func_group_concat* grp_item= (Item_func_group_concat*) arg; ORDER **order_item, **end; + TABLE *table= grp_item->table; for (order_item= grp_item->order, end=order_item+ grp_item->arg_count_order; order_item < end; @@ -1635,15 +2893,15 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2) if (field && !item->const_item()) { int res; - uint offset= field->offset(); - if ((res= field->key_cmp(key1 + offset, key2 + offset))) + uint offset= field->offset() - table->s->null_bytes; + if ((res= field->cmp((char *) key1 + offset, (char *) key2 + offset))) return (*order_item)->asc ? 
res : -res; } } /* We can't return 0 because in that case the tree class would remove this item as double value. This would cause problems for case-changes and - if the the returned values are not the same we do the sort on. + if the returned values are not the same we do the sort on. */ return 1; } @@ -1672,53 +2930,53 @@ int group_concat_key_cmp_with_distinct_and_order(void* arg,byte* key1, Append data from current leaf to item->result */ -int dump_leaf_key(byte* key, uint32 count __attribute__((unused)), +int dump_leaf_key(byte* key, element_count count __attribute__((unused)), Item_func_group_concat *item) { - char buff[MAX_FIELD_WIDTH]; - String tmp((char *)&buff,sizeof(buff),default_charset_info), tmp2; - uint old_length= item->result.length(); + TABLE *table= item->table; + String tmp((char *)table->record[1], table->s->reclength, + default_charset_info); + String tmp2; + String *result= &item->result; + Item **arg= item->args, **arg_end= item->args + item->arg_count_field; + uint old_length= result->length(); if (item->no_appended) item->no_appended= FALSE; else - item->result.append(*item->separator); + result->append(*item->separator); tmp.length(0); - - for (uint i= 0; i < item->arg_count_field; i++) + + for (; arg < arg_end; arg++) { - Item *show_item= item->args[i]; - if (!show_item->const_item()) + String *res; + if (! (*arg)->const_item()) { /* We have to use get_tmp_table_field() instead of real_item()->get_tmp_table_field() because we want the field in the temporary table, not the original field + We also can't use table->field array to access the fields + because it contains both order and arg list fields. 
*/ - Field *field= show_item->get_tmp_table_field(); - String *res; - char *save_ptr= field->ptr; - DBUG_ASSERT(field->offset() < item->table->reclength); - field->ptr= (char *) key + field->offset(); - res= field->val_str(&tmp,&tmp2); - item->result.append(*res); - field->ptr= save_ptr; - } - else - { - String *res= show_item->val_str(&tmp); - if (res) - item->result.append(*res); + Field *field= (*arg)->get_tmp_table_field(); + uint offset= field->offset() - table->s->null_bytes; + DBUG_ASSERT(offset < table->s->reclength); + res= field->val_str(&tmp, (char *) key + offset); } + else + res= (*arg)->val_str(&tmp); + if (res) + result->append(*res); } - /* stop if length of result more than group_concat_max_len */ - if (item->result.length() > item->group_concat_max_len) + /* stop if length of result more than max_length */ + if (result->length() > item->max_length) { int well_formed_error; CHARSET_INFO *cs= item->collation.collation; - const char *ptr= item->result.ptr(); + const char *ptr= result->ptr(); uint add_length; /* It's ok to use item->result.length() as the fourth argument @@ -1727,10 +2985,10 @@ int dump_leaf_key(byte* key, uint32 count __attribute__((unused)), */ add_length= cs->cset->well_formed_len(cs, ptr + old_length, - ptr + item->group_concat_max_len, - item->result.length(), + ptr + item->max_length, + result->length(), &well_formed_error); - item->result.length(old_length + add_length); + result->length(old_length + add_length); item->count_cut_values++; item->warning_for_row= TRUE; return 1; @@ -1741,37 +2999,32 @@ int dump_leaf_key(byte* key, uint32 count __attribute__((unused)), /* Constructor of Item_func_group_concat - is_distinct - distinct - is_select - list of expression for show values - is_order - list of sort columns - is_separator - string value of separator + distinct_arg - distinct + select_list - list of expression for show values + order_list - list of sort columns + separator_arg - string value of separator */ 
-Item_func_group_concat::Item_func_group_concat(bool is_distinct, - List<Item> *is_select, - SQL_LIST *is_order, - String *is_separator) - :Item_sum(), tmp_table_param(0), max_elements_in_tree(0), warning(0), - key_length(0), tree_mode(0), distinct(is_distinct), warning_for_row(0), - force_copy_fields(0), - separator(is_separator), tree(&tree_base), table(0), - order(0), tables_list(0), - arg_count_order(0), arg_count_field(0), - count_cut_values(0) +Item_func_group_concat:: +Item_func_group_concat(Name_resolution_context *context_arg, + bool distinct_arg, List<Item> *select_list, + SQL_LIST *order_list, String *separator_arg) + :tmp_table_param(0), warning(0), + separator(separator_arg), tree(0), table(0), + order(0), context(context_arg), + arg_count_order(order_list ? order_list->elements : 0), + arg_count_field(select_list->elements), + count_cut_values(0), + distinct(distinct_arg), + warning_for_row(FALSE), + force_copy_fields(0), original(0) { Item *item_select; Item **arg_ptr; - original= 0; - quick_group= 0; - mark_as_sum_func(); - order= 0; - group_concat_max_len= current_thd->variables.group_concat_max_len; - - arg_count_field= is_select->elements; - arg_count_order= is_order ? 
is_order->elements : 0; + quick_group= FALSE; arg_count= arg_count_field + arg_count_order; - + /* We need to allocate: args - arg_count_field+arg_count_order @@ -1779,23 +3032,23 @@ Item_func_group_concat::Item_func_group_concat(bool is_distinct, order - arg_count_order */ if (!(args= (Item**) sql_alloc(sizeof(Item*) * arg_count + - sizeof(ORDER*)*arg_count_order))) + sizeof(ORDER*)*arg_count_order))) return; order= (ORDER**)(args + arg_count); /* fill args items of show and sort */ - List_iterator_fast<Item> li(*is_select); + List_iterator_fast<Item> li(*select_list); for (arg_ptr=args ; (item_select= li++) ; arg_ptr++) *arg_ptr= item_select; - if (arg_count_order) + if (arg_count_order) { ORDER **order_ptr= order; - for (ORDER *order_item= (ORDER*) is_order->first; - order_item != NULL; - order_item= order_item->next) + for (ORDER *order_item= (ORDER*) order_list->first; + order_item != NULL; + order_item= order_item->next) { (*order_ptr++)= order_item; *arg_ptr= *order_item->item; @@ -1803,29 +3056,25 @@ Item_func_group_concat::Item_func_group_concat(bool is_distinct, } } } - + Item_func_group_concat::Item_func_group_concat(THD *thd, - Item_func_group_concat *item) - :Item_sum(thd, item),item_thd(thd), + Item_func_group_concat *item) + :Item_sum(thd, item), tmp_table_param(item->tmp_table_param), - max_elements_in_tree(item->max_elements_in_tree), warning(item->warning), - key_length(item->key_length), - tree_mode(item->tree_mode), - distinct(item->distinct), - warning_for_row(item->warning_for_row), - force_copy_fields(item->force_copy_fields), separator(item->separator), tree(item->tree), table(item->table), order(item->order), - tables_list(item->tables_list), - group_concat_max_len(item->group_concat_max_len), + context(item->context), arg_count_order(item->arg_count_order), arg_count_field(item->arg_count_field), - field_list_offset(item->field_list_offset), count_cut_values(item->count_cut_values), + distinct(item->distinct), + 
warning_for_row(item->warning_for_row), + always_null(item->always_null), + force_copy_fields(item->force_copy_fields), original(item) { quick_group= item->quick_group; @@ -1835,8 +3084,6 @@ Item_func_group_concat::Item_func_group_concat(THD *thd, void Item_func_group_concat::cleanup() { - THD *thd= current_thd; - DBUG_ENTER("Item_func_group_concat::cleanup"); Item_sum::cleanup(); @@ -1845,7 +3092,7 @@ void Item_func_group_concat::cleanup() { char warn_buff[MYSQL_ERRMSG_SIZE]; sprintf(warn_buff, ER(ER_CUT_VALUE_GROUP_CONCAT), count_cut_values); - warning->set_msg(thd, warn_buff); + warning->set_msg(current_thd, warn_buff); warning= 0; } @@ -1855,28 +3102,32 @@ void Item_func_group_concat::cleanup() */ if (!original) { + delete tmp_table_param; + tmp_table_param= 0; if (table) { + THD *thd= table->in_use; free_tmp_table(thd, table); table= 0; + if (tree) + { + delete_tree(tree); + tree= 0; + } + if (warning) + { + char warn_buff[MYSQL_ERRMSG_SIZE]; + sprintf(warn_buff, ER(ER_CUT_VALUE_GROUP_CONCAT), count_cut_values); + warning->set_msg(thd, warn_buff); + warning= 0; + } } - delete tmp_table_param; - tmp_table_param= 0; - if (tree_mode) - { - tree_mode= 0; - delete_tree(tree); - } + DBUG_ASSERT(tree == 0 && warning == 0); } DBUG_VOID_RETURN; } -Item_func_group_concat::~Item_func_group_concat() -{ -} - - Item *Item_func_group_concat::copy_or_same(THD* thd) { return new (thd->mem_root) Item_func_group_concat(thd, this); @@ -1890,8 +3141,9 @@ void Item_func_group_concat::clear() null_value= TRUE; warning_for_row= FALSE; no_appended= TRUE; - if (tree_mode) + if (tree) reset_tree(tree); + /* No need to reset the table as we never call write_row */ } @@ -1902,79 +3154,76 @@ bool Item_func_group_concat::add() copy_fields(tmp_table_param); copy_funcs(tmp_table_param->items_to_copy); - for (Item **arg= args, **arg_end= args + arg_count_field; - arg < arg_end; arg++) + for (uint i= 0; i < arg_count_field; i++) { - if (!(*arg)->const_item() && - 
(*arg)->get_tmp_table_field()->is_null_in_record( - (const uchar*) table->record[0])) - return 0; // Skip row if it contains null + Item *show_item= args[i]; + if (!show_item->const_item()) + { + Field *f= show_item->get_tmp_table_field(); + if (f->is_null_in_record((const uchar*) table->record[0])) + return 0; // Skip row if it contains null + } } null_value= FALSE; TREE_ELEMENT *el= 0; // Only for safety - if (tree_mode) - el= tree_insert(tree, table->record[0], 0, tree->custom_arg); + if (tree) + el= tree_insert(tree, table->record[0] + table->s->null_bytes, 0, + tree->custom_arg); /* If the row is not a duplicate (el->count == 1) we can dump the row here in case of GROUP_CONCAT(DISTINCT...) instead of doing tree traverse later. */ if (!warning_for_row && - (!tree_mode || (el->count == 1 && distinct && !arg_count_order))) - dump_leaf_key(table->record[0], 1, this); + (!tree || (el->count == 1 && distinct && !arg_count_order))) + dump_leaf_key(table->record[0] + table->s->null_bytes, 1, this); return 0; } -void Item_func_group_concat::reset_field() -{ - DBUG_ASSERT(0); -} - - bool -Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +Item_func_group_concat::fix_fields(THD *thd, Item **ref) { - uint i; /* for loop variable */ + uint i; /* for loop variable */ DBUG_ASSERT(fixed == 0); - if (!thd->allow_sum_func) - { - my_error(ER_INVALID_GROUP_FUNC_USE,MYF(0)); - return 1; - } - - thd->allow_sum_func= 0; + if (init_sum_func_check(thd)) + return TRUE; + maybe_null= 1; - item_thd= thd; /* Fix fields for select list and ORDER clause */ - for (i=0 ; i < arg_count ; i++) + for (i=0 ; i < arg_count ; i++) { - if ((!args[i]->fixed && - args[i]->fix_fields(thd, tables, args + i)) || + if ((!args[i]->fixed && + args[i]->fix_fields(thd, args + i)) || args[i]->check_cols(1)) - return 1; + return TRUE; } if (agg_item_charsets(collation, func_name(), - args, arg_count, MY_COLL_ALLOW_CONV)) + args, + /* skip charset aggregation for order columns */ + 
arg_count - arg_count_order, + MY_COLL_ALLOW_CONV, 1)) return 1; result.set_charset(collation.collation); result_field= 0; null_value= 1; - max_length= group_concat_max_len; - thd->allow_sum_func= 1; - tables_list= tables; + max_length= thd->variables.group_concat_max_len; + + if (check_sum_func(thd, ref)) + return TRUE; + fixed= 1; - return 0; + return FALSE; } @@ -1982,90 +3231,82 @@ bool Item_func_group_concat::setup(THD *thd) { List<Item> list; SELECT_LEX *select_lex= thd->lex->current_select; - uint const_fields; - byte *record; qsort_cmp2 compare_key; DBUG_ENTER("Item_func_group_concat::setup"); - if (select_lex->linkage == GLOBAL_OPTIONS_TYPE) - DBUG_RETURN(1); + /* + Currently setup() can be called twice. Please add + assertion here when this is fixed. + */ + if (table || tree) + DBUG_RETURN(FALSE); if (!(tmp_table_param= new TMP_TABLE_PARAM)) - return 1; - /* We'll convert all blobs to varchar fields in the temporary table */ - tmp_table_param->convert_blob_length= group_concat_max_len; + DBUG_RETURN(TRUE); - /* - push all not constant fields to list and create temp table - */ - const_fields= 0; + /* We'll convert all blobs to varchar fields in the temporary table */ + tmp_table_param->convert_blob_length= max_length * + collation.collation->mbmaxlen; + /* Push all not constant fields to the list and create a temp table */ always_null= 0; for (uint i= 0; i < arg_count_field; i++) { Item *item= args[i]; if (list.push_back(item)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); if (item->const_item()) { - const_fields++; - (void) item->val_int(); - if (item->null_value) - always_null= 1; + if (item->is_null()) + { + always_null= 1; + DBUG_RETURN(FALSE); + } } } - if (always_null) - DBUG_RETURN(0); - + List<Item> all_fields(list); - if (arg_count_order) - { - bool hidden_group_fields; - setup_group(thd, args, tables_list, list, all_fields, *order, - &hidden_group_fields); - } - + /* + Try to find every ORDER expression in the list of GROUP_CONCAT + arguments. 
If an expression is not found, prepend it to + "all_fields". The resulting field list is used as input to create + tmp table columns. + */ + if (arg_count_order && + setup_order(thd, args, context->table_list, list, all_fields, *order)) + DBUG_RETURN(TRUE); + count_field_types(tmp_table_param,all_fields,0); - if (table) - { - /* - We come here when we are getting the result from a temporary table, - not the original tables used in the query - */ - free_tmp_table(thd, table); - tmp_table_param->cleanup(); - } tmp_table_param->force_copy_fields= force_copy_fields; + DBUG_ASSERT(table == 0); /* - We have to create a temporary table to get descriptions of fields + We have to create a temporary table to get descriptions of fields (types, sizes and so on). Note that in the table, we first have the ORDER BY fields, then the field list. - We need to set set_sum_field in true for storing value of blob in buffer - of a record instead of a pointer of one. + We need to set set_sum_field in true for storing value of blob in buffer + of a record instead of a pointer of one. */ - if (!(table=create_tmp_table(thd, tmp_table_param, all_fields, - (ORDER*) 0, 0, TRUE, - select_lex->options | thd->options, - HA_POS_ERROR,(char *) ""))) - DBUG_RETURN(1); + if (!(table= create_tmp_table(thd, tmp_table_param, all_fields, + (ORDER*) 0, 0, TRUE, + (select_lex->options | thd->options), + HA_POS_ERROR, (char*) ""))) + DBUG_RETURN(TRUE); table->file->extra(HA_EXTRA_NO_ROWS); table->no_rows= 1; - key_length= table->reclength; - record= table->record[0]; - /* Offset to first result field in table */ - field_list_offset= table->fields - (list.elements - const_fields); - - if (tree_mode) - delete_tree(tree); - - /* choose function of sort */ - tree_mode= distinct || arg_count_order; - if (tree_mode) + if (distinct || arg_count_order) { + /* + Need sorting: init tree and choose a function to sort. 
+ Don't reserve space for NULLs: if any of gconcat arguments is NULL, + the row is not added to the result. + */ + uint tree_key_length= table->s->reclength - table->s->null_bytes; + + tree= &tree_base; if (arg_count_order) { if (distinct) @@ -2078,27 +3319,16 @@ bool Item_func_group_concat::setup(THD *thd) compare_key= (qsort_cmp2) group_concat_key_cmp_with_distinct; } /* - Create a tree of sort. Tree is used for a sort and a remove double - values (according with syntax of the function). If function doesn't - contain DISTINCT and ORDER BY clauses, we don't create this tree. + Create a tree for sorting. The tree is used to sort and to remove + duplicate values (according to the syntax of this function). If there + is no DISTINCT or ORDER BY clauses, we don't create this tree. */ - init_tree(tree, min(thd->variables.max_heap_table_size, - thd->variables.sortbuff_size/16), 0, - key_length, compare_key, 0, NULL, (void*) this); - max_elements_in_tree= (key_length ? - thd->variables.max_heap_table_size/key_length : 1); - }; - - /* - Copy table and tree_mode if they belong to this item (if item have not - pointer to original item from which was made copy => it own its objects) - */ - if (original) - { - original->table= table; - original->tree_mode= tree_mode; + init_tree(tree, (uint) min(thd->variables.max_heap_table_size, + thd->variables.sortbuff_size/16), 0, + tree_key_length, compare_key, 0, NULL, (void*) this); } - DBUG_RETURN(0); + + DBUG_RETURN(FALSE); } @@ -2106,11 +3336,11 @@ bool Item_func_group_concat::setup(THD *thd) void Item_func_group_concat::make_unique() { + tmp_table_param= 0; table=0; original= 0; - tree_mode= 0; // to prevent delete_tree call on uninitialized tree - tree= &tree_base; force_copy_fields= 1; + tree= 0; } @@ -2120,29 +3350,30 @@ String* Item_func_group_concat::val_str(String* str) if (null_value) return 0; if (count_cut_values && !warning) + { /* ER_CUT_VALUE_GROUP_CONCAT needs an argument, but this gets set in 
Item_func_group_concat::cleanup(). */ - warning= push_warning(item_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + DBUG_ASSERT(table); + warning= push_warning(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, ER_CUT_VALUE_GROUP_CONCAT, ER(ER_CUT_VALUE_GROUP_CONCAT)); + } if (result.length()) return &result; - if (tree_mode) - { + if (tree) tree_walk(tree, (tree_walk_action)&dump_leaf_key, (void*)this, left_root_right); - } return &result; } void Item_func_group_concat::print(String *str) { - str->append("group_concat(", 13); + str->append(STRING_WITH_LEN("group_concat(")); if (distinct) - str->append("distinct ", 9); + str->append(STRING_WITH_LEN("distinct ")); for (uint i= 0; i < arg_count_field; i++) { if (i) @@ -2151,15 +3382,19 @@ void Item_func_group_concat::print(String *str) } if (arg_count_order) { - str->append(" order by ", 10); + str->append(STRING_WITH_LEN(" order by ")); for (uint i= 0 ; i < arg_count_order ; i++) { if (i) - str->append(','); + str->append(','); (*order[i]->item)->print(str); + if (order[i]->asc) + str->append(STRING_WITH_LEN(" ASC")); + else + str->append(STRING_WITH_LEN(" DESC")); } } - str->append(" separator \'", 12); + str->append(STRING_WITH_LEN(" separator \'")); str->append(*separator); - str->append("\')", 2); + str->append(STRING_WITH_LEN("\')")); } diff --git a/sql/item_sum.h b/sql/item_sum.h index ea0863fc41c..66c73e1d416 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -23,32 +22,243 @@ #include <my_tree.h> +/* + Class Item_sum is the base class used for special expressions that SQL calls + 'set functions'. These expressions are formed with the help of aggregate + functions such as SUM, MAX, GROUP_CONCAT etc. + + GENERAL NOTES + + A set function cannot be used in certain positions where expressions are + accepted. There are some quite explicable restrictions for the usage of + set functions. + + In the query: + SELECT AVG(b) FROM t1 WHERE SUM(b) > 20 GROUP by a + the usage of the set function AVG(b) is legal, while the usage of SUM(b) + is illegal. A WHERE condition must contain expressions that can be + evaluated for each row of the table. Yet the expression SUM(b) can be + evaluated only for each group of rows with the same value of column a. + In the query: + SELECT AVG(b) FROM t1 WHERE c > 30 GROUP BY a HAVING SUM(b) > 20 + both set function expressions AVG(b) and SUM(b) are legal. + + We can say that in a query without nested selects an occurrence of a + set function in an expression of the SELECT list or/and in the HAVING + clause is legal, while in the WHERE clause it's illegal. + + The general rule to detect whether a set function is legal in a query with + nested subqueries is much more complicated. + + Consider the the following query: + SELECT t1.a FROM t1 GROUP BY t1.a + HAVING t1.a > ALL (SELECT t2.c FROM t2 WHERE SUM(t1.b) < t2.c). + The set function SUM(b) is used here in the WHERE clause of the subquery. + Nevertheless it is legal since it is under the HAVING clause of the query + to which this function relates. The expression SUM(t1.b) is evaluated + for each group defined in the main query, not for groups of the subquery. + + The problem of finding the query where to aggregate a particular + set function is not so simple as it seems to be. 
+ + In the query: + SELECT t1.a FROM t1 GROUP BY t1.a + HAVING t1.a > ALL(SELECT t2.c FROM t2 GROUP BY t2.c + HAVING SUM(t1.a) < t2.c) + the set function can be evaluated for both outer and inner selects. + If we evaluate SUM(t1.a) for the outer query then we get the value of t1.a + multiplied by the cardinality of a group in table t1. In this case + in each correlated subquery SUM(t1.a) is used as a constant. But we also + can evaluate SUM(t1.a) for the inner query. In this case t1.a will be a + constant for each correlated subquery and summation is performed + for each group of table t2. + (Here it makes sense to remind that the query + SELECT c FROM t GROUP BY a HAVING SUM(1) < a + is quite legal in our SQL). + + So depending on what query we assign the set function to we + can get different result sets. + + The general rule to detect the query where a set function is to be + evaluated can be formulated as follows. + Consider a set function S(E) where E is an expression with occurrences + of column references C1, ..., CN. Resolve these column references against + subqueries that contain the set function S(E). Let Q be the innermost + subquery of those subqueries. (It should be noted here that S(E) + in no way can be evaluated in the subquery embedding the subquery Q, + otherwise S(E) would refer to at least one unbound column reference) + If S(E) is used in a construct of Q where set functions are allowed then + we evaluate S(E) in Q. + Otherwise we look for a innermost subquery containing S(E) of those where + usage of S(E) is allowed. + + Let's demonstrate how this rule is applied to the following queries. + + 1. SELECT t1.a FROM t1 GROUP BY t1.a + HAVING t1.a > ALL(SELECT t2.b FROM t2 GROUP BY t2.b + HAVING t2.b > ALL(SELECT t3.c FROM t3 GROUP BY t3.c + HAVING SUM(t1.a+t2.b) < t3.c)) + For this query the set function SUM(t1.a+t2.b) depends on t1.a and t2.b + with t1.a defined in the outermost query, and t2.b defined for its + subquery. 
The set function is in the HAVING clause of the subquery and can + be evaluated in this subquery. + + 2. SELECT t1.a FROM t1 GROUP BY t1.a + HAVING t1.a > ALL(SELECT t2.b FROM t2 + WHERE t2.b > ALL (SELECT t3.c FROM t3 GROUP BY t3.c + HAVING SUM(t1.a+t2.b) < t3.c)) + Here the set function SUM(t1.a+t2.b)is in the WHERE clause of the second + subquery - the most upper subquery where t1.a and t2.b are defined. + If we evaluate the function in this subquery we violate the context rules. + So we evaluate the function in the third subquery (over table t3) where it + is used under the HAVING clause. + + 3. SELECT t1.a FROM t1 GROUP BY t1.a + HAVING t1.a > ALL(SELECT t2.b FROM t2 + WHERE t2.b > ALL (SELECT t3.c FROM t3 + WHERE SUM(t1.a+t2.b) < t3.c)) + In this query evaluation of SUM(t1.a+t2.b) is not legal neither in the second + nor in the third subqueries. So this query is invalid. + + Mostly set functions cannot be nested. In the query + SELECT t1.a from t1 GROUP BY t1.a HAVING AVG(SUM(t1.b)) > 20 + the expression SUM(b) is not acceptable, though it is under a HAVING clause. + Yet it is acceptable in the query: + SELECT t.1 FROM t1 GROUP BY t1.a HAVING SUM(t1.b) > 20. + + An argument of a set function does not have to be a reference to a table + column as we saw it in examples above. This can be a more complex expression + SELECT t1.a FROM t1 GROUP BY t1.a HAVING SUM(t1.b+1) > 20. + The expression SUM(t1.b+1) has a very clear semantics in this context: + we sum up the values of t1.b+1 where t1.b varies for all values within a + group of rows that contain the same t1.a value. + + A set function for an outer query yields a constant within a subquery. So + the semantics of the query + SELECT t1.a FROM t1 GROUP BY t1.a + HAVING t1.a IN (SELECT t2.c FROM t2 GROUP BY t2.c + HAVING AVG(t2.c+SUM(t1.b)) > 20) + is still clear. For a group of the rows with the same t1.a values we + calculate the value of SUM(t1.b). 
This value 's' is substituted in the + the subquery: + SELECT t2.c FROM t2 GROUP BY t2.c HAVING AVG(t2.c+s) + than returns some result set. + + By the same reason the following query with a subquery + SELECT t1.a FROM t1 GROUP BY t1.a + HAVING t1.a IN (SELECT t2.c FROM t2 GROUP BY t2.c + HAVING AVG(SUM(t1.b)) > 20) + is also acceptable. + + IMPLEMENTATION NOTES + + Three methods were added to the class to check the constraints specified + in the previous section. These methods utilize several new members. + + The field 'nest_level' contains the number of the level for the subquery + containing the set function. The main SELECT is of level 0, its subqueries + are of levels 1, the subqueries of the latter are of level 2 and so on. + + The field 'aggr_level' is to contain the nest level of the subquery + where the set function is aggregated. + + The field 'max_arg_level' is for the maximun of the nest levels of the + unbound column references occurred in the set function. A column reference + is unbound within a set function if it is not bound by any subquery + used as a subexpression in this function. A column reference is bound by + a subquery if it is a reference to the column by which the aggregation + of some set function that is used in the subquery is calculated. + For the set function used in the query + SELECT t1.a FROM t1 GROUP BY t1.a + HAVING t1.a > ALL(SELECT t2.b FROM t2 GROUP BY t2.b + HAVING t2.b > ALL(SELECT t3.c FROM t3 GROUP BY t3.c + HAVING SUM(t1.a+t2.b) < t3.c)) + the value of max_arg_level is equal to 1 since t1.a is bound in the main + query, and t2.b is bound by the first subquery whose nest level is 1. + Obviously a set function cannot be aggregated in the subquery whose + nest level is less than max_arg_level. (Yet it can be aggregated in the + subqueries whose nest level is greater than max_arg_level.) 
+ In the query + SELECT t.a FROM t1 HAVING AVG(t1.a+(SELECT MIN(t2.c) FROM t2)) + the value of the max_arg_level for the AVG set function is 0 since + the reference t2.c is bound in the subquery. + + The field 'max_sum_func_level' is to contain the maximum of the + nest levels of the set functions that are used as subexpressions of + the arguments of the given set function, but not aggregated in any + subquery within this set function. A nested set function s1 can be + used within set function s0 only if s1.max_sum_func_level < + s0.max_sum_func_level. Set function s1 is considered as nested + for set function s0 if s1 is not calculated in any subquery + within s0. + + A set function that is used as a subexpression in an argument of another + set function refers to the latter via the field 'in_sum_func'. + + The condition imposed on the usage of set functions are checked when + we traverse query subexpressions with the help of the recursive method + fix_fields. When we apply this method to an object of the class + Item_sum, first, on the descent, we call the method init_sum_func_check + that initialize members used at checking. Then, on the ascent, we + call the method check_sum_func that validates the set function usage + and reports an error if it is illegal. + The method register_sum_func serves to link the items for the set functions + that are aggregated in the embedding (sub)queries. Circular chains of such + functions are attached to the corresponding st_select_lex structures + through the field inner_sum_func_list. + + Exploiting the fact that the members mentioned above are used in one + recursive function we could have allocated them on the thread stack. + Yet we don't do it now. + + We assume that the nesting level of subquries does not exceed 127. + TODO: to catch queries where the limit is exceeded to make the + code clean here. 
+ +*/ + +class st_select_lex; + class Item_sum :public Item_result_field { public: enum Sumfunctype - { COUNT_FUNC,COUNT_DISTINCT_FUNC,SUM_FUNC,AVG_FUNC,MIN_FUNC, - MAX_FUNC, UNIQUE_USERS_FUNC,STD_FUNC,VARIANCE_FUNC,SUM_BIT_FUNC, - UDF_SUM_FUNC, GROUP_CONCAT_FUNC + { COUNT_FUNC, COUNT_DISTINCT_FUNC, SUM_FUNC, SUM_DISTINCT_FUNC, AVG_FUNC, + AVG_DISTINCT_FUNC, MIN_FUNC, MAX_FUNC, UNIQUE_USERS_FUNC, STD_FUNC, + VARIANCE_FUNC, SUM_BIT_FUNC, UDF_SUM_FUNC, GROUP_CONCAT_FUNC }; Item **args, *tmp_args[2]; + Item **ref_by; /* pointer to a ref to the object used to register it */ + Item_sum *next; /* next in the circular chain of registered objects */ uint arg_count; + Item_sum *in_sum_func; /* embedding set function if any */ + st_select_lex * aggr_sel; /* select where the function is aggregated */ + int8 nest_level; /* number of the nesting level of the set function */ + int8 aggr_level; /* nesting level of the aggregating subquery */ + int8 max_arg_level; /* max level of unbound column references */ + int8 max_sum_func_level;/* max level of aggregation for embedded functions */ bool quick_group; /* If incremental update of fields */ +protected: + table_map used_tables_cache; + bool forced_const; + +public: + void mark_as_sum_func(); - Item_sum() :arg_count(0), quick_group(1) + Item_sum() :arg_count(0), quick_group(1), forced_const(FALSE) { mark_as_sum_func(); } - Item_sum(Item *a) - :args(tmp_args), arg_count(1), quick_group(1) + Item_sum(Item *a) :args(tmp_args), arg_count(1), quick_group(1), + forced_const(FALSE) { args[0]=a; mark_as_sum_func(); } - Item_sum( Item *a, Item *b ) - :args(tmp_args), arg_count(2), quick_group(1) + Item_sum( Item *a, Item *b ) :args(tmp_args), arg_count(2), quick_group(1), + forced_const(FALSE) { args[0]=a; args[1]=b; mark_as_sum_func(); @@ -87,6 +297,9 @@ public: a temporary table. Similar to reset(), but must also store value in result_field. Like reset() it is supposed to reset start value to default. 
+ This set of methods (reult_field(), reset_field, update_field()) of + Item_sum is used only if quick_group is not null. Otherwise + copy_or_same() is used to obtain a copy of this item. */ virtual void reset_field()=0; /* @@ -97,13 +310,38 @@ public: virtual void update_field()=0; virtual bool keep_field_type(void) const { return 0; } virtual void fix_length_and_dec() { maybe_null=1; null_value=1; } - virtual const char *func_name() const { return "?"; } + /* + This method is used for debug purposes to print the name of an + item to the debug log. The second use of this method is as + a helper function of print(), where it is applicable. + To suit both goals it should return a meaningful, + distinguishable and sintactically correct string. This method + should not be used for runtime type identification, use enum + {Sum}Functype and Item_func::functype()/Item_sum::sum_func() + instead. + + NOTE: for Items inherited from Item_sum, func_name() return part of + function name till first argument (including '(') to make difference in + names for functions with 'distinct' clause and without 'distinct' and + also to make printing of items inherited from Item_sum uniform. 
+ */ + virtual const char *func_name() const= 0; virtual Item *result_item(Field *field) - { return new Item_field(field);} - table_map used_tables() const { return ~(table_map) 0; } /* Not used */ - bool const_item() const { return 0; } + { return new Item_field(field); } + table_map used_tables() const { return used_tables_cache; } + void update_used_tables (); + void cleanup() + { + Item::cleanup(); + forced_const= FALSE; + } bool is_null() { return null_value; } - void update_used_tables() { } + void make_const () + { + used_tables_cache= 0; + forced_const= TRUE; + } + virtual bool const_item() const { return forced_const; } void make_field(Send_field *field); void print(String *str); void fix_num_length_and_dec(); @@ -121,23 +359,44 @@ public: virtual bool setup(THD *thd) {return 0;} virtual void make_unique() {} Item *get_tmp_table_item(THD *thd); - + virtual Field *create_tmp_field(bool group, TABLE *table, + uint convert_blob_length); bool walk (Item_processor processor, byte *argument); + bool init_sum_func_check(THD *thd); + bool check_sum_func(THD *thd, Item **ref); + bool register_sum_func(THD *thd, Item **ref); + st_select_lex *depended_from() + { return (nest_level == aggr_level ? 0 : aggr_sel); } }; class Item_sum_num :public Item_sum { +protected: + /* + val_xxx() functions may be called several times during the execution of a + query. Derived classes that require extensive calculation in val_xxx() + maintain cache of aggregate value. This variable governs the validity of + that cache. 
+ */ + bool is_evaluated; public: - Item_sum_num() :Item_sum() {} - Item_sum_num(Item *item_par) :Item_sum(item_par) {} - Item_sum_num(Item *a, Item* b) :Item_sum(a,b) {} - Item_sum_num(List<Item> &list) :Item_sum(list) {} - Item_sum_num(THD *thd, Item_sum_num *item) :Item_sum(thd, item) {} - bool fix_fields(THD *, TABLE_LIST *, Item **); + Item_sum_num() :Item_sum(),is_evaluated(FALSE) {} + Item_sum_num(Item *item_par) + :Item_sum(item_par), is_evaluated(FALSE) {} + Item_sum_num(Item *a, Item* b) :Item_sum(a,b),is_evaluated(FALSE) {} + Item_sum_num(List<Item> &list) + :Item_sum(list), is_evaluated(FALSE) {} + Item_sum_num(THD *thd, Item_sum_num *item) + :Item_sum(thd, item),is_evaluated(item->is_evaluated) {} + bool fix_fields(THD *, Item **); longlong val_int() - { DBUG_ASSERT(fixed == 1); return (longlong) val(); } /* Real as default */ + { + DBUG_ASSERT(fixed == 1); + return (longlong) rint(val_real()); /* Real as default */ + } String *val_str(String*str); + my_decimal *val_decimal(my_decimal *); void reset_field(); }; @@ -148,8 +407,9 @@ public: Item_sum_int(Item *item_par) :Item_sum_num(item_par) {} Item_sum_int(List<Item> &list) :Item_sum_num(list) {} Item_sum_int(THD *thd, Item_sum_int *item) :Item_sum_num(thd, item) {} - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } String *val_str(String*str); + my_decimal *val_decimal(my_decimal *); enum Item_result result_type () const { return INT_RESULT; } void fix_length_and_dec() { decimals=0; max_length=21; maybe_null=null_value=0; } @@ -158,50 +418,140 @@ public: class Item_sum_sum :public Item_sum_num { +protected: + Item_result hybrid_type; double sum; - void fix_length_and_dec() { maybe_null=null_value=1; } + my_decimal dec_buffs[2]; + uint curr_dec_buff; + void fix_length_and_dec(); - public: - Item_sum_sum(Item *item_par) :Item_sum_num(item_par),sum(0.0) {} - Item_sum_sum(THD *thd, Item_sum_sum *item) - 
:Item_sum_num(thd, item), sum(item->sum) {} +public: + Item_sum_sum(Item *item_par) :Item_sum_num(item_par) {} + Item_sum_sum(THD *thd, Item_sum_sum *item); enum Sumfunctype sum_func () const {return SUM_FUNC;} void clear(); bool add(); - double val(); + double val_real(); + longlong val_int(); + String *val_str(String*str); + my_decimal *val_decimal(my_decimal *); + enum Item_result result_type () const { return hybrid_type; } void reset_field(); void update_field(); void no_rows_in_result() {} - const char *func_name() const { return "sum"; } + const char *func_name() const { return "sum("; } Item *copy_or_same(THD* thd); }; + +/* Common class for SUM(DISTINCT), AVG(DISTINCT) */ + +class Unique; + +class Item_sum_distinct :public Item_sum_num +{ +protected: + /* storage for the summation result */ + ulonglong count; + Hybrid_type val; + /* storage for unique elements */ + Unique *tree; + TABLE *table; + enum enum_field_types table_field_type; + uint tree_key_length; +protected: + Item_sum_distinct(THD *thd, Item_sum_distinct *item); +public: + Item_sum_distinct(Item *item_par); + ~Item_sum_distinct(); + + bool setup(THD *thd); + void clear(); + void cleanup(); + bool add(); + double val_real(); + my_decimal *val_decimal(my_decimal *); + longlong val_int(); + String *val_str(String *str); + + /* XXX: does it need make_unique? */ + + enum Sumfunctype sum_func () const { return SUM_DISTINCT_FUNC; } + void reset_field() {} // not used + void update_field() {} // not used + virtual void no_rows_in_result() {} + void fix_length_and_dec(); + enum Item_result result_type () const { return val.traits->type(); } + virtual void calculate_val_and_count(); + virtual bool unique_walk_function(void *elem); +}; + + +/* + Item_sum_sum_distinct - implementation of SUM(DISTINCT expr). + See also: MySQL manual, chapter 'Adding New Functions To MySQL' + and comments in item_sum.cc. 
+*/ + +class Item_sum_sum_distinct :public Item_sum_distinct +{ +private: + Item_sum_sum_distinct(THD *thd, Item_sum_sum_distinct *item) + :Item_sum_distinct(thd, item) {} +public: + Item_sum_sum_distinct(Item *item_arg) :Item_sum_distinct(item_arg) {} + + enum Sumfunctype sum_func () const { return SUM_DISTINCT_FUNC; } + const char *func_name() const { return "sum(distinct "; } + Item *copy_or_same(THD* thd) { return new Item_sum_sum_distinct(thd, this); } +}; + + +/* Item_sum_avg_distinct - SELECT AVG(DISTINCT expr) FROM ... */ + +class Item_sum_avg_distinct: public Item_sum_distinct +{ +private: + Item_sum_avg_distinct(THD *thd, Item_sum_avg_distinct *original) + :Item_sum_distinct(thd, original) {} +public: + uint prec_increment; + Item_sum_avg_distinct(Item *item_arg) : Item_sum_distinct(item_arg) {} + + void fix_length_and_dec(); + virtual void calculate_val_and_count(); + enum Sumfunctype sum_func () const { return AVG_DISTINCT_FUNC; } + const char *func_name() const { return "avg(distinct "; } + Item *copy_or_same(THD* thd) { return new Item_sum_avg_distinct(thd, this); } +}; + + class Item_sum_count :public Item_sum_int { longlong count; - table_map used_table_cache; public: Item_sum_count(Item *item_par) - :Item_sum_int(item_par),count(0),used_table_cache(~(table_map) 0) + :Item_sum_int(item_par),count(0) {} Item_sum_count(THD *thd, Item_sum_count *item) - :Item_sum_int(thd, item), count(item->count), - used_table_cache(item->used_table_cache) + :Item_sum_int(thd, item), count(item->count) {} - table_map used_tables() const { return used_table_cache; } - bool const_item() const { return !used_table_cache; } enum Sumfunctype sum_func () const { return COUNT_FUNC; } void clear(); void no_rows_in_result() { count=0; } bool add(); - void make_const(longlong count_arg) { count=count_arg; used_table_cache=0; } + void make_const(longlong count_arg) + { + count=count_arg; + Item_sum::make_const(); + } longlong val_int(); void reset_field(); void cleanup(); void 
update_field(); - const char *func_name() const { return "count"; } + const char *func_name() const { return "count("; } Item *copy_or_same(THD* thd); }; @@ -211,82 +561,64 @@ class TMP_TABLE_PARAM; class Item_sum_count_distinct :public Item_sum_int { TABLE *table; - table_map used_table_cache; uint32 *field_lengths; TMP_TABLE_PARAM *tmp_table_param; - TREE tree_base; - TREE *tree; bool force_copy_fields; /* - Following is 0 normal object and pointer to original one for copy - (to correctly free resources) + If there are no blobs, we can use a tree, which + is faster than heap table. In that case, we still use the table + to help get things set up, but we insert nothing in it */ - Item_sum_count_distinct *original; - - uint key_length; - CHARSET_INFO *key_charset; - + Unique *tree; /* - Calculated based on max_heap_table_size. If reached, - walk the tree and dump it into MyISAM table + Storage for the value of count between calls to val_int() so val_int() + will not recalculate on each call. Validitiy of the value is stored in + is_evaluated. */ - uint max_elements_in_tree; - + longlong count; /* - The first few bytes of record ( at least one) - are just markers for deleted and NULLs. We want to skip them since - they will just bloat the tree without providing any valuable info + Following is 0 normal object and pointer to original one for copy + (to correctly free resources) */ - int rec_offset; + Item_sum_count_distinct *original; + uint tree_key_length; + - /* - If there are no blobs, we can use a tree, which - is faster than heap table. 
In that case, we still use the table - to help get things set up, but we insert nothing in it - */ - bool use_tree; bool always_null; // Set to 1 if the result is always NULL - int tree_to_myisam(); friend int composite_key_cmp(void* arg, byte* key1, byte* key2); friend int simple_str_key_cmp(void* arg, byte* key1, byte* key2); - friend int simple_raw_key_cmp(void* arg, byte* key1, byte* key2); - friend int dump_leaf(byte* key, uint32 count __attribute__((unused)), - Item_sum_count_distinct* item); - public: +public: Item_sum_count_distinct(List<Item> &list) - :Item_sum_int(list), table(0), used_table_cache(~(table_map) 0), - tmp_table_param(0), tree(&tree_base), force_copy_fields(0), original(0), - use_tree(0), always_null(0) + :Item_sum_int(list), table(0), field_lengths(0), tmp_table_param(0), + force_copy_fields(0), tree(0), count(0), + original(0), always_null(FALSE) { quick_group= 0; } Item_sum_count_distinct(THD *thd, Item_sum_count_distinct *item) :Item_sum_int(thd, item), table(item->table), - used_table_cache(item->used_table_cache), field_lengths(item->field_lengths), tmp_table_param(item->tmp_table_param), - tree(item->tree), force_copy_fields(item->force_copy_fields), - original(item), key_length(item->key_length), - max_elements_in_tree(item->max_elements_in_tree), - rec_offset(item->rec_offset), use_tree(item->use_tree), + force_copy_fields(0), tree(item->tree), count(item->count), + original(item), tree_key_length(item->tree_key_length), always_null(item->always_null) {} + ~Item_sum_count_distinct(); + void cleanup(); - table_map used_tables() const { return used_table_cache; } enum Sumfunctype sum_func () const { return COUNT_DISTINCT_FUNC; } void clear(); bool add(); longlong val_int(); void reset_field() { return ;} // Never called void update_field() { return ; } // Never called - const char *func_name() const { return "count_distinct"; } + const char *func_name() const { return "count(distinct "; } bool setup(THD *thd); void make_unique(); Item 
*copy_or_same(THD* thd); void no_rows_in_result() {} - void print(String *str); }; @@ -298,47 +630,59 @@ class Item_avg_field :public Item_result_field { public: Field *field; - Item_avg_field(Item_sum_avg *item); + Item_result hybrid_type; + uint f_precision, f_scale, dec_bin_size; + uint prec_increment; + Item_avg_field(Item_result res_type, Item_sum_avg *item); enum Type type() const { return FIELD_AVG_ITEM; } - double val(); - longlong val_int() { /* can't be fix_fields()ed */ return (longlong) val(); } - bool is_null() { (void) val_int(); return null_value; } + double val_real(); + longlong val_int(); + my_decimal *val_decimal(my_decimal *); + bool is_null() { update_null_value(); return null_value; } String *val_str(String*); - enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } + enum_field_types field_type() const + { + return hybrid_type == DECIMAL_RESULT ? + MYSQL_TYPE_NEWDECIMAL : MYSQL_TYPE_DOUBLE; + } void fix_length_and_dec() {} + enum Item_result result_type () const { return hybrid_type; } }; -class Item_sum_avg :public Item_sum_num +class Item_sum_avg :public Item_sum_sum { - void fix_length_and_dec() - { - decimals=min(decimals+4, NOT_FIXED_DEC); - maybe_null=1; - } - - double sum; +public: ulonglong count; + uint prec_increment; + uint f_precision, f_scale, dec_bin_size; - public: - Item_sum_avg(Item *item_par) :Item_sum_num(item_par), sum(0.0), count(0) {} + Item_sum_avg(Item *item_par) :Item_sum_sum(item_par), count(0) {} Item_sum_avg(THD *thd, Item_sum_avg *item) - :Item_sum_num(thd, item), sum(item->sum), count(item->count) {} + :Item_sum_sum(thd, item), count(item->count), + prec_increment(item->prec_increment) {} + + void fix_length_and_dec(); enum Sumfunctype sum_func () const {return AVG_FUNC;} void clear(); bool add(); - double val(); + double val_real(); + // In SPs we might force the "wrong" type with select into a declare variable + longlong val_int() { return (longlong) rint(val_real()); } + my_decimal 
*val_decimal(my_decimal *); + String *val_str(String *str); void reset_field(); void update_field(); Item *result_item(Field *field) - { return new Item_avg_field(this); } + { return new Item_avg_field(hybrid_type, this); } void no_rows_in_result() {} - const char *func_name() const { return "avg"; } + const char *func_name() const { return "avg("; } Item *copy_or_same(THD* thd); + Field *create_tmp_field(bool group, TABLE *table, uint convert_blob_length); void cleanup() { - clear(); - Item_sum_num::cleanup(); + count= 0; + Item_sum_sum::cleanup(); } }; @@ -348,14 +692,29 @@ class Item_variance_field :public Item_result_field { public: Field *field; + Item_result hybrid_type; + uint f_precision0, f_scale0; + uint f_precision1, f_scale1; + uint dec_bin_size0, dec_bin_size1; + uint sample; + uint prec_increment; Item_variance_field(Item_sum_variance *item); enum Type type() const {return FIELD_VARIANCE_ITEM; } - double val(); - longlong val_int() { /* can't be fix_fields()ed */ return (longlong) val(); } - String *val_str(String*); - bool is_null() { (void) val_int(); return null_value; } - enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } + double val_real(); + longlong val_int() + { /* can't be fix_fields()ed */ return (longlong) rint(val_real()); } + String *val_str(String *str) + { return val_string_from_real(str); } + my_decimal *val_decimal(my_decimal *dec_buf) + { return val_decimal_from_real(dec_buf); } + bool is_null() { update_null_value(); return null_value; } + enum_field_types field_type() const + { + return hybrid_type == DECIMAL_RESULT ? 
+ MYSQL_TYPE_NEWDECIMAL : MYSQL_TYPE_DOUBLE; + } void fix_length_and_dec() {} + enum Item_result result_type () const { return hybrid_type; } }; @@ -369,37 +728,53 @@ public: = (sum(ai^2) - 2*sum(a)*sum(a)/count(a) + count(a)*sum(a)^2/count(a)^2 )/count(a) = = (sum(ai^2) - 2*sum(a)^2/count(a) + sum(a)^2/count(a) )/count(a) = = (sum(ai^2) - sum(a)^2/count(a))/count(a) + +But, this falls prey to catastrophic cancellation. Instead, use the recurrence formulas + + M_{1} = x_{1}, ~ M_{k} = M_{k-1} + (x_{k} - M_{k-1}) / k newline + S_{1} = 0, ~ S_{k} = S_{k-1} + (x_{k} - M_{k-1}) times (x_{k} - M_{k}) newline + for 2 <= k <= n newline + ital variance = S_{n} / (n-1) + */ class Item_sum_variance : public Item_sum_num { - double sum, sum_sqr; - ulonglong count; - void fix_length_and_dec() - { - decimals=min(decimals+4, NOT_FIXED_DEC); - maybe_null=1; - } + void fix_length_and_dec(); - public: - Item_sum_variance(Item *item_par) :Item_sum_num(item_par),count(0) {} - Item_sum_variance(THD *thd, Item_sum_variance *item): - Item_sum_num(thd, item), sum(item->sum), sum_sqr(item->sum_sqr), - count(item->count) {} +public: + Item_result hybrid_type; + int cur_dec; + double recurrence_m, recurrence_s; /* Used in recurrence relation. 
*/ + ulonglong count; + uint f_precision0, f_scale0; + uint f_precision1, f_scale1; + uint dec_bin_size0, dec_bin_size1; + uint sample; + uint prec_increment; + + Item_sum_variance(Item *item_par, uint sample_arg) :Item_sum_num(item_par), + hybrid_type(REAL_RESULT), count(0), sample(sample_arg) + {} + Item_sum_variance(THD *thd, Item_sum_variance *item); enum Sumfunctype sum_func () const { return VARIANCE_FUNC; } void clear(); bool add(); - double val(); + double val_real(); + my_decimal *val_decimal(my_decimal *); void reset_field(); void update_field(); Item *result_item(Field *field) { return new Item_variance_field(this); } void no_rows_in_result() {} - const char *func_name() const { return "variance"; } + const char *func_name() const + { return sample ? "var_samp(" : "variance("; } Item *copy_or_same(THD* thd); + Field *create_tmp_field(bool group, TABLE *table, uint convert_blob_length); + enum Item_result result_type () const { return REAL_RESULT; } void cleanup() { - clear(); + count= 0; Item_sum_num::cleanup(); } }; @@ -411,7 +786,10 @@ class Item_std_field :public Item_variance_field public: Item_std_field(Item_sum_std *item); enum Type type() const { return FIELD_STD_ITEM; } - double val(); + double val_real(); + my_decimal *val_decimal(my_decimal *); + enum Item_result result_type () const { return REAL_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE;} }; /* @@ -421,55 +799,49 @@ public: class Item_sum_std :public Item_sum_variance { public: - Item_sum_std(Item *item_par) :Item_sum_variance(item_par) {} + Item_sum_std(Item *item_par, uint sample_arg) + :Item_sum_variance(item_par, sample_arg) {} Item_sum_std(THD *thd, Item_sum_std *item) :Item_sum_variance(thd, item) {} enum Sumfunctype sum_func () const { return STD_FUNC; } - double val(); + double val_real(); Item *result_item(Field *field) { return new Item_std_field(this); } - const char *func_name() const { return "std"; } + const char *func_name() const { return 
"std("; } Item *copy_or_same(THD* thd); + enum Item_result result_type () const { return REAL_RESULT; } + enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE;} }; // This class is a string or number function depending on num_func class Item_sum_hybrid :public Item_sum { - protected: +protected: String value,tmp_value; double sum; longlong sum_int; + my_decimal sum_dec; Item_result hybrid_type; enum_field_types hybrid_field_type; int cmp_sign; - table_map used_table_cache; bool was_values; // Set if we have found at least one row (for max/min only) public: Item_sum_hybrid(Item *item_par,int sign) :Item_sum(item_par), sum(0.0), sum_int(0), hybrid_type(INT_RESULT), hybrid_field_type(FIELD_TYPE_LONGLONG), - cmp_sign(sign), used_table_cache(~(table_map) 0), was_values(TRUE) + cmp_sign(sign), was_values(TRUE) { collation.set(&my_charset_bin); } - Item_sum_hybrid(THD *thd, Item_sum_hybrid *item): - Item_sum(thd, item), value(item->value), - sum(item->sum), sum_int(item->sum_int), hybrid_type(item->hybrid_type), - hybrid_field_type(item->hybrid_field_type),cmp_sign(item->cmp_sign), - used_table_cache(item->used_table_cache), - was_values(TRUE) - { collation.set(item->collation); } - bool fix_fields(THD *, TABLE_LIST *, Item **); - table_map used_tables() const { return used_table_cache; } - bool const_item() const { return !used_table_cache; } - + Item_sum_hybrid(THD *thd, Item_sum_hybrid *item); + bool fix_fields(THD *, Item **); void clear(); - double val(); + double val_real(); longlong val_int(); + my_decimal *val_decimal(my_decimal *); void reset_field(); String *val_str(String *); - void make_const() { used_table_cache=0; } bool keep_field_type(void) const { return 1; } enum Item_result result_type () const { return hybrid_type; } enum enum_field_types field_type() const { return hybrid_field_type; } @@ -477,9 +849,12 @@ class Item_sum_hybrid :public Item_sum void min_max_update_str_field(); void min_max_update_real_field(); void 
min_max_update_int_field(); + void min_max_update_decimal_field(); void cleanup(); bool any_value() { return was_values; } void no_rows_in_result(); + Field *create_tmp_field(bool group, TABLE *table, + uint convert_blob_length); }; @@ -491,7 +866,7 @@ public: enum Sumfunctype sum_func () const {return MIN_FUNC;} bool add(); - const char *func_name() const { return "min"; } + const char *func_name() const { return "min("; } Item *copy_or_same(THD* thd); }; @@ -504,7 +879,7 @@ public: enum Sumfunctype sum_func () const {return MAX_FUNC;} bool add(); - const char *func_name() const { return "max"; } + const char *func_name() const { return "max("; } Item *copy_or_same(THD* thd); }; @@ -525,10 +900,10 @@ public: void reset_field(); void update_field(); void fix_length_and_dec() - { decimals=0; max_length=21; unsigned_flag=1; maybe_null=null_value=0; } + { decimals= 0; max_length=21; unsigned_flag= 1; maybe_null= null_value= 0; } void cleanup() { - clear(); + bits= reset_bits; Item_sum_int::cleanup(); } }; @@ -540,7 +915,7 @@ public: Item_sum_or(Item *item_par) :Item_sum_bit(item_par,LL(0)) {} Item_sum_or(THD *thd, Item_sum_or *item) :Item_sum_bit(thd, item) {} bool add(); - const char *func_name() const { return "bit_or"; } + const char *func_name() const { return "bit_or("; } Item *copy_or_same(THD* thd); }; @@ -551,7 +926,7 @@ class Item_sum_and :public Item_sum_bit Item_sum_and(Item *item_par) :Item_sum_bit(item_par, ULONGLONG_MAX) {} Item_sum_and(THD *thd, Item_sum_and *item) :Item_sum_bit(thd, item) {} bool add(); - const char *func_name() const { return "bit_and"; } + const char *func_name() const { return "bit_and("; } Item *copy_or_same(THD* thd); }; @@ -561,7 +936,7 @@ class Item_sum_xor :public Item_sum_bit Item_sum_xor(Item *item_par) :Item_sum_bit(item_par,LL(0)) {} Item_sum_xor(THD *thd, Item_sum_xor *item) :Item_sum_bit(thd, item) {} bool add(); - const char *func_name() const { return "bit_xor"; } + const char *func_name() const { return "bit_xor("; } 
Item *copy_or_same(THD* thd); }; @@ -578,18 +953,21 @@ protected: udf_handler udf; public: - Item_udf_sum(udf_func *udf_arg) :Item_sum(), udf(udf_arg) { quick_group=0;} - Item_udf_sum( udf_func *udf_arg, List<Item> &list ) - :Item_sum( list ), udf(udf_arg) + Item_udf_sum(udf_func *udf_arg) + :Item_sum(), udf(udf_arg) + { quick_group=0; } + Item_udf_sum(udf_func *udf_arg, List<Item> &list) + :Item_sum(list), udf(udf_arg) { quick_group=0;} Item_udf_sum(THD *thd, Item_udf_sum *item) - :Item_sum(thd, item), udf(item->udf) { udf.not_original= TRUE; } + :Item_sum(thd, item), udf(item->udf) + { udf.not_original= TRUE; } const char *func_name() const { return udf.name(); } - bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) + bool fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); fixed= 1; - return udf.fix_fields(thd,tables,this,this->arg_count,this->args); + return udf.fix_fields(thd, this, this->arg_count, this->args); } enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } virtual bool have_field_update(void) const { return 0; } @@ -599,21 +977,27 @@ public: void reset_field() {}; void update_field() {}; void cleanup(); + void print(String *str); }; class Item_sum_udf_float :public Item_udf_sum { public: - Item_sum_udf_float(udf_func *udf_arg) :Item_udf_sum(udf_arg) {} + Item_sum_udf_float(udf_func *udf_arg) + :Item_udf_sum(udf_arg) {} Item_sum_udf_float(udf_func *udf_arg, List<Item> &list) - :Item_udf_sum(udf_arg,list) {} + :Item_udf_sum(udf_arg, list) {} Item_sum_udf_float(THD *thd, Item_sum_udf_float *item) :Item_udf_sum(thd, item) {} longlong val_int() - { DBUG_ASSERT(fixed == 1); return (longlong) Item_sum_udf_float::val(); } - double val(); + { + DBUG_ASSERT(fixed == 1); + return (longlong) rint(Item_sum_udf_float::val_real()); + } + double val_real(); String *val_str(String*str); + my_decimal *val_decimal(my_decimal *); void fix_length_and_dec() { fix_num_length_and_dec(); } Item *copy_or_same(THD* thd); }; @@ -622,15 +1006,17 @@ class 
Item_sum_udf_float :public Item_udf_sum class Item_sum_udf_int :public Item_udf_sum { public: - Item_sum_udf_int(udf_func *udf_arg) :Item_udf_sum(udf_arg) {} + Item_sum_udf_int(udf_func *udf_arg) + :Item_udf_sum(udf_arg) {} Item_sum_udf_int(udf_func *udf_arg, List<Item> &list) - :Item_udf_sum(udf_arg,list) {} + :Item_udf_sum(udf_arg, list) {} Item_sum_udf_int(THD *thd, Item_sum_udf_int *item) :Item_udf_sum(thd, item) {} longlong val_int(); - double val() + double val_real() { DBUG_ASSERT(fixed == 1); return (double) Item_sum_udf_int::val_int(); } String *val_str(String*str); + my_decimal *val_decimal(my_decimal *); enum Item_result result_type () const { return INT_RESULT; } void fix_length_and_dec() { decimals=0; max_length=21; } Item *copy_or_same(THD* thd); @@ -640,43 +1026,72 @@ public: class Item_sum_udf_str :public Item_udf_sum { public: - Item_sum_udf_str(udf_func *udf_arg) :Item_udf_sum(udf_arg) {} + Item_sum_udf_str(udf_func *udf_arg) + :Item_udf_sum(udf_arg) {} Item_sum_udf_str(udf_func *udf_arg, List<Item> &list) :Item_udf_sum(udf_arg,list) {} Item_sum_udf_str(THD *thd, Item_sum_udf_str *item) :Item_udf_sum(thd, item) {} String *val_str(String *); - double val() + double val_real() { - int err; + int err_not_used; char *end_not_used; String *res; res=val_str(&str_value); return res ? my_strntod(res->charset(),(char*) res->ptr(),res->length(), - &end_not_used, &err) : 0.0; + &end_not_used, &err_not_used) : 0.0; } longlong val_int() { - int err; - String *res; res=val_str(&str_value); - return res ? 
my_strntoll(res->charset(),res->ptr(),res->length(),10, (char**) 0, &err) : (longlong) 0; + int err_not_used; + char *end; + String *res; + CHARSET_INFO *cs; + + if (!(res= val_str(&str_value))) + return 0; /* Null value */ + cs= res->charset(); + end= (char*) res->ptr()+res->length(); + return cs->cset->strtoll10(cs, res->ptr(), &end, &err_not_used); } + my_decimal *val_decimal(my_decimal *dec); enum Item_result result_type () const { return STRING_RESULT; } void fix_length_and_dec(); Item *copy_or_same(THD* thd); }; + +class Item_sum_udf_decimal :public Item_udf_sum +{ +public: + Item_sum_udf_decimal(udf_func *udf_arg) + :Item_udf_sum(udf_arg) {} + Item_sum_udf_decimal(udf_func *udf_arg, List<Item> &list) + :Item_udf_sum(udf_arg, list) {} + Item_sum_udf_decimal(THD *thd, Item_sum_udf_decimal *item) + :Item_udf_sum(thd, item) {} + String *val_str(String *); + double val_real(); + longlong val_int(); + my_decimal *val_decimal(my_decimal *); + enum Item_result result_type () const { return DECIMAL_RESULT; } + void fix_length_and_dec() { fix_num_length_and_dec(); } + Item *copy_or_same(THD* thd); +}; + #else /* Dummy functions to get sql_yacc.cc compiled */ class Item_sum_udf_float :public Item_sum_num { public: - Item_sum_udf_float(udf_func *udf_arg) :Item_sum_num() {} + Item_sum_udf_float(udf_func *udf_arg) + :Item_sum_num() {} Item_sum_udf_float(udf_func *udf_arg, List<Item> &list) :Item_sum_num() {} Item_sum_udf_float(THD *thd, Item_sum_udf_float *item) :Item_sum_num(thd, item) {} enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } - double val() { DBUG_ASSERT(fixed == 1); return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } void clear() {} bool add() { return 0; } void update_field() {} @@ -686,29 +1101,50 @@ class Item_sum_udf_float :public Item_sum_num class Item_sum_udf_int :public Item_sum_num { public: - Item_sum_udf_int(udf_func *udf_arg) :Item_sum_num() {} + Item_sum_udf_int(udf_func *udf_arg) + :Item_sum_num() {} 
Item_sum_udf_int(udf_func *udf_arg, List<Item> &list) :Item_sum_num() {} Item_sum_udf_int(THD *thd, Item_sum_udf_int *item) :Item_sum_num(thd, item) {} enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } longlong val_int() { DBUG_ASSERT(fixed == 1); return 0; } - double val() { DBUG_ASSERT(fixed == 1); return 0; } + double val_real() { DBUG_ASSERT(fixed == 1); return 0; } void clear() {} bool add() { return 0; } void update_field() {} }; +class Item_sum_udf_decimal :public Item_sum_num +{ + public: + Item_sum_udf_decimal(udf_func *udf_arg) + :Item_sum_num() {} + Item_sum_udf_decimal(udf_func *udf_arg, List<Item> &list) + :Item_sum_num() {} + Item_sum_udf_decimal(THD *thd, Item_sum_udf_float *item) + :Item_sum_num(thd, item) {} + enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } + double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } + my_decimal *val_decimal(my_decimal *) { DBUG_ASSERT(fixed == 1); return 0; } + void clear() {} + bool add() { return 0; } + void update_field() {} +}; + + class Item_sum_udf_str :public Item_sum_num { public: - Item_sum_udf_str(udf_func *udf_arg) :Item_sum_num() {} - Item_sum_udf_str(udf_func *udf_arg, List<Item> &list) :Item_sum_num() {} + Item_sum_udf_str(udf_func *udf_arg) + :Item_sum_num() {} + Item_sum_udf_str(udf_func *udf_arg, List<Item> &list) + :Item_sum_num() {} Item_sum_udf_str(THD *thd, Item_sum_udf_str *item) :Item_sum_num(thd, item) {} String *val_str(String *) { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } - double val() { DBUG_ASSERT(fixed == 1); null_value=1; return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); null_value=1; return 0.0; } longlong val_int() { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } enum Item_result result_type () const { return STRING_RESULT; } void fix_length_and_dec() { maybe_null=1; max_length=0; } @@ -724,16 +1160,28 @@ class MYSQL_ERROR; class Item_func_group_concat : public Item_sum { - THD *item_thd; TMP_TABLE_PARAM *tmp_table_param; - uint 
max_elements_in_tree; MYSQL_ERROR *warning; - uint key_length; - bool tree_mode; + String result; + String *separator; + TREE tree_base; + TREE *tree; + TABLE *table; + ORDER **order; + Name_resolution_context *context; + uint arg_count_order; // total count of ORDER BY items + uint arg_count_field; // count of arguments + uint count_cut_values; bool distinct; bool warning_for_row; bool always_null; bool force_copy_fields; + bool no_appended; + /* + Following is 0 normal object and pointer to original one for copy + (to correctly free resources) + */ + Item_func_group_concat *original; friend int group_concat_key_cmp_with_distinct(void* arg, byte* key1, byte* key2); @@ -742,34 +1190,17 @@ class Item_func_group_concat : public Item_sum friend int group_concat_key_cmp_with_distinct_and_order(void* arg, byte* key1, byte* key2); - friend int dump_leaf_key(byte* key, uint32 count __attribute__((unused)), + friend int dump_leaf_key(byte* key, + element_count count __attribute__((unused)), Item_func_group_concat *group_concat_item); - public: - String result; - String *separator; - TREE tree_base; - TREE *tree; - TABLE *table; - ORDER **order; - TABLE_LIST *tables_list; - ulong group_concat_max_len; - uint arg_count_order; - uint arg_count_field; - uint field_list_offset; - uint count_cut_values; - bool no_appended; - /* - Following is 0 normal object and pointer to original one for copy - (to correctly free resources) - */ - Item_func_group_concat *original; - - Item_func_group_concat(bool is_distinct,List<Item> *is_select, - SQL_LIST *is_order,String *is_separator); - +public: + Item_func_group_concat(Name_resolution_context *context_arg, + bool is_distinct, List<Item> *is_select, + SQL_LIST *is_order, String *is_separator); + Item_func_group_concat(THD *thd, Item_func_group_concat *item); - ~Item_func_group_concat(); + ~Item_func_group_concat() {} void cleanup(); enum Sumfunctype sum_func () const {return GROUP_CONCAT_FUNC;} @@ -777,19 +1208,19 @@ class 
Item_func_group_concat : public Item_sum virtual Item_result result_type () const { return STRING_RESULT; } enum_field_types field_type() const { - if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB) + if (max_length/collation.collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB ) return FIELD_TYPE_BLOB; else - return MYSQL_TYPE_VAR_STRING; + return MYSQL_TYPE_VARCHAR; } void clear(); bool add(); - void reset_field(); - bool fix_fields(THD *, TABLE_LIST *, Item **); + void reset_field() { DBUG_ASSERT(0); } // not used + void update_field() { DBUG_ASSERT(0); } // not used + bool fix_fields(THD *,Item **); bool setup(THD *thd); void make_unique(); - virtual void update_field() {} - double val() + double val_real() { String *res; res=val_str(&str_value); return res ? my_atof(res->c_ptr()) : 0.0; @@ -804,8 +1235,14 @@ class Item_func_group_concat : public Item_sum end_ptr= (char*) res->ptr()+ res->length(); return my_strtoll10(res->ptr(), &end_ptr, &error); } + my_decimal *val_decimal(my_decimal *decimal_value) + { + return val_decimal_from_string(decimal_value); + } String* val_str(String* str); Item *copy_or_same(THD* thd); void no_rows_in_result() {} void print(String *str); + virtual bool change_context_processor(byte *cntx) + { context= (Name_resolution_context *)cntx; return FALSE; } }; diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 4bd3d68b9c1..683cd8803d6 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -110,7 +109,6 @@ static bool make_datetime_with_warn(date_time_format_types format, TIME *ltime, String *str) { int warning= 0; - bool rc; if (make_datetime(format, ltime, str)) return 1; @@ -120,7 +118,7 @@ static bool make_datetime_with_warn(date_time_format_types format, TIME *ltime, return 0; make_truncated_value_warning(current_thd, str->ptr(), str->length(), - MYSQL_TIMESTAMP_TIME); + MYSQL_TIMESTAMP_TIME, NullS); return make_datetime(format, ltime, str); } @@ -146,7 +144,7 @@ static bool make_time_with_warn(const DATE_TIME_FORMAT *format, if (warning) { make_truncated_value_warning(current_thd, str->ptr(), str->length(), - MYSQL_TIMESTAMP_TIME); + MYSQL_TIMESTAMP_TIME, NullS); make_time(format, l_time, str); } @@ -207,7 +205,8 @@ overflow: char buf[22]; int len= (int)(longlong10_to_str(seconds, buf, unsigned_flag ? 10 : -10) - buf); - make_truncated_value_warning(current_thd, buf, len, MYSQL_TIMESTAMP_TIME); + make_truncated_value_warning(current_thd, buf, len, MYSQL_TIMESTAMP_TIME, + NullS); return 1; } @@ -260,7 +259,8 @@ static DATE_TIME_FORMAT time_24hrs_format= {{0}, '\0', 0, static bool extract_date_time(DATE_TIME_FORMAT *format, const char *val, uint length, TIME *l_time, timestamp_type cached_timestamp_type, - const char **sub_pattern_end) + const char **sub_pattern_end, + const char *date_time_type) { int weekday= 0, yearday= 0, daypart= 0; int week_number= -1; @@ -288,7 +288,6 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, for (; ptr != end && val != val_end; ptr++) { - /* Skip pre-space between each argument */ while (val != val_end && my_isspace(cs, *val)) val++; @@ -298,6 +297,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, int val_len; char *tmp; + error= 0; + val_len= (uint) (val_end - val); switch (*++ptr) { /* Year */ @@ -341,7 +342,7 @@ static bool extract_date_time(DATE_TIME_FORMAT 
*format, tmp= (char*) val + min(2, val_len); l_time->day= (int) my_strtoll10(val, &tmp, &error); /* Skip 'st, 'nd, 'th .. */ - val= tmp + min((int) (end-tmp), 2); + val= tmp + min((int) (val_end-tmp), 2); break; /* Hour */ @@ -450,16 +451,22 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, /* Time in AM/PM notation */ case 'r': - error= extract_date_time(&time_ampm_format, val, - (uint)(val_end - val), l_time, - cached_timestamp_type, &val); + /* + We can't just set error here, as we don't want to generate two + warnings in case of errors + */ + if (extract_date_time(&time_ampm_format, val, + (uint)(val_end - val), l_time, + cached_timestamp_type, &val, "time")) + DBUG_RETURN(1); break; /* Time in 24-hour notation */ case 'T': - error= extract_date_time(&time_24hrs_format, val, - (uint)(val_end - val), l_time, - cached_timestamp_type, &val); + if (extract_date_time(&time_24hrs_format, val, + (uint)(val_end - val), l_time, + cached_timestamp_type, &val, "time")) + DBUG_RETURN(1); break; /* Conversion specifiers that match classes of characters */ @@ -570,7 +577,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, if (!my_isspace(&my_charset_latin1,*val)) { make_truncated_value_warning(current_thd, val_begin, length, - cached_timestamp_type); + cached_timestamp_type, NullS); break; } } while (++val != val_end); @@ -578,6 +585,13 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, DBUG_RETURN(0); err: + { + char buff[128]; + strmake(buff, val_begin, min(length, sizeof(buff)-1)); + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_WRONG_VALUE_FOR_TYPE, ER(ER_WRONG_VALUE_FOR_TYPE), + date_time_type, buff, "str_to_time"); + } DBUG_RETURN(1); } @@ -590,7 +604,6 @@ bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time, timestamp_type type, String *str) { char intbuff[15]; - uint days_i; uint hours_i; uint weekday; ulong length; @@ -601,7 +614,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time, str->length(0); if 
(l_time->neg) - str->append("-", 1); + str->append('-'); end= (ptr= format->format.str) + format->format.length; for (; ptr != end ; ptr++) @@ -649,21 +662,21 @@ bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time, length= int10_to_str(l_time->day, intbuff, 10) - intbuff; str->append_with_prefill(intbuff, length, 1, '0'); if (l_time->day >= 10 && l_time->day <= 19) - str->append("th", 2); + str->append(STRING_WITH_LEN("th")); else { switch (l_time->day %10) { case 1: - str->append("st",2); + str->append(STRING_WITH_LEN("st")); break; case 2: - str->append("nd",2); + str->append(STRING_WITH_LEN("nd")); break; case 3: - str->append("rd",2); + str->append(STRING_WITH_LEN("rd")); break; default: - str->append("th",2); + str->append(STRING_WITH_LEN("th")); break; } } @@ -702,8 +715,7 @@ bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time, break; case 'h': case 'I': - days_i= l_time->hour/24; - hours_i= (l_time->hour%24 + 11)%12+1 + 24*days_i; + hours_i= (l_time->hour%24 + 11)%12+1; length= int10_to_str(hours_i, intbuff, 10) - intbuff; str->append_with_prefill(intbuff, length, 2, '0'); break; @@ -724,7 +736,6 @@ bool make_date_time(DATE_TIME_FORMAT *format, TIME *l_time, str->append_with_prefill(intbuff, length, 1, '0'); break; case 'l': - days_i= l_time->hour/24; hours_i= (l_time->hour%24 + 11)%12+1; length= int10_to_str(hours_i, intbuff, 10) - intbuff; str->append_with_prefill(intbuff, length, 1, '0'); @@ -986,7 +997,7 @@ longlong Item_func_to_days::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day); } @@ -995,7 +1006,7 @@ longlong Item_func_dayofyear::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime,TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day) - calc_daynr(ltime.year,1,1) + 1; @@ -1005,7 +1016,7 @@ longlong 
Item_func_dayofmonth::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ltime.day; } @@ -1013,7 +1024,7 @@ longlong Item_func_month::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ltime.month; } @@ -1043,7 +1054,8 @@ longlong Item_func_quarter::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + if (get_arg0_date(<ime, TIME_FUZZY_DATE)) + return 0; return (longlong) ((ltime.month+2)/3); } @@ -1115,7 +1127,7 @@ longlong Item_func_week::val_int() DBUG_ASSERT(fixed == 1); uint year; TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_week(<ime, week_mode((uint) args[1]->val_int()), @@ -1128,7 +1140,7 @@ longlong Item_func_yearweek::val_int() DBUG_ASSERT(fixed == 1); uint year,week; TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) return 0; week= calc_week(<ime, (week_mode((uint) args[1]->val_int()) | WEEK_YEAR), @@ -1137,16 +1149,17 @@ longlong Item_func_yearweek::val_int() } -/* weekday() has a automatic to_days() on item */ - longlong Item_func_weekday::val_int() { DBUG_ASSERT(fixed == 1); - ulong tmp_value=(ulong) args[0]->val_int(); - if ((null_value=(args[0]->null_value || !tmp_value))) - return 0; /* purecov: inspected */ + TIME ltime; + + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) + return 0; - return (longlong) calc_weekday(tmp_value,odbc_type)+test(odbc_type); + return (longlong) calc_weekday(calc_daynr(ltime.year, ltime.month, + ltime.day), + odbc_type) + test(odbc_type); } @@ -1154,14 +1167,14 @@ String* Item_func_dayname::val_str(String* str) { DBUG_ASSERT(fixed == 1); uint weekday=(uint) val_int(); // Always Item_func_daynr() - const char *name; + const char *day_name; THD *thd= current_thd; if (null_value) return (String*) 0; - 
name= thd->variables.lc_time_names->day_names->type_names[weekday]; - str->set(name, strlen(name), system_charset_info); + day_name= thd->variables.lc_time_names->day_names->type_names[weekday]; + str->set(day_name, strlen(day_name), system_charset_info); return str; } @@ -1170,7 +1183,7 @@ longlong Item_func_year::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ltime.year; } @@ -1178,7 +1191,7 @@ longlong Item_func_year::val_int() longlong Item_func_unix_timestamp::val_int() { TIME ltime; - bool not_used; + my_bool not_used; DBUG_ASSERT(fixed == 1); if (arg_count == 0) @@ -1269,9 +1282,15 @@ static bool get_interval_value(Item *args,interval_type int_type, case INTERVAL_YEAR: interval->year= (ulong) value; break; + case INTERVAL_QUARTER: + interval->month= (ulong)(value*3); + break; case INTERVAL_MONTH: interval->month= (ulong) value; break; + case INTERVAL_WEEK: + interval->day= (ulong)(value*7); + break; case INTERVAL_DAY: interval->day= (ulong) value; break; @@ -1384,17 +1403,6 @@ String *Item_date::val_str(String *str) } -int Item_date::save_in_field(Field *field, bool no_conversions) -{ - TIME ltime; - if (get_date(<ime, TIME_FUZZY_DATE)) - return set_field_to_null(field); - field->set_notnull(); - field->store_time(<ime, MYSQL_TIMESTAMP_DATE); - return 0; -} - - longlong Item_date::val_int() { DBUG_ASSERT(fixed == 1); @@ -1578,9 +1586,9 @@ void Item_func_now_utc::store_now_in_TIME(TIME *now_time) bool Item_func_now::get_date(TIME *res, - uint fuzzy_date __attribute__((unused))) + uint fuzzy_date __attribute__((unused))) { - *res=ltime; + *res= ltime; return 0; } @@ -1593,10 +1601,75 @@ int Item_func_now::save_in_field(Field *to, bool no_conversions) } +/* + Converts current time in my_time_t to TIME represenatation for local + time zone. Defines time zone (local) used for whole SYSDATE function. 
+*/ +void Item_func_sysdate_local::store_now_in_TIME(TIME *now_time) +{ + THD *thd= current_thd; + thd->variables.time_zone->gmt_sec_to_TIME(now_time, (my_time_t) time(NULL)); + thd->time_zone_used= 1; +} + + +String *Item_func_sysdate_local::val_str(String *str) +{ + DBUG_ASSERT(fixed == 1); + store_now_in_TIME(<ime); + buff_length= (uint) my_datetime_to_str(<ime, buff); + str_value.set(buff, buff_length, &my_charset_bin); + return &str_value; +} + + +longlong Item_func_sysdate_local::val_int() +{ + DBUG_ASSERT(fixed == 1); + store_now_in_TIME(<ime); + return (longlong) TIME_to_ulonglong_datetime(<ime); +} + + +double Item_func_sysdate_local::val_real() +{ + DBUG_ASSERT(fixed == 1); + store_now_in_TIME(<ime); + return (double) TIME_to_ulonglong_datetime(<ime); +} + + +void Item_func_sysdate_local::fix_length_and_dec() +{ + decimals= 0; + collation.set(&my_charset_bin); + max_length= MAX_DATETIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; +} + + +bool Item_func_sysdate_local::get_date(TIME *res, + uint fuzzy_date __attribute__((unused))) +{ + store_now_in_TIME(<ime); + *res= ltime; + return 0; +} + + +int Item_func_sysdate_local::save_in_field(Field *to, bool no_conversions) +{ + store_now_in_TIME(<ime); + to->set_notnull(); + to->store_time(<ime, MYSQL_TIMESTAMP_DATETIME); + return 0; +} + + String *Item_func_sec_to_time::val_str(String *str) { DBUG_ASSERT(fixed == 1); TIME ltime; + longlong arg_val= args[0]->val_int(); if ((null_value=args[0]->null_value) || str->alloc(19)) { @@ -1604,7 +1677,7 @@ String *Item_func_sec_to_time::val_str(String *str) return (String*) 0; } - sec_to_time(args[0]->val_int(), args[0]->unsigned_flag, <ime); + sec_to_time(arg_val, args[0]->unsigned_flag, <ime); make_time((DATE_TIME_FORMAT *) 0, <ime, str); return str; @@ -1615,11 +1688,12 @@ longlong Item_func_sec_to_time::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; + longlong arg_val= args[0]->val_int(); if ((null_value=args[0]->null_value)) return 0; - sec_to_time(args[0]->val_int(), 
args[0]->unsigned_flag, <ime); + sec_to_time(arg_val, args[0]->unsigned_flag, <ime); return (ltime.neg ? -1 : 1) * ((ltime.hour)*10000 + ltime.minute*100 + ltime.second); @@ -1629,35 +1703,56 @@ longlong Item_func_sec_to_time::val_int() void Item_func_date_format::fix_length_and_dec() { THD* thd= current_thd; + /* + Must use this_item() in case it's a local SP variable + (for ->max_length and ->str_value) + */ + Item *arg1= args[1]->this_item(); + decimals=0; collation.set(thd->variables.collation_connection); - if (args[1]->type() == STRING_ITEM) + if (arg1->type() == STRING_ITEM) { // Optimize the normal case fixed_length=1; - - /* - Force case sensitive collation on format string. - This needed because format modifiers with different case, - for example %m and %M, have different meaning. Thus eq() - will distinguish them. - */ - args[1]->collation.set( - get_charset_by_csname(args[1]->collation.collation->csname, - MY_CS_BINSORT,MYF(0)), DERIVATION_COERCIBLE); - max_length= format_length(((Item_string*) args[1])->const_string()) * + max_length= format_length(&arg1->str_value) * collation.collation->mbmaxlen; } else { fixed_length=0; - max_length= min(args[1]->max_length,MAX_BLOB_WIDTH) * 10 * - collation.collation->mbmaxlen; + max_length=min(arg1->max_length, MAX_BLOB_WIDTH) * 10 * + collation.collation->mbmaxlen; set_if_smaller(max_length,MAX_BLOB_WIDTH); } maybe_null=1; // If wrong date } +bool Item_func_date_format::eq(const Item *item, bool binary_cmp) const +{ + Item_func_date_format *item_func; + + if (item->type() != FUNC_ITEM) + return 0; + if (func_name() != ((Item_func*) item)->func_name()) + return 0; + if (this == item) + return 1; + item_func= (Item_func_date_format*) item; + if (!args[0]->eq(item_func->args[0], binary_cmp)) + return 0; + /* + We must compare format string case sensitive. + This needed because format modifiers with different case, + for example %m and %M, have different meaning. 
+ */ + if (!args[1]->eq(item_func->args[1], 1)) + return 0; + return 1; +} + + + uint Item_func_date_format::format_length(const String *format) { uint size=0; @@ -1740,7 +1835,7 @@ String *Item_func_date_format::val_str(String *str) if (!is_time_format) { - if (get_arg0_date(&l_time,1)) + if (get_arg0_date(&l_time, TIME_FUZZY_DATE)) return 0; } else @@ -1854,15 +1949,15 @@ void Item_func_convert_tz::fix_length_and_dec() bool -Item_func_convert_tz::fix_fields(THD *thd_arg, TABLE_LIST *tables_arg, Item **ref) +Item_func_convert_tz::fix_fields(THD *thd_arg, Item **ref) { String str; - if (Item_date_func::fix_fields(thd_arg, tables_arg, ref)) - return 1; + if (Item_date_func::fix_fields(thd_arg, ref)) + return TRUE; tz_tables= thd_arg->lex->time_zone_tables_used; - return 0; + return FALSE; } @@ -1896,10 +1991,9 @@ longlong Item_func_convert_tz::val_int() bool Item_func_convert_tz::get_date(TIME *ltime, - uint fuzzy_date __attribute__((unused))) + uint fuzzy_date __attribute__((unused))) { my_time_t my_time_tmp; - bool not_used; String str; if (!from_tz_cached) @@ -1914,17 +2008,20 @@ bool Item_func_convert_tz::get_date(TIME *ltime, to_tz_cached= args[2]->const_item(); } - if (from_tz==0 || to_tz==0 || get_arg0_date(ltime, 0)) + if (from_tz==0 || to_tz==0 || get_arg0_date(ltime, TIME_NO_ZERO_DATE)) { null_value= 1; return 1; } - my_time_tmp= from_tz->TIME_to_gmt_sec(ltime, ¬_used); - /* my_time_tmp is guranteed to be in the allowed range */ - if (my_time_tmp) - to_tz->gmt_sec_to_TIME(ltime, my_time_tmp); - + { + my_bool not_used; + my_time_tmp= from_tz->TIME_to_gmt_sec(ltime, ¬_used); + /* my_time_tmp is guranteed to be in the allowed range */ + if (my_time_tmp) + to_tz->gmt_sec_to_TIME(ltime, my_time_tmp); + } + null_value= 0; return 0; } @@ -1980,7 +2077,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) INTERVAL interval; ltime->neg= 0; - if (args[0]->get_date(ltime,0) || + if (args[0]->get_date(ltime, TIME_NO_ZERO_DATE) || 
get_interval_value(args[1],int_type,&value,&interval)) goto null_date; sign= (interval.neg ? -1 : 1); @@ -2034,33 +2131,35 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) daynr= calc_daynr(ltime->year,ltime->month,1) + days; /* Day number from year 0 to 9999-12-31 */ if ((ulonglong) daynr > MAX_DAY_NUMBER) - goto null_date; + goto invalid_date; get_date_from_daynr((long) daynr, <ime->year, <ime->month, <ime->day); break; } case INTERVAL_DAY: + case INTERVAL_WEEK: period= (calc_daynr(ltime->year,ltime->month,ltime->day) + sign * (long) interval.day); /* Daynumber from year 0 to 9999-12-31 */ if ((ulong) period > MAX_DAY_NUMBER) - goto null_date; + goto invalid_date; get_date_from_daynr((long) period,<ime->year,<ime->month,<ime->day); break; case INTERVAL_YEAR: ltime->year+= sign * (long) interval.year; if ((ulong) ltime->year >= 10000L) - goto null_date; + goto invalid_date; if (ltime->month == 2 && ltime->day == 29 && calc_days_in_year(ltime->year) != 366) ltime->day=28; // Was leap-year break; case INTERVAL_YEAR_MONTH: + case INTERVAL_QUARTER: case INTERVAL_MONTH: period= (ltime->year*12 + sign * (long) interval.year*12 + ltime->month-1 + sign * (long) interval.month); if ((ulong) period >= 120000L) - goto null_date; + goto invalid_date; ltime->year= (uint) (period / 12); ltime->month= (uint) (period % 12L)+1; /* Adjust day if the new month doesn't have enough days */ @@ -2076,6 +2175,11 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) } return 0; // Ok +invalid_date: + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_DATETIME_FUNCTION_OVERFLOW, + ER(ER_DATETIME_FUNCTION_OVERFLOW), + "datetime"); null_date: return (null_value=1); } @@ -2087,7 +2191,7 @@ String *Item_date_add_interval::val_str(String *str) TIME ltime; enum date_time_format_types format; - if (Item_date_add_interval::get_date(<ime,0)) + if (Item_date_add_interval::get_date(<ime, TIME_NO_ZERO_DATE)) return 0; if (ltime.time_type == 
MYSQL_TIMESTAMP_DATE) @@ -2110,21 +2214,38 @@ longlong Item_date_add_interval::val_int() DBUG_ASSERT(fixed == 1); TIME ltime; longlong date; - if (Item_date_add_interval::get_date(<ime,0)) + if (Item_date_add_interval::get_date(<ime, TIME_NO_ZERO_DATE)) return (longlong) 0; date = (ltime.year*100L + ltime.month)*100L + ltime.day; return ltime.time_type == MYSQL_TIMESTAMP_DATE ? date : ((date*100L + ltime.hour)*100L+ ltime.minute)*100L + ltime.second; } + + +bool Item_date_add_interval::eq(const Item *item, bool binary_cmp) const +{ + Item_date_add_interval *other= (Item_date_add_interval*) item; + if (!Item_func::eq(item, binary_cmp)) + return 0; + return ((int_type == other->int_type) && + (date_sub_interval == other->date_sub_interval)); +} + +/* + 'interval_names' reflects the order of the enumeration interval_type. + See item_timefunc.h + */ + static const char *interval_names[]= { - "year", "month", "day", "hour", "minute", - "second", "microsecond", "year_month", - "day_hour", "day_minute", "day_second", - "hour_minute", "hour_second", "minute_second", - "day_microsecond", "hour_microsecond", - "minute_microsecond", "second_microsecond" + "year", "quarter", "month", "week", "day", + "hour", "minute", "second", "microsecond", + "year_month", "day_hour", "day_minute", + "day_second", "hour_minute", "hour_second", + "minute_second", "day_microsecond", + "hour_microsecond", "minute_microsecond", + "second_microsecond" }; void Item_date_add_interval::print(String *str) @@ -2140,9 +2261,9 @@ void Item_date_add_interval::print(String *str) void Item_extract::print(String *str) { - str->append("extract(", 8); + str->append(STRING_WITH_LEN("extract(")); str->append(interval_names[int_type]); - str->append(" from ", 6); + str->append(STRING_WITH_LEN(" from ")); args[0]->print(str); str->append(')'); } @@ -2155,7 +2276,9 @@ void Item_extract::fix_length_and_dec() switch (int_type) { case INTERVAL_YEAR: max_length=4; date_value=1; break; case INTERVAL_YEAR_MONTH: 
max_length=6; date_value=1; break; + case INTERVAL_QUARTER: max_length=2; date_value=1; break; case INTERVAL_MONTH: max_length=2; date_value=1; break; + case INTERVAL_WEEK: max_length=2; date_value=1; break; case INTERVAL_DAY: max_length=2; date_value=1; break; case INTERVAL_DAY_HOUR: max_length=9; date_value=0; break; case INTERVAL_DAY_MINUTE: max_length=11; date_value=0; break; @@ -2179,10 +2302,12 @@ longlong Item_extract::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; + uint year; + ulong week_format; long neg; if (date_value) { - if (get_arg0_date(<ime,1)) + if (get_arg0_date(<ime, TIME_FUZZY_DATE)) return 0; neg=1; } @@ -2200,7 +2325,13 @@ longlong Item_extract::val_int() switch (int_type) { case INTERVAL_YEAR: return ltime.year; case INTERVAL_YEAR_MONTH: return ltime.year*100L+ltime.month; + case INTERVAL_QUARTER: return (ltime.month+2)/3; case INTERVAL_MONTH: return ltime.month; + case INTERVAL_WEEK: + { + week_format= current_thd->variables.default_week_format; + return calc_week(<ime, week_mode(week_format), &year); + } case INTERVAL_DAY: return ltime.day; case INTERVAL_DAY_HOUR: return (long) (ltime.day*100L+ltime.hour)*neg; case INTERVAL_DAY_MINUTE: return (long) (ltime.day*10000L+ @@ -2241,7 +2372,7 @@ bool Item_extract::eq(const Item *item, bool binary_cmp) const if (this == item) return 1; if (item->type() != FUNC_ITEM || - func_name() != ((Item_func*)item)->func_name()) + functype() != ((Item_func*)item)->functype()) return 0; Item_extract* ie= (Item_extract*)item; @@ -2259,7 +2390,7 @@ bool Item_char_typecast::eq(const Item *item, bool binary_cmp) const if (this == item) return 1; if (item->type() != FUNC_ITEM || - func_name() != ((Item_func*)item)->func_name()) + functype() != ((Item_func*)item)->functype()) return 0; Item_char_typecast *cast= (Item_char_typecast*)item; @@ -2274,9 +2405,9 @@ bool Item_char_typecast::eq(const Item *item, bool binary_cmp) const void Item_typecast::print(String *str) { - str->append("cast(", 5); + 
str->append(STRING_WITH_LEN("cast(")); args[0]->print(str); - str->append(" as ", 4); + str->append(STRING_WITH_LEN(" as ")); str->append(cast_type()); str->append(')'); } @@ -2284,9 +2415,9 @@ void Item_typecast::print(String *str) void Item_char_typecast::print(String *str) { - str->append("cast(", 5); + str->append(STRING_WITH_LEN("cast(")); args[0]->print(str); - str->append(" as char", 8); + str->append(STRING_WITH_LEN(" as char")); if (cast_length >= 0) { str->append('('); @@ -2299,8 +2430,8 @@ void Item_char_typecast::print(String *str) } if (cast_cs) { - str->append(" charset ", 9); - str->append(cast_cs->name); + str->append(STRING_WITH_LEN(" charset ")); + str->append(cast_cs->csname); } str->append(')'); } @@ -2336,24 +2467,48 @@ String *Item_char_typecast::val_str(String *str) res->set_charset(cast_cs); /* - Cut the tail if cast with length - and the result is longer than cast length, e.g. - CAST('string' AS CHAR(1)) + Cut the tail if cast with length + and the result is longer than cast length, e.g. + CAST('string' AS CHAR(1)) */ - if (cast_length >= 0 && - (res->length() > (length= (uint32) res->charpos(cast_length)))) - { // Safe even if const arg - if (!res->alloced_length()) - { // Don't change const str - str_value= *res; // Not malloced string - res= &str_value; + if (cast_length >= 0) + { + if (res->length() > (length= (uint32) res->charpos(cast_length))) + { // Safe even if const arg + char char_type[40]; + my_snprintf(char_type, sizeof(char_type), "%s(%lu)", + cast_cs == &my_charset_bin ? 
"BINARY" : "CHAR", + (ulong) length); + + if (!res->alloced_length()) + { // Don't change const str + str_value= *res; // Not malloced string + res= &str_value; + } + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), char_type, + res->c_ptr_safe()); + res->length((uint) length); + } + else if (cast_cs == &my_charset_bin && res->length() < (uint) cast_length) + { + if (res->alloced_length() < (uint) cast_length) + { + str->alloc(cast_length); + str->copy(*res); + res= str; + } + bzero((char*) res->ptr() + res->length(), + (uint) cast_length - res->length()); + res->length(cast_length); } - res->length((uint) length); } null_value= 0; return res; } + void Item_char_typecast::fix_length_and_dec() { uint32 char_length; @@ -2380,6 +2535,7 @@ void Item_char_typecast::fix_length_and_dec() the argument's charset. */ from_cs= (args[0]->result_type() == INT_RESULT || + args[0]->result_type() == DECIMAL_RESULT || args[0]->result_type() == REAL_RESULT) ? (cast_cs->mbminlen == 1 ? cast_cs : &my_charset_latin1) : args[0]->collation.collation; @@ -2398,7 +2554,7 @@ String *Item_datetime_typecast::val_str(String *str) { DBUG_ASSERT(fixed == 1); TIME ltime; - if (!get_arg0_date(<ime,1) && + if (!get_arg0_date(<ime, TIME_FUZZY_DATE) && !make_datetime(ltime.second_part ? 
DATE_TIME_MICROSECOND : DATE_TIME, <ime, str)) return str; @@ -2464,7 +2620,7 @@ String *Item_time_typecast::val_str(String *str) bool Item_date_typecast::get_date(TIME *ltime, uint fuzzy_date) { - bool res= get_arg0_date(ltime,1); + bool res= get_arg0_date(ltime, TIME_FUZZY_DATE); ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0; ltime->time_type= MYSQL_TIMESTAMP_DATE; return res; @@ -2476,7 +2632,7 @@ String *Item_date_typecast::val_str(String *str) DBUG_ASSERT(fixed == 1); TIME ltime; - if (!get_arg0_date(<ime,1) && !str->alloc(11)) + if (!get_arg0_date(<ime, TIME_FUZZY_DATE) && !str->alloc(11)) { make_date((DATE_TIME_FORMAT *) 0, <ime, str); return str; @@ -2609,7 +2765,7 @@ String *Item_func_add_time::val_str(String *str) null_value=0; if (is_date) // TIMESTAMP function { - if (get_arg0_date(&l_time1,1) || + if (get_arg0_date(&l_time1, TIME_FUZZY_DATE) || args[1]->get_time(&l_time2) || l_time1.time_type == MYSQL_TIMESTAMP_TIME || l_time2.time_type != MYSQL_TIMESTAMP_TIME) @@ -2672,18 +2828,18 @@ void Item_func_add_time::print(String *str) if (is_date) { DBUG_ASSERT(sign > 0); - str->append("timestamp(", 10); + str->append(STRING_WITH_LEN("timestamp(")); } else { if (sign > 0) - str->append("addtime(", 8); + str->append(STRING_WITH_LEN("addtime(")); else - str->append("subtime(", 8); + str->append(STRING_WITH_LEN("subtime(")); } args[0]->print(str); str->append(','); - args[0]->print(str); + args[1]->print(str); str->append(')'); } @@ -2791,7 +2947,8 @@ String *Item_func_maketime::val_str(String *str) char *ptr= longlong10_to_str(hour, buf, args[0]->unsigned_flag ? 
10 : -10); int len = (int)(ptr - buf) + my_sprintf(ptr, (ptr, ":%02u:%02u", (uint)minute, (uint)second)); - make_truncated_value_warning(current_thd, buf, len, MYSQL_TIMESTAMP_TIME); + make_truncated_value_warning(current_thd, buf, len, MYSQL_TIMESTAMP_TIME, + NullS); } if (make_time_with_warn((DATE_TIME_FORMAT *) 0, <ime, str)) @@ -2821,6 +2978,157 @@ longlong Item_func_microsecond::val_int() } +longlong Item_func_timestamp_diff::val_int() +{ + TIME ltime1, ltime2; + longlong seconds; + long microseconds; + long months= 0; + int neg= 1; + + null_value= 0; + if (args[0]->get_date(<ime1, TIME_NO_ZERO_DATE) || + args[1]->get_date(<ime2, TIME_NO_ZERO_DATE)) + goto null_date; + + if (calc_time_diff(<ime2,<ime1, 1, + &seconds, µseconds)) + neg= -1; + + if (int_type == INTERVAL_YEAR || + int_type == INTERVAL_QUARTER || + int_type == INTERVAL_MONTH) + { + uint year_beg, year_end, month_beg, month_end, day_beg, day_end; + uint years= 0; + uint second_beg, second_end, microsecond_beg, microsecond_end; + + if (neg == -1) + { + year_beg= ltime2.year; + year_end= ltime1.year; + month_beg= ltime2.month; + month_end= ltime1.month; + day_beg= ltime2.day; + day_end= ltime1.day; + second_beg= ltime2.hour * 3600 + ltime2.minute * 60 + ltime2.second; + second_end= ltime1.hour * 3600 + ltime1.minute * 60 + ltime1.second; + microsecond_beg= ltime2.second_part; + microsecond_end= ltime1.second_part; + } + else + { + year_beg= ltime1.year; + year_end= ltime2.year; + month_beg= ltime1.month; + month_end= ltime2.month; + day_beg= ltime1.day; + day_end= ltime2.day; + second_beg= ltime1.hour * 3600 + ltime1.minute * 60 + ltime1.second; + second_end= ltime2.hour * 3600 + ltime2.minute * 60 + ltime2.second; + microsecond_beg= ltime1.second_part; + microsecond_end= ltime2.second_part; + } + + /* calc years */ + years= year_end - year_beg; + if (month_end < month_beg || (month_end == month_beg && day_end < day_beg)) + years-= 1; + + /* calc months */ + months= 12*years; + if (month_end < 
month_beg || (month_end == month_beg && day_end < day_beg)) + months+= 12 - (month_beg - month_end); + else + months+= (month_end - month_beg); + + if (day_end < day_beg) + months-= 1; + else if ((day_end == day_beg) && + ((second_end < second_beg) || + (second_end == second_beg && microsecond_end < microsecond_beg))) + months-= 1; + } + + switch (int_type) { + case INTERVAL_YEAR: + return months/12*neg; + case INTERVAL_QUARTER: + return months/3*neg; + case INTERVAL_MONTH: + return months*neg; + case INTERVAL_WEEK: + return seconds/86400L/7L*neg; + case INTERVAL_DAY: + return seconds/86400L*neg; + case INTERVAL_HOUR: + return seconds/3600L*neg; + case INTERVAL_MINUTE: + return seconds/60L*neg; + case INTERVAL_SECOND: + return seconds*neg; + case INTERVAL_MICROSECOND: + /* + In MySQL difference between any two valid datetime values + in microseconds fits into longlong. + */ + return (seconds*1000000L+microseconds)*neg; + default: + break; + } + +null_date: + null_value=1; + return 0; +} + + +void Item_func_timestamp_diff::print(String *str) +{ + str->append(func_name()); + str->append('('); + + switch (int_type) { + case INTERVAL_YEAR: + str->append(STRING_WITH_LEN("YEAR")); + break; + case INTERVAL_QUARTER: + str->append(STRING_WITH_LEN("QUARTER")); + break; + case INTERVAL_MONTH: + str->append(STRING_WITH_LEN("MONTH")); + break; + case INTERVAL_WEEK: + str->append(STRING_WITH_LEN("WEEK")); + break; + case INTERVAL_DAY: + str->append(STRING_WITH_LEN("DAY")); + break; + case INTERVAL_HOUR: + str->append(STRING_WITH_LEN("HOUR")); + break; + case INTERVAL_MINUTE: + str->append(STRING_WITH_LEN("MINUTE")); + break; + case INTERVAL_SECOND: + str->append(STRING_WITH_LEN("SECOND")); + break; + case INTERVAL_MICROSECOND: + str->append(STRING_WITH_LEN("SECOND_FRAC")); + break; + default: + break; + } + + for (uint i=0 ; i < 2 ; i++) + { + str->append(','); + args[i]->print(str); + } + str->append(')'); +} + + String *Item_func_get_format::val_str(String *str) { 
DBUG_ASSERT(fixed == 1); @@ -2862,13 +3170,13 @@ void Item_func_get_format::print(String *str) switch (type) { case MYSQL_TIMESTAMP_DATE: - str->append("DATE, "); + str->append(STRING_WITH_LEN("DATE, ")); break; case MYSQL_TIMESTAMP_DATETIME: - str->append("DATETIME, "); + str->append(STRING_WITH_LEN("DATETIME, ")); break; case MYSQL_TIMESTAMP_TIME: - str->append("TIME, "); + str->append(STRING_WITH_LEN("TIME, ")); break; default: DBUG_ASSERT(0); @@ -2927,9 +3235,9 @@ get_date_time_result_type(const char *format, uint length) have all types of date-time components and can end our search. */ return DATE_TIME_MICROSECOND; - } } } + } /* We don't have all three types of date-time components */ if (frac_second_used) @@ -2959,8 +3267,7 @@ Field *Item_func_str_to_date::tmp_table_field(TABLE *t_arg) void Item_func_str_to_date::fix_length_and_dec() { char format_buff[64]; - String format_str(format_buff, sizeof(format_buff), &my_charset_bin); - String *format; + String format_str(format_buff, sizeof(format_buff), &my_charset_bin), *format; maybe_null= 1; decimals=0; cached_field_type= MYSQL_TYPE_STRING; @@ -2995,10 +3302,10 @@ bool Item_func_str_to_date::get_date(TIME *ltime, uint fuzzy_date) { DATE_TIME_FORMAT date_time_format; char val_buff[64], format_buff[64]; - String val_str(val_buff, sizeof(val_buff), &my_charset_bin), *val; + String val_string(val_buff, sizeof(val_buff), &my_charset_bin), *val; String format_str(format_buff, sizeof(format_buff), &my_charset_bin), *format; - val= args[0]->val_str(&val_str); + val= args[0]->val_str(&val_string); format= args[1]->val_str(&format_str); if (args[0]->null_value || args[1]->null_value) goto null_date; @@ -3008,7 +3315,9 @@ bool Item_func_str_to_date::get_date(TIME *ltime, uint fuzzy_date) date_time_format.format.str= (char*) format->ptr(); date_time_format.format.length= format->length(); if (extract_date_time(&date_time_format, val->ptr(), val->length(), - ltime, cached_timestamp_type, 0)) + ltime, cached_timestamp_type, 
0, "datetime") || + ((fuzzy_date & TIME_NO_ZERO_DATE) && + (ltime->year == 0 || ltime->month == 0 || ltime->day == 0))) goto null_date; if (cached_timestamp_type == MYSQL_TIMESTAMP_TIME && ltime->day) { @@ -3047,7 +3356,11 @@ bool Item_func_last_day::get_date(TIME *ltime, uint fuzzy_date) { if (get_arg0_date(ltime, fuzzy_date & ~TIME_FUZZY_DATE) || (ltime->month == 0)) + { + null_value= 1; return 1; + } + null_value= 0; uint month_idx= ltime->month-1; ltime->day= days_in_month[month_idx]; if ( month_idx == 1 && calc_days_in_year(ltime->year) == 366) diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 2383b4f86ac..14ceb8dcb28 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -88,7 +87,7 @@ class Item_func_month :public Item_func public: Item_func_month(Item *a) :Item_func(a) {} longlong val_int(); - double val() + double val_real() { DBUG_ASSERT(fixed == 1); return (double) Item_func_month::val_int(); } String *val_str(String *str) { @@ -250,14 +249,17 @@ public: Item_func_weekday(Item *a,bool type_arg) :Item_func(a), odbc_type(type_arg) {} longlong val_int(); - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } String *val_str(String *str) { DBUG_ASSERT(fixed == 1); str->set(val_int(), &my_charset_bin); return null_value ? 
0 : str; } - const char *func_name() const { return "weekday"; } + const char *func_name() const + { + return (odbc_type ? "dayofweek" : "weekday"); + } enum Item_result result_type () const { return INT_RESULT; } void fix_length_and_dec() { @@ -315,7 +317,9 @@ public: }; -/* This can't be a Item_str_func, because the val() functions are special */ +/* + This can't be a Item_str_func, because the val_real() functions are special +*/ class Item_date :public Item_func { @@ -326,7 +330,7 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_DATE; } String *val_str(String *str); longlong val_int(); - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { return val_real_from_decimal(); } const char *func_name() const { return "date"; } void fix_length_and_dec() { @@ -334,12 +338,20 @@ public: decimals=0; max_length=MAX_DATE_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } - int save_in_field(Field *to, bool no_conversions); Field *tmp_table_field(TABLE *t_arg) { return (new Field_date(maybe_null, name, t_arg, &my_charset_bin)); } bool result_as_longlong() { return TRUE; } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; @@ -356,22 +368,60 @@ public: return (new Field_datetime(maybe_null, name, t_arg, &my_charset_bin)); } bool result_as_longlong() { return TRUE; } + double val_real() { return (double) val_int(); } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } +}; + + +class Item_str_timefunc :public Item_str_func +{ +public: + Item_str_timefunc() :Item_str_func() {} + Item_str_timefunc(Item *a) :Item_str_func(a) {} + Item_str_timefunc(Item *a,Item *b) 
:Item_str_func(a,b) {} + Item_str_timefunc(Item *a, Item *b, Item *c) :Item_str_func(a, b ,c) {} + enum_field_types field_type() const { return MYSQL_TYPE_TIME; } + void fix_length_and_dec() + { + decimals= DATETIME_DEC; + max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + } + Field *tmp_table_field(TABLE *t_arg) + { + return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); + } + double val_real() { return val_real_from_decimal(); } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_time(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_time_in_field(field); + } }; /* Abstract CURTIME function. Children should define what time zone is used */ -class Item_func_curtime :public Item_func +class Item_func_curtime :public Item_str_timefunc { longlong value; char buff[9*2+32]; uint buff_length; public: - Item_func_curtime() :Item_func() {} - Item_func_curtime(Item *a) :Item_func(a) {} - enum Item_result result_type () const { return STRING_RESULT; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } - double val() { DBUG_ASSERT(fixed == 1); return (double) value; } + Item_func_curtime() :Item_str_timefunc() {} + Item_func_curtime(Item *a) :Item_str_timefunc(a) {} + double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } String *val_str(String *str); void fix_length_and_dec(); @@ -447,6 +497,7 @@ public: class Item_func_now :public Item_date_func { +protected: longlong value; char buff[20*2+32]; // +32 to make my_snprintf_{8bit|ucs2} happy uint buff_length; @@ -455,7 +506,6 @@ public: Item_func_now() :Item_date_func() {} Item_func_now(Item *a) :Item_date_func(a) {} enum Item_result result_type () const { return STRING_RESULT; } - double val() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } int 
save_in_field(Field *to, bool no_conversions); String *val_str(String *str); @@ -486,6 +536,32 @@ public: }; +/* + This is like NOW(), but always uses the real current time, not the + query_start(). This matches the Oracle behavior. +*/ +class Item_func_sysdate_local :public Item_func_now +{ +public: + Item_func_sysdate_local() :Item_func_now() {} + Item_func_sysdate_local(Item *a) :Item_func_now(a) {} + bool const_item() const { return 0; } + const char *func_name() const { return "sysdate"; } + void store_now_in_TIME(TIME *now_time); + double val_real(); + longlong val_int(); + int save_in_field(Field *to, bool no_conversions); + String *val_str(String *str); + void fix_length_and_dec(); + bool get_date(TIME *res, uint fuzzy_date); + void update_used_tables() + { + Item_func_now::update_used_tables(); + used_tables_cache|= RAND_TABLE_BIT; + } +}; + + class Item_func_from_days :public Item_date { public: @@ -504,9 +580,11 @@ public: Item_func_date_format(Item *a,Item *b,bool is_time_format_arg) :Item_str_func(a,b),is_time_format(is_time_format_arg) {} String *val_str(String *str); - const char *func_name() const { return "date_format"; } + const char *func_name() const + { return is_time_format ? 
"time_format" : "date_format"; } void fix_length_and_dec(); uint format_length(const String *format); + bool eq(const Item *item, bool binary_cmp) const; }; @@ -515,11 +593,6 @@ class Item_func_from_unixtime :public Item_date_func THD *thd; public: Item_func_from_unixtime(Item *a) :Item_date_func(a) {} - double val() - { - DBUG_ASSERT(fixed == 1); - return (double) Item_func_from_unixtime::val_int(); - } longlong val_int(); String *val_str(String *str); const char *func_name() const { return "from_unixtime"; } @@ -558,21 +631,20 @@ class Item_func_convert_tz :public Item_date_func Item_func_convert_tz(Item *a, Item *b, Item *c): Item_date_func(a, b, c), from_tz_cached(0), to_tz_cached(0) {} longlong val_int(); - double val() { return (double) val_int(); } String *val_str(String *str); const char *func_name() const { return "convert_tz"; } - bool fix_fields(THD *, struct st_table_list *, Item **); + bool fix_fields(THD *, Item **); void fix_length_and_dec(); bool get_date(TIME *res, uint fuzzy_date); void cleanup(); }; -class Item_func_sec_to_time :public Item_str_func +class Item_func_sec_to_time :public Item_str_timefunc { public: - Item_func_sec_to_time(Item *item) :Item_str_func(item) {} - double val() + Item_func_sec_to_time(Item *item) :Item_str_timefunc(item) {} + double val_real() { DBUG_ASSERT(fixed == 1); return (double) Item_func_sec_to_time::val_int(); @@ -581,33 +653,30 @@ public: String *val_str(String *); void fix_length_and_dec() { + Item_str_timefunc::fix_length_and_dec(); collation.set(&my_charset_bin); maybe_null=1; - decimals= DATETIME_DEC; - max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } const char *func_name() const { return "sec_to_time"; } - Field *tmp_table_field(TABLE *t_arg) - { - return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); - } bool result_as_longlong() { return TRUE; } }; /* - The following must be sorted so that simple intervals comes first. 
- (get_interval_value() depends on this) + 'interval_type' must be sorted so that simple intervals comes first, + ie year, quarter, month, week, day, hour, etc. The order based on + interval size is also important and the intervals should be kept in a + large to smaller order. (get_interval_value() depends on this) */ enum interval_type { - INTERVAL_YEAR, INTERVAL_MONTH, INTERVAL_DAY, INTERVAL_HOUR, INTERVAL_MINUTE, - INTERVAL_SECOND, INTERVAL_MICROSECOND ,INTERVAL_YEAR_MONTH, - INTERVAL_DAY_HOUR, INTERVAL_DAY_MINUTE, INTERVAL_DAY_SECOND, - INTERVAL_HOUR_MINUTE, INTERVAL_HOUR_SECOND, INTERVAL_MINUTE_SECOND, - INTERVAL_DAY_MICROSECOND, INTERVAL_HOUR_MICROSECOND, - INTERVAL_MINUTE_MICROSECOND, INTERVAL_SECOND_MICROSECOND + INTERVAL_YEAR, INTERVAL_QUARTER, INTERVAL_MONTH, INTERVAL_WEEK, + INTERVAL_DAY, INTERVAL_HOUR, INTERVAL_MINUTE, INTERVAL_SECOND, + INTERVAL_MICROSECOND, INTERVAL_YEAR_MONTH, INTERVAL_DAY_HOUR, + INTERVAL_DAY_MINUTE, INTERVAL_DAY_SECOND, INTERVAL_HOUR_MINUTE, + INTERVAL_HOUR_SECOND, INTERVAL_MINUTE_SECOND, INTERVAL_DAY_MICROSECOND, + INTERVAL_HOUR_MICROSECOND, INTERVAL_MINUTE_MICROSECOND, + INTERVAL_SECOND_MICROSECOND }; class Item_date_add_interval :public Item_date_func @@ -624,9 +693,9 @@ public: const char *func_name() const { return "date_add_interval"; } void fix_length_and_dec(); enum_field_types field_type() const { return cached_field_type; } - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } longlong val_int(); bool get_date(TIME *res, uint fuzzy_date); + bool eq(const Item *item, bool binary_cmp) const; void print(String *str); }; @@ -640,6 +709,7 @@ class Item_extract :public Item_int_func Item_extract(interval_type type_arg, Item *a) :Item_int_func(a), int_type(type_arg) {} longlong val_int(); + enum Functype functype() const { return EXTRACT_FUNC; } const char *func_name() const { return "extract"; } void fix_length_and_dec(); bool eq(const Item *item, bool binary_cmp) const; @@ -692,6 +762,7 @@ class 
Item_char_typecast :public Item_typecast public: Item_char_typecast(Item *a, int length_arg, CHARSET_INFO *cs_arg) :Item_typecast(a), cast_length(length_arg), cast_cs(cs_arg) {} + enum Functype functype() const { return CHAR_TYPECAST_FUNC; } bool eq(const Item *item, bool binary_cmp) const; const char *func_name() const { return "cast_as_char"; } const char* cast_type() const { return "char"; }; @@ -722,6 +793,16 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return (double) val_int(); } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; @@ -740,6 +821,16 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return val_real_from_decimal(); } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_time(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_time_in_field(field); + } }; @@ -763,13 +854,23 @@ public: } bool result_as_longlong() { return TRUE; } longlong val_int(); + double val_real() { return val_real_from_decimal(); } double val() { return (double) val_int(); } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; -class Item_func_makedate :public Item_str_func +class Item_func_makedate :public Item_date_func { public: - Item_func_makedate(Item *a,Item *b) :Item_str_func(a,b) {} + Item_func_makedate(Item *a,Item *b) :Item_date_func(a,b) {} String *val_str(String *str); const char *func_name() const { return "makedate"; } enum_field_types field_type() const { return MYSQL_TYPE_DATE; } @@ -782,8 +883,16 @@ 
public: { return (new Field_date(maybe_null, name, t_arg, &my_charset_bin)); } - bool result_as_longlong() { return TRUE; } longlong val_int(); + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + return val_decimal_from_date(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + return save_date_in_field(field); + } }; @@ -814,45 +923,48 @@ public: return (new Field_string(max_length, maybe_null, name, t_arg, &my_charset_bin)); } void print(String *str); + const char *func_name() const { return "add_time"; } + double val_real() { return val_real_from_decimal(); } + my_decimal *val_decimal(my_decimal *decimal_value) + { + DBUG_ASSERT(fixed == 1); + if (cached_field_type == MYSQL_TYPE_TIME) + return val_decimal_from_time(decimal_value); + if (cached_field_type == MYSQL_TYPE_DATETIME) + return val_decimal_from_date(decimal_value); + return Item_str_func::val_decimal(decimal_value); + } + int save_in_field(Field *field, bool no_conversions) + { + if (cached_field_type == MYSQL_TYPE_TIME) + return save_time_in_field(field); + if (cached_field_type == MYSQL_TYPE_DATETIME) + return save_date_in_field(field); + return Item_str_func::save_in_field(field, no_conversions); + } }; -class Item_func_timediff :public Item_str_func +class Item_func_timediff :public Item_str_timefunc { public: Item_func_timediff(Item *a, Item *b) - :Item_str_func(a, b) {} + :Item_str_timefunc(a, b) {} String *val_str(String *str); const char *func_name() const { return "timediff"; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } void fix_length_and_dec() { - decimals=0; - max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; + Item_str_timefunc::fix_length_and_dec(); maybe_null= 1; } - Field *tmp_table_field(TABLE *t_arg) - { - return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); - } }; -class Item_func_maketime :public Item_str_func +class Item_func_maketime :public Item_str_timefunc { public: 
Item_func_maketime(Item *a, Item *b, Item *c) - :Item_str_func(a, b ,c) {} + :Item_str_timefunc(a, b ,c) {} String *val_str(String *str); const char *func_name() const { return "maketime"; } - enum_field_types field_type() const { return MYSQL_TYPE_TIME; } - void fix_length_and_dec() - { - decimals=0; - max_length=MAX_TIME_WIDTH*MY_CHARSET_BIN_MB_MAXLEN; - } - Field *tmp_table_field(TABLE *t_arg) - { - return (new Field_time(maybe_null, name, t_arg, &my_charset_bin)); - } }; class Item_func_microsecond :public Item_int_func @@ -869,6 +981,23 @@ public: }; +class Item_func_timestamp_diff :public Item_int_func +{ + const interval_type int_type; +public: + Item_func_timestamp_diff(Item *a,Item *b,interval_type type_arg) + :Item_int_func(a,b), int_type(type_arg) {} + const char *func_name() const { return "timestampdiff"; } + longlong val_int(); + void fix_length_and_dec() + { + decimals=0; + maybe_null=1; + } + void print(String *str); +}; + + enum date_time_format { USA_FORMAT, JIS_FORMAT, ISO_FORMAT, EUR_FORMAT, INTERNAL_FORMAT diff --git a/sql/item_uniq.cc b/sql/item_uniq.cc index 7701bbbb63e..1a5524eb1e0 100644 --- a/sql/item_uniq.cc +++ b/sql/item_uniq.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001, 2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -21,3 +20,9 @@ #endif #include "mysql_priv.h" + +Field *Item_sum_unique_users::create_tmp_field(bool group, TABLE *table, + uint convert_blob_length) +{ + return new Field_long(9,maybe_null,name,table,1); +} diff --git a/sql/item_uniq.h b/sql/item_uniq.h index b7e00f9f080..ce43abe3f33 100644 --- a/sql/item_uniq.h +++ b/sql/item_uniq.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -27,9 +26,10 @@ class Item_func_unique_users :public Item_real_func public: Item_func_unique_users(Item *name_arg,int start,int end,List<Item> &list) :Item_real_func(list) {} - double val() { DBUG_ASSERT(fixed == 1); return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } void fix_length_and_dec() { decimals=0; max_length=6; } - void print(String *str) { str->append("0.0", 3); } + void print(String *str) { str->append(STRING_WITH_LEN("0.0")); } + const char *func_name() const { return "unique_users"; } }; @@ -40,21 +40,23 @@ public: :Item_sum_num(item_arg) {} Item_sum_unique_users(THD *thd, Item_sum_unique_users *item) :Item_sum_num(thd, item) {} - double val() { DBUG_ASSERT(fixed == 1); return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } enum Sumfunctype sum_func () const {return UNIQUE_USERS_FUNC;} void clear() {} bool add() { return 0; } void reset_field() {} void update_field() {} - bool fix_fields(THD *thd, 
TABLE_LIST *tlist, Item **ref) + bool fix_fields(THD *thd, Item **ref) { DBUG_ASSERT(fixed == 0); fixed= 1; - return 0; + return FALSE; } Item *copy_or_same(THD* thd) { return new Item_sum_unique_users(thd, this); } - void print(String *str) { str->append("0.0", 3); } + void print(String *str) { str->append(STRING_WITH_LEN("0.0")); } + Field *create_tmp_field(bool group, TABLE *table, uint convert_blob_length); + const char *func_name() const { return "sum_unique_users"; } }; diff --git a/sql/key.cc b/sql/key.cc index 7ddd40de2c9..921f3daa201 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -18,6 +17,7 @@ /* Functions to handle keys and fields in forms */ #include "mysql_priv.h" +#include "sql_trigger.h" /* ** Search after with key field is. If no key starts with field test @@ -38,7 +38,9 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length) /* Test if some key starts as fieldpos */ - for (i=0, key_info=table->key_info ; i < (int) table->keys ; i++, key_info++) + for (i= 0, key_info= table->key_info ; + i < (int) table->s->keys ; + i++, key_info++) { if (key_info->key_part[0].offset == fieldpos) { /* Found key. 
Calc keylength */ @@ -48,7 +50,9 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length) } /* Test if some key contains fieldpos */ - for (i=0, key_info=table->key_info ; i < (int) table->keys ; i++, key_info++) + for (i= 0, key_info= table->key_info ; + i < (int) table->s->keys ; + i++, key_info++) { uint j; KEY_PART_INFO *key_part; @@ -66,94 +70,160 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length) } - /* Copy a key from record to some buffer */ - /* if length == 0 then copy whole key */ +/* + Copy part of a record that forms a key or key prefix to a buffer. + + SYNOPSIS + key_copy() + to_key buffer that will be used as a key + from_record full record to be copied from + key_info descriptor of the index + key_length specifies length of all keyparts that will be copied + + DESCRIPTION + The function takes a complete table record (as e.g. retrieved by + handler::index_read()), and a description of an index on the same table, + and extracts the first key_length bytes of the record which are part of a + key into to_key. If length == 0 then copy all bytes from the record that + form a key. 
+ + RETURN + None +*/ -void key_copy(byte *key,TABLE *table,uint idx,uint key_length) +void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length) { uint length; - KEY *key_info=table->key_info+idx; KEY_PART_INFO *key_part; if (key_length == 0) - key_length=key_info->key_length; - for (key_part=key_info->key_part; - (int) key_length > 0 ; - key_part++) + key_length= key_info->key_length; + for (key_part= key_info->key_part; (int) key_length > 0; key_part++) { if (key_part->null_bit) { - *key++= test(table->record[0][key_part->null_offset] & + *to_key++= test(from_record[key_part->null_offset] & key_part->null_bit); key_length--; } + if (key_part->type == HA_KEYTYPE_BIT) + { + Field_bit *field= (Field_bit *) (key_part->field); + if (field->bit_len) + { + uchar bits= get_rec_bits((uchar*) from_record + + key_part->null_offset + + (key_part->null_bit == 128), + field->bit_ofs, field->bit_len); + *to_key++= bits; + key_length--; + } + } if (key_part->key_part_flag & HA_BLOB_PART) { char *pos; - ulong blob_length=((Field_blob*) key_part->field)->get_length(); - key_length-=2; + ulong blob_length= ((Field_blob*) key_part->field)->get_length(); + key_length-= HA_KEY_BLOB_LENGTH; ((Field_blob*) key_part->field)->get_ptr(&pos); - length=min(key_length,key_part->length); - set_if_smaller(blob_length,length); - int2store(key,(uint) blob_length); - key+=2; // Skip length info - memcpy(key,pos,blob_length); + length=min(key_length, key_part->length); + set_if_smaller(blob_length, length); + int2store(to_key, (uint) blob_length); + to_key+= HA_KEY_BLOB_LENGTH; // Skip length info + memcpy(to_key, pos, blob_length); + } + else if (key_part->key_part_flag & HA_VAR_LENGTH_PART) + { + key_length-= HA_KEY_BLOB_LENGTH; + length= min(key_length, key_part->length); + key_part->field->get_key_image((char *) to_key, length, Field::itRAW); + to_key+= HA_KEY_BLOB_LENGTH; } else { - length=min(key_length,key_part->length); - 
memcpy(key,table->record[0]+key_part->offset,(size_t) length); + length= min(key_length, key_part->length); + memcpy(to_key, from_record + key_part->offset, (size_t) length); } - key+=length; - key_length-=length; + to_key+= length; + key_length-= length; } -} /* key_copy */ +} - /* restore a key from some buffer to record */ +/* + Restore a key from some buffer to record. + + SYNOPSIS + key_restore() + to_record record buffer where the key will be restored to + from_key buffer that contains a key + key_info descriptor of the index + key_length specifies length of all keyparts that will be restored -void key_restore(TABLE *table,byte *key,uint idx,uint key_length) + DESCRIPTION + This function converts a key into record format. It can be used in cases + when we want to return a key as a result row. + + RETURN + None +*/ + +void key_restore(byte *to_record, byte *from_key, KEY *key_info, + uint key_length) { uint length; - KEY *key_info=table->key_info+idx; KEY_PART_INFO *key_part; if (key_length == 0) { - if (idx == (uint) -1) - return; - key_length=key_info->key_length; + key_length= key_info->key_length; } - for (key_part=key_info->key_part; - (int) key_length > 0 ; - key_part++) + for (key_part= key_info->key_part ; (int) key_length > 0 ; key_part++) { if (key_part->null_bit) { - if (*key++) - table->record[0][key_part->null_offset]|= key_part->null_bit; + if (*from_key++) + to_record[key_part->null_offset]|= key_part->null_bit; else - table->record[0][key_part->null_offset]&= ~key_part->null_bit; + to_record[key_part->null_offset]&= ~key_part->null_bit; key_length--; } + if (key_part->type == HA_KEYTYPE_BIT) + { + Field_bit *field= (Field_bit *) (key_part->field); + if (field->bit_len) + { + uchar bits= *(from_key + key_part->length - + field->pack_length_in_rec() - 1); + set_rec_bits(bits, to_record + key_part->null_offset + + (key_part->null_bit == 128), + field->bit_ofs, field->bit_len); + } + } if (key_part->key_part_flag & HA_BLOB_PART) { - uint 
blob_length=uint2korr(key); - key+=2; - key_length-=2; + uint blob_length= uint2korr(from_key); + from_key+= HA_KEY_BLOB_LENGTH; + key_length-= HA_KEY_BLOB_LENGTH; ((Field_blob*) key_part->field)->set_ptr((ulong) blob_length, - (char*) key); - length=key_part->length; + (char*) from_key); + length= key_part->length; + } + else if (key_part->key_part_flag & HA_VAR_LENGTH_PART) + { + key_length-= HA_KEY_BLOB_LENGTH; + length= min(key_length, key_part->length); + key_part->field->set_key_image((char *) from_key, length); + from_key+= HA_KEY_BLOB_LENGTH; } else { - length=min(key_length,key_part->length); - memcpy(table->record[0]+key_part->offset,key,(size_t) length); + length= min(key_length, key_part->length); + memcpy(to_record + key_part->offset, from_key, (size_t) length); } - key+=length; - key_length-=length; + from_key+= length; + key_length-= length; } -} /* key_restore */ +} /* @@ -179,54 +249,54 @@ void key_restore(TABLE *table,byte *key,uint idx,uint key_length) bool key_cmp_if_same(TABLE *table,const byte *key,uint idx,uint key_length) { - uint length; + uint store_length; KEY_PART_INFO *key_part; + const byte *key_end= key + key_length;; for (key_part=table->key_info[idx].key_part; - (int) key_length > 0; - key_part++, key+=length, key_length-=length) + key < key_end ; + key_part++, key+= store_length) { + uint length; + store_length= key_part->store_length; + if (key_part->null_bit) { - key_length--; if (*key != test(table->record[0][key_part->null_offset] & key_part->null_bit)) return 1; if (*key) - { - length=key_part->store_length; continue; - } key++; + store_length--; } - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH)) + if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART | + HA_BIT_PART)) { - if (key_part->field->key_cmp(key, key_part->length+ HA_KEY_BLOB_LENGTH)) + if (key_part->field->key_cmp(key, key_part->length)) return 1; - length=key_part->length+HA_KEY_BLOB_LENGTH; + continue; } - else + length= min((uint) 
(key_end-key), store_length); + if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+ + FIELDFLAG_PACK))) { - length=min(key_length,key_part->length); - if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+ - FIELDFLAG_PACK))) + CHARSET_INFO *cs= key_part->field->charset(); + uint char_length= key_part->length / cs->mbmaxlen; + const byte *pos= table->record[0] + key_part->offset; + if (length > char_length) { - CHARSET_INFO *cs= key_part->field->charset(); - uint char_length= key_part->length / cs->mbmaxlen; - const byte *pos= table->record[0] + key_part->offset; - if (length > char_length) - { - char_length= my_charpos(cs, pos, pos + length, char_length); - set_if_smaller(char_length, length); - } - if (cs->coll->strnncollsp(cs, - (const uchar*) key, length, - (const uchar*) pos, char_length)) - return 1; + char_length= my_charpos(cs, pos, pos + length, char_length); + set_if_smaller(char_length, length); } - else if (memcmp(key,table->record[0]+key_part->offset,length)) - return 1; + if (cs->coll->strnncollsp(cs, + (const uchar*) key, length, + (const uchar*) pos, char_length, 0)) + return 1; + continue; } + if (memcmp(key,table->record[0]+key_part->offset,length)) + return 1; } return 0; } @@ -253,7 +323,7 @@ void key_unpack(String *to,TABLE *table,uint idx) { if (table->record[0][key_part->null_offset] & key_part->null_bit) { - to->append("NULL", 4); + to->append(STRING_WITH_LEN("NULL")); continue; } } @@ -265,19 +335,31 @@ void key_unpack(String *to,TABLE *table,uint idx) to->append(tmp); } else - to->append("???", 3); + to->append(STRING_WITH_LEN("???")); } DBUG_VOID_RETURN; } /* - Return 1 if any field in a list is part of key or the key uses a field - that is automaticly updated (like a timestamp) + Check if key uses field that is listed in passed field list or is + automatically updated (like a timestamp) or can be updated by before + update trigger defined on the table. 
+ + SYNOPSIS + is_key_used() + table TABLE object with which keys and fields are associated. + idx Key to be checked. + fields List of fields to be checked. + + RETURN VALUE + TRUE Key uses field which meets one the above conditions + FALSE Otherwise */ -bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields) +bool is_key_used(TABLE *table, uint idx, List<Item> &fields) { + Table_triggers_list *triggers= table->triggers; List_iterator_fast<Item> f(fields); KEY_PART_INFO *key_part,*key_part_end; for (key_part=table->key_info[idx].key_part,key_part_end=key_part+ @@ -296,15 +378,18 @@ bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields) if (key_part->field->eq(field->field)) return 1; } + if (triggers && + triggers->is_updated_in_before_update_triggers(key_part->field)) + return 1; } /* If table handler has primary key as part of the index, check that primary key is not updated */ - if (idx != table->primary_key && table->primary_key < MAX_KEY && + if (idx != table->s->primary_key && table->s->primary_key < MAX_KEY && (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX)) - return check_if_key_used(table, table->primary_key, fields); + return is_key_used(table, table->s->primary_key, fields); return 0; } diff --git a/sql/lex.h b/sql/lex.h index 325d052de90..352d80da5c6 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -19,11 +18,11 @@ #include "lex_symbol.h" -/* We don't want to include sql_yacc.h into gen_lex_hash */ SYM_GROUP sym_group_common= {"", ""}; SYM_GROUP sym_group_geom= {"Spatial extentions", "HAVE_SPATIAL"}; SYM_GROUP sym_group_rtree= {"RTree keys", "HAVE_RTREE_KEYS"}; +/* We don't want to include sql_yacc.h into gen_lex_hash */ #ifdef NO_YACC_SYMBOLS #define SYM_OR_NULL(A) 0 #else @@ -33,10 +32,10 @@ SYM_GROUP sym_group_rtree= {"RTree keys", "HAVE_RTREE_KEYS"}; #define SYM(A) SYM_OR_NULL(A),0,0,&sym_group_common #define F_SYM(A) SYM_OR_NULL(A) -#define CREATE_FUNC(A) (void *)(SYM_OR_NULL(A)), &sym_group_common +#define CREATE_FUNC(A) (void (*)())(SYM_OR_NULL(A)), &sym_group_common #ifdef HAVE_SPATIAL -#define CREATE_FUNC_GEOM(A) (void *)(SYM_OR_NULL(A)), &sym_group_geom +#define CREATE_FUNC_GEOM(A) (void (*)())(SYM_OR_NULL(A)), &sym_group_geom #else #define CREATE_FUNC_GEOM(A) 0, &sym_group_geom #endif @@ -48,7 +47,7 @@ SYM_GROUP sym_group_rtree= {"RTree keys", "HAVE_RTREE_KEYS"}; */ static SYMBOL symbols[] = { - { "&&", SYM(AND_SYM)}, + { "&&", SYM(AND_AND_SYM)}, { "<", SYM(LT)}, { "<=", SYM(LE)}, { "<>", SYM(NE)}, @@ -65,6 +64,7 @@ static SYMBOL symbols[] = { { "AGAINST", SYM(AGAINST)}, { "AGGREGATE", SYM(AGGREGATE_SYM)}, { "ALL", SYM(ALL)}, + { "ALGORITHM", SYM(ALGORITHM_SYM)}, { "ALTER", SYM(ALTER)}, { "ANALYZE", SYM(ANALYZE_SYM)}, { "AND", SYM(AND_SYM)}, @@ -72,6 +72,7 @@ static SYMBOL symbols[] = { { "AS", SYM(AS)}, { "ASC", SYM(ASC)}, { "ASCII", SYM(ASCII_SYM)}, + { "ASENSITIVE", SYM(ASENSITIVE_SYM)}, { "AUTO_INCREMENT", SYM(AUTO_INC)}, { "AVG", SYM(AVG_SYM)}, { "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH)}, @@ -93,8 +94,11 @@ static SYMBOL symbols[] = { { "BY", SYM(BY)}, { "BYTE", SYM(BYTE_SYM)}, { "CACHE", SYM(CACHE_SYM)}, + { "CALL", SYM(CALL_SYM)}, { "CASCADE", SYM(CASCADE)}, + { "CASCADED", SYM(CASCADED)}, { "CASE", SYM(CASE_SYM)}, + 
{ "CHAIN", SYM(CHAIN_SYM)}, { "CHANGE", SYM(CHANGE)}, { "CHANGED", SYM(CHANGED)}, { "CHAR", SYM(CHAR_SYM)}, @@ -105,6 +109,7 @@ static SYMBOL symbols[] = { { "CIPHER", SYM(CIPHER_SYM)}, { "CLIENT", SYM(CLIENT_SYM)}, { "CLOSE", SYM(CLOSE_SYM)}, + { "CODE", SYM(CODE_SYM)}, { "COLLATE", SYM(COLLATE_SYM)}, { "COLLATION", SYM(COLLATION_SYM)}, { "COLUMN", SYM(COLUMN_SYM)}, @@ -112,10 +117,15 @@ static SYMBOL symbols[] = { { "COMMENT", SYM(COMMENT_SYM)}, { "COMMIT", SYM(COMMIT_SYM)}, { "COMMITTED", SYM(COMMITTED_SYM)}, + { "COMPACT", SYM(COMPACT_SYM)}, { "COMPRESSED", SYM(COMPRESSED_SYM)}, { "CONCURRENT", SYM(CONCURRENT)}, + { "CONDITION", SYM(CONDITION_SYM)}, + { "CONNECTION", SYM(CONNECTION_SYM)}, { "CONSISTENT", SYM(CONSISTENT_SYM)}, { "CONSTRAINT", SYM(CONSTRAINT)}, + { "CONTAINS", SYM(CONTAINS_SYM)}, + { "CONTINUE", SYM(CONTINUE_SYM)}, { "CONVERT", SYM(CONVERT_SYM)}, { "CREATE", SYM(CREATE)}, { "CROSS", SYM(CROSS)}, @@ -124,6 +134,7 @@ static SYMBOL symbols[] = { { "CURRENT_TIME", SYM(CURTIME)}, { "CURRENT_TIMESTAMP", SYM(NOW_SYM)}, { "CURRENT_USER", SYM(CURRENT_USER)}, + { "CURSOR", SYM(CURSOR_SYM)}, { "DATA", SYM(DATA_SYM)}, { "DATABASE", SYM(DATABASE)}, { "DATABASES", SYM(DATABASES)}, @@ -137,13 +148,16 @@ static SYMBOL symbols[] = { { "DEALLOCATE", SYM(DEALLOCATE_SYM)}, { "DEC", SYM(DECIMAL_SYM)}, { "DECIMAL", SYM(DECIMAL_SYM)}, + { "DECLARE", SYM(DECLARE_SYM)}, { "DEFAULT", SYM(DEFAULT)}, + { "DEFINER", SYM(DEFINER_SYM)}, { "DELAYED", SYM(DELAYED_SYM)}, { "DELAY_KEY_WRITE", SYM(DELAY_KEY_WRITE_SYM)}, { "DELETE", SYM(DELETE_SYM)}, { "DESC", SYM(DESC)}, { "DESCRIBE", SYM(DESCRIBE)}, { "DES_KEY_FILE", SYM(DES_KEY_FILE)}, + { "DETERMINISTIC", SYM(DETERMINISTIC_SYM)}, { "DIRECTORY", SYM(DIRECTORY_SYM)}, { "DISABLE", SYM(DISABLE_SYM)}, { "DISCARD", SYM(DISCARD)}, @@ -157,7 +171,9 @@ static SYMBOL symbols[] = { { "DUMPFILE", SYM(DUMPFILE)}, { "DUPLICATE", SYM(DUPLICATE_SYM)}, { "DYNAMIC", SYM(DYNAMIC_SYM)}, + { "EACH", SYM(EACH_SYM)}, { "ELSE", SYM(ELSE)}, + { 
"ELSEIF", SYM(ELSEIF_SYM)}, { "ENABLE", SYM(ENABLE_SYM)}, { "ENCLOSED", SYM(ENCLOSED)}, { "END", SYM(END)}, @@ -170,11 +186,13 @@ static SYMBOL symbols[] = { { "EVENTS", SYM(EVENTS_SYM)}, { "EXECUTE", SYM(EXECUTE_SYM)}, { "EXISTS", SYM(EXISTS)}, + { "EXIT", SYM(EXIT_SYM)}, { "EXPANSION", SYM(EXPANSION_SYM)}, { "EXPLAIN", SYM(DESCRIBE)}, { "EXTENDED", SYM(EXTENDED_SYM)}, { "FALSE", SYM(FALSE_SYM)}, { "FAST", SYM(FAST_SYM)}, + { "FETCH", SYM(FETCH_SYM)}, { "FIELDS", SYM(COLUMNS)}, { "FILE", SYM(FILE_SYM)}, { "FIRST", SYM(FIRST_SYM)}, @@ -186,10 +204,12 @@ static SYMBOL symbols[] = { { "FOR", SYM(FOR_SYM)}, { "FORCE", SYM(FORCE_SYM)}, { "FOREIGN", SYM(FOREIGN)}, + { "FOUND", SYM(FOUND_SYM)}, + { "FRAC_SECOND", SYM(FRAC_SECOND_SYM)}, { "FROM", SYM(FROM)}, { "FULL", SYM(FULL)}, { "FULLTEXT", SYM(FULLTEXT_SYM)}, - { "FUNCTION", SYM(UDF_SYM)}, + { "FUNCTION", SYM(FUNCTION_SYM)}, { "GEOMETRY", SYM(GEOMETRY_SYM)}, { "GEOMETRYCOLLECTION",SYM(GEOMETRYCOLLECTION)}, { "GET_FORMAT", SYM(GET_FORMAT)}, @@ -218,6 +238,8 @@ static SYMBOL symbols[] = { { "INNER", SYM(INNER_SYM)}, { "INNOBASE", SYM(INNOBASE_SYM)}, { "INNODB", SYM(INNOBASE_SYM)}, + { "INOUT", SYM(INOUT_SYM)}, + { "INSENSITIVE", SYM(INSENSITIVE_SYM)}, { "INSERT", SYM(INSERT)}, { "INSERT_METHOD", SYM(INSERT_METHOD)}, { "INT", SYM(INT_SYM)}, @@ -233,12 +255,16 @@ static SYMBOL symbols[] = { { "IS", SYM(IS)}, { "ISOLATION", SYM(ISOLATION)}, { "ISSUER", SYM(ISSUER_SYM)}, + { "ITERATE", SYM(ITERATE_SYM)}, + { "INVOKER", SYM(INVOKER_SYM)}, { "JOIN", SYM(JOIN_SYM)}, { "KEY", SYM(KEY_SYM)}, { "KEYS", SYM(KEYS)}, { "KILL", SYM(KILL_SYM)}, + { "LANGUAGE", SYM(LANGUAGE_SYM)}, { "LAST", SYM(LAST_SYM)}, { "LEADING", SYM(LEADING)}, + { "LEAVE", SYM(LEAVE_SYM)}, { "LEAVES", SYM(LEAVES)}, { "LEFT", SYM(LEFT)}, { "LEVEL", SYM(LEVEL_SYM)}, @@ -256,6 +282,7 @@ static SYMBOL symbols[] = { { "LONG", SYM(LONG_SYM)}, { "LONGBLOB", SYM(LONGBLOB)}, { "LONGTEXT", SYM(LONGTEXT)}, + { "LOOP", SYM(LOOP_SYM)}, { "LOW_PRIORITY", SYM(LOW_PRIORITY)}, { 
"MASTER", SYM(MASTER_SYM)}, { "MASTER_CONNECT_RETRY", SYM(MASTER_CONNECT_RETRY_SYM)}, @@ -277,23 +304,29 @@ static SYMBOL symbols[] = { { "MAX_QUERIES_PER_HOUR", SYM(MAX_QUERIES_PER_HOUR)}, { "MAX_ROWS", SYM(MAX_ROWS)}, { "MAX_UPDATES_PER_HOUR", SYM(MAX_UPDATES_PER_HOUR)}, + { "MAX_USER_CONNECTIONS", SYM(MAX_USER_CONNECTIONS_SYM)}, { "MEDIUM", SYM(MEDIUM_SYM)}, { "MEDIUMBLOB", SYM(MEDIUMBLOB)}, { "MEDIUMINT", SYM(MEDIUMINT)}, { "MEDIUMTEXT", SYM(MEDIUMTEXT)}, + { "MERGE", SYM(MERGE_SYM)}, { "MICROSECOND", SYM(MICROSECOND_SYM)}, { "MIDDLEINT", SYM(MEDIUMINT)}, /* For powerbuilder */ + { "MIGRATE", SYM(MIGRATE_SYM)}, { "MINUTE", SYM(MINUTE_SYM)}, { "MINUTE_MICROSECOND", SYM(MINUTE_MICROSECOND_SYM)}, { "MINUTE_SECOND", SYM(MINUTE_SECOND_SYM)}, { "MIN_ROWS", SYM(MIN_ROWS)}, { "MOD", SYM(MOD_SYM)}, { "MODE", SYM(MODE_SYM)}, + { "MODIFIES", SYM(MODIFIES_SYM)}, { "MODIFY", SYM(MODIFY_SYM)}, { "MONTH", SYM(MONTH_SYM)}, { "MULTILINESTRING", SYM(MULTILINESTRING)}, { "MULTIPOINT", SYM(MULTIPOINT)}, { "MULTIPOLYGON", SYM(MULTIPOLYGON)}, + { "MUTEX", SYM(MUTEX_SYM)}, + { "NAME", SYM(NAME_SYM)}, { "NAMES", SYM(NAMES_SYM)}, { "NATIONAL", SYM(NATIONAL_SYM)}, { "NATURAL", SYM(NATURAL)}, @@ -304,7 +337,7 @@ static SYMBOL symbols[] = { { "NEXT", SYM(NEXT_SYM)}, { "NO", SYM(NO_SYM)}, { "NONE", SYM(NONE_SYM)}, - { "NOT", SYM(NOT)}, + { "NOT", SYM(NOT_SYM)}, { "NO_WRITE_TO_BINLOG", SYM(NO_WRITE_TO_BINLOG)}, { "NULL", SYM(NULL_SYM)}, { "NUMERIC", SYM(NUMERIC_SYM)}, @@ -312,6 +345,7 @@ static SYMBOL symbols[] = { { "OFFSET", SYM(OFFSET_SYM)}, { "OLD_PASSWORD", SYM(OLD_PASSWORD)}, { "ON", SYM(ON)}, + { "ONE", SYM(ONE_SYM)}, { "ONE_SHOT", SYM(ONE_SHOT_SYM)}, { "OPEN", SYM(OPEN_SYM)}, { "OPTIMIZE", SYM(OPTIMIZE)}, @@ -319,11 +353,13 @@ static SYMBOL symbols[] = { { "OPTIONALLY", SYM(OPTIONALLY)}, { "OR", SYM(OR_SYM)}, { "ORDER", SYM(ORDER_SYM)}, + { "OUT", SYM(OUT_SYM)}, { "OUTER", SYM(OUTER)}, { "OUTFILE", SYM(OUTFILE)}, { "PACK_KEYS", SYM(PACK_KEYS_SYM)}, { "PARTIAL", SYM(PARTIAL)}, { 
"PASSWORD", SYM(PASSWORD)}, + { "PHASE", SYM(PHASE_SYM)}, { "POINT", SYM(POINT_SYM)}, { "POLYGON", SYM(POLYGON)}, { "PRECISION", SYM(PRECISION)}, @@ -335,6 +371,7 @@ static SYMBOL symbols[] = { { "PROCESS" , SYM(PROCESS)}, { "PROCESSLIST", SYM(PROCESSLIST_SYM)}, { "PURGE", SYM(PURGE)}, + { "QUARTER", SYM(QUARTER_SYM)}, { "QUERY", SYM(QUERY_SYM)}, { "QUICK", SYM(QUICK)}, { "RAID0", SYM(RAID_0_SYM)}, @@ -342,36 +379,48 @@ static SYMBOL symbols[] = { { "RAID_CHUNKSIZE", SYM(RAID_CHUNKSIZE)}, { "RAID_TYPE", SYM(RAID_TYPE)}, { "READ", SYM(READ_SYM)}, + { "READS", SYM(READS_SYM)}, { "REAL", SYM(REAL)}, + { "RECOVER", SYM(RECOVER_SYM)}, + { "REDUNDANT", SYM(REDUNDANT_SYM)}, { "REFERENCES", SYM(REFERENCES)}, { "REGEXP", SYM(REGEXP)}, { "RELAY_LOG_FILE", SYM(RELAY_LOG_FILE_SYM)}, { "RELAY_LOG_POS", SYM(RELAY_LOG_POS_SYM)}, { "RELAY_THREAD", SYM(RELAY_THREAD)}, + { "RELEASE", SYM(RELEASE_SYM)}, { "RELOAD", SYM(RELOAD)}, { "RENAME", SYM(RENAME)}, { "REPAIR", SYM(REPAIR)}, { "REPEATABLE", SYM(REPEATABLE_SYM)}, { "REPLACE", SYM(REPLACE)}, { "REPLICATION", SYM(REPLICATION)}, + { "REPEAT", SYM(REPEAT_SYM)}, { "REQUIRE", SYM(REQUIRE_SYM)}, { "RESET", SYM(RESET_SYM)}, { "RESTORE", SYM(RESTORE_SYM)}, { "RESTRICT", SYM(RESTRICT)}, - { "RETURNS", SYM(UDF_RETURNS_SYM)}, + { "RESUME", SYM(RESUME_SYM)}, + { "RETURN", SYM(RETURN_SYM)}, + { "RETURNS", SYM(RETURNS_SYM)}, { "REVOKE", SYM(REVOKE)}, { "RIGHT", SYM(RIGHT)}, { "RLIKE", SYM(REGEXP)}, /* Like in mSQL2 */ { "ROLLBACK", SYM(ROLLBACK_SYM)}, { "ROLLUP", SYM(ROLLUP_SYM)}, + { "ROUTINE", SYM(ROUTINE_SYM)}, { "ROW", SYM(ROW_SYM)}, { "ROWS", SYM(ROWS_SYM)}, { "ROW_FORMAT", SYM(ROW_FORMAT_SYM)}, { "RTREE", SYM(RTREE_SYM)}, { "SAVEPOINT", SYM(SAVEPOINT_SYM)}, + { "SCHEMA", SYM(DATABASE)}, + { "SCHEMAS", SYM(DATABASES)}, { "SECOND", SYM(SECOND_SYM)}, { "SECOND_MICROSECOND", SYM(SECOND_MICROSECOND_SYM)}, + { "SECURITY", SYM(SECURITY_SYM)}, { "SELECT", SYM(SELECT_SYM)}, + { "SENSITIVE", SYM(SENSITIVE_SYM)}, { "SEPARATOR", SYM(SEPARATOR_SYM)}, 
{ "SERIAL", SYM(SERIAL_SYM)}, { "SERIALIZABLE", SYM(SERIALIZABLE_SYM)}, @@ -389,6 +438,11 @@ static SYMBOL symbols[] = { { "SONAME", SYM(UDF_SONAME_SYM)}, { "SOUNDS", SYM(SOUNDS_SYM)}, { "SPATIAL", SYM(SPATIAL_SYM)}, + { "SPECIFIC", SYM(SPECIFIC_SYM)}, + { "SQL", SYM(SQL_SYM)}, + { "SQLEXCEPTION", SYM(SQLEXCEPTION_SYM)}, + { "SQLSTATE", SYM(SQLSTATE_SYM)}, + { "SQLWARNING", SYM(SQLWARNING_SYM)}, { "SQL_BIG_RESULT", SYM(SQL_BIG_RESULT)}, { "SQL_BUFFER_RESULT", SYM(SQL_BUFFER_RESULT)}, { "SQL_CACHE", SYM(SQL_CACHE_SYM)}, @@ -396,6 +450,15 @@ static SYMBOL symbols[] = { { "SQL_NO_CACHE", SYM(SQL_NO_CACHE_SYM)}, { "SQL_SMALL_RESULT", SYM(SQL_SMALL_RESULT)}, { "SQL_THREAD", SYM(SQL_THREAD)}, + { "SQL_TSI_FRAC_SECOND", SYM(FRAC_SECOND_SYM)}, + { "SQL_TSI_SECOND", SYM(SECOND_SYM)}, + { "SQL_TSI_MINUTE", SYM(MINUTE_SYM)}, + { "SQL_TSI_HOUR", SYM(HOUR_SYM)}, + { "SQL_TSI_DAY", SYM(DAY_SYM)}, + { "SQL_TSI_WEEK", SYM(WEEK_SYM)}, + { "SQL_TSI_MONTH", SYM(MONTH_SYM)}, + { "SQL_TSI_QUARTER", SYM(QUARTER_SYM)}, + { "SQL_TSI_YEAR", SYM(YEAR_SYM)}, { "SSL", SYM(SSL_SYM)}, { "START", SYM(START_SYM)}, { "STARTING", SYM(STARTING)}, @@ -407,33 +470,43 @@ static SYMBOL symbols[] = { { "STRIPED", SYM(RAID_STRIPED_SYM)}, { "SUBJECT", SYM(SUBJECT_SYM)}, { "SUPER", SYM(SUPER_SYM)}, + { "SUSPEND", SYM(SUSPEND_SYM)}, { "TABLE", SYM(TABLE_SYM)}, { "TABLES", SYM(TABLES)}, { "TABLESPACE", SYM(TABLESPACE)}, { "TEMPORARY", SYM(TEMPORARY)}, + { "TEMPTABLE", SYM(TEMPTABLE_SYM)}, { "TERMINATED", SYM(TERMINATED)}, { "TEXT", SYM(TEXT_SYM)}, { "THEN", SYM(THEN_SYM)}, { "TIME", SYM(TIME_SYM)}, { "TIMESTAMP", SYM(TIMESTAMP)}, + { "TIMESTAMPADD", SYM(TIMESTAMP_ADD)}, + { "TIMESTAMPDIFF", SYM(TIMESTAMP_DIFF)}, { "TINYBLOB", SYM(TINYBLOB)}, { "TINYINT", SYM(TINYINT)}, { "TINYTEXT", SYM(TINYTEXT)}, { "TO", SYM(TO_SYM)}, { "TRAILING", SYM(TRAILING)}, { "TRANSACTION", SYM(TRANSACTION_SYM)}, + { "TRIGGER", SYM(TRIGGER_SYM)}, + { "TRIGGERS", SYM(TRIGGERS_SYM)}, { "TRUE", SYM(TRUE_SYM)}, { "TRUNCATE", 
SYM(TRUNCATE_SYM)}, { "TYPE", SYM(TYPE_SYM)}, { "TYPES", SYM(TYPES_SYM)}, { "UNCOMMITTED", SYM(UNCOMMITTED_SYM)}, + { "UNDEFINED", SYM(UNDEFINED_SYM)}, + { "UNDO", SYM(UNDO_SYM)}, { "UNICODE", SYM(UNICODE_SYM)}, { "UNION", SYM(UNION_SYM)}, { "UNIQUE", SYM(UNIQUE_SYM)}, + { "UNKNOWN", SYM(UNKNOWN_SYM)}, { "UNLOCK", SYM(UNLOCK_SYM)}, { "UNSIGNED", SYM(UNSIGNED)}, { "UNTIL", SYM(UNTIL_SYM)}, { "UPDATE", SYM(UPDATE_SYM)}, + { "UPGRADE", SYM(UPGRADE_SYM)}, { "USAGE", SYM(USAGE)}, { "USE", SYM(USE_SYM)}, { "USER", SYM(USER)}, @@ -451,17 +524,21 @@ static SYMBOL symbols[] = { { "VARIABLES", SYM(VARIABLES)}, { "VARYING", SYM(VARYING)}, { "WARNINGS", SYM(WARNINGS)}, + { "WEEK", SYM(WEEK_SYM)}, { "WHEN", SYM(WHEN_SYM)}, { "WHERE", SYM(WHERE)}, + { "WHILE", SYM(WHILE_SYM)}, + { "VIEW", SYM(VIEW_SYM)}, { "WITH", SYM(WITH)}, { "WORK", SYM(WORK_SYM)}, { "WRITE", SYM(WRITE_SYM)}, { "X509", SYM(X509_SYM)}, { "XOR", SYM(XOR)}, + { "XA", SYM(XA_SYM)}, { "YEAR", SYM(YEAR_SYM)}, { "YEAR_MONTH", SYM(YEAR_MONTH_SYM)}, { "ZEROFILL", SYM(ZEROFILL)}, - { "||", SYM(OR_OR_CONCAT)} + { "||", SYM(OR_OR_SYM)} }; @@ -499,7 +576,6 @@ static SYMBOL sql_functions[] = { { "CONCAT", SYM(CONCAT)}, { "CONCAT_WS", SYM(CONCAT_WS)}, { "CONNECTION_ID", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_connection_id)}, - { "CONTAINS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_contains)}, { "CONV", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_conv)}, { "CONVERT_TZ", SYM(CONVERT_TZ_SYM)}, { "COUNT", SYM(COUNT_SYM)}, @@ -614,6 +690,7 @@ static SYMBOL sql_functions[] = { { "MULTIPOINTFROMWKB",SYM(GEOMFROMWKB)}, { "MULTIPOLYGONFROMTEXT",SYM(MPOLYFROMTEXT)}, { "MULTIPOLYGONFROMWKB",SYM(GEOMFROMWKB)}, + { "NAME_CONST", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_name_const)}, { "NOW", SYM(NOW_SYM)}, { "NULLIF", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_nullif)}, { "NUMGEOMETRIES", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_numgeometries)}, @@ -636,14 +713,13 @@ static SYMBOL sql_functions[] = { { "POSITION", 
SYM(POSITION_SYM)}, { "POW", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_pow)}, { "POWER", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_pow)}, - { "QUARTER", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_quarter)}, { "QUOTE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_quote)}, { "RADIANS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_radians)}, { "RAND", SYM(RAND)}, { "RELEASE_LOCK", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_release_lock)}, - { "REPEAT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_repeat)}, { "REVERSE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_reverse)}, { "ROUND", SYM(ROUND)}, + { "ROW_COUNT", SYM(ROW_COUNT_SYM)}, { "RPAD", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_rpad)}, { "RTRIM", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_rtrim)}, { "SEC_TO_TIME", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sec_to_time)}, @@ -653,6 +729,7 @@ static SYMBOL sql_functions[] = { { "SIN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sin)}, { "SHA", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sha)}, { "SHA1", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sha)}, + { "SLEEP", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sleep)}, { "SOUNDEX", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_soundex)}, { "SPACE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_space)}, { "SQRT", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sqrt)}, @@ -660,6 +737,8 @@ static SYMBOL sql_functions[] = { { "STARTPOINT", F_SYM(FUNC_ARG1),0,CREATE_FUNC_GEOM(create_func_startpoint)}, { "STD", SYM(STD_SYM)}, { "STDDEV", SYM(STD_SYM)}, + { "STDDEV_POP", SYM(STD_SYM)}, + { "STDDEV_SAMP", SYM(STDDEV_SAMP_SYM)}, { "STR_TO_DATE", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_str_to_date)}, { "STRCMP", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_strcmp)}, { "SUBSTR", SYM(SUBSTRING)}, @@ -667,7 +746,7 @@ static SYMBOL sql_functions[] = { { "SUBSTRING_INDEX", SYM(SUBSTRING_INDEX)}, { "SUBTIME", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_subtime)}, { "SUM", SYM(SUM_SYM)}, - { "SYSDATE", SYM(NOW_SYM)}, + { "SYSDATE", SYM(SYSDATE)}, { 
"SYSTEM_USER", SYM(USER)}, { "TAN", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_tan)}, { "TIME_FORMAT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_time_format)}, @@ -685,8 +764,9 @@ static SYMBOL sql_functions[] = { { "UPPER", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_ucase)}, { "UUID", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_uuid)}, { "VARIANCE", SYM(VARIANCE_SYM)}, + { "VAR_POP", SYM(VARIANCE_SYM)}, + { "VAR_SAMP", SYM(VAR_SAMP_SYM)}, { "VERSION", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_version)}, - { "WEEK", SYM(WEEK_SYM)}, { "WEEKDAY", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_weekday)}, { "WEEKOFYEAR", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_weekofyear)}, { "WITHIN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_within)}, diff --git a/sql/lex_symbol.h b/sql/lex_symbol.h index 3074a489b6a..c87cdb4ec43 100644 --- a/sql/lex_symbol.h +++ b/sql/lex_symbol.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001, 2004 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -26,7 +25,7 @@ typedef struct st_symbol { const char *name; uint tok; uint length; - void *create_func; + void (*create_func)(); struct st_sym_group *group; } SYMBOL; diff --git a/sql/lock.cc b/sql/lock.cc index 3b2b2857f65..bf1512b754c 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -81,7 +80,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table,uint count, static void reset_lock_data(MYSQL_LOCK *sql_lock); static int lock_external(THD *thd, TABLE **table,uint count); static int unlock_external(THD *thd, TABLE **table,uint count); -static void print_lock_error(int error); +static void print_lock_error(int error, const char *); /* Lock tables. @@ -94,18 +93,33 @@ static void print_lock_error(int error); flags Options: MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK Ignore a global read lock MYSQL_LOCK_IGNORE_FLUSH Ignore a flush tables. + MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN Instead of reopening altered + or dropped tables by itself, + mysql_lock_tables() should + notify upper level and rely + on caller doing this. + need_reopen Out parameter, TRUE if some tables were altered + or deleted and should be reopened by caller. RETURN A lock structure pointer on success. - NULL on error. + NULL on error or if some tables should be reopen. 
*/ -MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags) +/* Map the return value of thr_lock to an error from errmsg.txt */ +static int thr_lock_errno_to_mysql[]= +{ 0, 1, ER_LOCK_WAIT_TIMEOUT, ER_LOCK_DEADLOCK }; + +MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, + uint flags, bool *need_reopen) { MYSQL_LOCK *sql_lock; TABLE *write_lock_used; + int rc; DBUG_ENTER("mysql_lock_tables"); + *need_reopen= FALSE; + for (;;) { if (! (sql_lock= get_lock_data(thd, tables, count, GET_LOCK_STORE_LOCKS, @@ -143,7 +157,6 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags) reset_lock_data(sql_lock); my_free((gptr) sql_lock,MYF(0)); sql_lock=0; - thd->proc_info=0; break; } thd->proc_info="Table lock"; @@ -152,11 +165,21 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags) memcpy(sql_lock->locks + sql_lock->lock_count, sql_lock->locks, sql_lock->lock_count * sizeof(*sql_lock->locks)); /* Lock on the copied half of the lock data array. 
*/ - if (thr_multi_lock(sql_lock->locks + sql_lock->lock_count, - sql_lock->lock_count)) + rc= thr_lock_errno_to_mysql[(int) thr_multi_lock(sql_lock->locks + + sql_lock->lock_count, + sql_lock->lock_count, + thd->lock_id)]; + if (rc > 1) /* a timeout or a deadlock */ + { + my_error(rc, MYF(0)); + my_free((gptr) sql_lock,MYF(0)); + sql_lock= 0; + break; + } + else if (rc == 1) /* aborted */ { thd->some_tables_deleted=1; // Try again - sql_lock->lock_count=0; // Locks are alread freed + sql_lock->lock_count= 0; // Locks are already freed } else if (!thd->some_tables_deleted || (flags & MYSQL_LOCK_IGNORE_FLUSH)) { @@ -177,13 +200,18 @@ MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **tables, uint count, uint flags) thd->locked=0; retry: sql_lock=0; + if (flags & MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN) + { + *need_reopen= TRUE; + break; + } if (wait_for_tables(thd)) break; // Couldn't open tables } thd->proc_info=0; if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); if (sql_lock) { mysql_unlock_tables(thd,sql_lock); @@ -213,12 +241,12 @@ static int lock_external(THD *thd, TABLE **tables, uint count) if ((error=(*tables)->file->external_lock(thd,lock_type))) { + print_lock_error(error, (*tables)->file->table_type()); for (; i-- ; tables--) { (*tables)->file->external_lock(thd, F_UNLCK); (*tables)->current_lock=F_UNLCK; } - print_lock_error(error); DBUG_RETURN(error); } else @@ -476,8 +504,8 @@ MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b) SYNOPSIS mysql_lock_have_duplicate() thd The current thread. - table The table to check for duplicate lock. - tables The list of tables to search for the dup lock. + needle The table to check for duplicate lock. + haystack The list of tables to search for the dup lock. NOTE This is mainly meant for MERGE tables in INSERT ... SELECT @@ -490,28 +518,38 @@ MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b) both functions should be checked. 
RETURN - 1 A table from 'tables' matches a lock on 'table'. - 0 No duplicate lock found. + NULL No duplicate lock found. + ! NULL First table from 'haystack' that matches a lock on 'needle'. */ -int mysql_lock_have_duplicate(THD *thd, TABLE *table, TABLE_LIST *tables) +TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle, + TABLE_LIST *haystack) { MYSQL_LOCK *mylock; TABLE **lock_tables; + TABLE *table; TABLE *table2; - THR_LOCK_DATA **lock_locks, **table_lock_data; + THR_LOCK_DATA **lock_locks; + THR_LOCK_DATA **table_lock_data; THR_LOCK_DATA **end_data; THR_LOCK_DATA **lock_data2; THR_LOCK_DATA **end_data2; DBUG_ENTER("mysql_lock_have_duplicate"); + /* + Table may not be defined for derived or view tables. + Table may not be part of a lock for delayed operations. + */ + if (! (table= needle->table) || ! table->lock_count) + goto end; + /* A temporary table does not have locks. */ - if (table->tmp_table == TMP_TABLE) + if (table->s->tmp_table == TMP_TABLE) goto end; - /* Get command lock or LOCK TABLES lock. */ - mylock= thd->lock ? thd->lock : thd->locked_tables; - DBUG_ASSERT(mylock); + /* Get command lock or LOCK TABLES lock. Maybe empty for INSERT DELAYED. */ + if (! (mylock= thd->lock ? thd->lock : thd->locked_tables)) + goto end; /* If we have less than two tables, we cannot have duplicates. */ if (mylock->table_count < 2) @@ -521,18 +559,22 @@ int mysql_lock_have_duplicate(THD *thd, TABLE *table, TABLE_LIST *tables) lock_tables= mylock->table; /* Prepare table related variables that don't change in loop. 
*/ - DBUG_ASSERT(table == lock_tables[table->lock_position]); + DBUG_ASSERT((table->lock_position < mylock->table_count) && + (table == lock_tables[table->lock_position])); table_lock_data= lock_locks + table->lock_data_start; end_data= table_lock_data + table->lock_count; - for (; tables; tables= tables->next) + for (; haystack; haystack= haystack->next_global) { - table2= tables->table; - if (table2->tmp_table == TMP_TABLE || table == table2) + if (haystack->placeholder()) + continue; + table2= haystack->table; + if (table2->s->tmp_table == TMP_TABLE) continue; /* All tables in list must be in lock. */ - DBUG_ASSERT(table2 == lock_tables[table2->lock_position]); + DBUG_ASSERT((table2->lock_position < mylock->table_count) && + (table2 == lock_tables[table2->lock_position])); for (lock_data2= lock_locks + table2->lock_data_start, end_data2= lock_data2 + table2->lock_count; @@ -547,13 +589,17 @@ int mysql_lock_have_duplicate(THD *thd, TABLE *table, TABLE_LIST *tables) lock_data++) { if ((*lock_data)->lock == lock2) - DBUG_RETURN(1); + { + DBUG_PRINT("info", ("haystack match: '%s'", haystack->table_name)); + DBUG_RETURN(haystack); + } } } } end: - DBUG_RETURN(0); + DBUG_PRINT("info", ("no duplicate found")); + DBUG_RETURN(NULL); } @@ -571,12 +617,13 @@ static int unlock_external(THD *thd, TABLE **table,uint count) { (*table)->current_lock = F_UNLCK; if ((error=(*table)->file->external_lock(thd, F_UNLCK))) + { error_code=error; + print_lock_error(error_code, (*table)->file->table_type()); + } } table++; } while (--count); - if (error_code) - print_lock_error(error_code); DBUG_RETURN(error_code); } @@ -608,11 +655,24 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, *write_lock_used=0; for (i=tables=lock_count=0 ; i < count ; i++) { - if (table_ptr[i]->tmp_table != TMP_TABLE) + if (table_ptr[i]->s->tmp_table != TMP_TABLE) { tables+=table_ptr[i]->file->lock_count(); lock_count++; } + /* + To be able to open and lock for reading system tables 
like 'mysql.proc', + when we already have some tables opened and locked, and avoid deadlocks + we have to disallow write-locking of these tables with any other tables. + */ + if (table_ptr[i]->s->system_table && + table_ptr[i]->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE && + count != 1) + { + my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table_ptr[i]->s->db, + table_ptr[i]->s->table_name); + return 0; + } } /* @@ -637,7 +697,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, TABLE *table; enum thr_lock_type lock_type; - if ((table=table_ptr[i])->tmp_table == TMP_TABLE) + if ((table=table_ptr[i])->s->tmp_table == TMP_TABLE) continue; lock_type= table->reginfo.lock_type; if (lock_type >= TL_WRITE_ALLOW_WRITE) @@ -645,7 +705,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, *write_lock_used=table; if (table->db_stat & HA_READ_ONLY) { - my_error(ER_OPEN_AS_READONLY,MYF(0),table->table_name); + my_error(ER_OPEN_AS_READONLY,MYF(0),table->alias); /* Clear the lock type of the lock data that are stored already. 
*/ sql_lock->lock_count= locks - sql_lock->locks; reset_lock_data(sql_lock); @@ -793,15 +853,17 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list) TABLE *table; char key[MAX_DBKEY_LENGTH]; char *db= table_list->db; + int table_in_key_offset; uint key_length; HASH_SEARCH_STATE state; DBUG_ENTER("lock_table_name"); - DBUG_PRINT("enter",("db: %s name: %s", db, table_list->real_name)); + DBUG_PRINT("enter",("db: %s name: %s", db, table_list->table_name)); safe_mutex_assert_owner(&LOCK_open); - key_length=(uint) (strmov(strmov(key,db)+1,table_list->real_name) - -key)+ 1; + table_in_key_offset= strmov(key, db) - key + 1; + key_length= (uint)(strmov(key + table_in_key_offset, table_list->table_name) + - key) + 1; /* Only insert the table if we haven't insert it already */ @@ -819,8 +881,11 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list) if (!(table= (TABLE*) my_malloc(sizeof(*table)+key_length, MYF(MY_WME | MY_ZEROFILL)))) DBUG_RETURN(-1); - memcpy((table->table_cache_key= (char*) (table+1)), key, key_length); - table->key_length=key_length; + table->s= &table->share_not_to_be_used; + memcpy((table->s->table_cache_key= (char*) (table+1)), key, key_length); + table->s->db= table->s->table_cache_key; + table->s->table_name= table->s->table_cache_key + table_in_key_offset; + table->s->key_length=key_length; table->in_use=thd; table->locked_by_name=1; table_list->table=table; @@ -831,11 +896,9 @@ int lock_table_name(THD *thd, TABLE_LIST *table_list) DBUG_RETURN(-1); } - if (remove_table_from_cache(thd, db, table_list->real_name, RTFC_NO_FLAG)) - { - DBUG_RETURN(1); // Table is in use - } - DBUG_RETURN(0); + /* Return 1 if table is in use */ + DBUG_RETURN(test(remove_table_from_cache(thd, db, table_list->table_name, + RTFC_NO_FLAG))); } @@ -844,14 +907,14 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list) if (table_list->table) { hash_delete(&open_cache, (byte*) table_list->table); - (void) pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); } 
} static bool locked_named_table(THD *thd, TABLE_LIST *table_list) { - for (; table_list ; table_list=table_list->next) + for (; table_list ; table_list=table_list->next_local) { if (table_list->table && table_is_used(table_list->table,0)) return 1; @@ -905,7 +968,7 @@ bool lock_table_names(THD *thd, TABLE_LIST *table_list) bool got_all_locks=1; TABLE_LIST *lock_table; - for (lock_table=table_list ; lock_table ; lock_table=lock_table->next) + for (lock_table= table_list; lock_table; lock_table= lock_table->next_local) { int got_lock; if ((got_lock=lock_table_name(thd,lock_table)) < 0) @@ -936,9 +999,9 @@ end: (default 0, which will unlock all tables) NOTES - One must have a lock on LOCK_open when calling this - This function will send a COND_refresh signal to inform other threads - that the name locks are removed + One must have a lock on LOCK_open when calling this. + This function will broadcast refresh signals to inform other threads + that the name locks are removed. RETURN 0 ok @@ -948,13 +1011,15 @@ end: void unlock_table_names(THD *thd, TABLE_LIST *table_list, TABLE_LIST *last_table) { - for (TABLE_LIST *table=table_list ; table != last_table ; table=table->next) + for (TABLE_LIST *table= table_list; + table != last_table; + table= table->next_local) unlock_table_name(thd,table); - pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); } -static void print_lock_error(int error) +static void print_lock_error(int error, const char *table) { int textno; DBUG_ENTER("print_lock_error"); @@ -966,11 +1031,22 @@ static void print_lock_error(int error) case HA_ERR_READ_ONLY_TRANSACTION: textno=ER_READ_ONLY_TRANSACTION; break; + case HA_ERR_LOCK_DEADLOCK: + textno=ER_LOCK_DEADLOCK; + break; + case HA_ERR_WRONG_COMMAND: + textno=ER_ILLEGAL_HA; + break; default: textno=ER_CANT_LOCK; break; } - my_error(textno,MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG),error); + + if ( textno == ER_ILLEGAL_HA ) + my_error(textno, MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG), table); + else + 
my_error(textno, MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG), error); + DBUG_VOID_RETURN; } @@ -994,6 +1070,16 @@ static void print_lock_error(int error) protect_against_global_read_lock count of threads which have set protection against global read lock. + access to them is protected with a mutex LOCK_global_read_lock + + (XXX: one should never take LOCK_open if LOCK_global_read_lock is + taken, otherwise a deadlock may occur. Other mutexes could be a + problem too - grep the code for global_read_lock if you want to use + any other mutex here) Also one must not hold LOCK_open when calling + wait_if_global_read_lock(). When the thread with the global read lock + tries to close its tables, it needs to take LOCK_open in + close_thread_table(). + How blocking of threads by global read lock is achieved: that's advisory. Any piece of code which should be blocked by global read lock must be designed like this: @@ -1031,17 +1117,13 @@ static void print_lock_error(int error) table instance of thd2 thd1: COMMIT; # blocked by thd3. thd1 blocks thd2 which blocks thd3 which blocks thd1: deadlock. - + Note that we need to support that one thread does FLUSH TABLES WITH READ LOCK; and then COMMIT; (that's what innobackup does, for some good reason). So in this exceptional case the COMMIT should not be blocked by the FLUSH TABLES WITH READ LOCK. - TODO in MySQL 5.x: make_global_read_lock_block_commit() should be - killable. Normally CPU does not spend a long time in this function (COMMITs - are quite fast), but it would still be nice. 
- ****************************************************************************/ volatile uint global_read_lock=0; @@ -1058,16 +1140,17 @@ bool lock_global_read_lock(THD *thd) if (!thd->global_read_lock) { - (void) pthread_mutex_lock(&LOCK_open); - const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open, - "Waiting to get readlock"); + const char *old_message; + (void) pthread_mutex_lock(&LOCK_global_read_lock); + old_message=thd->enter_cond(&COND_global_read_lock, &LOCK_global_read_lock, + "Waiting to get readlock"); DBUG_PRINT("info", ("waiting_for: %d protect_against: %d", waiting_for_read_lock, protect_against_global_read_lock)); waiting_for_read_lock++; while (protect_against_global_read_lock && !thd->killed) - pthread_cond_wait(&COND_refresh, &LOCK_open); + pthread_cond_wait(&COND_global_read_lock, &LOCK_global_read_lock); waiting_for_read_lock--; if (thd->killed) { @@ -1076,7 +1159,7 @@ bool lock_global_read_lock(THD *thd) } thd->global_read_lock= GOT_GLOBAL_READ_LOCK; global_read_lock++; - thd->exit_cond(old_message); + thd->exit_cond(old_message); // this unlocks LOCK_global_read_lock } /* We DON'T set global_read_lock_blocks_commit now, it will be set after @@ -1089,18 +1172,29 @@ bool lock_global_read_lock(THD *thd) DBUG_RETURN(0); } + void unlock_global_read_lock(THD *thd) { uint tmp; - pthread_mutex_lock(&LOCK_open); + DBUG_ENTER("unlock_global_read_lock"); + DBUG_PRINT("info", + ("global_read_lock: %u global_read_lock_blocks_commit: %u", + global_read_lock, global_read_lock_blocks_commit)); + + pthread_mutex_lock(&LOCK_global_read_lock); tmp= --global_read_lock; if (thd->global_read_lock == MADE_GLOBAL_READ_LOCK_BLOCK_COMMIT) --global_read_lock_blocks_commit; - pthread_mutex_unlock(&LOCK_open); + pthread_mutex_unlock(&LOCK_global_read_lock); /* Send the signal outside the mutex to avoid a context switch */ if (!tmp) - pthread_cond_broadcast(&COND_refresh); + { + DBUG_PRINT("signal", ("Broadcasting COND_global_read_lock")); + 
pthread_cond_broadcast(&COND_global_read_lock); + } thd->global_read_lock= 0; + + DBUG_VOID_RETURN; } #define must_wait (global_read_lock && \ @@ -1115,14 +1209,22 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, DBUG_ENTER("wait_if_global_read_lock"); LINT_INIT(old_message); - (void) pthread_mutex_lock(&LOCK_open); + /* + Assert that we do not own LOCK_open. If we would own it, other + threads could not close their tables. This would make a pretty + deadlock. + */ + safe_mutex_assert_not_owner(&LOCK_open); + + (void) pthread_mutex_lock(&LOCK_global_read_lock); if ((need_exit_cond= must_wait)) { if (thd->global_read_lock) // This thread had the read locks { if (is_not_commit) - my_error(ER_CANT_UPDATE_WITH_READLOCK,MYF(0)); - (void) pthread_mutex_unlock(&LOCK_open); + my_message(ER_CANT_UPDATE_WITH_READLOCK, + ER(ER_CANT_UPDATE_WITH_READLOCK), MYF(0)); + (void) pthread_mutex_unlock(&LOCK_global_read_lock); /* We allow FLUSHer to COMMIT; we assume FLUSHer knows what it does. This allowance is needed to not break existing versions of innobackup @@ -1130,11 +1232,15 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, */ DBUG_RETURN(is_not_commit); } - old_message=thd->enter_cond(&COND_refresh, &LOCK_open, + old_message=thd->enter_cond(&COND_global_read_lock, &LOCK_global_read_lock, "Waiting for release of readlock"); while (must_wait && ! 
thd->killed && (!abort_on_refresh || thd->version == refresh_version)) - (void) pthread_cond_wait(&COND_refresh,&LOCK_open); + { + DBUG_PRINT("signal", ("Waiting for COND_global_read_lock")); + (void) pthread_cond_wait(&COND_global_read_lock, &LOCK_global_read_lock); + DBUG_PRINT("signal", ("Got COND_global_read_lock")); + } if (thd->killed) result=1; } @@ -1144,10 +1250,10 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, The following is only true in case of a global read locks (which is rare) and if old_message is set */ - if (unlikely(need_exit_cond)) - thd->exit_cond(old_message); + if (unlikely(need_exit_cond)) + thd->exit_cond(old_message); // this unlocks LOCK_global_read_lock else - pthread_mutex_unlock(&LOCK_open); + pthread_mutex_unlock(&LOCK_global_read_lock); DBUG_RETURN(result); } @@ -1158,31 +1264,79 @@ void start_waiting_global_read_lock(THD *thd) DBUG_ENTER("start_waiting_global_read_lock"); if (unlikely(thd->global_read_lock)) DBUG_VOID_RETURN; - (void) pthread_mutex_lock(&LOCK_open); + (void) pthread_mutex_lock(&LOCK_global_read_lock); tmp= (!--protect_against_global_read_lock && (waiting_for_read_lock || global_read_lock_blocks_commit)); - (void) pthread_mutex_unlock(&LOCK_open); + (void) pthread_mutex_unlock(&LOCK_global_read_lock); if (tmp) - pthread_cond_broadcast(&COND_refresh); + pthread_cond_broadcast(&COND_global_read_lock); DBUG_VOID_RETURN; } -void make_global_read_lock_block_commit(THD *thd) +bool make_global_read_lock_block_commit(THD *thd) { + bool error; + const char *old_message; + DBUG_ENTER("make_global_read_lock_block_commit"); /* If we didn't succeed lock_global_read_lock(), or if we already suceeded make_global_read_lock_block_commit(), do nothing. 
*/ if (thd->global_read_lock != GOT_GLOBAL_READ_LOCK) - return; - pthread_mutex_lock(&LOCK_open); + DBUG_RETURN(0); + pthread_mutex_lock(&LOCK_global_read_lock); /* increment this BEFORE waiting on cond (otherwise race cond) */ global_read_lock_blocks_commit++; - while (protect_against_global_read_lock) - pthread_cond_wait(&COND_refresh, &LOCK_open); - pthread_mutex_unlock(&LOCK_open); - thd->global_read_lock= MADE_GLOBAL_READ_LOCK_BLOCK_COMMIT; + /* For testing we set up some blocking, to see if we can be killed */ + DBUG_EXECUTE_IF("make_global_read_lock_block_commit_loop", + protect_against_global_read_lock++;); + old_message= thd->enter_cond(&COND_global_read_lock, &LOCK_global_read_lock, + "Waiting for all running commits to finish"); + while (protect_against_global_read_lock && !thd->killed) + pthread_cond_wait(&COND_global_read_lock, &LOCK_global_read_lock); + DBUG_EXECUTE_IF("make_global_read_lock_block_commit_loop", + protect_against_global_read_lock--;); + if ((error= test(thd->killed))) + global_read_lock_blocks_commit--; // undo what we did + else + thd->global_read_lock= MADE_GLOBAL_READ_LOCK_BLOCK_COMMIT; + thd->exit_cond(old_message); // this unlocks LOCK_global_read_lock + DBUG_RETURN(error); +} + + +/* + Broadcast COND_refresh and COND_global_read_lock. + + SYNOPSIS + broadcast_refresh() + void No parameters. + + DESCRIPTION + Due to a bug in a threading library it could happen that a signal + did not reach its target. A condition for this was that the same + condition variable was used with different mutexes in + pthread_cond_wait(). Some time ago we changed LOCK_open to + LOCK_global_read_lock in global read lock handling. So COND_refresh + was used with LOCK_open and LOCK_global_read_lock. + + We did now also change from COND_refresh to COND_global_read_lock + in global read lock handling. But now it is necessary to signal + both conditions at the same time. 
+ + NOTE + When signalling COND_global_read_lock within the global read lock + handling, it is not necessary to also signal COND_refresh. + + RETURN + void +*/ + +void broadcast_refresh(void) +{ + VOID(pthread_cond_broadcast(&COND_refresh)); + VOID(pthread_cond_broadcast(&COND_global_read_lock)); } diff --git a/sql/log.cc b/sql/log.cc index b91ec2b3dee..7d0bef5ca2c 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -24,7 +23,6 @@ #include "mysql_priv.h" #include "sql_repl.h" -#include "ha_innodb.h" // necessary to cut the binlog when crash recovery #include <my_dir.h> #include <stdarg.h> @@ -34,28 +32,270 @@ #include "message.h" #endif -MYSQL_LOG mysql_log,mysql_update_log,mysql_slow_log,mysql_bin_log; +MYSQL_LOG mysql_log, mysql_slow_log, mysql_bin_log; ulong sync_binlog_counter= 0; +static Muted_query_log_event invisible_commit; + static bool test_if_number(const char *str, long *res, bool allow_wildcards); +static bool binlog_init(); +static int binlog_close_connection(THD *thd); +static int binlog_savepoint_set(THD *thd, void *sv); +static int binlog_savepoint_rollback(THD *thd, void *sv); +static int binlog_commit(THD *thd, bool all); +static int binlog_rollback(THD *thd, bool all); +static int binlog_prepare(THD *thd, bool all); + +handlerton binlog_hton = { + "binlog", + SHOW_OPTION_YES, + "This is a meta storage engine to represent the binlog in a transaction", + DB_TYPE_UNKNOWN, /* IGNORE for now */ + binlog_init, + 0, + sizeof(my_off_t), /* savepoint size = binlog offset */ + binlog_close_connection, + binlog_savepoint_set, + 
binlog_savepoint_rollback, + NULL, /* savepoint_release */ + binlog_commit, + binlog_rollback, + binlog_prepare, + NULL, /* recover */ + NULL, /* commit_by_xid */ + NULL, /* rollback_by_xid */ + NULL, /* create_cursor_read_view */ + NULL, /* set_cursor_read_view */ + NULL, /* close_cursor_read_view */ + HTON_HIDDEN +}; + +/* + this function is mostly a placeholder. + conceptually, binlog initialization (now mostly done in MYSQL_LOG::open) + should be moved here. +*/ + +bool binlog_init() +{ + return !opt_bin_log; +} + +static int binlog_close_connection(THD *thd) +{ + IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot]; + DBUG_ASSERT(mysql_bin_log.is_open() && !my_b_tell(trans_log)); + close_cached_file(trans_log); + my_free((gptr)trans_log, MYF(0)); + return 0; +} + +static int binlog_end_trans(THD *thd, IO_CACHE *trans_log, Log_event *end_ev) +{ + int error=0; + DBUG_ENTER("binlog_end_trans"); + + /* NULL denotes ROLLBACK with nothing to replicate */ + if (end_ev != NULL) + error= mysql_bin_log.write(thd, trans_log, end_ev); + + statistic_increment(binlog_cache_use, &LOCK_status); + if (trans_log->disk_writes != 0) + { + statistic_increment(binlog_cache_disk_use, &LOCK_status); + trans_log->disk_writes= 0; + } + reinit_io_cache(trans_log, WRITE_CACHE, (my_off_t) 0, 0, 1); // cannot fail + trans_log->end_of_file= max_binlog_cache_size; + DBUG_RETURN(error); +} + +static int binlog_prepare(THD *thd, bool all) +{ + /* + do nothing. + just pretend we can do 2pc, so that MySQL won't + switch to 1pc. 
+ real work will be done in MYSQL_LOG::log_xid() + */ + return 0; +} + +static int binlog_commit(THD *thd, bool all) +{ + IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot]; + DBUG_ENTER("binlog_commit"); + DBUG_ASSERT(mysql_bin_log.is_open() && + (all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))); + + if (my_b_tell(trans_log) == 0) + { + // we're here because trans_log was flushed in MYSQL_LOG::log_xid() + DBUG_RETURN(0); + } + if (all) + { + Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE); + qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE) + DBUG_RETURN(binlog_end_trans(thd, trans_log, &qev)); + } + else + DBUG_RETURN(binlog_end_trans(thd, trans_log, &invisible_commit)); +} + +static int binlog_rollback(THD *thd, bool all) +{ + int error=0; + IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot]; + DBUG_ENTER("binlog_rollback"); + /* + First assert is guaranteed - see trans_register_ha() call below. + The second must be true. If it is not, we're registering + unnecessary, doing extra work. The cause should be found and eliminated + */ + DBUG_ASSERT(all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))); + DBUG_ASSERT(mysql_bin_log.is_open() && my_b_tell(trans_log)); + /* + Update the binary log with a BEGIN/ROLLBACK block if we have + cached some queries and we updated some non-transactional + table. Such cases should be rare (updating a + non-transactional table inside a transaction...) + */ + if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE)) + { + Query_log_event qev(thd, STRING_WITH_LEN("ROLLBACK"), TRUE, FALSE); + qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE) + error= binlog_end_trans(thd, trans_log, &qev); + } + else + error= binlog_end_trans(thd, trans_log, 0); + DBUG_RETURN(error); +} + +/* + NOTE: how do we handle this (unlikely but legal) case: + [transaction] + [update to non-trans table] + [rollback to savepoint] ? 
+ The problem occurs when a savepoint is before the update to the + non-transactional table. Then when there's a rollback to the savepoint, if we + simply truncate the binlog cache, we lose the part of the binlog cache where + the update is. If we want to not lose it, we need to write the SAVEPOINT + command and the ROLLBACK TO SAVEPOINT command to the binlog cache. The latter + is easy: it's just write at the end of the binlog cache, but the former + should be *inserted* to the place where the user called SAVEPOINT. The + solution is that when the user calls SAVEPOINT, we write it to the binlog + cache (so no need to later insert it). As transactions are never intermixed + in the binary log (i.e. they are serialized), we won't have conflicts with + savepoint names when using mysqlbinlog or in the slave SQL thread. + Then when ROLLBACK TO SAVEPOINT is called, if we updated some + non-transactional table, we don't truncate the binlog cache but instead write + ROLLBACK TO SAVEPOINT to it; otherwise we truncate the binlog cache (which + will chop the SAVEPOINT command from the binlog cache, which is good as in + that case there is no need to have it in the binlog). +*/ + +static int binlog_savepoint_set(THD *thd, void *sv) +{ + IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot]; + DBUG_ENTER("binlog_savepoint_set"); + DBUG_ASSERT(mysql_bin_log.is_open() && my_b_tell(trans_log)); + + *(my_off_t *)sv= my_b_tell(trans_log); + /* Write it to the binary log */ + Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE, FALSE); + DBUG_RETURN(mysql_bin_log.write(&qinfo)); +} + +static int binlog_savepoint_rollback(THD *thd, void *sv) +{ + IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot]; + DBUG_ENTER("binlog_savepoint_rollback"); + DBUG_ASSERT(mysql_bin_log.is_open() && my_b_tell(trans_log)); + + /* + Write ROLLBACK TO SAVEPOINT to the binlog cache if we have updated some + non-transactional table. 
Otherwise, truncate the binlog cache starting + from the SAVEPOINT command. + */ + if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE)) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, TRUE, FALSE); + DBUG_RETURN(mysql_bin_log.write(&qinfo)); + } + reinit_io_cache(trans_log, WRITE_CACHE, *(my_off_t *)sv, 0, 0); + DBUG_RETURN(0); +} + +int check_binlog_magic(IO_CACHE* log, const char** errmsg) +{ + char magic[4]; + DBUG_ASSERT(my_b_tell(log) == 0); + + if (my_b_read(log, (byte*) magic, sizeof(magic))) + { + *errmsg = "I/O error reading the header from the binary log"; + sql_print_error("%s, errno=%d, io cache code=%d", *errmsg, my_errno, + log->error); + return 1; + } + if (memcmp(magic, BINLOG_MAGIC, sizeof(magic))) + { + *errmsg = "Binlog has bad magic number; It's not a binary log file that can be used by this version of MySQL"; + return 1; + } + return 0; +} + +File open_binlog(IO_CACHE *log, const char *log_file_name, const char **errmsg) +{ + File file; + DBUG_ENTER("open_binlog"); + + if ((file = my_open(log_file_name, O_RDONLY | O_BINARY | O_SHARE, + MYF(MY_WME))) < 0) + { + sql_print_error("Failed to open log (file '%s', errno %d)", + log_file_name, my_errno); + *errmsg = "Could not open log file"; + goto err; + } + if (init_io_cache(log, file, IO_SIZE*2, READ_CACHE, 0, 0, + MYF(MY_WME|MY_DONT_CHECK_FILESIZE))) + { + sql_print_error("Failed to create a cache on log (file '%s')", + log_file_name); + *errmsg = "Could not open log file"; + goto err; + } + if (check_binlog_magic(log,errmsg)) + goto err; + DBUG_RETURN(file); + +err: + if (file >= 0) + { + my_close(file,MYF(0)); + end_io_cache(log); + } + DBUG_RETURN(-1); +} #ifdef __NT__ static int eventSource = 0; -void setup_windows_event_source() +void setup_windows_event_source() { - HKEY hRegKey= NULL; + HKEY hRegKey= NULL; DWORD dwError= 0; TCHAR szPath[MAX_PATH]; DWORD dwTypes; - + if (eventSource) // Ensure that we are only called once return; eventSource= 1; // Create the event 
source registry key - dwError= RegCreateKey(HKEY_LOCAL_MACHINE, + dwError= RegCreateKey(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\MySQL", &hRegKey); @@ -63,9 +303,8 @@ void setup_windows_event_source() GetModuleFileName(NULL, szPath, MAX_PATH); /* Register EventMessageFile */ - dwError = RegSetValueEx(hRegKey, "EventMessageFile", 0, REG_EXPAND_SZ, - (PBYTE) szPath, strlen(szPath)+1); - + dwError = RegSetValueEx(hRegKey, "EventMessageFile", 0, REG_EXPAND_SZ, + (PBYTE) szPath, (DWORD) (strlen(szPath) + 1)); /* Register supported event types */ dwTypes= (EVENTLOG_ERROR_TYPE | EVENTLOG_WARNING_TYPE | @@ -127,26 +366,21 @@ static int find_uniq_filename(char *name) MYSQL_LOG::MYSQL_LOG() :bytes_written(0), last_time(0), query_start(0), name(0), - file_id(1), open_count(1), log_type(LOG_CLOSED), write_error(0), inited(0), - need_start_event(1) + prepared_xids(0), log_type(LOG_CLOSED), file_id(1), open_count(1), + write_error(FALSE), inited(FALSE), need_start_event(TRUE), + description_event_for_exec(0), description_event_for_queue(0) { /* We don't want to initialize LOCK_Log here as such initialization depends on safe_mutex (when using safe_mutex) which depends on MY_INIT(), which is called only in main(). Doing initialization here would make it happen - before main(). + before main(). 
*/ index_file_name[0] = 0; bzero((char*) &log_file,sizeof(log_file)); bzero((char*) &index_file, sizeof(index_file)); } - -MYSQL_LOG::~MYSQL_LOG() -{ - cleanup(); -} - /* this is called only once */ void MYSQL_LOG::cleanup() @@ -155,7 +389,9 @@ void MYSQL_LOG::cleanup() if (inited) { inited= 0; - close(LOG_CLOSE_INDEX); + close(LOG_CLOSE_INDEX|LOG_CLOSE_STOP_EVENT); + delete description_event_for_queue; + delete description_event_for_exec; (void) pthread_mutex_destroy(&LOCK_log); (void) pthread_mutex_destroy(&LOCK_index); (void) pthread_cond_destroy(&update_cond); @@ -165,7 +401,7 @@ void MYSQL_LOG::cleanup() int MYSQL_LOG::generate_new_name(char *new_name, const char *log_name) -{ +{ fn_format(new_name,log_name,mysql_data_home,"",4); if (log_type != LOG_NORMAL) { @@ -206,6 +442,66 @@ void MYSQL_LOG::init_pthread_objects() (void) pthread_cond_init(&update_cond, 0); } +const char *MYSQL_LOG::generate_name(const char *log_name, + const char *suffix, + bool strip_ext, char *buff) +{ + if (!log_name || !log_name[0]) + { + /* + TODO: The following should be using fn_format(); We just need to + first change fn_format() to cut the file name if it's too long. + */ + strmake(buff, pidfile_name,FN_REFLEN-5); + strmov(fn_ext(buff),suffix); + return (const char *)buff; + } + // get rid of extension if the log is binary to avoid problems + if (strip_ext) + { + char *p = fn_ext(log_name); + uint length=(uint) (p-log_name); + strmake(buff,log_name,min(length,FN_REFLEN)); + return (const char*)buff; + } + return log_name; +} + +bool MYSQL_LOG::open_index_file(const char *index_file_name_arg, + const char *log_name) +{ + File index_file_nr= -1; + DBUG_ASSERT(!my_b_inited(&index_file)); + + /* + First open of this class instance + Create an index file that will hold all file names uses for logging. + Add new entries to the end of it. 
+ */ + myf opt= MY_UNPACK_FILENAME; + if (!index_file_name_arg) + { + index_file_name_arg= log_name; // Use same basename for index file + opt= MY_UNPACK_FILENAME | MY_REPLACE_EXT; + } + fn_format(index_file_name, index_file_name_arg, mysql_data_home, + ".index", opt); + if ((index_file_nr= my_open(index_file_name, + O_RDWR | O_CREAT | O_BINARY , + MYF(MY_WME))) < 0 || + my_sync(index_file_nr, MYF(MY_WME)) || + init_io_cache(&index_file, index_file_nr, + IO_SIZE, WRITE_CACHE, + my_seek(index_file_nr,0L,MY_SEEK_END,MYF(0)), + 0, MYF(MY_WME | MY_WAIT_IF_FULL))) + { + if (index_file_nr >= 0) + my_close(index_file_nr,MYF(0)); + return TRUE; + } + return FALSE; +} + /* Open a (new) log file. @@ -221,34 +517,39 @@ void MYSQL_LOG::init_pthread_objects() 1 error */ -bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, - const char *new_name, const char *index_file_name_arg, - enum cache_type io_cache_type_arg, - bool no_auto_events_arg, - ulong max_size_arg) +bool MYSQL_LOG::open(const char *log_name, + enum_log_type log_type_arg, + const char *new_name, + enum cache_type io_cache_type_arg, + bool no_auto_events_arg, + ulong max_size_arg, + bool null_created_arg) { - char buff[512]; - File file= -1, index_file_nr= -1; - int open_flags = O_CREAT | O_APPEND | O_BINARY; + char buff[FN_REFLEN]; + File file= -1; + int open_flags = O_CREAT | O_BINARY; DBUG_ENTER("MYSQL_LOG::open"); - DBUG_PRINT("enter",("log_type: %d",(int) log_type)); + DBUG_PRINT("enter",("log_type: %d",(int) log_type_arg)); last_time=query_start=0; write_error=0; init(log_type_arg,io_cache_type_arg,no_auto_events_arg,max_size_arg); - + if (!(name=my_strdup(log_name,MYF(MY_WME)))) + { + name= (char *)log_name; // for the error message goto err; + } if (new_name) strmov(log_file_name,new_name); else if (generate_new_name(log_file_name, name)) goto err; - + if (io_cache_type == SEQ_READ_APPEND) - open_flags |= O_RDWR; + open_flags |= O_RDWR | O_APPEND; else - open_flags |= O_WRONLY; + 
open_flags |= O_WRONLY | (log_type == LOG_BIN ? 0 : O_APPEND); db[0]=0; open_count++; @@ -264,15 +565,18 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, case LOG_NORMAL: { char *end; - int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s. " + int len=my_snprintf(buff, sizeof(buff), "%s, Version: %s (%s). " #ifdef EMBEDDED_LIBRARY - "embedded library\n", my_progname, server_version + "embedded library\n", + my_progname, server_version, MYSQL_COMPILATION_COMMENT #elif __NT__ "started with:\nTCP Port: %d, Named Pipe: %s\n", - my_progname, server_version, mysqld_port, mysqld_unix_port + my_progname, server_version, MYSQL_COMPILATION_COMMENT, + mysqld_port, mysqld_unix_port #else "started with:\nTcp port: %d Unix socket: %s\n", - my_progname,server_version,mysqld_port,mysqld_unix_port + my_progname, server_version, MYSQL_COMPILATION_COMMENT, + mysqld_port, mysqld_unix_port #endif ); end=strnmov(buff+len,"Time Id Command Argument\n", @@ -307,13 +611,6 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, { bool write_file_name_to_index_file=0; - myf opt= MY_UNPACK_FILENAME; - if (!index_file_name_arg) - { - index_file_name_arg= name; // Use same basename for index file - opt= MY_UNPACK_FILENAME | MY_REPLACE_EXT; - } - if (!my_b_filelength(&log_file)) { /* @@ -325,43 +622,64 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, if (my_b_safe_write(&log_file, (byte*) BINLOG_MAGIC, BIN_LOG_HEADER_SIZE)) goto err; - bytes_written += BIN_LOG_HEADER_SIZE; - write_file_name_to_index_file=1; + bytes_written+= BIN_LOG_HEADER_SIZE; + write_file_name_to_index_file= 1; } - if (!my_b_inited(&index_file)) + DBUG_ASSERT(my_b_inited(&index_file) != 0); + reinit_io_cache(&index_file, WRITE_CACHE, + my_b_filelength(&index_file), 0, 0); + if (need_start_event && !no_auto_events) { /* - First open of this class instance - Create an index file that will hold all file names uses for logging. 
- Add new entries to the end of it. - Index file (and binlog) are so critical for recovery/replication - that we create them with MY_WAIT_IF_FULL. + In 4.x we set need_start_event=0 here, but in 5.0 we want a Start event + even if this is not the very first binlog. */ - fn_format(index_file_name, index_file_name_arg, mysql_data_home, - ".index", opt); - if ((index_file_nr= my_open(index_file_name, - O_RDWR | O_CREAT | O_BINARY , - MYF(MY_WME))) < 0 || - my_sync(index_file_nr, MYF(MY_WME)) || - init_io_cache(&index_file, index_file_nr, - IO_SIZE, WRITE_CACHE, - my_seek(index_file_nr,0L,MY_SEEK_END,MYF(0)), - 0, MYF(MY_WME | MY_WAIT_IF_FULL))) - goto err; - } - else - { - safe_mutex_assert_owner(&LOCK_index); - reinit_io_cache(&index_file, WRITE_CACHE, my_b_filelength(&index_file), - 0, 0); + Format_description_log_event s(BINLOG_VERSION); + /* + don't set LOG_EVENT_BINLOG_IN_USE_F for SEQ_READ_APPEND io_cache + as we won't be able to reset it later + */ + if (io_cache_type == WRITE_CACHE) + s.flags|= LOG_EVENT_BINLOG_IN_USE_F; + if (!s.is_valid()) + goto err; + if (null_created_arg) + s.created= 0; + if (s.write(&log_file)) + goto err; + bytes_written+= s.data_written; } - if (need_start_event && !no_auto_events) + if (description_event_for_queue && + description_event_for_queue->binlog_version>=4) { - need_start_event=0; - Start_log_event s; - s.set_log_pos(this); - s.write(&log_file); + /* + This is a relay log written to by the I/O slave thread. + Write the event so that others can later know the format of this relay + log. + Note that this event is very close to the original event from the + master (it has binlog version of the master, event types of the + master), so this is suitable to parse the next relay log's event. It + has been produced by + Format_description_log_event::Format_description_log_event(char* buf,). 
+ Why don't we want to write the description_event_for_queue if this + event is for format<4 (3.23 or 4.x): this is because in that case, the + description_event_for_queue describes the data received from the + master, but not the data written to the relay log (*conversion*), + which is in format 4 (slave's). + */ + /* + Set 'created' to 0, so that in next relay logs this event does not + trigger cleaning actions on the slave in + Format_description_log_event::exec_event(). + */ + description_event_for_queue->created= 0; + /* Don't set log_pos in event header */ + description_event_for_queue->artificial_event=1; + + if (description_event_for_queue->write(&log_file)) + goto err; + bytes_written+= description_event_for_queue->data_written; } if (flush_io_cache(&log_file) || my_sync(log_file.file, MYF(MY_WME))) @@ -393,11 +711,9 @@ err: sql_print_error("Could not use %s for logging (error %d). \ Turning logging off for the whole duration of the MySQL server process. \ To turn it on again: fix the cause, \ -shutdown the MySQL server and restart it.", log_name, errno); +shutdown the MySQL server and restart it.", name, errno); if (file >= 0) my_close(file,MYF(0)); - if (index_file_nr >= 0) - my_close(index_file_nr,MYF(0)); end_io_cache(&log_file); end_io_cache(&index_file); safeFree(name); @@ -442,6 +758,8 @@ int MYSQL_LOG::raw_get_current_log(LOG_INFO* linfo) 0 ok */ +#ifdef HAVE_REPLICATION + static bool copy_up_file_and_fill(IO_CACHE *index_file, my_off_t offset) { int bytes_read; @@ -475,6 +793,7 @@ err: DBUG_RETURN(1); } +#endif /* HAVE_REPLICATION */ /* Find the position in the log-index-file for the given log name @@ -508,8 +827,8 @@ int MYSQL_LOG::find_log_pos(LOG_INFO *linfo, const char *log_name, DBUG_PRINT("enter",("log_name: %s", log_name ? 
log_name : "NULL")); /* - Mutex needed because we need to make sure the file pointer does not move - from under our feet + Mutex needed because we need to make sure the file pointer does not + move from under our feet */ if (need_lock) pthread_mutex_lock(&LOCK_index); @@ -592,7 +911,7 @@ int MYSQL_LOG::find_next_log(LOG_INFO* linfo, bool need_lock) error = !index_file.error ? LOG_INFO_EOF : LOG_INFO_IO; goto err; } - fname[length-1]=0; // kill /n + fname[length-1]=0; // kill \n linfo->index_file_offset = my_b_tell(&index_file); err: @@ -635,6 +954,14 @@ bool MYSQL_LOG::reset_logs(THD* thd) pthread_mutex_lock(&LOCK_log); pthread_mutex_lock(&LOCK_index); + /* + The following mutex is needed to ensure that no threads call + 'delete thd' as we would then risk missing a 'rollback' from this + thread. If the transaction involved MyISAM tables, it should go + into binlog even on rollback. + */ + (void) pthread_mutex_lock(&LOCK_thread_count); + /* Save variables so that we can reopen the log */ save_name=name; name=0; // Protect against free @@ -648,24 +975,26 @@ bool MYSQL_LOG::reset_logs(THD* thd) error=1; goto err; } - + for (;;) { - my_delete(linfo.log_file_name, MYF(MY_WME)); + my_delete_allow_opened(linfo.log_file_name, MYF(MY_WME)); if (find_next_log(&linfo, 0)) break; } /* Start logging with a new file */ close(LOG_CLOSE_INDEX); - my_delete(index_file_name, MYF(MY_WME)); // Reset (open will update) + my_delete_allow_opened(index_file_name, MYF(MY_WME)); // Reset (open will update) if (!thd->slave_thread) need_start_event=1; - open(save_name, save_log_type, 0, index_file_name, - io_cache_type, no_auto_events, max_size); + if (!open_index_file(index_file_name, 0)) + open(save_name, save_log_type, 0, + io_cache_type, no_auto_events, max_size, 0); my_free((gptr) save_name, MYF(0)); -err: +err: + (void) pthread_mutex_unlock(&LOCK_thread_count); pthread_mutex_unlock(&LOCK_index); pthread_mutex_unlock(&LOCK_log); DBUG_RETURN(error); @@ -684,7 +1013,7 @@ err: 
rli->group_relay_log_name are deleted ; if true, the latter is deleted too (i.e. all relay logs read by the SQL slave thread are deleted). - + NOTE - This is only called from the slave-execute thread when it has read all commands from a relay log and want to switch to a new relay log. @@ -844,24 +1173,21 @@ int MYSQL_LOG::purge_logs(const char *to_log, while ((strcmp(to_log,log_info.log_file_name) || (exit_loop=included)) && !log_in_use(log_info.log_file_name)) { - ulong tmp; - LINT_INIT(tmp); + ulong file_size= 0; if (decrease_log_space) //stat the file we want to delete { MY_STAT s; + + /* + If we could not stat, we can't know the amount + of space that deletion will free. In most cases, + deletion won't work either, so it's not a problem. + */ if (my_stat(log_info.log_file_name,&s,MYF(0))) - tmp= s.st_size; + file_size= s.st_size; else - { - /* - If we could not stat, we can't know the amount - of space that deletion will free. In most cases, - deletion won't work either, so it's not a problem. 
- */ sql_print_information("Failed to execute my_stat on file '%s'", log_info.log_file_name); - tmp= 0; - } } /* It's not fatal if we can't delete a log file ; @@ -869,7 +1195,7 @@ int MYSQL_LOG::purge_logs(const char *to_log, */ DBUG_PRINT("info",("purging %s",log_info.log_file_name)); if (!my_delete(log_info.log_file_name, MYF(0)) && decrease_log_space) - *decrease_log_space-= tmp; + *decrease_log_space-= file_size; if (find_next_log(&log_info, 0) || exit_loop) break; } @@ -944,8 +1270,6 @@ err: pthread_mutex_unlock(&LOCK_index); DBUG_RETURN(error); } - - #endif /* HAVE_REPLICATION */ @@ -985,8 +1309,7 @@ bool MYSQL_LOG::is_active(const char *log_file_name_arg) SYNOPSIS new_file() - need_lock Set to 1 (default) if caller has not locked - LOCK_log and LOCK_index + need_lock Set to 1 if caller has not locked LOCK_log NOTE The new file name is stored last in the index file @@ -1005,13 +1328,30 @@ void MYSQL_LOG::new_file(bool need_lock) } if (need_lock) - { pthread_mutex_lock(&LOCK_log); - pthread_mutex_lock(&LOCK_index); - } + pthread_mutex_lock(&LOCK_index); + safe_mutex_assert_owner(&LOCK_log); safe_mutex_assert_owner(&LOCK_index); + /* + if binlog is used as tc log, be sure all xids are "unlogged", + so that on recover we only need to scan one - latest - binlog file + for prepared xids. As this is expected to be a rare event, + simple wait strategy is enough. We're locking LOCK_log to be sure no + new Xid_log_event's are added to the log (and prepared_xids is not + increased), and waiting on COND_prep_xids for late threads to + catch up. 
+ */ + if (prepared_xids) + { + tc_log_page_waits++; + pthread_mutex_lock(&LOCK_prep_xids); + while (prepared_xids) + pthread_cond_wait(&COND_prep_xids, &LOCK_prep_xids); + pthread_mutex_unlock(&LOCK_prep_xids); + } + /* Reuse old name if not binlog and not update log */ new_name_ptr= name; @@ -1023,7 +1363,7 @@ void MYSQL_LOG::new_file(bool need_lock) if (generate_new_name(new_name, name)) goto end; new_name_ptr=new_name; - + if (log_type == LOG_BIN) { if (!no_auto_events) @@ -1035,38 +1375,43 @@ void MYSQL_LOG::new_file(bool need_lock) THD *thd = current_thd; /* may be 0 if we are reacting to SIGHUP */ Rotate_log_event r(thd,new_name+dirname_length(new_name), 0, LOG_EVENT_OFFSET, 0); - r.set_log_pos(this); r.write(&log_file); - bytes_written += r.get_event_len(); + bytes_written += r.data_written; } /* Update needs to be signalled even if there is no rotate event log rotation should give the waiting thread a signal to discover EOF and move on to the next log. */ - signal_update(); + signal_update(); } old_name=name; save_log_type=log_type; name=0; // Don't free name close(LOG_CLOSE_TO_BE_OPENED); - /* + /* Note that at this point, log_type != LOG_CLOSED (important for is_open()). */ - open(old_name, save_log_type, new_name_ptr, index_file_name, io_cache_type, - no_auto_events, max_size); - if (this == &mysql_bin_log) - report_pos_in_innodb(); + /* + new_file() is only used for rotation (in FLUSH LOGS or because size > + max_binlog_size or max_relay_log_size). + If this is a binary log, the Format_description_log_event at the beginning of + the new file should have created=0 (to distinguish with the + Format_description_log_event written at server startup, which should + trigger temp tables deletion on slaves. 
+ */ + + open(old_name, save_log_type, new_name_ptr, + io_cache_type, no_auto_events, max_size, 1); my_free(old_name,MYF(0)); end: if (need_lock) - { - pthread_mutex_unlock(&LOCK_index); pthread_mutex_unlock(&LOCK_log); - } + pthread_mutex_unlock(&LOCK_index); + DBUG_VOID_RETURN; } @@ -1087,16 +1432,12 @@ bool MYSQL_LOG::append(Log_event* ev) error=1; goto err; } - bytes_written += ev->get_event_len(); + bytes_written+= ev->data_written; DBUG_PRINT("info",("max_size: %lu",max_size)); if ((uint) my_b_append_tell(&log_file) > max_size) - { - pthread_mutex_lock(&LOCK_index); new_file(0); - pthread_mutex_unlock(&LOCK_index); - } -err: +err: pthread_mutex_unlock(&LOCK_log); signal_update(); // Safe as we don't call close DBUG_RETURN(error); @@ -1109,9 +1450,9 @@ bool MYSQL_LOG::appendv(const char* buf, uint len,...) DBUG_ENTER("MYSQL_LOG::appendv"); va_list(args); va_start(args,len); - + DBUG_ASSERT(log_file.type == SEQ_READ_APPEND); - + safe_mutex_assert_owner(&LOCK_log); do { @@ -1124,11 +1465,7 @@ bool MYSQL_LOG::appendv(const char* buf, uint len,...) } while ((buf=va_arg(args,const char*)) && (len=va_arg(args,uint))); DBUG_PRINT("info",("max_size: %lu",max_size)); if ((uint) my_b_append_tell(&log_file) > max_size) - { - pthread_mutex_lock(&LOCK_index); new_file(0); - pthread_mutex_unlock(&LOCK_index); - } err: if (!error) @@ -1139,7 +1476,7 @@ err: /* Write to normal (not rotable) log - This is the format for the 'normal', 'slow' and 'update' logs. + This is the format for the 'normal' log. 
*/ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command, @@ -1164,7 +1501,7 @@ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command, { // Normal thread if ((thd->options & OPTION_LOG_OFF) #ifndef NO_EMBEDDED_ACCESS_CHECKS - && (thd->master_access & SUPER_ACL) + && (thd->security_ctx->master_access & SUPER_ACL) #endif ) { @@ -1227,59 +1564,79 @@ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command, return 0; } +bool MYSQL_LOG::flush_and_sync() +{ + int err=0, fd=log_file.file; + safe_mutex_assert_owner(&LOCK_log); + if (flush_io_cache(&log_file)) + return 1; + if (++sync_binlog_counter >= sync_binlog_period && sync_binlog_period) + { + sync_binlog_counter= 0; + err=my_sync(fd, MYF(MY_WME)); + } + return err; +} -inline bool sync_binlog(IO_CACHE *cache) +void MYSQL_LOG::start_union_events(THD *thd, query_id_t query_id_param) { - return (sync_binlog_period && - (sync_binlog_period == ++sync_binlog_counter) && - (sync_binlog_counter= 0, my_sync(cache->file, MYF(MY_WME)))); + DBUG_ASSERT(!thd->binlog_evt_union.do_union); + thd->binlog_evt_union.do_union= TRUE; + thd->binlog_evt_union.unioned_events= FALSE; + thd->binlog_evt_union.unioned_events_trans= FALSE; + thd->binlog_evt_union.first_query_id= query_id_param; +} + +void MYSQL_LOG::stop_union_events(THD *thd) +{ + DBUG_ASSERT(thd->binlog_evt_union.do_union); + thd->binlog_evt_union.do_union= FALSE; +} + +bool MYSQL_LOG::is_query_in_union(THD *thd, query_id_t query_id_param) +{ + return (thd->binlog_evt_union.do_union && + query_id_param >= thd->binlog_evt_union.first_query_id); } /* Write an event to the binary log */ -bool MYSQL_LOG::write(Log_event* event_info) +bool MYSQL_LOG::write(Log_event *event_info) { - THD *thd=event_info->thd; - bool called_handler_commit=0; - bool error=0; - bool should_rotate = 0; - DBUG_ENTER("MYSQL_LOG::write(event)"); + THD *thd= event_info->thd; + bool error= 1; + DBUG_ENTER("MYSQL_LOG::write(Log_event *)"); + + if (thd->binlog_evt_union.do_union) + { 
+ /* + In Stored function; Remember that function call caused an update. + We will log the function call to the binary log on function exit + */ + thd->binlog_evt_union.unioned_events= TRUE; + thd->binlog_evt_union.unioned_events_trans |= event_info->cache_stmt; + DBUG_RETURN(0); + } pthread_mutex_lock(&LOCK_log); - /* + /* In most cases this is only called if 'is_open()' is true; in fact this is mostly called if is_open() *was* true a few instructions before, but it could have changed since. */ - if (is_open()) + if (likely(is_open())) { - const char *local_db= event_info->get_db(); IO_CACHE *file= &log_file; -#ifdef USING_TRANSACTIONS - /* - Should we write to the binlog cache or to the binlog on disk? - Write to the binlog cache if: - - it is already not empty (meaning we're in a transaction; note that the - present event could be about a non-transactional table, but still we need - to write to the binlog cache in that case to handle updates to mixed - trans/non-trans table types the best possible in binlogging) - - or if the event asks for it (cache_stmt == true). - */ - if (opt_using_transactions && - (event_info->get_cache_stmt() || - (thd && my_b_tell(&thd->transaction.trans_log)))) - file= &thd->transaction.trans_log; -#endif - DBUG_PRINT("info",("event type=%d",event_info->get_type_code())); #ifdef HAVE_REPLICATION - /* - In the future we need to add to the following if tests like - "do the involved tables match (to be implemented) - binlog_[wild_]{do|ignore}_table?" (WL#1049)" + /* + In the future we need to add to the following if tests like + "do the involved tables match (to be implemented) + binlog_[wild_]{do|ignore}_table?" 
(WL#1049)" */ + const char *local_db= event_info->get_db(); if ((thd && !(thd->options & OPTION_BIN_LOG)) || (!db_ok(local_db, binlog_do_db, binlog_ignore_db))) { @@ -1289,103 +1646,81 @@ bool MYSQL_LOG::write(Log_event* event_info) } #endif /* HAVE_REPLICATION */ - error=1; +#ifdef USING_TRANSACTIONS + /* + Should we write to the binlog cache or to the binlog on disk? + Write to the binlog cache if: + - it is already not empty (meaning we're in a transaction; note that the + present event could be about a non-transactional table, but still we need + to write to the binlog cache in that case to handle updates to mixed + trans/non-trans table types the best possible in binlogging) + - or if the event asks for it (cache_stmt == TRUE). + */ + if (opt_using_transactions && thd) + { + IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot]; + + if (event_info->get_cache_stmt()) + { + if (!trans_log) + { + thd->ha_data[binlog_hton.slot]= trans_log= (IO_CACHE *) + my_malloc(sizeof(IO_CACHE), MYF(MY_ZEROFILL)); + if (!trans_log || open_cached_file(trans_log, mysql_tmpdir, + LOG_PREFIX, + binlog_cache_size, MYF(MY_WME))) + { + my_free((gptr)trans_log, MYF(MY_ALLOW_ZERO_PTR)); + thd->ha_data[binlog_hton.slot]= trans_log= 0; + goto err; + } + trans_log->end_of_file= max_binlog_cache_size; + trans_register_ha(thd, + test(thd->options & (OPTION_NOT_AUTOCOMMIT | + OPTION_BEGIN)), + &binlog_hton); + } + else if (!my_b_tell(trans_log)) + trans_register_ha(thd, + test(thd->options & (OPTION_NOT_AUTOCOMMIT | + OPTION_BEGIN)), + &binlog_hton); + file= trans_log; + } + else if (trans_log && my_b_tell(trans_log)) + file= trans_log; + } +#endif + DBUG_PRINT("info",("event type=%d",event_info->get_type_code())); + /* No check for auto events flag here - this write method should never be called if auto-events are enabled */ /* - 1. Write first log events which describe the 'run environment' - of the SQL command + 1. 
Write first log events which describe the 'run environment' + of the SQL command */ if (thd) { -#if MYSQL_VERSION_ID < 50000 - /* - To make replication of charsets working in 4.1 we are writing values - of charset related variables before every statement in the binlog, - if values of those variables differ from global server-wide defaults. - We are using SET ONE_SHOT command so that the charset vars get reset - to default after the first non-SET statement. - In the next 5.0 this won't be needed as we will use the new binlog - format to store charset info. - */ - if ((thd->variables.character_set_client->number != - global_system_variables.collation_server->number) || - (thd->variables.character_set_client->number != - thd->variables.collation_connection->number) || - (thd->variables.collation_server->number != - thd->variables.collation_connection->number)) - { - char buf[200]; - int written= my_snprintf(buf, sizeof(buf)-1, - "SET ONE_SHOT CHARACTER_SET_CLIENT=%u,\ -COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u", - (uint) thd->variables.character_set_client->number, - (uint) thd->variables.collation_connection->number, - (uint) thd->variables.collation_database->number, - (uint) thd->variables.collation_server->number); - Query_log_event e(thd, buf, written, 0, FALSE); - e.set_log_pos(this); - e.error_code = 0; // This statement cannot fail (see [1]). - if (e.write(file)) - goto err; - } - /* - We use the same ONE_SHOT trick for making replication of time zones - working in 4.1. Again in 5.0 we have better means for doing this. - */ - if (thd->time_zone_used && - thd->variables.time_zone != global_system_variables.time_zone) - { - char buf[MAX_TIME_ZONE_NAME_LENGTH + 26]; - char *buf_end= strxmov(buf, "SET ONE_SHOT TIME_ZONE='", - thd->variables.time_zone->get_name()->ptr(), - "'", NullS); - Query_log_event e(thd, buf, buf_end - buf, 0, FALSE); - e.set_log_pos(this); - e.error_code = 0; // This statement cannot fail (see [1]). 
- if (e.write(file)) - goto err; - } - /* - Use the same ONE_SHOT trick for making replication of lc_time_names. - */ - if (thd->variables.lc_time_names->number) // Not en_US - { - char buf[32]; - uint length= my_snprintf(buf, sizeof(buf), - "SET ONE_SHOT LC_TIME_NAMES=%u", - (uint) thd->variables.lc_time_names->number); - Query_log_event e(thd, buf, length, 0, FALSE); - e.set_log_pos(this); - e.error_code= 0; // This statement cannot fail (see [1]). - if (e.write(file)) - goto err; - } -#endif - - if (thd->last_insert_id_used) + if (thd->last_insert_id_used_bin_log) { Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT, thd->current_insert_id); - e.set_log_pos(this); if (e.write(file)) goto err; } if (thd->insert_id_used) { Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,thd->last_insert_id); - e.set_log_pos(this); if (e.write(file)) goto err; } if (thd->rand_used) { Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2); - e.set_log_pos(this); if (e.write(file)) goto err; } @@ -1401,139 +1736,25 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u", user_var_event->length, user_var_event->type, user_var_event->charset_number); - e.set_log_pos(this); if (e.write(file)) goto err; } } -#ifdef TO_BE_REMOVED - if (thd->variables.convert_set) - { - char buf[256], *p; - p= strmov(strmov(buf, "SET CHARACTER SET "), - thd->variables.convert_set->name); - Query_log_event e(thd, buf, (ulong) (p - buf), 0); - e.set_log_pos(this); - e.error_code = 0; // This statement cannot fail (see [1]). - if (e.write(file)) - goto err; - } -#endif - - /* - If the user has set FOREIGN_KEY_CHECKS=0 we wrap every SQL - command in the binlog inside: - SET FOREIGN_KEY_CHECKS=0; - <command>; - SET FOREIGN_KEY_CHECKS=1; - */ - - if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) - { - Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=0", 24, 0, FALSE); - e.set_log_pos(this); - e.error_code = 0; // This statement cannot fail (see [1]). 
- if (e.write(file)) - goto err; - } } - /* - Write the SQL command - - [1] If this statement has an error code, the slave is required to fail - with the same error code or stop. The preamble and epilogue should - *not* have this error code since the execution of those is - guaranteed *not* to produce any error code. This would therefore - stop the slave even if the execution of the real statement can be - handled gracefully by the slave. + /* + Write the SQL command */ - event_info->set_log_pos(this); if (event_info->write(file)) goto err; - /* Write log events to reset the 'run environment' of the SQL command */ - - if (thd) - { - if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) - { - Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=1", 24, 0, FALSE); - e.set_log_pos(this); - e.error_code = 0; // This statement cannot fail (see [1]). - if (e.write(file)) - goto err; - } - } - - /* - Tell for transactional table handlers up to which position in the - binlog file we wrote. The table handler can store this info, and - after crash recovery print for the user the offset of the last - transactions which were recovered. Actually, we must also call - the table handler commit here, protected by the LOCK_log mutex, - because otherwise the transactions may end up in a different order - in the table handler log! - - Note that we will NOT call ha_report_binlog_offset_and_commit() if - there are binlog events cached in the transaction cache. That is - because then the log event which we write to the binlog here is - not a transactional event. In versions < 4.0.13 before this fix this - caused an InnoDB transaction to be committed if in the middle there - was a MyISAM event! 
- */ - if (file == &log_file) // we are writing to the real log (disk) { - if (flush_io_cache(file) || sync_binlog(file)) + if (flush_and_sync()) goto err; - - if (opt_using_transactions && - !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))) - { - /* - LOAD DATA INFILE in AUTOCOMMIT=1 mode writes to the binlog - chunks also before it is successfully completed. We only report - the binlog write and do the commit inside the transactional table - handler if the log event type is appropriate. - */ - - if (event_info->get_type_code() == QUERY_EVENT || - event_info->get_type_code() == EXEC_LOAD_EVENT) - { -#ifndef DBUG_OFF - if (unlikely(opt_crash_binlog_innodb)) - { - /* - This option is for use in rpl_crash_binlog_innodb.test. - 1st we want to verify that Binlog_dump thread cannot send the - event now (because of LOCK_log): we here tell the Binlog_dump - thread to wake up, sleep for the slave to have time to possibly - receive data from the master (it should not), and then crash. - 2nd we want to verify that at crash recovery the rolled back - event is cut from the binlog. 
- */ - if (!(--opt_crash_binlog_innodb)) - { - signal_update(); - sleep(2); - fprintf(stderr,"This is a normal crash because of" - " --crash-binlog-innodb\n"); - assert(0); - } - DBUG_PRINT("info",("opt_crash_binlog_innodb: %d", - opt_crash_binlog_innodb)); - } -#endif - error = ha_report_binlog_offset_and_commit(thd, log_file_name, - file->pos_in_file); - called_handler_commit=1; - } - } - /* We wrote to the real log, check automatic rotation; */ - DBUG_PRINT("info",("max_size: %lu",max_size)); - should_rotate= (my_b_tell(file) >= (my_off_t) max_size); + signal_update(); + rotate_and_purge(RP_LOCK_LOG_IS_ALREADY_LOCKED); } error=0; @@ -1541,44 +1762,38 @@ err: if (error) { if (my_errno == EFBIG) - my_error(ER_TRANS_CACHE_FULL, MYF(0)); + my_message(ER_TRANS_CACHE_FULL, ER(ER_TRANS_CACHE_FULL), MYF(0)); else my_error(ER_ERROR_ON_WRITE, MYF(0), name, errno); write_error=1; } - if (file == &log_file) - signal_update(); - if (should_rotate) - { - pthread_mutex_lock(&LOCK_index); - new_file(0); // inside mutex - pthread_mutex_unlock(&LOCK_index); - } } pthread_mutex_unlock(&LOCK_log); + DBUG_RETURN(error); +} - /* - Flush the transactional handler log file now that we have released - LOCK_log; the flush is placed here to eliminate the bottleneck on the - group commit - */ - - if (called_handler_commit) - ha_commit_complete(thd); - -#ifdef HAVE_REPLICATION - if (should_rotate && expire_logs_days) +void MYSQL_LOG::rotate_and_purge(uint flags) +{ + if (!(flags & RP_LOCK_LOG_IS_ALREADY_LOCKED)) + pthread_mutex_lock(&LOCK_log); + if ((flags & RP_FORCE_ROTATE) || + (my_b_tell(&log_file) >= (my_off_t) max_size)) { - long purge_time= time(0) - expire_logs_days*24*60*60; - if (purge_time >= 0) - error= purge_logs_before_date(purge_time); - } + new_file(0); +#ifdef HAVE_REPLICATION + if (expire_logs_days) + { + long purge_time= (long) (time(0) - expire_logs_days*24*60*60); + if (purge_time >= 0) + purge_logs_before_date(purge_time); + } #endif - DBUG_RETURN(error); + } + if 
(!(flags & RP_LOCK_LOG_IS_ALREADY_LOCKED)) + pthread_mutex_unlock(&LOCK_log); } - uint MYSQL_LOG::next_file_id() { uint res; @@ -1594,10 +1809,8 @@ uint MYSQL_LOG::next_file_id() SYNOPSIS write() - thd + thd cache The cache to copy to the binlog - commit_or_rollback If true, will write "COMMIT" in the end, if false will - write "ROLLBACK". NOTE - We only come here if there is something in the cache. @@ -1607,42 +1820,37 @@ uint MYSQL_LOG::next_file_id() IMPLEMENTATION - To support transaction over replication, we wrap the transaction with BEGIN/COMMIT or BEGIN/ROLLBACK in the binary log. - We want to write a BEGIN/ROLLBACK block when a non-transactional table was - updated in a transaction which was rolled back. This is to ensure that the - same updates are run on the slave. + We want to write a BEGIN/ROLLBACK block when a non-transactional table + was updated in a transaction which was rolled back. This is to ensure + that the same updates are run on the slave. */ -bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback) +bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event) { - bool should_rotate= 0, error= 0; + DBUG_ENTER("MYSQL_LOG::write(THD *, IO_CACHE *, Log_event *)"); VOID(pthread_mutex_lock(&LOCK_log)); - DBUG_ENTER("MYSQL_LOG::write(cache"); - - if (is_open()) // Should always be true + + /* NULL would represent nothing to replicate after ROLLBACK */ + DBUG_ASSERT(commit_event != NULL); + + if (likely(is_open())) // Should always be true { uint length; /* - Add the "BEGIN" and "COMMIT" in the binlog around transactions - which may contain more than 1 SQL statement. If we run with - AUTOCOMMIT=1, then MySQL immediately writes each SQL statement to - the binlog when the statement has been completed. No need to add - "BEGIN" ... "COMMIT" around such statements. 
Otherwise, MySQL uses - thd->transaction.trans_log to cache the SQL statements until the - explicit commit, and at the commit writes the contents in .trans_log - to the binlog. - - We write the "BEGIN" mark first in the buffer (.trans_log) where we - store the SQL statements for a transaction. At the transaction commit - we will add the "COMMIT mark and write the buffer to the binlog. + Log "BEGIN" at the beginning of the transaction. + which may contain more than 1 SQL statement. */ + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { - Query_log_event qinfo(thd, "BEGIN", 5, TRUE, FALSE); + Query_log_event qinfo(thd, STRING_WITH_LEN("BEGIN"), TRUE, FALSE); /* Imagine this is rollback due to net timeout, after all statements of the transaction succeeded. Then we want a zero-error code in BEGIN. In other words, if there was a really serious error code it's already - in the statement's events. + in the statement's events, there is no need to put it also in this + internally generated event, and as this event is generated late it + would lead to false alarms. This is safer than thd->clear_error() against kills at shutdown. */ qinfo.error_code= 0; @@ -1653,7 +1861,6 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback) master's binlog, which would result in wrong positions being shown to the user, MASTER_POS_WAIT undue waiting etc. 
*/ - qinfo.set_log_pos(this); if (qinfo.write(&log_file)) goto err; } @@ -1661,83 +1868,51 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback) if (reinit_io_cache(cache, READ_CACHE, 0, 0, 0)) goto err; length=my_b_bytes_in_cache(cache); + DBUG_EXECUTE_IF("half_binlogged_transaction", length-=100;); do { /* Write data to the binary log file */ if (my_b_write(&log_file, cache->read_pos, length)) goto err; cache->read_pos=cache->read_end; // Mark buffer used up + DBUG_EXECUTE_IF("half_binlogged_transaction", goto DBUG_skip_commit;); } while ((length=my_b_fill(cache))); - /* - We write the command "COMMIT" as the last SQL command in the - binlog segment cached for this transaction - */ - - { - Query_log_event qinfo(thd, - commit_or_rollback ? "COMMIT" : "ROLLBACK", - commit_or_rollback ? 6 : 8, - TRUE, FALSE); - qinfo.error_code= 0; - qinfo.set_log_pos(this); - if (qinfo.write(&log_file) || flush_io_cache(&log_file) || - sync_binlog(&log_file)) - goto err; - } + if (commit_event->write(&log_file)) + goto err; +#ifndef DBUG_OFF +DBUG_skip_commit: +#endif + if (flush_and_sync()) + goto err; + DBUG_EXECUTE_IF("half_binlogged_transaction", abort();); if (cache->error) // Error on read { sql_print_error(ER(ER_ERROR_ON_READ), cache->file_name, errno); write_error=1; // Don't give more errors goto err; } -#ifndef DBUG_OFF - if (unlikely(opt_crash_binlog_innodb)) - { - /* see the previous MYSQL_LOG::write() method for a comment */ - if (!(--opt_crash_binlog_innodb)) - { - signal_update(); - sleep(2); - fprintf(stderr, "This is a normal crash because of" - " --crash-binlog-innodb\n"); - assert(0); - } - DBUG_PRINT("info",("opt_crash_binlog_innodb: %d", - opt_crash_binlog_innodb)); - } -#endif - if ((ha_report_binlog_offset_and_commit(thd, log_file_name, - log_file.pos_in_file))) - goto err; signal_update(); - DBUG_PRINT("info",("max_size: %lu",max_size)); - if (should_rotate= (my_b_tell(&log_file) >= (my_off_t) max_size)) + /* + if commit_event is 
Xid_log_event, increase the number of + prepared_xids (it's decreasd in ::unlog()). Binlog cannot be rotated + if there're prepared xids in it - see the comment in new_file() for + an explanation. + If the commit_event is not Xid_log_event (then it's a Query_log_event) + rotate binlog, if necessary. + */ + if (commit_event->get_type_code() == XID_EVENT) { - pthread_mutex_lock(&LOCK_index); - new_file(0); // inside mutex - pthread_mutex_unlock(&LOCK_index); + pthread_mutex_lock(&LOCK_prep_xids); + prepared_xids++; + pthread_mutex_unlock(&LOCK_prep_xids); } - + else + rotate_and_purge(RP_LOCK_LOG_IS_ALREADY_LOCKED); } VOID(pthread_mutex_unlock(&LOCK_log)); - /* Flush the transactional handler log file now that we have released - LOCK_log; the flush is placed here to eliminate the bottleneck on the - group commit */ - - ha_commit_complete(thd); - -#ifdef HAVE_REPLICATION - if (should_rotate && expire_logs_days) - { - long purge_time= time(0) - expire_logs_days*24*60*60; - if (purge_time >= 0) - error= purge_logs_before_date(purge_time); - } -#endif - - DBUG_RETURN(error); + DBUG_RETURN(0); err: if (!write_error) @@ -1751,8 +1926,7 @@ err: /* - Write update log in a format suitable for incremental backup - This is also used by the slow query log. + Write to the slow query log. */ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, @@ -1777,6 +1951,7 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, } if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT) || query_start_arg) { + Security_context *sctx= thd->security_ctx; current_time=time(NULL); if (current_time != last_time) { @@ -1797,10 +1972,12 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, tmp_errno=errno; } if (my_b_printf(&log_file, "# User@Host: %s[%s] @ %s [%s]\n", - thd->priv_user ? thd->priv_user : "", - thd->user ? thd->user : "", - thd->host ? thd->host : "", - thd->ip ? thd->ip : "") == (uint) -1) + sctx->priv_user ? 
+ sctx->priv_user : "", + sctx->user ? sctx->user : "", + sctx->host ? sctx->host : "", + sctx->ip ? sctx->ip : "") == + (uint) -1) tmp_errno=errno; } if (query_start_arg) @@ -1820,7 +1997,7 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, tmp_errno=errno; strmov(db,thd->db); } - if (thd->last_insert_id_used) + if (thd->last_insert_id_used_bin_log) { end=strmov(end,",last_insert_id="); end=longlong10_to_str((longlong) thd->current_insert_id,end,-10); @@ -1883,26 +2060,27 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, SYNOPSIS wait_for_update() thd Thread variable - master_or_slave If 0, the caller is the Binlog_dump thread from master; + is_slave If 0, the caller is the Binlog_dump thread from master; if 1, the caller is the SQL thread from the slave. This influences only thd->proc_info. NOTES One must have a lock on LOCK_log before calling this function. - This lock will be freed before return! That's required by + This lock will be released before return! That's required by THD::enter_cond() (see NOTES in sql_class.h). */ -void MYSQL_LOG::wait_for_update(THD* thd, bool master_or_slave) +void MYSQL_LOG::wait_for_update(THD* thd, bool is_slave) { const char *old_msg; DBUG_ENTER("wait_for_update"); + old_msg= thd->enter_cond(&update_cond, &LOCK_log, - master_or_slave ? + is_slave ? "Has read all relay log; waiting for the slave I/O " - "thread to update it" : + "thread to update it" : "Has sent all binlog to slave; waiting for binlog " - "to be updated"); + "to be updated"); pthread_cond_wait(&update_cond, &LOCK_log); thd->exit_cond(old_msg); DBUG_VOID_RETURN; @@ -1914,11 +2092,11 @@ void MYSQL_LOG::wait_for_update(THD* thd, bool master_or_slave) SYNOPSIS close() - exiting Bitmask for one or more of the following bits: - LOG_CLOSE_INDEX if we should close the index file - LOG_CLOSE_TO_BE_OPENED if we intend to call open - at once after close. 
- LOG_CLOSE_STOP_EVENT write a 'stop' event to the log + exiting Bitmask for one or more of the following bits: + LOG_CLOSE_INDEX if we should close the index file + LOG_CLOSE_TO_BE_OPENED if we intend to call open + at once after close. + LOG_CLOSE_STOP_EVENT write a 'stop' event to the log NOTES One can do an open on the object at once after doing a close. @@ -1936,13 +2114,27 @@ void MYSQL_LOG::close(uint exiting) (exiting & LOG_CLOSE_STOP_EVENT)) { Stop_log_event s; - s.set_log_pos(this); s.write(&log_file); + bytes_written+= s.data_written; signal_update(); } #endif /* HAVE_REPLICATION */ end_io_cache(&log_file); - if (my_close(log_file.file,MYF(0)) < 0 && ! write_error) + + /* don't pwrite in a file opened with O_APPEND - it doesn't work */ + if (log_file.type == WRITE_CACHE && log_type == LOG_BIN) + { + my_off_t offset= BIN_LOG_HEADER_SIZE + FLAGS_OFFSET; + byte flags=0; // clearing LOG_EVENT_BINLOG_IN_USE_F + my_pwrite(log_file.file, &flags, 1, offset, MYF(0)); + } + + if (my_sync(log_file.file,MYF(MY_WME)) && ! write_error) + { + write_error=1; + sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno); + } + if (my_close(log_file.file,MYF(MY_WME)) && ! write_error) { write_error=1; sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno); @@ -2093,7 +2285,7 @@ bool flush_error_log() On Windows is necessary a temporary file for to rename the current error file. */ - strmov(strmov(err_temp, err_renamed),"-tmp"); + strxmov(err_temp, err_renamed,"-tmp",NullS); (void) my_delete(err_temp, MYF(0)); if (freopen(err_temp,"a+",stdout)) { @@ -2126,145 +2318,6 @@ bool flush_error_log() return result; } - -/* - If the server has InnoDB on, and InnoDB has published the position of the - last committed transaction (which happens only if a crash recovery occured at - this startup) then truncate the previous binary log at the position given by - InnoDB. If binlog is shorter than the position, print a message to the error - log. 
- - SYNOPSIS - cut_spurious_tail() - - RETURN VALUES - 1 Error - 0 Ok -*/ - -bool MYSQL_LOG::cut_spurious_tail() -{ - int error= 0; - DBUG_ENTER("cut_spurious_tail"); - -#ifdef HAVE_INNOBASE_DB - if (have_innodb != SHOW_OPTION_YES) - DBUG_RETURN(0); - /* - This is the place where we use information from InnoDB to cut the - binlog. - */ - char *name= ha_innobase::get_mysql_bin_log_name(); - ulonglong pos= ha_innobase::get_mysql_bin_log_pos(); - ulonglong actual_size; - char llbuf1[22], llbuf2[22]; - - if (name[0] == 0 || pos == ULONGLONG_MAX) - { - DBUG_PRINT("info", ("InnoDB has not set binlog info")); - DBUG_RETURN(0); - } - /* The binlog given by InnoDB normally is never an active binlog */ - if (is_open() && is_active(name)) - { - sql_print_error("Warning: after InnoDB crash recovery, InnoDB says that " - "the binary log of the previous run has the same name " - "'%s' as the current one; this is likely to be abnormal.", - name); - DBUG_RETURN(1); - } - sql_print_error("After InnoDB crash recovery, checking if the binary log " - "'%s' contains rolled back transactions which must be " - "removed from it...", name); - /* If we have a too long binlog, cut. If too short, print error */ - int fd= my_open(name, O_EXCL | O_APPEND | O_BINARY | O_WRONLY, MYF(MY_WME)); - if (fd < 0) - { - int save_errno= my_errno; - sql_print_error("Could not open the binary log '%s' for truncation.", - name); - if (save_errno != ENOENT) - sql_print_error("The binary log '%s' should not be used for " - "replication.", name); - DBUG_RETURN(1); - } - - if (pos > (actual_size= my_seek(fd, 0L, MY_SEEK_END, MYF(MY_WME)))) - { - /* - Note that when we have MyISAM rollback this error message should be - reconsidered. - */ - sql_print_error("The binary log '%s' is shorter than its expected size " - "(actual: %s, expected: %s) so it misses at least one " - "committed transaction; so it should not be used for " - "replication or point-in-time recovery. 
You would need " - "to restart slaves from a fresh master's data " - "snapshot ", - name, llstr(actual_size, llbuf1), - llstr(pos, llbuf2)); - error= 1; - goto err; - } - if (pos < actual_size) - { - sql_print_error("The binary log '%s' is bigger than its expected size " - "(actual: %s, expected: %s) so it contains a rolled back " - "transaction; now truncating that.", name, - llstr(actual_size, llbuf1), llstr(pos, llbuf2)); - /* - As on some OS, my_chsize() can only pad with 0s instead of really - truncating. Then mysqlbinlog (and Binlog_dump thread) will error on - these zeroes. This is annoying, but not more (you just need to manually - switch replication to the next binlog). Fortunately, in my_chsize.c, it - says that all modern machines support real ftruncate(). - - */ - if ((error= my_chsize(fd, pos, 0, MYF(MY_WME)))) - goto err; - } -err: - if (my_close(fd, MYF(MY_WME))) - error= 1; -#endif - DBUG_RETURN(error); -} - - -/* - If the server has InnoDB on, store the binlog name and position into - InnoDB. This function is used every time we create a new binlog. - - SYNOPSIS - report_pos_in_innodb() - - NOTES - This cannot simply be done in MYSQL_LOG::open(), because when we create - the first binlog at startup, we have not called ha_init() yet so we cannot - write into InnoDB yet. 
- - RETURN VALUES - 1 Error - 0 Ok -*/ - -void MYSQL_LOG::report_pos_in_innodb() -{ - DBUG_ENTER("report_pos_in_innodb"); -#ifdef HAVE_INNOBASE_DB - if (is_open() && have_innodb == SHOW_OPTION_YES) - { - DBUG_PRINT("info", ("Reporting binlog info into InnoDB - " - "name: '%s' position: %d", - log_file_name, my_b_tell(&log_file))); - innobase_store_binlog_offset_and_flush_log(log_file_name, - my_b_tell(&log_file)); - } -#endif - DBUG_VOID_RETURN; -} - - void MYSQL_LOG::signal_update() { DBUG_ENTER("MYSQL_LOG::signal_update"); @@ -2272,7 +2325,6 @@ void MYSQL_LOG::signal_update() DBUG_VOID_RETURN; } - #ifdef __NT__ void print_buffer_to_nt_eventlog(enum loglevel level, char *buff, uint length, int buffLen) @@ -2329,7 +2381,7 @@ void print_buffer_to_nt_eventlog(enum loglevel level, char *buff, vprint_msg_to_log() event_type Type of event to write (Error, Warning, or Info) format Printf style format of message - args va_list list of arguments for the message + args va_list list of arguments for the message NOTE @@ -2402,3 +2454,662 @@ void sql_print_information(const char *format, ...) DBUG_VOID_RETURN; } + + +/********* transaction coordinator log for 2pc - mmap() based solution *******/ + +/* + the log consists of a file, mmapped to a memory. + file is divided on pages of tc_log_page_size size. + (usable size of the first page is smaller because of log header) + there's PAGE control structure for each page + each page (or rather PAGE control structure) can be in one of three + states - active, syncing, pool. + there could be only one page in active or syncing states, + but many in pool - pool is fifo queue. + usual lifecycle of a page is pool->active->syncing->pool + "active" page - is a page where new xid's are logged. + the page stays active as long as syncing slot is taken. + "syncing" page is being synced to disk. no new xid can be added to it. + when the sync is done the page is moved to a pool and an active page + becomes "syncing". 
+ + the result of such an architecture is a natural "commit grouping" - + If commits are coming faster than the system can sync, they do not + stall. Instead, all commit that came since the last sync are + logged to the same page, and they all are synced with the next - + one - sync. Thus, thought individual commits are delayed, throughput + is not decreasing. + + when a xid is added to an active page, the thread of this xid waits + for a page's condition until the page is synced. when syncing slot + becomes vacant one of these waiters is awaken to take care of syncing. + it syncs the page and signals all waiters that the page is synced. + PAGE::waiters is used to count these waiters, and a page may never + become active again until waiters==0 (that is all waiters from the + previous sync have noticed the sync was completed) + + note, that the page becomes "dirty" and has to be synced only when a + new xid is added into it. Removing a xid from a page does not make it + dirty - we don't sync removals to disk. 
+*/ + +ulong tc_log_page_waits= 0; + +#ifdef HAVE_MMAP + +#define TC_LOG_HEADER_SIZE (sizeof(tc_log_magic)+1) + +static const char tc_log_magic[]={(char) 254, 0x23, 0x05, 0x74}; + +ulong opt_tc_log_size= TC_LOG_MIN_SIZE; +ulong tc_log_max_pages_used=0, tc_log_page_size=0, tc_log_cur_pages_used=0; + +int TC_LOG_MMAP::open(const char *opt_name) +{ + uint i; + bool crashed=FALSE; + PAGE *pg; + + DBUG_ASSERT(total_ha_2pc > 1); + DBUG_ASSERT(opt_name && opt_name[0]); + + tc_log_page_size= my_getpagesize(); + DBUG_ASSERT(TC_LOG_PAGE_SIZE % tc_log_page_size == 0); + + fn_format(logname,opt_name,mysql_data_home,"",MY_UNPACK_FILENAME); + if ((fd= my_open(logname, O_RDWR, MYF(0))) < 0) + { + if (my_errno != ENOENT) + goto err; + if (using_heuristic_recover()) + return 1; + if ((fd= my_create(logname, CREATE_MODE, O_RDWR, MYF(MY_WME))) < 0) + goto err; + inited=1; + file_length= opt_tc_log_size; + if (my_chsize(fd, file_length, 0, MYF(MY_WME))) + goto err; + } + else + { + inited= 1; + crashed= TRUE; + sql_print_information("Recovering after a crash using %s", opt_name); + if (tc_heuristic_recover) + { + sql_print_error("Cannot perform automatic crash recovery when " + "--tc-heuristic-recover is used"); + goto err; + } + file_length= my_seek(fd, 0L, MY_SEEK_END, MYF(MY_WME+MY_FAE)); + if (file_length == MY_FILEPOS_ERROR || file_length % tc_log_page_size) + goto err; + } + + data= (uchar *)my_mmap(0, (size_t)file_length, PROT_READ|PROT_WRITE, + MAP_NOSYNC|MAP_SHARED, fd, 0); + if (data == MAP_FAILED) + { + my_errno=errno; + goto err; + } + inited=2; + + npages=(uint)file_length/tc_log_page_size; + DBUG_ASSERT(npages >= 3); // to guarantee non-empty pool + if (!(pages=(PAGE *)my_malloc(npages*sizeof(PAGE), MYF(MY_WME|MY_ZEROFILL)))) + goto err; + inited=3; + for (pg=pages, i=0; i < npages; i++, pg++) + { + pg->next=pg+1; + pg->waiters=0; + pg->state=POOL; + pthread_mutex_init(&pg->lock, MY_MUTEX_INIT_FAST); + pthread_cond_init (&pg->cond, 0); + pg->start=(my_xid *)(data + 
i*tc_log_page_size); + pg->end=(my_xid *)(pg->start + tc_log_page_size); + pg->size=pg->free=tc_log_page_size/sizeof(my_xid); + } + pages[0].size=pages[0].free= + (tc_log_page_size-TC_LOG_HEADER_SIZE)/sizeof(my_xid); + pages[0].start=pages[0].end-pages[0].size; + pages[npages-1].next=0; + inited=4; + + if (crashed && recover()) + goto err; + + memcpy(data, tc_log_magic, sizeof(tc_log_magic)); + data[sizeof(tc_log_magic)]= (uchar)total_ha_2pc; + my_msync(fd, data, tc_log_page_size, MS_SYNC); + inited=5; + + pthread_mutex_init(&LOCK_sync, MY_MUTEX_INIT_FAST); + pthread_mutex_init(&LOCK_active, MY_MUTEX_INIT_FAST); + pthread_mutex_init(&LOCK_pool, MY_MUTEX_INIT_FAST); + pthread_cond_init(&COND_active, 0); + pthread_cond_init(&COND_pool, 0); + + inited=6; + + syncing= 0; + active=pages; + pool=pages+1; + pool_last=pages+npages-1; + + return 0; + +err: + close(); + return 1; +} + +/* + there is no active page, let's got one from the pool + + two strategies here: + 1. take the first from the pool + 2. if there're waiters - take the one with the most free space + + TODO page merging. try to allocate adjacent page first, + so that they can be flushed both in one sync +*/ +void TC_LOG_MMAP::get_active_from_pool() +{ + PAGE **p, **best_p=0; + int best_free; + + if (syncing) + pthread_mutex_lock(&LOCK_pool); + + do + { + best_p= p= &pool; + if ((*p)->waiters == 0) // can the first page be used ? + break; // yes - take it. 
+ + best_free=0; // no - trying second strategy + for (p=&(*p)->next; *p; p=&(*p)->next) + { + if ((*p)->waiters == 0 && (*p)->free > best_free) + { + best_free=(*p)->free; + best_p=p; + } + } + } + while ((*best_p == 0 || best_free == 0) && overflow()); + + active=*best_p; + if (active->free == active->size) // we've chosen an empty page + { + tc_log_cur_pages_used++; + set_if_bigger(tc_log_max_pages_used, tc_log_cur_pages_used); + } + + if ((*best_p)->next) // unlink the page from the pool + *best_p=(*best_p)->next; + else + pool_last=*best_p; + + if (syncing) + pthread_mutex_unlock(&LOCK_pool); +} + +int TC_LOG_MMAP::overflow() +{ + /* + simple overflow handling - just wait + TODO perhaps, increase log size ? + let's check the behaviour of tc_log_page_waits first + */ + tc_log_page_waits++; + pthread_cond_wait(&COND_pool, &LOCK_pool); + return 1; // always return 1 +} + +/* + Record that transaction XID is committed on the persistent storage + + NOTES + This function is called in the middle of two-phase commit: + First all resources prepare the transaction, then tc_log->log() is called, + then all resources commit the transaction, then tc_log->unlog() is called. + + All access to active page is serialized but it's not a problem, as + we're assuming that fsync() will be a main bottleneck. + That is, parallelizing writes to log pages we'll decrease number of + threads waiting for a page, but then all these threads will be waiting + for a fsync() anyway + + IMPLEMENTATION + If tc_log == MYSQL_LOG then tc_log writes transaction to binlog and + records XID in a special Xid_log_event. + If tc_log = TC_LOG_MMAP then xid is written in a special memory-mapped + log. + + RETURN + 0 Error + # "cookie", a number that will be passed as an argument + to unlog() call. tc_log can define it any way it wants, + and use for whatever purposes. TC_LOG_MMAP sets it + to the position in memory where xid was logged to. 
+*/ + +int TC_LOG_MMAP::log_xid(THD *thd, my_xid xid) +{ + int err; + PAGE *p; + ulong cookie; + + pthread_mutex_lock(&LOCK_active); + + /* + if active page is full - just wait... + frankly speaking, active->free here accessed outside of mutex + protection, but it's safe, because it only means we may miss an + unlog() for the active page, and we're not waiting for it here - + unlog() does not signal COND_active. + */ + while (unlikely(active && active->free == 0)) + pthread_cond_wait(&COND_active, &LOCK_active); + + /* no active page ? take one from the pool */ + if (active == 0) + get_active_from_pool(); + + p=active; + pthread_mutex_lock(&p->lock); + + /* searching for an empty slot */ + while (*p->ptr) + { + p->ptr++; + DBUG_ASSERT(p->ptr < p->end); // because p->free > 0 + } + + /* found! store xid there and mark the page dirty */ + cookie= (ulong)((uchar *)p->ptr - data); // can never be zero + *p->ptr++= xid; + p->free--; + p->state= DIRTY; + + /* to sync or not to sync - this is the question */ + pthread_mutex_unlock(&LOCK_active); + pthread_mutex_lock(&LOCK_sync); + pthread_mutex_unlock(&p->lock); + + if (syncing) + { // somebody's syncing. let's wait + p->waiters++; + /* + note - it must be while (), not do ... while () here + as p->state may be not DIRTY when we come here + */ + while (p->state == DIRTY && syncing) + pthread_cond_wait(&p->cond, &LOCK_sync); + p->waiters--; + err= p->state == ERROR; + if (p->state != DIRTY) // page was synced + { + if (p->waiters == 0) + pthread_cond_signal(&COND_pool); // in case somebody's waiting + pthread_mutex_unlock(&LOCK_sync); + goto done; // we're done + } + } // page was not synced! 
do it now + DBUG_ASSERT(active == p && syncing == 0); + pthread_mutex_lock(&LOCK_active); + syncing=p; // place is vacant - take it + active=0; // page is not active anymore + pthread_cond_broadcast(&COND_active); // in case somebody's waiting + pthread_mutex_unlock(&LOCK_active); + pthread_mutex_unlock(&LOCK_sync); + err= sync(); + +done: + return err ? 0 : cookie; +} + +int TC_LOG_MMAP::sync() +{ + int err; + + DBUG_ASSERT(syncing != active); + + /* + sit down and relax - this can take a while... + note - no locks are held at this point + */ + err= my_msync(fd, syncing->start, 1, MS_SYNC); + + /* page is synced. let's move it to the pool */ + pthread_mutex_lock(&LOCK_pool); + pool_last->next=syncing; + pool_last=syncing; + syncing->next=0; + syncing->state= err ? ERROR : POOL; + pthread_cond_broadcast(&syncing->cond); // signal "sync done" + pthread_cond_signal(&COND_pool); // in case somebody's waiting + pthread_mutex_unlock(&LOCK_pool); + + /* marking 'syncing' slot free */ + pthread_mutex_lock(&LOCK_sync); + syncing=0; + pthread_cond_signal(&active->cond); // wake up a new syncer + pthread_mutex_unlock(&LOCK_sync); + return err; +} + +/* + erase xid from the page, update page free space counters/pointers. + cookie points directly to the memory where xid was logged +*/ + +void TC_LOG_MMAP::unlog(ulong cookie, my_xid xid) +{ + PAGE *p=pages+(cookie/tc_log_page_size); + my_xid *x=(my_xid *)(data+cookie); + + DBUG_ASSERT(*x == xid); + DBUG_ASSERT(x >= p->start && x < p->end); + *x=0; + + pthread_mutex_lock(&p->lock); + p->free++; + DBUG_ASSERT(p->free <= p->size); + set_if_smaller(p->ptr, x); + if (p->free == p->size) // the page is completely empty + statistic_decrement(tc_log_cur_pages_used, &LOCK_status); + if (p->waiters == 0) // the page is in pool and ready to rock + pthread_cond_signal(&COND_pool); // ping ... 
for overflow() + pthread_mutex_unlock(&p->lock); +} + +void TC_LOG_MMAP::close() +{ + uint i; + switch (inited) { + case 6: + pthread_mutex_destroy(&LOCK_sync); + pthread_mutex_destroy(&LOCK_active); + pthread_mutex_destroy(&LOCK_pool); + pthread_cond_destroy(&COND_pool); + case 5: + data[0]='A'; // garble the first (signature) byte, in case my_delete fails + case 4: + for (i=0; i < npages; i++) + { + if (pages[i].ptr == 0) + break; + pthread_mutex_destroy(&pages[i].lock); + pthread_cond_destroy(&pages[i].cond); + } + case 3: + my_free((gptr)pages, MYF(0)); + case 2: + my_munmap((byte*)data, (size_t)file_length); + case 1: + my_close(fd, MYF(0)); + } + if (inited>=5) // cannot do in the switch because of Windows + my_delete(logname, MYF(MY_WME)); + inited=0; +} + +int TC_LOG_MMAP::recover() +{ + HASH xids; + PAGE *p=pages, *end_p=pages+npages; + + if (memcmp(data, tc_log_magic, sizeof(tc_log_magic))) + { + sql_print_error("Bad magic header in tc log"); + goto err1; + } + + /* + the first byte after magic signature is set to current + number of storage engines on startup + */ + if (data[sizeof(tc_log_magic)] != total_ha_2pc) + { + sql_print_error("Recovery failed! You must enable " + "exactly %d storage engines that support " + "two-phase commit protocol", + data[sizeof(tc_log_magic)]); + goto err1; + } + + if (hash_init(&xids, &my_charset_bin, tc_log_page_size/3, 0, + sizeof(my_xid), 0, 0, MYF(0))) + goto err1; + + for ( ; p < end_p ; p++) + { + for (my_xid *x=p->start; x < p->end; x++) + if (*x && my_hash_insert(&xids, (byte *)x)) + goto err2; // OOM + } + + if (ha_recover(&xids)) + goto err2; + + hash_free(&xids); + bzero(data, (size_t)file_length); + return 0; + +err2: + hash_free(&xids); +err1: + sql_print_error("Crash recovery failed. 
Either correct the problem " + "(if it's, for example, out of memory error) and restart, " + "or delete tc log and start mysqld with " + "--tc-heuristic-recover={commit|rollback}"); + return 1; +} +#endif + +TC_LOG *tc_log; +TC_LOG_DUMMY tc_log_dummy; +TC_LOG_MMAP tc_log_mmap; + +/* + Perform heuristic recovery, if --tc-heuristic-recover was used + + RETURN VALUE + 0 no heuristic recovery was requested + 1 heuristic recovery was performed + + NOTE + no matter whether heuristic recovery was successful or not + mysqld must exit. So, return value is the same in both cases. +*/ + +int TC_LOG::using_heuristic_recover() +{ + if (!tc_heuristic_recover) + return 0; + + sql_print_information("Heuristic crash recovery mode"); + if (ha_recover(0)) + sql_print_error("Heuristic crash recovery failed"); + sql_print_information("Please restart mysqld without --tc-heuristic-recover"); + return 1; +} + +/****** transaction coordinator log for 2pc - binlog() based solution ******/ +#define TC_LOG_BINLOG MYSQL_LOG + +/* + TODO keep in-memory list of prepared transactions + (add to list in log(), remove on unlog()) + and copy it to the new binlog if rotated + but let's check the behaviour of tc_log_page_waits first! 
+*/ + +int TC_LOG_BINLOG::open(const char *opt_name) +{ + LOG_INFO log_info; + int error= 1; + + DBUG_ASSERT(total_ha_2pc > 1); + DBUG_ASSERT(opt_name && opt_name[0]); + + pthread_mutex_init(&LOCK_prep_xids, MY_MUTEX_INIT_FAST); + pthread_cond_init (&COND_prep_xids, 0); + + if (!my_b_inited(&index_file)) + { + /* There was a failure to open the index file, can't open the binlog */ + cleanup(); + return 1; + } + + if (using_heuristic_recover()) + { + /* generate a new binlog to mask a corrupted one */ + open(opt_name, LOG_BIN, 0, WRITE_CACHE, 0, max_binlog_size, 0); + cleanup(); + return 1; + } + + if ((error= find_log_pos(&log_info, NullS, 1))) + { + if (error != LOG_INFO_EOF) + sql_print_error("find_log_pos() failed (error: %d)", error); + else + error= 0; + goto err; + } + + { + const char *errmsg; + IO_CACHE log; + File file; + Log_event *ev=0; + Format_description_log_event fdle(BINLOG_VERSION); + char log_name[FN_REFLEN]; + + if (! fdle.is_valid()) + goto err; + + do + { + strmake(log_name, log_info.log_file_name, sizeof(log_name)-1); + } while (!(error= find_next_log(&log_info, 1))); + + if (error != LOG_INFO_EOF) + { + sql_print_error("find_log_pos() failed (error: %d)", error); + goto err; + } + + if ((file= open_binlog(&log, log_name, &errmsg)) < 0) + { + sql_print_error("%s", errmsg); + goto err; + } + + if ((ev= Log_event::read_log_event(&log, 0, &fdle)) && + ev->get_type_code() == FORMAT_DESCRIPTION_EVENT && + ev->flags & LOG_EVENT_BINLOG_IN_USE_F) + { + sql_print_information("Recovering after a crash using %s", opt_name); + error= recover(&log, (Format_description_log_event *)ev); + } + else + error=0; + + delete ev; + end_io_cache(&log); + my_close(file, MYF(MY_WME)); + + if (error) + goto err; + } + +err: + return error; +} + +/* this is called on shutdown, after ha_panic */ +void TC_LOG_BINLOG::close() +{ + DBUG_ASSERT(prepared_xids==0); + pthread_mutex_destroy(&LOCK_prep_xids); + pthread_cond_destroy (&COND_prep_xids); +} + +/* + TODO group commit 
+ + RETURN + 0 - error + 1 - success +*/ +int TC_LOG_BINLOG::log_xid(THD *thd, my_xid xid) +{ + Xid_log_event xle(thd, xid); + IO_CACHE *trans_log= (IO_CACHE*)thd->ha_data[binlog_hton.slot]; + return !binlog_end_trans(thd, trans_log, &xle); // invert return value +} + +void TC_LOG_BINLOG::unlog(ulong cookie, my_xid xid) +{ + pthread_mutex_lock(&LOCK_prep_xids); + if (--prepared_xids == 0) + pthread_cond_signal(&COND_prep_xids); + pthread_mutex_unlock(&LOCK_prep_xids); + rotate_and_purge(0); // as ::write() did not rotate +} + +int TC_LOG_BINLOG::recover(IO_CACHE *log, Format_description_log_event *fdle) +{ + Log_event *ev; + HASH xids; + MEM_ROOT mem_root; + + if (! fdle->is_valid() || + hash_init(&xids, &my_charset_bin, TC_LOG_PAGE_SIZE/3, 0, + sizeof(my_xid), 0, 0, MYF(0))) + goto err1; + + init_alloc_root(&mem_root, TC_LOG_PAGE_SIZE, TC_LOG_PAGE_SIZE); + + fdle->flags&= ~LOG_EVENT_BINLOG_IN_USE_F; // abort on the first error + + while ((ev= Log_event::read_log_event(log,0,fdle)) && ev->is_valid()) + { + if (ev->get_type_code() == XID_EVENT) + { + Xid_log_event *xev=(Xid_log_event *)ev; + byte *x=(byte *)memdup_root(&mem_root, (char *)& xev->xid, + sizeof(xev->xid)); + if (! x) + goto err2; + my_hash_insert(&xids, x); + } + delete ev; + } + + if (ha_recover(&xids)) + goto err2; + + free_root(&mem_root, MYF(0)); + hash_free(&xids); + return 0; + +err2: + free_root(&mem_root, MYF(0)); + hash_free(&xids); +err1: + sql_print_error("Crash recovery failed. 
Either correct the problem " + "(if it's, for example, out of memory error) and restart, " + "or delete (or rename) binary log and start mysqld with " + "--tc-heuristic-recover={commit|rollback}"); + return 1; +} + diff --git a/sql/log_event.cc b/sql/log_event.cc index 412ebbce0ac..e272140c080 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1,15 +1,14 @@ /* Copyright (C) 2000-2004 MySQL AB - + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - + the Free Software Foundation; version 2 of the License. + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ @@ -114,20 +113,35 @@ static char *pretty_print_str(char *packet, char *str, int len) /* - slave_load_file_stem() + Creates a temporary name for load data infile: + + SYNOPSIS + slave_load_file_stem() + buf Store new filename here + file_id File_id (part of file name) + event_server_id Event_id (part of file name) + ext Extension for file name + + RETURN + Pointer to start of extension */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -static inline char* slave_load_file_stem(char*buf, uint file_id, - int event_server_id) +static char *slave_load_file_stem(char *buf, uint file_id, + int event_server_id, const char *ext) { + char *res; fn_format(buf,"SQL_LOAD-",slave_load_tmpdir, "", MY_UNPACK_FILENAME); + to_unix_path(buf); + buf = strend(buf); buf = int10_to_str(::server_id, buf, 10); *buf++ = '-'; buf = 
int10_to_str(event_server_id, buf, 10); *buf++ = '-'; - return int10_to_str(file_id, buf, 10); + res= int10_to_str(file_id, buf, 10); + strmov(res, ext); // Add extension last + return res; // Pointer to extension } #endif @@ -158,7 +172,7 @@ static void cleanup_load_tmpdir() we cannot meet Start_log event in the middle of events from one LOAD DATA. */ - p= strmake(prefbuf,"SQL_LOAD-",9); + p= strmake(prefbuf, STRING_WITH_LEN("SQL_LOAD-")); p= int10_to_str(::server_id, p, 10); *(p++)= '-'; *p= 0; @@ -182,10 +196,12 @@ static void cleanup_load_tmpdir() write_str() */ -static bool write_str(IO_CACHE *file, char *str, byte length) +static bool write_str(IO_CACHE *file, char *str, uint length) { - return (my_b_safe_write(file, &length, 1) || - my_b_safe_write(file, (byte*) str, (int) length)); + byte tmp[1]; + tmp[0]= (byte) length; + return (my_b_safe_write(file, tmp, sizeof(tmp)) || + my_b_safe_write(file, (byte*) str, length)); } @@ -193,43 +209,89 @@ static bool write_str(IO_CACHE *file, char *str, byte length) read_str() */ -static inline int read_str(char * &buf, char *buf_end, char * &str, - uint8 &len) +static inline int read_str(char **buf, char *buf_end, char **str, + uint8 *len) { - if (buf + (uint) (uchar) *buf >= buf_end) + if (*buf + ((uint) (uchar) **buf) >= buf_end) return 1; - len = (uint8) *buf; - str= buf+1; - buf+= (uint) len+1; + *len= (uint8) **buf; + *str= (*buf)+1; + (*buf)+= (uint) *len+1; return 0; } + /* Transforms a string into "" or its expression in 0x... form. */ + char *str_to_hex(char *to, const char *from, uint len) { - char *p= to; if (len) { - p= strmov(p, "0x"); - for (uint i= 0; i < len; i++, p+= 2) - { - /* val[i] is char. 
Casting to uchar helps greatly if val[i] < 0 */ - uint tmp= (uint) (uchar) from[i]; - p[0]= _dig_vec_upper[tmp >> 4]; - p[1]= _dig_vec_upper[tmp & 15]; - } - *p= 0; + *to++= '0'; + *to++= 'x'; + to= octet2hex(to, from, len); } else - p= strmov(p, "\"\""); - return p; // pointer to end 0 of 'to' + to= strmov(to, "\"\""); + return to; // pointer to end 0 of 'to' } +/* + Append a version of the 'from' string suitable for use in a query to + the 'to' string. To generate a correct escaping, the character set + information in 'csinfo' is used. + */ +#ifndef MYSQL_CLIENT +int +append_query_string(CHARSET_INFO *csinfo, + String const *from, String *to) +{ + char *beg, *ptr; + uint32 const orig_len= to->length(); + if (to->reserve(orig_len + from->length()*2+3)) + return 1; + + beg= to->c_ptr_quick() + to->length(); + ptr= beg; + if (csinfo->escape_with_backslash_is_dangerous) + ptr= str_to_hex(ptr, from->ptr(), from->length()); + else + { + *ptr++= '\''; + ptr+= escape_string_for_mysql(csinfo, ptr, 0, + from->ptr(), from->length()); + *ptr++='\''; + } + to->length(orig_len + ptr - beg); + return 0; +} +#endif + + +/* + Prints a "session_var=value" string. Used by mysqlbinlog to print some SET + commands just before it prints a query. 
+*/ + +#ifdef MYSQL_CLIENT + +static void print_set_option(FILE* file, uint32 bits_changed, uint32 option, + uint32 flags, const char* name, bool* need_comma) +{ + if (bits_changed & option) + { + if (*need_comma) + fprintf(file,", "); + fprintf(file,"%s=%d", name, test(flags & option)); + *need_comma= 1; + } +} +#endif /************************************************************************** - Log_event methods + Log_event methods (= the parent class of all events) **************************************************************************/ /* @@ -239,7 +301,7 @@ char *str_to_hex(char *to, const char *from, uint len) const char* Log_event::get_type_str() { switch(get_type_code()) { - case START_EVENT: return "Start"; + case START_EVENT_V3: return "Start_v3"; case STOP_EVENT: return "Stop"; case QUERY_EVENT: return "Query"; case ROTATE_EVENT: return "Rotate"; @@ -252,8 +314,12 @@ const char* Log_event::get_type_str() case DELETE_FILE_EVENT: return "Delete_file"; case EXEC_LOAD_EVENT: return "Exec_load"; case RAND_EVENT: return "RAND"; + case XID_EVENT: return "Xid"; case USER_VAR_EVENT: return "User var"; - default: return "Unknown"; /* impossible */ + case FORMAT_DESCRIPTION_EVENT: return "Format_desc"; + case BEGIN_LOAD_QUERY_EVENT: return "Begin_load_query"; + case EXECUTE_LOAD_QUERY_EVENT: return "Execute_load_query"; + default: return "Unknown"; /* impossible */ } } @@ -264,25 +330,23 @@ const char* Log_event::get_type_str() #ifndef MYSQL_CLIENT Log_event::Log_event(THD* thd_arg, uint16 flags_arg, bool using_trans) - :log_pos(0), temp_buf(0), exec_time(0), cached_event_len(0), - flags(flags_arg), thd(thd_arg) + :log_pos(0), temp_buf(0), exec_time(0), flags(flags_arg), thd(thd_arg) { server_id= thd->server_id; when= thd->start_time; - cache_stmt= (using_trans && - (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN))); + cache_stmt= using_trans; } /* - This minimal constructor is for when you are not even sure that there is a - valid THD. 
For example in the server when we are shutting down or flushing - logs after receiving a SIGHUP (then we must write a Rotate to the binlog but - we have no THD, so we need this minimal constructor). + This minimal constructor is for when you are not even sure that there + is a valid THD. For example in the server when we are shutting down or + flushing logs after receiving a SIGHUP (then we must write a Rotate to + the binlog but we have no THD, so we need this minimal constructor). */ Log_event::Log_event() - :temp_buf(0), exec_time(0), cached_event_len(0), flags(0), cache_stmt(0), + :temp_buf(0), exec_time(0), flags(0), cache_stmt(0), thd(0) { server_id= ::server_id; @@ -296,24 +360,71 @@ Log_event::Log_event() Log_event::Log_event() */ -Log_event::Log_event(const char* buf, bool old_format) - :temp_buf(0), cached_event_len(0), cache_stmt(0) +Log_event::Log_event(const char* buf, + const Format_description_log_event* description_event) + :temp_buf(0), cache_stmt(0) { +#ifndef MYSQL_CLIENT + thd = 0; +#endif when = uint4korr(buf); server_id = uint4korr(buf + SERVER_ID_OFFSET); - if (old_format) + if (description_event->binlog_version==1) { - log_pos=0; - flags=0; + log_pos= 0; + flags= 0; + return; } - else + /* 4.0 or newer */ + log_pos= uint4korr(buf + LOG_POS_OFFSET); + /* + If the log is 4.0 (so here it can only be a 4.0 relay log read by + the SQL thread or a 4.0 master binlog read by the I/O thread), + log_pos is the beginning of the event: we transform it into the end + of the event, which is more useful. + But how do you know that the log is 4.0: you know it if + description_event is version 3 *and* you are not reading a + Format_desc (remember that mysqlbinlog starts by assuming that 5.0 + logs are in 4.0 format, until it finds a Format_desc). 
+ */ + if (description_event->binlog_version==3 && + buf[EVENT_TYPE_OFFSET]<FORMAT_DESCRIPTION_EVENT && log_pos) { - log_pos = uint4korr(buf + LOG_POS_OFFSET); - flags = uint2korr(buf + FLAGS_OFFSET); + /* + If log_pos=0, don't change it. log_pos==0 is a marker to mean + "don't change rli->group_master_log_pos" (see + inc_group_relay_log_pos()). As it is unreal log_pos, adding the + event len's is nonsense. For example, a fake Rotate event should + not have its log_pos (which is 0) changed or it will modify + Exec_master_log_pos in SHOW SLAVE STATUS, displaying a nonsense + value of (a non-zero offset which does not exist in the master's + binlog, so which will cause problems if the user uses this value + in CHANGE MASTER). + */ + log_pos+= uint4korr(buf + EVENT_LEN_OFFSET); } -#ifndef MYSQL_CLIENT - thd = 0; -#endif + DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); + + flags= uint2korr(buf + FLAGS_OFFSET); + if ((buf[EVENT_TYPE_OFFSET] == FORMAT_DESCRIPTION_EVENT) || + (buf[EVENT_TYPE_OFFSET] == ROTATE_EVENT)) + { + /* + These events always have a header which stops here (i.e. their + header is FROZEN). + */ + /* + Initialization to zero of all other Log_event members as they're + not specified. Currently there are no such members; in the future + there will be an event UID (but Format_description and Rotate + don't need this UID, as they are not propagated through + --log-slave-updates (remember the UID is used to not play a query + twice when you have two masters which are slaves of a 3rd master). + Then we are done. + */ + return; + } + /* otherwise, go on with reading the header from buf (nothing now) */ } #ifndef MYSQL_CLIENT @@ -339,41 +450,42 @@ int Log_event::exec_event(struct st_relay_log_info* rli) only in the case discussed above, 'if (rli)' is useless here. But as we are not 100% sure, keep it for now. */ - if (rli) + if (rli) { /* - If in a transaction, and if the slave supports transactions, - just inc_event_relay_log_pos(). 
We only have to check for OPTION_BEGIN - (not OPTION_NOT_AUTOCOMMIT) as transactions are logged - with BEGIN/COMMIT, not with SET AUTOCOMMIT= . - + If in a transaction, and if the slave supports transactions, just + inc_event_relay_log_pos(). We only have to check for OPTION_BEGIN + (not OPTION_NOT_AUTOCOMMIT) as transactions are logged with + BEGIN/COMMIT, not with SET AUTOCOMMIT= . + CAUTION: opt_using_transactions means - innodb || bdb ; suppose the master supports InnoDB and BDB, + innodb || bdb ; suppose the master supports InnoDB and BDB, but the slave supports only BDB, problems - will arise: + will arise: - suppose an InnoDB table is created on the master, - then it will be MyISAM on the slave - - but as opt_using_transactions is true, the slave will believe he is - transactional with the MyISAM table. And problems will come when one - does START SLAVE; STOP SLAVE; START SLAVE; (the slave will resume at - BEGIN whereas there has not been any rollback). This is the problem of - using opt_using_transactions instead of a finer - "does the slave support _the_transactional_handler_used_on_the_master_". - - More generally, we'll have problems when a query mixes a transactional - handler and MyISAM and STOP SLAVE is issued in the middle of the - "transaction". START SLAVE will resume at BEGIN while the MyISAM table - has already been updated. + - but as opt_using_transactions is true, the slave will believe he + is transactional with the MyISAM table. And problems will come + when one does START SLAVE; STOP SLAVE; START SLAVE; (the slave + will resume at BEGIN whereas there has not been any rollback). + This is the problem of using opt_using_transactions instead of a + finer "does the slave support + _the_transactional_handler_used_on_the_master_". + + More generally, we'll have problems when a query mixes a + transactional handler and MyISAM and STOP SLAVE is issued in the + middle of the "transaction". 
START SLAVE will resume at BEGIN + while the MyISAM table has already been updated. */ if ((thd->options & OPTION_BEGIN) && opt_using_transactions) - rli->inc_event_relay_log_pos(get_event_len()); + rli->inc_event_relay_log_pos(); else { - rli->inc_group_relay_log_pos(get_event_len(),log_pos); + rli->inc_group_relay_log_pos(log_pos); flush_relay_log_info(rli); /* - Note that Rotate_log_event::exec_event() does not call this function, - so there is no chance that a fake rotate event resets + Note that Rotate_log_event::exec_event() does not call this + function, so there is no chance that a fake rotate event resets last_master_timestamp. Note that we update without mutex (probably ok - except in some very rare cases, only consequence is that value may take some time to @@ -429,63 +541,107 @@ int Log_event::net_send(Protocol *protocol, const char* log_name, my_off_t pos) void Log_event::init_show_field_list(List<Item>* field_list) { field_list->push_back(new Item_empty_string("Log_name", 20)); - field_list->push_back(new Item_return_int("Pos", 11, + field_list->push_back(new Item_return_int("Pos", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG)); field_list->push_back(new Item_empty_string("Event_type", 20)); field_list->push_back(new Item_return_int("Server_id", 10, MYSQL_TYPE_LONG)); - field_list->push_back(new Item_return_int("Orig_log_pos", 11, + field_list->push_back(new Item_return_int("End_log_pos", + MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONGLONG)); field_list->push_back(new Item_empty_string("Info", 20)); } -#endif /* !MYSQL_CLIENT */ /* Log_event::write() */ -int Log_event::write(IO_CACHE* file) +bool Log_event::write_header(IO_CACHE* file, ulong event_data_length) { - return (write_header(file) || write_data(file)) ? 
-1 : 0; -} + byte header[LOG_EVENT_HEADER_LEN]; + DBUG_ENTER("Log_event::write_header"); + /* Store number of bytes that will be written by this event */ + data_written= event_data_length + sizeof(header); -/* - Log_event::write_header() -*/ + /* + log_pos != 0 if this is relay-log event. In this case we should not + change the position + */ -int Log_event::write_header(IO_CACHE* file) -{ - char buf[LOG_EVENT_HEADER_LEN]; - char* pos = buf; - int4store(pos, (ulong) when); // timestamp - pos += 4; - *pos++ = get_type_code(); // event type code - int4store(pos, server_id); - pos += 4; - long tmp=get_data_size() + LOG_EVENT_HEADER_LEN; - int4store(pos, tmp); - pos += 4; - int4store(pos, log_pos); - pos += 4; - int2store(pos, flags); - pos += 2; - return (my_b_safe_write(file, (byte*) buf, (uint) (pos - buf))); + if (is_artificial_event()) + { + /* + We should not do any cleanup on slave when reading this. We + mark this by setting log_pos to 0. Start_log_event_v3() will + detect this on reading and set artificial_event=1 for the event. + */ + log_pos= 0; + } + else if (!log_pos) + { + /* + Calculate position of end of event + + Note that with a SEQ_READ_APPEND cache, my_b_tell() does not + work well. So this will give slightly wrong positions for the + Format_desc/Rotate/Stop events which the slave writes to its + relay log. For example, the initial Format_desc will have + end_log_pos=91 instead of 95. Because after writing the first 4 + bytes of the relay log, my_b_tell() still reports 0. Because + my_b_append() does not update the counter which my_b_tell() + later uses (one should probably use my_b_append_tell() to work + around this). To get right positions even when writing to the + relay log, we use the (new) my_b_safe_tell(). + + Note that this raises a question on the correctness of all these + DBUG_ASSERT(my_b_tell()=rli->event_relay_log_pos). 
+ + If in a transaction, the log_pos which we calculate below is not + very good (because then my_b_safe_tell() returns start position + of the BEGIN, so it's like the statement was at the BEGIN's + place), but it's not a very serious problem (as the slave, when + it is in a transaction, does not take those end_log_pos into + account (as it calls inc_event_relay_log_pos()). To be fixed + later, so that it looks less strange. But not bug. + */ + + log_pos= my_b_safe_tell(file)+data_written; + } + + /* + Header will be of size LOG_EVENT_HEADER_LEN for all events, except for + FORMAT_DESCRIPTION_EVENT and ROTATE_EVENT, where it will be + LOG_EVENT_MINIMAL_HEADER_LEN (remember these 2 have a frozen header, + because we read them before knowing the format). + */ + + int4store(header, (ulong) when); // timestamp + header[EVENT_TYPE_OFFSET]= get_type_code(); + int4store(header+ SERVER_ID_OFFSET, server_id); + int4store(header+ EVENT_LEN_OFFSET, data_written); + int4store(header+ LOG_POS_OFFSET, log_pos); + int2store(header+ FLAGS_OFFSET, flags); + + DBUG_RETURN(my_b_safe_write(file, header, sizeof(header)) != 0); } /* Log_event::read_log_event() + + This needn't be format-tolerant, because we only read + LOG_EVENT_MINIMAL_HEADER_LEN (we just want to read the event's length). + */ -#ifndef MYSQL_CLIENT int Log_event::read_log_event(IO_CACHE* file, String* packet, pthread_mutex_t* log_lock) { ulong data_len; int result=0; - char buf[LOG_EVENT_HEADER_LEN]; + char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; DBUG_ENTER("read_log_event"); if (log_lock) @@ -505,24 +661,25 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet, goto end; } data_len= uint4korr(buf + EVENT_LEN_OFFSET); - if (data_len < LOG_EVENT_HEADER_LEN || + if (data_len < LOG_EVENT_MINIMAL_HEADER_LEN || data_len > current_thd->variables.max_allowed_packet) { DBUG_PRINT("error",("data_len: %ld", data_len)); - result= ((data_len < LOG_EVENT_HEADER_LEN) ? 
LOG_READ_BOGUS : + result= ((data_len < LOG_EVENT_MINIMAL_HEADER_LEN) ? LOG_READ_BOGUS : LOG_READ_TOO_LARGE); goto end; } packet->append(buf, sizeof(buf)); - data_len-= LOG_EVENT_HEADER_LEN; + data_len-= LOG_EVENT_MINIMAL_HEADER_LEN; if (data_len) { if (packet->append(file, data_len)) { /* - Here we should never hit EOF in a non-error condition. + Here if we hit EOF it's really an error: as data_len is >=0 + there's supposed to be more bytes available. EOF means we are reading the event partially, which should - never happen. + never happen: either we read badly or the binlog is truncated. */ result= file->error >= 0 ? LOG_READ_TRUNC: LOG_READ_IO; /* Implicit goto end; */ @@ -539,42 +696,62 @@ end: #ifndef MYSQL_CLIENT #define UNLOCK_MUTEX if (log_lock) pthread_mutex_unlock(log_lock); #define LOCK_MUTEX if (log_lock) pthread_mutex_lock(log_lock); -#define max_allowed_packet current_thd->variables.max_allowed_packet #else #define UNLOCK_MUTEX #define LOCK_MUTEX -#define max_allowed_packet (*mysql_get_parameters()->p_max_allowed_packet) #endif /* Log_event::read_log_event() NOTE: - Allocates memory; The caller is responsible for clean-up + Allocates memory; The caller is responsible for clean-up. */ #ifndef MYSQL_CLIENT Log_event* Log_event::read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock, - bool old_format) + const Format_description_log_event *description_event) #else -Log_event* Log_event::read_log_event(IO_CACHE* file, bool old_format) -#endif +Log_event* Log_event::read_log_event(IO_CACHE* file, + const Format_description_log_event *description_event) +#endif { - char head[LOG_EVENT_HEADER_LEN]; - uint header_size= old_format ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; + DBUG_ENTER("Log_event::read_log_event(IO_CACHE *, Format_description_log_event *"); + DBUG_ASSERT(description_event != 0); + char head[LOG_EVENT_MINIMAL_HEADER_LEN]; + /* + First we only want to read at most LOG_EVENT_MINIMAL_HEADER_LEN, just to + check the event for sanity and to know its length; no need to really parse + it. We say "at most" because this could be a 3.23 master, which has header + of 13 bytes, whereas LOG_EVENT_MINIMAL_HEADER_LEN is 19 bytes (it's + "minimal" over the set {MySQL >=4.0}). + */ + uint header_size= min(description_event->common_header_len, + LOG_EVENT_MINIMAL_HEADER_LEN); LOCK_MUTEX; + DBUG_PRINT("info", ("my_b_tell: %lu", (ulong) my_b_tell(file))); if (my_b_read(file, (byte *) head, header_size)) { + DBUG_PRINT("info", ("Log_event::read_log_event(IO_CACHE*,Format_desc*) \ +failed my_b_read")); UNLOCK_MUTEX; - return 0; + /* + No error here; it could be that we are at the file's end. However + if the next my_b_read() fails (below), it will be an error as we + were able to read the first bytes. + */ + DBUG_RETURN(0); } - uint data_len = uint4korr(head + EVENT_LEN_OFFSET); char *buf= 0; const char *error= 0; Log_event *res= 0; +#ifndef max_allowed_packet + THD *thd=current_thd; + uint max_allowed_packet= thd ? 
thd->variables.max_allowed_packet : ~(ulong)0; +#endif if (data_len > max_allowed_packet) { @@ -601,15 +778,16 @@ Log_event* Log_event::read_log_event(IO_CACHE* file, bool old_format) error = "read error"; goto err; } - if ((res = read_log_event(buf, data_len, &error, old_format))) + if ((res= read_log_event(buf, data_len, &error, description_event))) res->register_temp_buf(buf); err: UNLOCK_MUTEX; - if (error) + if (!res) { - sql_print_error("\ -Error in Log_event::read_log_event(): '%s', data_len: %d, event_type: %d", + DBUG_ASSERT(error != 0); + sql_print_error("Error in Log_event::read_log_event(): " + "'%s', data_len: %d, event_type: %d", error,data_len,head[EVENT_TYPE_OFFSET]); my_free(buf, MYF(MY_ALLOW_ZERO_PTR)); /* @@ -622,94 +800,120 @@ Error in Log_event::read_log_event(): '%s', data_len: %d, event_type: %d", */ file->error= -1; } - return res; + DBUG_RETURN(res); } /* Log_event::read_log_event() + Binlog format tolerance is in (buf, event_len, description_event) + constructors. 
*/ -Log_event* Log_event::read_log_event(const char* buf, int event_len, - const char **error, bool old_format) +Log_event* Log_event::read_log_event(const char* buf, uint event_len, + const char **error, + const Format_description_log_event *description_event) { - DBUG_ENTER("Log_event::read_log_event"); - + Log_event* ev; + DBUG_ENTER("Log_event::read_log_event(char*,...)"); + DBUG_ASSERT(description_event != 0); + DBUG_PRINT("info", ("binlog_version: %d", description_event->binlog_version)); if (event_len < EVENT_LEN_OFFSET || (uint) event_len != uint4korr(buf+EVENT_LEN_OFFSET)) { *error="Sanity check failed"; // Needed to free buffer DBUG_RETURN(NULL); // general sanity check - will fail on a partial read } - - Log_event* ev = NULL; - + switch(buf[EVENT_TYPE_OFFSET]) { case QUERY_EVENT: - ev = new Query_log_event(buf, event_len, old_format); + ev = new Query_log_event(buf, event_len, description_event, QUERY_EVENT); break; case LOAD_EVENT: - ev = new Create_file_log_event(buf, event_len, old_format); + ev = new Load_log_event(buf, event_len, description_event); break; case NEW_LOAD_EVENT: - ev = new Load_log_event(buf, event_len, old_format); + ev = new Load_log_event(buf, event_len, description_event); break; case ROTATE_EVENT: - ev = new Rotate_log_event(buf, event_len, old_format); + ev = new Rotate_log_event(buf, event_len, description_event); break; #ifdef HAVE_REPLICATION - case SLAVE_EVENT: + case SLAVE_EVENT: /* can never happen (unused event) */ ev = new Slave_log_event(buf, event_len); break; #endif /* HAVE_REPLICATION */ case CREATE_FILE_EVENT: - ev = new Create_file_log_event(buf, event_len, old_format); + ev = new Create_file_log_event(buf, event_len, description_event); break; case APPEND_BLOCK_EVENT: - ev = new Append_block_log_event(buf, event_len); + ev = new Append_block_log_event(buf, event_len, description_event); break; case DELETE_FILE_EVENT: - ev = new Delete_file_log_event(buf, event_len); + ev = new Delete_file_log_event(buf, event_len, 
description_event); break; case EXEC_LOAD_EVENT: - ev = new Execute_load_log_event(buf, event_len); + ev = new Execute_load_log_event(buf, event_len, description_event); break; - case START_EVENT: - ev = new Start_log_event(buf, old_format); + case START_EVENT_V3: /* this is sent only by MySQL <=4.x */ + ev = new Start_log_event_v3(buf, description_event); break; -#ifdef HAVE_REPLICATION case STOP_EVENT: - ev = new Stop_log_event(buf, old_format); + ev = new Stop_log_event(buf, description_event); break; -#endif /* HAVE_REPLICATION */ case INTVAR_EVENT: - ev = new Intvar_log_event(buf, old_format); + ev = new Intvar_log_event(buf, description_event); + break; + case XID_EVENT: + ev = new Xid_log_event(buf, description_event); break; case RAND_EVENT: - ev = new Rand_log_event(buf, old_format); + ev = new Rand_log_event(buf, description_event); break; case USER_VAR_EVENT: - ev = new User_var_log_event(buf, old_format); + ev = new User_var_log_event(buf, description_event); + break; + case FORMAT_DESCRIPTION_EVENT: + ev = new Format_description_log_event(buf, event_len, description_event); + break; + case BEGIN_LOAD_QUERY_EVENT: + ev = new Begin_load_query_log_event(buf, event_len, description_event); + break; + case EXECUTE_LOAD_QUERY_EVENT: + ev = new Execute_load_query_log_event(buf, event_len, description_event); break; default: + DBUG_PRINT("error",("Unknown evernt code: %d",(int) buf[EVENT_TYPE_OFFSET])); + ev= NULL; break; } + + /* + is_valid() are small event-specific sanity tests which are + important; for example there are some my_malloc() in constructors + (e.g. Query_log_event::Query_log_event(char*...)); when these + my_malloc() fail we can't return an error out of the constructor + (because constructor is "void") ; so instead we leave the pointer we + wanted to allocate (e.g. 'query') to 0 and we test it in is_valid(). + Same for Format_description_log_event, member 'post_header_len'. 
+ */ if (!ev || !ev->is_valid()) { + DBUG_PRINT("error",("Found invalid event in binary log")); + delete ev; #ifdef MYSQL_CLIENT - if (!force_opt) + if (!force_opt) /* then mysqlbinlog dies */ { *error= "Found invalid event in binary log"; DBUG_RETURN(0); } - ev= new Unknown_log_event(buf, old_format); + ev= new Unknown_log_event(buf, description_event); #else *error= "Found invalid event in binary log"; DBUG_RETURN(0); #endif } - ev->cached_event_len = event_len; DBUG_RETURN(ev); } @@ -719,15 +923,78 @@ Log_event* Log_event::read_log_event(const char* buf, int event_len, Log_event::print_header() */ -void Log_event::print_header(FILE* file) +void Log_event::print_header(FILE* file, PRINT_EVENT_INFO* print_event_info) { char llbuff[22]; + my_off_t hexdump_from= print_event_info->hexdump_from; + fputc('#', file); print_timestamp(file); - fprintf(file, " server id %d log_pos %s ", server_id, - llstr(log_pos,llbuff)); + fprintf(file, " server id %d end_log_pos %s ", server_id, + llstr(log_pos,llbuff)); + + /* mysqlbinlog --hexdump */ + if (print_event_info->hexdump_from) + { + fprintf(file, "\n"); + uchar *ptr= (uchar*)temp_buf; + my_off_t size= + uint4korr(ptr + EVENT_LEN_OFFSET) - LOG_EVENT_MINIMAL_HEADER_LEN; + my_off_t i; + + /* Header len * 4 >= header len * (2 chars + space + extra space) */ + char *h, hex_string[LOG_EVENT_MINIMAL_HEADER_LEN*4]= {0}; + char *c, char_string[16+1]= {0}; + + /* Pretty-print event common header if header is exactly 19 bytes */ + if (print_event_info->common_header_len == LOG_EVENT_MINIMAL_HEADER_LEN) + { + fprintf(file, "# Position Timestamp Type Master ID " + "Size Master Pos Flags \n"); + fprintf(file, "# %8.8lx %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x %02x\n", + (unsigned long) hexdump_from, + ptr[0], ptr[1], ptr[2], ptr[3], ptr[4], ptr[5], ptr[6], + ptr[7], ptr[8], ptr[9], ptr[10], ptr[11], ptr[12], ptr[13], + ptr[14], ptr[15], ptr[16], ptr[17], ptr[18]); + ptr += 
LOG_EVENT_MINIMAL_HEADER_LEN; + hexdump_from += LOG_EVENT_MINIMAL_HEADER_LEN; + } + + /* Rest of event (without common header) */ + for (i= 0, c= char_string, h=hex_string; + i < size; + i++, ptr++) + { + my_snprintf(h, 4, "%02x ", *ptr); + h += 3; + + *c++= my_isalnum(&my_charset_bin, *ptr) ? *ptr : '.'; + + if (i % 16 == 15) + { + fprintf(file, "# %8.8lx %-48.48s |%16s|\n", + (unsigned long) (hexdump_from + (i & 0xfffffff0)), + hex_string, char_string); + hex_string[0]= 0; + char_string[0]= 0; + c= char_string; + h= hex_string; + } + else if (i % 8 == 7) *h++ = ' '; + } + *c= '\0'; + + /* Non-full last line */ + if (hex_string[0]) + fprintf(file, "# %8.8lx %-48.48s |%s|\n# ", + (unsigned long) (hexdump_from + (i & 0xfffffff0)), + hex_string, char_string); + } } + /* Log_event::print_timestamp() */ @@ -756,19 +1023,6 @@ void Log_event::print_timestamp(FILE* file, time_t* ts) #endif /* MYSQL_CLIENT */ -/* - Log_event::set_log_pos() -*/ - -#ifndef MYSQL_CLIENT -void Log_event::set_log_pos(MYSQL_LOG* log) -{ - if (!log_pos) - log_pos = my_b_tell(&log->log_file); -} -#endif /* !MYSQL_CLIENT */ - - /************************************************************************** Query_log_event methods **************************************************************************/ @@ -777,15 +1031,20 @@ void Log_event::set_log_pos(MYSQL_LOG* log) /* Query_log_event::pack_info() + This (which is used only for SHOW BINLOG EVENTS) could be updated to + print SET @@session_var=. But this is not urgent, as SHOW BINLOG EVENTS is + only an information, it does not produce suitable queries to replay (for + example it does not print LOAD DATA INFILE). */ void Query_log_event::pack_info(Protocol *protocol) { + // TODO: show the catalog ?? 
char *buf, *pos; if (!(buf= my_malloc(9 + db_len + q_len, MYF(MY_WME)))) return; - pos= buf; - if (!(flags & LOG_EVENT_SUPPRESS_USE_F) + pos= buf; + if (!(flags & LOG_EVENT_SUPPRESS_USE_F) && db && db_len) { pos= strmov(buf, "use `"); @@ -802,34 +1061,52 @@ void Query_log_event::pack_info(Protocol *protocol) } #endif +#ifndef MYSQL_CLIENT -/* - Query_log_event::write() -*/ - -int Query_log_event::write(IO_CACHE* file) +/* Utility function for the next method */ +static void write_str_with_code_and_len(char **dst, const char *src, + int len, uint code) { - return query ? Log_event::write(file) : -1; + DBUG_ASSERT(src); + *((*dst)++)= code; + *((*dst)++)= (uchar) len; + bmove(*dst, src, len); + (*dst)+= len; } /* - Query_log_event::write_data() + Query_log_event::write() + + NOTES: + In this event we have to modify the header to have the correct + EVENT_LEN_OFFSET as we don't yet know how many status variables we + will print! */ -int Query_log_event::write_data(IO_CACHE* file) +bool Query_log_event::write(IO_CACHE* file) { - char buf[QUERY_HEADER_LEN]; + uchar buf[QUERY_HEADER_LEN+ + 1+4+ // code of flags2 and flags2 + 1+8+ // code of sql_mode and sql_mode + 1+1+FN_REFLEN+ // code of catalog and catalog length and catalog + 1+4+ // code of autoinc and the 2 autoinc variables + 1+6+ // code of charset and charset + 1+1+MAX_TIME_ZONE_NAME_LENGTH+ // code of tz and tz length and tz name + 1+2+ // code of lc_time_names and lc_time_names_number + 1+2 // code of charset_database and charset_database_number + ], *start, *start_of_status; + ulong event_length; if (!query) - return -1; - + return 1; // Something wrong with event + /* We want to store the thread id: (- as an information for the user when he reads the binlog) - if the query uses temporary table: for the slave SQL thread to know to which master connection the temp table belongs. 
- Now imagine we (write_data()) are called by the slave SQL thread (we are + Now imagine we (write()) are called by the slave SQL thread (we are logging a query executed by this thread; the slave runs with --log-slave-updates). Then this query will be logged with thread_id=the_thread_id_of_the_SQL_thread. Imagine that 2 temp tables of @@ -866,78 +1143,382 @@ int Query_log_event::write_data(IO_CACHE* file) buf[Q_DB_LEN_OFFSET] = (char) db_len; int2store(buf + Q_ERR_CODE_OFFSET, error_code); - return (my_b_safe_write(file, (byte*) buf, QUERY_HEADER_LEN) || - my_b_safe_write(file, (db) ? (byte*) db : (byte*)"", db_len + 1) || - my_b_safe_write(file, (byte*) query, q_len)) ? -1 : 0; -} + /* + You MUST always write status vars in increasing order of code. This + guarantees that a slightly older slave will be able to parse those he + knows. + */ + start_of_status= start= buf+QUERY_HEADER_LEN; + if (flags2_inited) + { + *start++= Q_FLAGS2_CODE; + int4store(start, flags2); + start+= 4; + } + if (sql_mode_inited) + { + *start++= Q_SQL_MODE_CODE; + int8store(start, (ulonglong)sql_mode); + start+= 8; + } + if (catalog_len) // i.e. this var is inited (false for 4.0 events) + { + write_str_with_code_and_len((char **)(&start), + catalog, catalog_len, Q_CATALOG_NZ_CODE); + /* + In 5.0.x where x<4 masters we used to store the end zero here. This was + a waste of one byte so we don't do it in x>=4 masters. We change code to + Q_CATALOG_NZ_CODE, because re-using the old code would make x<4 slaves + of this x>=4 master segfault (expecting a zero when there is + none). Remaining compatibility problems are: the older slave will not + find the catalog; but it is will not crash, and it's not an issue + that it does not find the catalog as catalogs were not used in these + older MySQL versions (we store it in binlog and read it from relay log + but do nothing useful with it). 
What is an issue is that the older slave + will stop processing the Q_* blocks (and jumps to the db/query) as soon + as it sees unknown Q_CATALOG_NZ_CODE; so it will not be able to read + Q_AUTO_INCREMENT*, Q_CHARSET and so replication will fail silently in + various ways. Documented that you should not mix alpha/beta versions if + they are not exactly the same version, with example of 5.0.3->5.0.2 and + 5.0.4->5.0.3. If replication is from older to new, the new will + recognize Q_CATALOG_CODE and have no problem. + */ + } + if (auto_increment_increment != 1) + { + *start++= Q_AUTO_INCREMENT; + int2store(start, auto_increment_increment); + int2store(start+2, auto_increment_offset); + start+= 4; + } + if (charset_inited) + { + *start++= Q_CHARSET_CODE; + memcpy(start, charset, 6); + start+= 6; + } + if (time_zone_len) + { + /* In the TZ sys table, column Name is of length 64 so this should be ok */ + DBUG_ASSERT(time_zone_len <= MAX_TIME_ZONE_NAME_LENGTH); + *start++= Q_TIME_ZONE_CODE; + *start++= time_zone_len; + memcpy(start, time_zone_str, time_zone_len); + start+= time_zone_len; + } + if (lc_time_names_number) + { + DBUG_ASSERT(lc_time_names_number <= 0xFFFF); + *start++= Q_LC_TIME_NAMES_CODE; + int2store(start, lc_time_names_number); + start+= 2; + } + if (charset_database_number) + { + DBUG_ASSERT(charset_database_number <= 0xFFFF); + *start++= Q_CHARSET_DATABASE_CODE; + int2store(start, charset_database_number); + start+= 2; + } + /* + Here there could be code like + if (command-line-option-which-says-"log_this_variable" && inited) + { + *start++= Q_THIS_VARIABLE_CODE; + int4store(start, this_variable); + start+= 4; + } + */ + + /* Store length of status variables */ + status_vars_len= (uint) (start-start_of_status); + DBUG_ASSERT(status_vars_len <= MAX_SIZE_LOG_EVENT_STATUS); + int2store(buf + Q_STATUS_VARS_LEN_OFFSET, status_vars_len); + /* + Calculate length of whole event + The "1" below is the \0 in the db's length + */ + event_length= (uint) (start-buf) 
+ get_post_header_size_for_derived() + db_len + 1 + q_len; + + return (write_header(file, event_length) || + my_b_safe_write(file, (byte*) buf, QUERY_HEADER_LEN) || + write_post_header_for_derived(file) || + my_b_safe_write(file, (byte*) start_of_status, + (uint) (start-start_of_status)) || + my_b_safe_write(file, (db) ? (byte*) db : (byte*)"", db_len + 1) || + my_b_safe_write(file, (byte*) query, q_len)) ? 1 : 0; +} /* Query_log_event::Query_log_event() + + The simplest constructor that could possibly work. This is used for + creating static objects that have a special meaning and are invisible + to the log. */ +Query_log_event::Query_log_event() + :Log_event(), data_buf(0) +{ +} -#ifndef MYSQL_CLIENT + +/* + Query_log_event::Query_log_event() +*/ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, ulong query_length, bool using_trans, bool suppress_use) - :Log_event(thd_arg, + :Log_event(thd_arg, ((thd_arg->tmp_table_used ? LOG_EVENT_THREAD_SPECIFIC_F : 0) | (suppress_use ? LOG_EVENT_SUPPRESS_USE_F : 0)), using_trans), - data_buf(0), query(query_arg), + data_buf(0), query(query_arg), catalog(thd_arg->catalog), db(thd_arg->db), q_len((uint32) query_length), - error_code(thd_arg->killed ? + error_code((thd_arg->killed != THD::NOT_KILLED) ? ((thd_arg->system_thread & SYSTEM_THREAD_DELAYED_INSERT) ? 
- 0 : ER_SERVER_SHUTDOWN) : thd_arg->net.last_errno), + 0 : thd->killed_errno()) : thd_arg->net.last_errno), thread_id(thd_arg->thread_id), /* save the original thread id; we already know the server id */ - slave_proxy_id(thd_arg->variables.pseudo_thread_id) + slave_proxy_id(thd_arg->variables.pseudo_thread_id), + flags2_inited(1), sql_mode_inited(1), charset_inited(1), + sql_mode(thd_arg->variables.sql_mode), + auto_increment_increment(thd_arg->variables.auto_increment_increment), + auto_increment_offset(thd_arg->variables.auto_increment_offset), + lc_time_names_number(thd_arg->variables.lc_time_names->number), + charset_database_number(0) { time_t end_time; time(&end_time); exec_time = (ulong) (end_time - thd->start_time); + catalog_len = (catalog) ? (uint32) strlen(catalog) : 0; + /* status_vars_len is set just before writing the event */ db_len = (db) ? (uint32) strlen(db) : 0; + if (thd_arg->variables.collation_database != thd_arg->db_charset) + charset_database_number= thd_arg->variables.collation_database->number; + + /* + If we don't use flags2 for anything else than options contained in + thd->options, it would be more efficient to flags2=thd_arg->options + (OPTIONS_WRITTEN_TO_BINLOG would be used only at reading time). + But it's likely that we don't want to use 32 bits for 3 bits; in the future + we will probably want to reclaim the 29 bits. So we need the &. 
+ */ + flags2= (uint32) (thd_arg->options & OPTIONS_WRITTEN_TO_BIN_LOG); + DBUG_ASSERT(thd->variables.character_set_client->number < 256*256); + DBUG_ASSERT(thd->variables.collation_connection->number < 256*256); + DBUG_ASSERT(thd->variables.collation_server->number < 256*256); + int2store(charset, thd_arg->variables.character_set_client->number); + int2store(charset+2, thd_arg->variables.collation_connection->number); + int2store(charset+4, thd_arg->variables.collation_server->number); + if (thd_arg->time_zone_used) + { + /* + Note that our event becomes dependent on the Time_zone object + representing the time zone. Fortunately such objects are never deleted + or changed during mysqld's lifetime. + */ + time_zone_len= thd_arg->variables.time_zone->get_name()->length(); + time_zone_str= thd_arg->variables.time_zone->get_name()->ptr(); + } + else + time_zone_len= 0; + DBUG_PRINT("info",("Query_log_event has flags2: %lu sql_mode: %lu", + (ulong) flags2, sql_mode)); } #endif /* MYSQL_CLIENT */ +/* 2 utility functions for the next method */ + +/* + Get the pointer for a string (src) that contains the length in + the first byte. Set the output string (dst) to the string value + and place the length of the string in the byte after the string. +*/ +static void get_str_len_and_pointer(const Log_event::Byte **src, + const char **dst, + uint *len) +{ + if ((*len= **src)) + *dst= (char *)*src + 1; // Will be copied later + (*src)+= *len + 1; +} + +static void copy_str_and_move(const char **src, + Log_event::Byte **dst, + uint len) +{ + memcpy(*dst, *src, len); + *src= (const char *)*dst; + (*dst)+= len; + *(*dst)++= 0; +} + /* Query_log_event::Query_log_event() + This is used by the SQL slave thread to prepare the event before execution. 
*/ -Query_log_event::Query_log_event(const char* buf, int event_len, - bool old_format) - :Log_event(buf, old_format),data_buf(0), query(NULL), db(NULL) +Query_log_event::Query_log_event(const char* buf, uint event_len, + const Format_description_log_event *description_event, + Log_event_type event_type) + :Log_event(buf, description_event), data_buf(0), query(NullS), + db(NullS), catalog_len(0), status_vars_len(0), + flags2_inited(0), sql_mode_inited(0), charset_inited(0), + auto_increment_increment(1), auto_increment_offset(1), + time_zone_len(0), lc_time_names_number(0), charset_database_number(0) { ulong data_len; - if (old_format) - { - if ((uint)event_len < OLD_HEADER_LEN + QUERY_HEADER_LEN) - return; - data_len = event_len - (QUERY_HEADER_LEN + OLD_HEADER_LEN); - buf += OLD_HEADER_LEN; - } - else - { - if ((uint)event_len < QUERY_EVENT_OVERHEAD) - return; - data_len = event_len - QUERY_EVENT_OVERHEAD; - buf += LOG_EVENT_HEADER_LEN; - } - + uint32 tmp; + uint8 common_header_len, post_header_len; + Log_event::Byte *start; + const Log_event::Byte *end; + bool catalog_nz= 1; + DBUG_ENTER("Query_log_event::Query_log_event(char*,...)"); + + common_header_len= description_event->common_header_len; + post_header_len= description_event->post_header_len[event_type-1]; + DBUG_PRINT("info",("event_len: %u common_header_len: %d post_header_len: %d", + event_len, common_header_len, post_header_len)); + + /* + We test if the event's length is sensible, and if so we compute data_len. + We cannot rely on QUERY_HEADER_LEN here as it would not be format-tolerant. + We use QUERY_HEADER_MINIMAL_LEN which is the same for 3.23, 4.0 & 5.0. 
+ */ + if (event_len < (uint)(common_header_len + post_header_len)) + DBUG_VOID_RETURN; + data_len = event_len - (common_header_len + post_header_len); + buf+= common_header_len; + + slave_proxy_id= thread_id = uint4korr(buf + Q_THREAD_ID_OFFSET); exec_time = uint4korr(buf + Q_EXEC_TIME_OFFSET); + db_len = (uint)buf[Q_DB_LEN_OFFSET]; // TODO: add a check of all *_len vars error_code = uint2korr(buf + Q_ERR_CODE_OFFSET); - if (!(data_buf = (char*) my_malloc(data_len + 1, MYF(MY_WME)))) - return; + /* + 5.0 format starts here. + Depending on the format, we may or not have affected/warnings etc + The remnent post-header to be parsed has length: + */ + tmp= post_header_len - QUERY_HEADER_MINIMAL_LEN; + if (tmp) + { + status_vars_len= uint2korr(buf + Q_STATUS_VARS_LEN_OFFSET); + data_len-= status_vars_len; + DBUG_PRINT("info", ("Query_log_event has status_vars_len: %u", + (uint) status_vars_len)); + tmp-= 2; + } + /* + We have parsed everything we know in the post header for QUERY_EVENT, + the rest of post header is either comes from older version MySQL or + dedicated to derived events (e.g. Execute_load_query...) 
+ */ - memcpy(data_buf, buf + Q_DATA_OFFSET, data_len); - slave_proxy_id= thread_id= uint4korr(buf + Q_THREAD_ID_OFFSET); - db = data_buf; - db_len = (uint)buf[Q_DB_LEN_OFFSET]; - query=data_buf + db_len + 1; - q_len = data_len - 1 - db_len; - *((char*)query+q_len) = 0; + /* variable-part: the status vars; only in MySQL 5.0 */ + + start= (Log_event::Byte*) (buf+post_header_len); + end= (const Log_event::Byte*) (start+status_vars_len); + for (const Log_event::Byte* pos= start; pos < end;) + { + switch (*pos++) { + case Q_FLAGS2_CODE: + flags2_inited= 1; + flags2= uint4korr(pos); + DBUG_PRINT("info",("In Query_log_event, read flags2: %lu", (ulong) flags2)); + pos+= 4; + break; + case Q_SQL_MODE_CODE: + { +#ifndef DBUG_OFF + char buff[22]; +#endif + sql_mode_inited= 1; + sql_mode= (ulong) uint8korr(pos); // QQ: Fix when sql_mode is ulonglong + DBUG_PRINT("info",("In Query_log_event, read sql_mode: %s", + llstr(sql_mode, buff))); + pos+= 8; + break; + } + case Q_CATALOG_NZ_CODE: + get_str_len_and_pointer(&pos, &catalog, &catalog_len); + break; + case Q_AUTO_INCREMENT: + auto_increment_increment= uint2korr(pos); + auto_increment_offset= uint2korr(pos+2); + pos+= 4; + break; + case Q_CHARSET_CODE: + { + charset_inited= 1; + memcpy(charset, pos, 6); + pos+= 6; + break; + } + case Q_TIME_ZONE_CODE: + { + get_str_len_and_pointer(&pos, &time_zone_str, &time_zone_len); + break; + } + case Q_CATALOG_CODE: /* for 5.0.x where 0<=x<=3 masters */ + if ((catalog_len= *pos)) + catalog= (char*) pos+1; // Will be copied later + pos+= catalog_len+2; // leap over end 0 + catalog_nz= 0; // catalog has end 0 in event + break; + case Q_LC_TIME_NAMES_CODE: + lc_time_names_number= uint2korr(pos); + pos+= 2; + break; + case Q_CHARSET_DATABASE_CODE: + charset_database_number= uint2korr(pos); + pos+= 2; + break; + default: + /* That's why you must write status vars in growing order of code */ + DBUG_PRINT("info",("Query_log_event has unknown status vars (first has\ + code: %u), skipping the 
rest of them", (uint) *(pos-1))); + pos= (const uchar*) end; // Break loop + } + } + +#if !defined(MYSQL_CLIENT) && defined(HAVE_QUERY_CACHE) + if (!(start= data_buf = (Log_event::Byte*) my_malloc(catalog_len + 1 + + time_zone_len + 1 + + data_len + 1 + + QUERY_CACHE_FLAGS_SIZE + + db_len + 1, + MYF(MY_WME)))) +#else + if (!(start= data_buf = (Log_event::Byte*) my_malloc(catalog_len + 1 + + time_zone_len + 1 + + data_len + 1, + MYF(MY_WME)))) +#endif + DBUG_VOID_RETURN; + if (catalog_len) // If catalog is given + { + if (likely(catalog_nz)) // true except if event comes from 5.0.0|1|2|3. + copy_str_and_move(&catalog, &start, catalog_len); + else + { + memcpy(start, catalog, catalog_len+1); // copy end 0 + catalog= (const char *)start; + start+= catalog_len+1; + } + } + if (time_zone_len) + copy_str_and_move(&time_zone_str, &start, time_zone_len); + + /* A 2nd variable part; this is common to all versions */ + memcpy((char*) start, end, data_len); // Copy db and query + start[data_len]= '\0'; // End query with \0 (For safetly) + db= (char *)start; + query= (char *)(start + db_len + 1); + q_len= data_len - db_len -1; + DBUG_VOID_RETURN; } @@ -946,52 +1527,170 @@ Query_log_event::Query_log_event(const char* buf, int event_len, */ #ifdef MYSQL_CLIENT -void Query_log_event::print(FILE* file, bool short_form, char* last_db) +void Query_log_event::print_query_header(FILE* file, + PRINT_EVENT_INFO* print_event_info) { + // TODO: print the catalog ?? 
char buff[40],*end; // Enough for SET TIMESTAMP - const uint set_len= sizeof("SET ONE_SHOT CHARACTER_SET_CLIENT=") - 1; - if (!short_form) + bool different_db= 1; + uint32 tmp; + + if (!print_event_info->short_form) { - print_header(file); - fprintf(file, "\tQuery\tthread_id=%lu\texec_time=%lu\terror_code=%d\n", - (ulong) thread_id, (ulong) exec_time, error_code); + print_header(file, print_event_info); + fprintf(file, "\t%s\tthread_id=%lu\texec_time=%lu\terror_code=%d\n", + get_type_str(), (ulong) thread_id, (ulong) exec_time, error_code); } - bool different_db= 1; + if (!(flags & LOG_EVENT_SUPPRESS_USE_F) && db) + { + if (different_db= memcmp(print_event_info->db, db, db_len + 1)) + memcpy(print_event_info->db, db, db_len + 1); + if (db[0] && different_db) + fprintf(file, "use %s%s\n", db, print_event_info->delimiter); + } + + end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10); + end= strmov(end, print_event_info->delimiter); + *end++='\n'; + my_fwrite(file, (byte*) buff, (uint) (end-buff),MYF(MY_NABP | MY_WME)); + if (flags & LOG_EVENT_THREAD_SPECIFIC_F) + fprintf(file,"SET @@session.pseudo_thread_id=%lu%s\n", + (ulong)thread_id, print_event_info->delimiter); - if (!(flags & LOG_EVENT_SUPPRESS_USE_F)) + /* + If flags2_inited==0, this is an event from 3.23 or 4.0; nothing to + print (remember we don't produce mixed relay logs so there cannot be + 5.0 events before that one so there is nothing to reset). + */ + if (likely(flags2_inited)) /* likely as this will mainly read 5.0 logs */ { - if (db && last_db) + /* tmp is a bitmask of bits which have changed. 
*/ + if (likely(print_event_info->flags2_inited)) + /* All bits which have changed */ + tmp= (print_event_info->flags2) ^ flags2; + else /* that's the first Query event we read */ { - if (different_db= memcmp(last_db, db, db_len + 1)) - memcpy(last_db, db, db_len + 1); + print_event_info->flags2_inited= 1; + tmp= ~((uint32)0); /* all bits have changed */ } - - if (db && db[0] && different_db) + + if (unlikely(tmp)) /* some bits have changed */ { - fprintf(file, "use %s;\n", db); + bool need_comma= 0; + fprintf(file, "SET "); + print_set_option(file, tmp, OPTION_NO_FOREIGN_KEY_CHECKS, ~flags2, + "@@session.foreign_key_checks", &need_comma); + print_set_option(file, tmp, OPTION_AUTO_IS_NULL, flags2, + "@@session.sql_auto_is_null", &need_comma); + print_set_option(file, tmp, OPTION_RELAXED_UNIQUE_CHECKS, ~flags2, + "@@session.unique_checks", &need_comma); + fprintf(file,"%s\n", print_event_info->delimiter); + print_event_info->flags2= flags2; } } - end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10); - *end++=';'; - *end++='\n'; - my_fwrite(file, (byte*) buff, (uint) (end-buff),MYF(MY_NABP | MY_WME)); - if (flags & LOG_EVENT_THREAD_SPECIFIC_F) - fprintf(file,"SET @@session.pseudo_thread_id=%lu;\n",(ulong)thread_id); - /* charset_name command for mysql client */ - if (!strncmp(query, "SET ONE_SHOT CHARACTER_SET_CLIENT=", set_len)) - { - char * endptr; - int cs_number= strtoul(query + set_len, &endptr, 10); - DBUG_ASSERT(*endptr == ','); - CHARSET_INFO *cs_info= get_charset(cs_number, MYF(MY_WME)); - if (cs_info) { - fprintf(file, "/*!\\C %s */;\n", cs_info->csname); + /* + Now the session variables; + it's more efficient to pass SQL_MODE as a number instead of a + comma-separated list. + FOREIGN_KEY_CHECKS, SQL_AUTO_IS_NULL, UNIQUE_CHECKS are session-only + variables (they have no global version; they're not listed in + sql_class.h), The tests below work for pure binlogs or pure relay + logs. 
Won't work for mixed relay logs but we don't create mixed + relay logs (that is, there is no relay log with a format change + except within the 3 first events, which mysqlbinlog handles + gracefully). So this code should always be good. + */ + + if (likely(sql_mode_inited)) + { + if (unlikely(!print_event_info->sql_mode_inited)) /* first Query event */ + { + print_event_info->sql_mode_inited= 1; + /* force a difference to force write */ + print_event_info->sql_mode= ~sql_mode; + } + if (unlikely(print_event_info->sql_mode != sql_mode)) + { + fprintf(file,"SET @@session.sql_mode=%lu%s\n", + (ulong)sql_mode, print_event_info->delimiter); + print_event_info->sql_mode= sql_mode; + } + } + if (print_event_info->auto_increment_increment != auto_increment_increment || + print_event_info->auto_increment_offset != auto_increment_offset) + { + fprintf(file,"SET @@session.auto_increment_increment=%lu, @@session.auto_increment_offset=%lu%s\n", + auto_increment_increment,auto_increment_offset, + print_event_info->delimiter); + print_event_info->auto_increment_increment= auto_increment_increment; + print_event_info->auto_increment_offset= auto_increment_offset; + } + + /* TODO: print the catalog when we feature SET CATALOG */ + + if (likely(charset_inited)) + { + if (unlikely(!print_event_info->charset_inited)) /* first Query event */ + { + print_event_info->charset_inited= 1; + print_event_info->charset[0]= ~charset[0]; // force a difference to force write + } + if (unlikely(bcmp(print_event_info->charset, charset, 6))) + { + CHARSET_INFO *cs_info= get_charset(uint2korr(charset), MYF(MY_WME)); + if (cs_info) + { + /* for mysql client */ + fprintf(file, "/*!\\C %s */%s\n", + cs_info->csname, print_event_info->delimiter); + } + fprintf(file,"SET " + "@@session.character_set_client=%d," + "@@session.collation_connection=%d," + "@@session.collation_server=%d" + "%s\n", + uint2korr(charset), + uint2korr(charset+2), + uint2korr(charset+4), + print_event_info->delimiter); + 
memcpy(print_event_info->charset, charset, 6); + } + } + if (time_zone_len) + { + if (bcmp(print_event_info->time_zone_str, time_zone_str, time_zone_len+1)) + { + fprintf(file,"SET @@session.time_zone='%s'%s\n", + time_zone_str, print_event_info->delimiter); + memcpy(print_event_info->time_zone_str, time_zone_str, time_zone_len+1); } } + if (lc_time_names_number != print_event_info->lc_time_names_number) + { + fprintf(file, "SET @@session.lc_time_names=%d%s\n", + lc_time_names_number, print_event_info->delimiter); + print_event_info->lc_time_names_number= lc_time_names_number; + } + if (charset_database_number != print_event_info->charset_database_number) + { + if (charset_database_number) + fprintf(file, "SET @@session.collation_database=%d%s\n", + charset_database_number, print_event_info->delimiter); + else + fprintf(file, "SET @@session.collation_database=DEFAULT%s\n", + print_event_info->delimiter); + print_event_info->charset_database_number= charset_database_number; + } +} + + +void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) +{ + print_query_header(file, print_event_info); my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME)); - fprintf(file, ";\n"); + fprintf(file, "%s\n", print_event_info->delimiter); } #endif /* MYSQL_CLIENT */ @@ -1001,23 +1700,58 @@ void Query_log_event::print(FILE* file, bool short_form, char* last_db) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) + +static const char *rewrite_db(const char *db) +{ + if (replicate_rewrite_db.is_empty() || db == NULL) + return db; + I_List_iterator<i_string_pair> it(replicate_rewrite_db); + i_string_pair* tmp; + + while ((tmp=it++)) + { + if (strcmp(tmp->key, db) == 0) + return tmp->val; + } + return db; +} + + int Query_log_event::exec_event(struct st_relay_log_info* rli) { + return exec_event(rli, query, q_len); +} + + +int Query_log_event::exec_event(struct st_relay_log_info* rli, + const char *query_arg, uint32 q_len_arg) +{ + const char *new_db= 
rewrite_db(db); int expected_error,actual_error= 0; - thd->db_length= db_len; - thd->db= (char*) rewrite_db(db, &thd->db_length); + /* + Colleagues: please never free(thd->catalog) in MySQL. This would lead to + bugs as here thd->catalog is a part of an alloced block, not an entire + alloced block (see Query_log_event::exec_event()). Same for thd->db. + Thank you. + */ + thd->catalog= catalog_len ? (char *) catalog : (char *)""; + thd->set_db(new_db, strlen(new_db)); /* allocates a copy of 'db' */ + thd->variables.auto_increment_increment= auto_increment_increment; + thd->variables.auto_increment_offset= auto_increment_offset; /* - InnoDB internally stores the master log position it has processed so far; - position to store is of the END of the current log event. + InnoDB internally stores the master log position it has executed so far, + i.e. the position just after the COMMIT event. + When InnoDB will want to store, the positions in rli won't have + been updated yet, so group_master_log_* will point to old BEGIN + and event_master_log* will point to the beginning of current COMMIT. + But log_pos of the COMMIT Query event is what we want, i.e. the pos of the + END of the current log event (COMMIT). We save it in rli so that InnoDB can + access it. */ -#if MYSQL_VERSION_ID < 50000 - rli->future_group_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? 
(LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#else - /* In 5.0 we store the end_log_pos in the relay log so no problem */ rli->future_group_master_log_pos= log_pos; -#endif + DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); + clear_all_errors(thd, rli); /* @@ -1033,17 +1767,106 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli) if (db_ok(thd->db, replicate_do_db, replicate_ignore_db)) { thd->set_time((time_t)when); - thd->query_length= q_len; - thd->query = (char*)query; + thd->query_length= q_len_arg; + thd->query= (char*)query_arg; VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query_id = query_id++; + thd->query_id = next_query_id(); VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->variables.pseudo_thread_id= thread_id; // for temp tables - DBUG_PRINT("query",("%s",thd->query)); + if (ignored_error_code((expected_error= error_code)) || !check_expected_error(thd,rli,expected_error)) - mysql_parse(thd, thd->query, q_len); + { + if (flags2_inited) + /* + all bits of thd->options which are 1 in OPTIONS_WRITTEN_TO_BIN_LOG must + take their value from flags2. + */ + thd->options= flags2|(thd->options & ~(ulong)OPTIONS_WRITTEN_TO_BIN_LOG); + /* + else, we are in a 3.23/4.0 binlog; we previously received a + Rotate_log_event which reset thd->options and sql_mode etc, so nothing to do. + */ + /* + We do not replicate IGNORE_DIR_IN_CREATE. That is, if the master is a + slave which runs with SQL_MODE=IGNORE_DIR_IN_CREATE, this should not + force us to ignore the dir too. Imagine you are a ring of machines, and + one has a disk problem so that you temporarily need IGNORE_DIR_IN_CREATE + on this machine; you don't want it to propagate elsewhere (you don't want + all slaves to start ignoring the dirs). 
+ */ + if (sql_mode_inited) + thd->variables.sql_mode= + (ulong) ((thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE) | + (sql_mode & ~(ulong) MODE_NO_DIR_IN_CREATE)); + if (charset_inited) + { + if (rli->cached_charset_compare(charset)) + { + /* Verify that we support the charsets found in the event. */ + if (!(thd->variables.character_set_client= + get_charset(uint2korr(charset), MYF(MY_WME))) || + !(thd->variables.collation_connection= + get_charset(uint2korr(charset+2), MYF(MY_WME))) || + !(thd->variables.collation_server= + get_charset(uint2korr(charset+4), MYF(MY_WME)))) + { + /* + We updated the thd->variables with nonsensical values (0). Let's + set them to something safe (i.e. which avoids crash), and we'll + stop with EE_UNKNOWN_CHARSET in compare_errors (unless set to + ignore this error). + */ + set_slave_thread_default_charset(thd, rli); + goto compare_errors; + } + thd->update_charset(); // for the charset change to take effect + } + } + if (time_zone_len) + { + String tmp(time_zone_str, time_zone_len, &my_charset_bin); + if (!(thd->variables.time_zone= + my_tz_find_with_opening_tz_tables(thd, &tmp))) + { + my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), tmp.c_ptr()); + thd->variables.time_zone= global_system_variables.time_zone; + goto compare_errors; + } + } + if (lc_time_names_number) + { + if (!(thd->variables.lc_time_names= + my_locale_by_number(lc_time_names_number))) + { + my_printf_error(ER_UNKNOWN_ERROR, + "Unknown locale: '%d'", MYF(0), lc_time_names_number); + thd->variables.lc_time_names= &my_locale_en_US; + goto compare_errors; + } + } + else + thd->variables.lc_time_names= &my_locale_en_US; + if (charset_database_number) + { + CHARSET_INFO *cs; + if (!(cs= get_charset(charset_database_number, MYF(0)))) + { + char buf[20]; + int10_to_str((int) charset_database_number, buf, -10); + my_error(ER_UNKNOWN_COLLATION, MYF(0), buf); + goto compare_errors; + } + thd->variables.collation_database= cs; + } + else + thd->variables.collation_database= 
thd->db_charset; + + /* Execute the query (note that we bypass dispatch_command()) */ + mysql_parse(thd, thd->query, thd->query_length); + + } else { /* @@ -1053,7 +1876,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli) we exit gracefully; otherwise we warn about the bad error and tell DBA to check/fix it. */ - if (mysql_test_parse_for_slave(thd, thd->query, q_len)) + if (mysql_test_parse_for_slave(thd, thd->query, thd->query_length)) clear_all_errors(thd, rli); /* Can ignore query */ else { @@ -1073,19 +1896,21 @@ START SLAVE; . Query: '%s'", expected_error, thd->query); if (thd->net.last_errno != ER_SLAVE_IGNORED_TABLE) mysql_log.write(thd,COM_QUERY,"%s",thd->query); - /* +compare_errors: + + /* If we expected a non-zero error code, and we don't get the same error code, and none of them should be ignored. */ DBUG_PRINT("info",("expected_error: %d last_errno: %d", - expected_error, thd->net.last_errno)); + expected_error, thd->net.last_errno)); if ((expected_error != (actual_error= thd->net.last_errno)) && - expected_error && - !ignored_error_code(actual_error) && - !ignored_error_code(expected_error)) + expected_error && + !ignored_error_code(actual_error) && + !ignored_error_code(expected_error)) { slave_print_error(rli, 0, - "\ + "\ Query caused different errors on master and slave. \ Error on master: '%s' (%d), Error on slave: '%s' (%d). \ Default database: '%s'. Query: '%s'", @@ -1093,14 +1918,14 @@ Default database: '%s'. Query: '%s'", expected_error, actual_error ? thd->net.last_error: "no error", actual_error, - print_slave_db_safe(thd->db), query); + print_slave_db_safe(db), query_arg); thd->query_error= 1; } /* If we get the same error code as expected, or they should be ignored. */ else if (expected_error == actual_error || - ignored_error_code(actual_error)) + ignored_error_code(actual_error)) { DBUG_PRINT("info",("error ignored")); clear_all_errors(thd, rli); @@ -1114,16 +1939,49 @@ Default database: '%s'. 
Query: '%s'", "Error '%s' on query. Default database: '%s'. Query: '%s'", (actual_error ? thd->net.last_error : "unexpected success or fatal error"), - print_slave_db_safe(thd->db), query); + print_slave_db_safe(thd->db), query_arg); thd->query_error= 1; } + + /* + TODO: compare the values of "affected rows" around here. Something + like: + if ((uint32) affected_in_event != (uint32) affected_on_slave) + { + sql_print_error("Slave: did not get the expected number of affected \ + rows running query from master - expected %d, got %d (this numbers \ + should have matched modulo 4294967296).", 0, ...); + thd->query_error = 1; + } + We may also want an option to tell the slave to ignore "affected" + mismatch. This mismatch could be implemented with a new ER_ code, and + to ignore it you would use --slave-skip-errors... + + To do the comparison we need to know the value of "affected" which the + above mysql_parse() computed. And we need to know the value of + "affected" in the master's binlog. Both will be implemented later. The + important thing is that we now have the format ready to log the values + of "affected" in the binlog. So we can release 5.0.0 before effectively + logging "affected" and effectively comparing it. + */ } /* End of if (db_ok(... */ end: VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->db= 0; // prevent db from being freed + /* + Probably we have set thd->query, thd->db, thd->catalog to point to places + in the data_buf of this event. Now the event is going to be deleted + probably, so data_buf will be freed, so the thd->... listed above will be + pointers to freed memory. + So we must set them to 0, so that those bad pointers values are not later + used. Note that "cleanup" queries like automatic DROP TEMPORARY TABLE + don't suffer from these assignments to 0 as DROP TEMPORARY + TABLE uses the db.table syntax. 
+ */ + thd->catalog= 0; + thd->set_db(NULL, 0); /* will free the current database */ thd->query= 0; // just to be sure - thd->query_length= thd->db_length =0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); close_thread_tables(thd); free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); @@ -1134,22 +1992,45 @@ end: updating query. */ return (thd->query_error ? thd->query_error : - (thd->one_shot_set ? (rli->inc_event_relay_log_pos(get_event_len()),0) : + (thd->one_shot_set ? (rli->inc_event_relay_log_pos(),0) : Log_event::exec_event(rli))); } #endif /************************************************************************** - Start_log_event methods + Muted_query_log_event methods +**************************************************************************/ + +#ifndef MYSQL_CLIENT +/* + Muted_query_log_event::Muted_query_log_event() +*/ +Muted_query_log_event::Muted_query_log_event() + :Query_log_event() +{ +} +#endif + + +/************************************************************************** + Start_log_event_v3 methods **************************************************************************/ +#ifndef MYSQL_CLIENT +Start_log_event_v3::Start_log_event_v3() :Log_event(), binlog_version(BINLOG_VERSION), artificial_event(0) +{ + created= when; + memcpy(server_version, ::server_version, ST_SERVER_VER_LEN); +} +#endif + /* - Start_log_event::pack_info() + Start_log_event_v3::pack_info() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -void Start_log_event::pack_info(Protocol *protocol) +void Start_log_event_v3::pack_info(Protocol *protocol) { char buf[12 + ST_SERVER_VER_LEN + 14 + 22], *pos; pos= strmov(buf, "Server ver: "); @@ -1162,57 +2043,82 @@ void Start_log_event::pack_info(Protocol *protocol) /* - Start_log_event::print() + Start_log_event_v3::print() */ #ifdef MYSQL_CLIENT -void Start_log_event::print(FILE* file, bool short_form, char* last_db) +void Start_log_event_v3::print(FILE* file, PRINT_EVENT_INFO* print_event_info) { - 
if (short_form) - return; - - print_header(file); - fprintf(file, "\tStart: binlog v %d, server v %s created ", binlog_version, - server_version); - print_timestamp(file); - if (created) - fprintf(file," at startup"); - fputc('\n', file); + if (!print_event_info->short_form) + { + print_header(file, print_event_info); + fprintf(file, "\tStart: binlog v %d, server v %s created ", binlog_version, + server_version); + print_timestamp(file); + if (created) + fprintf(file," at startup"); + fputc('\n', file); + if (flags & LOG_EVENT_BINLOG_IN_USE_F) + fprintf(file, "# Warning: this binlog was not closed properly. " + "Most probably mysqld crashed writing it.\n"); + } + if (!artificial_event && created) + { +#ifdef WHEN_WE_HAVE_THE_RESET_CONNECTION_SQL_COMMAND + /* + This is for mysqlbinlog: like in replication, we want to delete the stale + tmp files left by an unclean shutdown of mysqld (temporary tables) + and rollback unfinished transaction. + Probably this can be done with RESET CONNECTION (syntax to be defined). + */ + fprintf(file,"RESET CONNECTION%s\n", print_event_info->delimiter); +#else + fprintf(file,"ROLLBACK%s\n", print_event_info->delimiter); +#endif + } fflush(file); } #endif /* MYSQL_CLIENT */ /* - Start_log_event::Start_log_event() + Start_log_event_v3::Start_log_event_v3() */ -Start_log_event::Start_log_event(const char* buf, - bool old_format) - :Log_event(buf, old_format) +Start_log_event_v3::Start_log_event_v3(const char* buf, + const Format_description_log_event* description_event) + :Log_event(buf, description_event) { - buf += (old_format) ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - binlog_version = uint2korr(buf+ST_BINLOG_VER_OFFSET); + buf+= description_event->common_header_len; + binlog_version= uint2korr(buf+ST_BINLOG_VER_OFFSET); memcpy(server_version, buf+ST_SERVER_VER_OFFSET, ST_SERVER_VER_LEN); - created = uint4korr(buf+ST_CREATED_OFFSET); + // prevent overrun if log is corrupted on disk + server_version[ST_SERVER_VER_LEN-1]= 0; + created= uint4korr(buf+ST_CREATED_OFFSET); + /* We use log_pos to mark if this was an artificial event or not */ + artificial_event= (log_pos == 0); } /* - Start_log_event::write_data() + Start_log_event_v3::write() */ -int Start_log_event::write_data(IO_CACHE* file) +#ifndef MYSQL_CLIENT +bool Start_log_event_v3::write(IO_CACHE* file) { - char buff[START_HEADER_LEN]; + char buff[START_V3_HEADER_LEN]; int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version); memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN); int4store(buff + ST_CREATED_OFFSET,created); - return (my_b_safe_write(file, (byte*) buff, sizeof(buff)) ? -1 : 0); + return (write_header(file, sizeof(buff)) || + my_b_safe_write(file, (byte*) buff, sizeof(buff))); } +#endif + /* - Start_log_event::exec_event() + Start_log_event_v3::exec_event() The master started @@ -1231,84 +2137,356 @@ int Start_log_event::write_data(IO_CACHE* file) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Start_log_event::exec_event(struct st_relay_log_info* rli) +int Start_log_event_v3::exec_event(struct st_relay_log_info* rli) { - DBUG_ENTER("Start_log_event::exec_event"); - - /* - If the I/O thread has not started, mi->old_format is BINLOG_FORMAT_CURRENT - (that's what the MASTER_INFO constructor does), so the test below is not - perfect at all. - */ - switch (rli->mi->old_format) { - case BINLOG_FORMAT_CURRENT: - /* - This is 4.x, so a Start_log_event is only at master startup, - so we are sure the master has restarted and cleared his temp tables. 
- */ - close_temporary_tables(thd); - cleanup_load_tmpdir(); + DBUG_ENTER("Start_log_event_v3::exec_event"); + switch (binlog_version) + { + case 3: + case 4: /* - As a transaction NEVER spans on 2 or more binlogs: - if we have an active transaction at this point, the master died while - writing the transaction to the binary log, i.e. while flushing the binlog - cache to the binlog. As the write was started, the transaction had been - committed on the master, so we lack of information to replay this - transaction on the slave; all we can do is stop with error. + This can either be 4.x (then a Start_log_event_v3 is only at master + startup so we are sure the master has restarted and cleared his temp + tables; the event always has 'created'>0) or 5.0 (then we have to test + 'created'). */ - if (thd->options & OPTION_BEGIN) + if (created) { - slave_print_error(rli, 0, "\ -Rolling back unfinished transaction (no COMMIT or ROLLBACK) from relay log. \ -A probable cause is that the master died while writing the transaction to its \ -binary log."); - return(1); + close_temporary_tables(thd); + cleanup_load_tmpdir(); } break; - /* + /* Now the older formats; in that case load_tmpdir is cleaned up by the I/O thread. */ - case BINLOG_FORMAT_323_LESS_57: - /* - Cannot distinguish a Start_log_event generated at master startup and - one generated by master FLUSH LOGS, so cannot be sure temp tables - have to be dropped. So do nothing. - */ - break; - case BINLOG_FORMAT_323_GEQ_57: + case 1: + if (strncmp(rli->relay_log.description_event_for_exec->server_version, + "3.23.57",7) >= 0 && created) + { + /* + Can distinguish, based on the value of 'created': this event was + generated at master startup. + */ + close_temporary_tables(thd); + } /* - Can distinguish, based on the value of 'created', - which was generated at master startup. 
+ Otherwise, can't distinguish a Start_log_event generated at + master startup and one generated by master FLUSH LOGS, so cannot + be sure temp tables have to be dropped. So do nothing. */ - if (created) - close_temporary_tables(thd); break; default: /* this case is impossible */ - return 1; + DBUG_RETURN(1); } - DBUG_RETURN(Log_event::exec_event(rli)); } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ -/************************************************************************** - Load_log_event methods -**************************************************************************/ +/*************************************************************************** + Format_description_log_event methods +****************************************************************************/ /* - Load_log_event::pack_info() + Format_description_log_event 1st ctor. + + SYNOPSIS + Format_description_log_event::Format_description_log_event + binlog_version the binlog version for which we want to build + an event. Can be 1 (=MySQL 3.23), 3 (=4.0.x + x>=2 and 4.1) or 4 (MySQL 5.0). Note that the + old 4.0 (binlog version 2) is not supported; + it should not be used for replication with + 5.0. + + DESCRIPTION + Ctor. Can be used to create the event to write to the binary log (when the + server starts or when FLUSH LOGS), or to create artificial events to parse + binlogs from MySQL 3.23 or 4.x. + When in a client, only the 2nd use is possible. 
+*/ + +Format_description_log_event:: +Format_description_log_event(uint8 binlog_ver, const char* server_ver) + :Start_log_event_v3() +{ + created= when; + binlog_version= binlog_ver; + switch (binlog_ver) { + case 4: /* MySQL 5.0 */ + memcpy(server_version, ::server_version, ST_SERVER_VER_LEN); + DBUG_EXECUTE_IF("pretend_version_50034_in_binlog", + strmov(server_version, "5.0.34");); + common_header_len= LOG_EVENT_HEADER_LEN; + number_of_event_types= LOG_EVENT_TYPES; + /* we'll catch my_malloc() error in is_valid() */ + post_header_len=(uint8*) my_malloc(number_of_event_types*sizeof(uint8), + MYF(MY_ZEROFILL)); + /* + This long list of assignments is not beautiful, but I see no way to + make it nicer, as the right members are #defines, not array members, so + it's impossible to write a loop. + */ + if (post_header_len) + { + post_header_len[START_EVENT_V3-1]= START_V3_HEADER_LEN; + post_header_len[QUERY_EVENT-1]= QUERY_HEADER_LEN; + post_header_len[ROTATE_EVENT-1]= ROTATE_HEADER_LEN; + post_header_len[LOAD_EVENT-1]= LOAD_HEADER_LEN; + post_header_len[CREATE_FILE_EVENT-1]= CREATE_FILE_HEADER_LEN; + post_header_len[APPEND_BLOCK_EVENT-1]= APPEND_BLOCK_HEADER_LEN; + post_header_len[EXEC_LOAD_EVENT-1]= EXEC_LOAD_HEADER_LEN; + post_header_len[DELETE_FILE_EVENT-1]= DELETE_FILE_HEADER_LEN; + post_header_len[NEW_LOAD_EVENT-1]= post_header_len[LOAD_EVENT-1]; + post_header_len[FORMAT_DESCRIPTION_EVENT-1]= FORMAT_DESCRIPTION_HEADER_LEN; + post_header_len[BEGIN_LOAD_QUERY_EVENT-1]= post_header_len[APPEND_BLOCK_EVENT-1]; + post_header_len[EXECUTE_LOAD_QUERY_EVENT-1]= EXECUTE_LOAD_QUERY_HEADER_LEN; + } + break; + + case 1: /* 3.23 */ + case 3: /* 4.0.x x>=2 */ + /* + We build an artificial (i.e. not sent by the master) event, which + describes what those old master versions send. + */ + if (binlog_ver==1) + strmov(server_version, server_ver ? server_ver : "3.23"); + else + strmov(server_version, server_ver ? server_ver : "4.0"); + common_header_len= binlog_ver==1 ? 
OLD_HEADER_LEN : + LOG_EVENT_MINIMAL_HEADER_LEN; + /* + The first new event in binlog version 4 is Format_desc. So any event type + after that does not exist in older versions. We use the events known by + version 3, even if version 1 had only a subset of them (this is not a + problem: it uses a few bytes for nothing but unifies code; it does not + make the slave detect less corruptions). + */ + number_of_event_types= FORMAT_DESCRIPTION_EVENT - 1; + post_header_len=(uint8*) my_malloc(number_of_event_types*sizeof(uint8), + MYF(0)); + if (post_header_len) + { + post_header_len[START_EVENT_V3-1]= START_V3_HEADER_LEN; + post_header_len[QUERY_EVENT-1]= QUERY_HEADER_MINIMAL_LEN; + post_header_len[STOP_EVENT-1]= 0; + post_header_len[ROTATE_EVENT-1]= (binlog_ver==1) ? 0 : ROTATE_HEADER_LEN; + post_header_len[INTVAR_EVENT-1]= 0; + post_header_len[LOAD_EVENT-1]= LOAD_HEADER_LEN; + post_header_len[SLAVE_EVENT-1]= 0; + post_header_len[CREATE_FILE_EVENT-1]= CREATE_FILE_HEADER_LEN; + post_header_len[APPEND_BLOCK_EVENT-1]= APPEND_BLOCK_HEADER_LEN; + post_header_len[EXEC_LOAD_EVENT-1]= EXEC_LOAD_HEADER_LEN; + post_header_len[DELETE_FILE_EVENT-1]= DELETE_FILE_HEADER_LEN; + post_header_len[NEW_LOAD_EVENT-1]= post_header_len[LOAD_EVENT-1]; + post_header_len[RAND_EVENT-1]= 0; + post_header_len[USER_VAR_EVENT-1]= 0; + } + break; + default: /* Includes binlog version 2 i.e. 4.0.x x<=1 */ + post_header_len= 0; /* will make is_valid() fail */ + break; + } + calc_server_version_split(); +} + + +/* + The problem with this constructor is that the fixed header may have a + length different from this version, but we don't know this length as we + have not read the Format_description_log_event which says it, yet. This + length is in the post-header of the event, but we don't know where the + post-header starts. + So this type of event HAS to: + - either have the header's length at the beginning (in the header, at a + fixed position which will never be changed), not in the post-header. 
That + would make the header be "shifted" compared to other events. + - or have a header of size LOG_EVENT_MINIMAL_HEADER_LEN (19), in all future + versions, so that we know for sure. + I (Guilhem) chose the 2nd solution. Rotate has the same constraint (because + it is sent before Format_description_log_event). +*/ + +Format_description_log_event:: +Format_description_log_event(const char* buf, + uint event_len, + const + Format_description_log_event* + description_event) + :Start_log_event_v3(buf, description_event) +{ + DBUG_ENTER("Format_description_log_event::Format_description_log_event(char*,...)"); + buf+= LOG_EVENT_MINIMAL_HEADER_LEN; + if ((common_header_len=buf[ST_COMMON_HEADER_LEN_OFFSET]) < OLD_HEADER_LEN) + DBUG_VOID_RETURN; /* sanity check */ + number_of_event_types= + event_len-(LOG_EVENT_MINIMAL_HEADER_LEN+ST_COMMON_HEADER_LEN_OFFSET+1); + DBUG_PRINT("info", ("common_header_len=%d number_of_event_types=%d", + common_header_len, number_of_event_types)); + /* If alloc fails, we'll detect it in is_valid() */ + post_header_len= (uint8*) my_memdup((byte*)buf+ST_COMMON_HEADER_LEN_OFFSET+1, + number_of_event_types* + sizeof(*post_header_len), MYF(0)); + calc_server_version_split(); + DBUG_VOID_RETURN; +} + +#ifndef MYSQL_CLIENT +bool Format_description_log_event::write(IO_CACHE* file) +{ + /* + We don't call Start_log_event_v3::write() because this would make 2 + my_b_safe_write(). 
+ */ + byte buff[FORMAT_DESCRIPTION_HEADER_LEN]; + int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version); + memcpy((char*) buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN); + int4store(buff + ST_CREATED_OFFSET,created); + buff[ST_COMMON_HEADER_LEN_OFFSET]= LOG_EVENT_HEADER_LEN; + memcpy((char*) buff+ST_COMMON_HEADER_LEN_OFFSET+1, (byte*) post_header_len, + LOG_EVENT_TYPES); + return (write_header(file, sizeof(buff)) || + my_b_safe_write(file, buff, sizeof(buff))); +} +#endif + +/* + SYNOPSIS + Format_description_log_event::exec_event() + + IMPLEMENTATION + Save the information which describes the binlog's format, to be able to + read all coming events. + Call Start_log_event_v3::exec_event(). */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -void Load_log_event::pack_info(Protocol *protocol) +int Format_description_log_event::exec_event(struct st_relay_log_info* rli) { - char *buf, *pos; - uint buf_len; + DBUG_ENTER("Format_description_log_event::exec_event"); + + /* save the information describing this binlog */ + delete rli->relay_log.description_event_for_exec; + rli->relay_log.description_event_for_exec= this; + +#ifdef USING_TRANSACTIONS + /* + As a transaction NEVER spans on 2 or more binlogs: + if we have an active transaction at this point, the master died + while writing the transaction to the binary log, i.e. while + flushing the binlog cache to the binlog. As the write was started, + the transaction had been committed on the master, so we lack of + information to replay this transaction on the slave; all we can do + is stop with error. + Note: this event could be sent by the master to inform us of the + format of its binlog; in other words maybe it is not at its + original place when it comes to us; we'll know this by checking + log_pos ("artificial" events have log_pos == 0). 
+ */ + if (!artificial_event && created && thd->transaction.all.nht) + { + slave_print_error(rli, 0, "Rolling back unfinished transaction (no " + "COMMIT or ROLLBACK) from relay log. A probable cause " + "is that the master died while writing the transaction " + "to its binary log."); + end_trans(thd, ROLLBACK); + } +#endif + /* + If this event comes from ourselves, there is no cleaning task to perform, + we don't call Start_log_event_v3::exec_event() (this was just to update the + log's description event). + */ + if (server_id == (uint32) ::server_id) + { + /* + Do not modify rli->group_master_log_pos, as this event did not exist on + the master. That is, just update the *relay log* coordinates; this is + done by passing log_pos=0 to inc_group_relay_log_pos, like we do in + Stop_log_event::exec_event(). + If in a transaction, don't touch group_* coordinates. + */ + if (thd->options & OPTION_BEGIN) + rli->inc_event_relay_log_pos(); + else + { + rli->inc_group_relay_log_pos(0); + flush_relay_log_info(rli); + } + DBUG_RETURN(0); + } + + /* + If the event was not requested by the slave i.e. the master sent it while + the slave asked for a position >4, the event will make + rli->group_master_log_pos advance. Say that the slave asked for position + 1000, and the Format_desc event's end is 96. Then in the beginning of + replication rli->group_master_log_pos will be 0, then 96, then jump to + first really asked event (which is >96). So this is ok. + */ + DBUG_RETURN(Start_log_event_v3::exec_event(rli)); +} +#endif - buf_len= + +/** + Splits the event's 'server_version' string into three numeric pieces stored + into 'server_version_split': + X.Y.Zabc (X,Y,Z numbers, a not a digit) -> {X,Y,Z} + X.Yabc -> {X,Y,0} + Xabc -> {X,0,0} + 'server_version_split' is then used for lookups to find if the server which + created this event has some known bug. 
+*/ +void Format_description_log_event::calc_server_version_split() +{ + char *p= server_version, *r; + ulong number; + for (uint i= 0; i<=2; i++) + { + number= strtoul(p, &r, 10); + server_version_split[i]= (uchar)number; + DBUG_ASSERT(number < 256); // fit in uchar + p= r; + DBUG_ASSERT(!((i == 0) && (*r != '.'))); // should be true in practice + if (*r == '.') + p++; // skip the dot + } + DBUG_PRINT("info",("Format_description_log_event::server_version_split:" + " '%s' %d %d %d", server_version, + server_version_split[0], + server_version_split[1], server_version_split[2])); +} + + + /************************************************************************** + Load_log_event methods + General note about Load_log_event: the binlogging of LOAD DATA INFILE is + going to be changed in 5.0 (or maybe in 5.1; not decided yet). + However, the 5.0 slave could still have to read such events (from a 4.x + master), convert them (which just means maybe expand the header, when 5.0 + servers have a UID in events) (remember that whatever is after the header + will be like in 4.x, as this event's format is not modified in 5.0 as we + will use new types of events to log the new LOAD DATA INFILE features). + To be able to read/convert, we just need to not assume that the common + header is of length LOG_EVENT_HEADER_LEN (we must use the description + event). + Note that I (Guilhem) manually tested replication of a big LOAD DATA INFILE + between 3.23 and 5.0, and between 4.0 and 5.0, and it works fine (and the + positions displayed in SHOW SLAVE STATUS then are fine too). 
+ **************************************************************************/ + +/* + Load_log_event::pack_info() +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +uint Load_log_event::get_query_buffer_length() +{ + return 5 + db_len + 3 + // "use DB; " 18 + fname_len + 2 + // "LOAD DATA INFILE 'file''" 7 + // LOCAL @@ -1318,14 +2496,18 @@ void Load_log_event::pack_info(Protocol *protocol) 23 + sql_ex.enclosed_len*4 + 2 + // " OPTIONALLY ENCLOSED BY 'str'" 12 + sql_ex.escaped_len*4 + 2 + // " ESCAPED BY 'str'" 21 + sql_ex.line_term_len*4 + 2 + // " FIELDS TERMINATED BY 'str'" - 19 + sql_ex.line_start_len*4 + 2 + // " LINES STARTING BY 'str'" - 15 + 22 + // " IGNORE xxx LINES" + 19 + sql_ex.line_start_len*4 + 2 + // " LINES STARTING BY 'str'" + 15 + 22 + // " IGNORE xxx LINES" 3 + (num_fields-1)*2 + field_block_len; // " (field1, field2, ...)" +} - if (!(buf= my_malloc(buf_len, MYF(MY_WME)))) - return; - pos= buf; - if (db && db_len) + +void Load_log_event::print_query(bool need_db, char *buf, + char **end, char **fn_start, char **fn_end) +{ + char *pos= buf; + + if (need_db && db && db_len) { pos= strmov(pos, "use `"); memcpy(pos, db, db_len); @@ -1333,6 +2515,10 @@ void Load_log_event::pack_info(Protocol *protocol) } pos= strmov(pos, "LOAD DATA "); + + if (fn_start) + *fn_start= pos; + if (check_fname_outside_temp_buf()) pos= strmov(pos, "LOCAL "); pos= strmov(pos, "INFILE '"); @@ -1344,7 +2530,12 @@ void Load_log_event::pack_info(Protocol *protocol) else if (sql_ex.opt_flags & IGNORE_FLAG) pos= strmov(pos, " IGNORE "); - pos= strmov(pos ,"INTO TABLE `"); + pos= strmov(pos ,"INTO"); + + if (fn_end) + *fn_end= pos; + + pos= strmov(pos ," TABLE `"); memcpy(pos, table_name, table_name_len); pos+= table_name_len; @@ -1393,17 +2584,30 @@ void Load_log_event::pack_info(Protocol *protocol) *pos++= ')'; } - protocol->store(buf, pos-buf, &my_charset_bin); + *end= pos; +} + + +void Load_log_event::pack_info(Protocol *protocol) +{ + char *buf, *end; + + if 
(!(buf= my_malloc(get_query_buffer_length(), MYF(MY_WME)))) + return; + print_query(TRUE, buf, &end, 0, 0); + protocol->store(buf, end-buf, &my_charset_bin); my_free(buf, MYF(0)); } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ +#ifndef MYSQL_CLIENT + /* Load_log_event::write_data_header() */ -int Load_log_event::write_data_header(IO_CACHE* file) +bool Load_log_event::write_data_header(IO_CACHE* file) { char buf[LOAD_HEADER_LEN]; int4store(buf + L_THREAD_ID_OFFSET, slave_proxy_id); @@ -1412,7 +2616,7 @@ int Load_log_event::write_data_header(IO_CACHE* file) buf[L_TBL_LEN_OFFSET] = (char)table_name_len; buf[L_DB_LEN_OFFSET] = (char)db_len; int4store(buf + L_NUM_FIELDS_OFFSET, num_fields); - return my_b_safe_write(file, (byte*)buf, LOAD_HEADER_LEN); + return my_b_safe_write(file, (byte*)buf, LOAD_HEADER_LEN) != 0; } @@ -1420,7 +2624,7 @@ int Load_log_event::write_data_header(IO_CACHE* file) Load_log_event::write_data_body() */ -int Load_log_event::write_data_body(IO_CACHE* file) +bool Load_log_event::write_data_body(IO_CACHE* file) { if (sql_ex.write_data(file)) return 1; @@ -1440,7 +2644,6 @@ int Load_log_event::write_data_body(IO_CACHE* file) Load_log_event::Load_log_event() */ -#ifndef MYSQL_CLIENT Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, const char *db_arg, const char *table_name_arg, List<Item> &fields_arg, @@ -1524,6 +2727,7 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, } #endif /* !MYSQL_CLIENT */ + /* Load_log_event::Load_log_event() @@ -1532,15 +2736,25 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, constructed event. 
*/ -Load_log_event::Load_log_event(const char *buf, int event_len, - bool old_format) - :Log_event(buf, old_format), num_fields(0), fields(0), - field_lens(0), field_block_len(0), +Load_log_event::Load_log_event(const char *buf, uint event_len, + const Format_description_log_event *description_event) + :Log_event(buf, description_event), num_fields(0), fields(0), + field_lens(0),field_block_len(0), table_name(0), db(0), fname(0), local_fname(FALSE) { DBUG_ENTER("Load_log_event"); - if (event_len) // derived class, will call copy_log_event() itself - copy_log_event(buf, event_len, old_format); + /* + I (Guilhem) manually tested replication of LOAD DATA INFILE for 3.23->5.0, + 4.0->5.0 and 5.0->5.0 and it works. + */ + if (event_len) + copy_log_event(buf, event_len, + ((buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ? + LOAD_HEADER_LEN + + description_event->common_header_len : + LOAD_HEADER_LEN + LOG_EVENT_HEADER_LEN), + description_event); + /* otherwise it's a derived class, will call copy_log_event() itself */ DBUG_VOID_RETURN; } @@ -1550,14 +2764,14 @@ Load_log_event::Load_log_event(const char *buf, int event_len, */ int Load_log_event::copy_log_event(const char *buf, ulong event_len, - bool old_format) + int body_offset, + const Format_description_log_event *description_event) { + DBUG_ENTER("Load_log_event::copy_log_event"); uint data_len; char* buf_end = (char*)buf + event_len; - uint header_len= old_format ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - const char* data_head = buf + header_len; - DBUG_ENTER("Load_log_event::copy_log_event"); - + /* this is the beginning of the post-header */ + const char* data_head = buf + description_event->common_header_len; slave_proxy_id= thread_id= uint4korr(data_head + L_THREAD_ID_OFFSET); exec_time = uint4korr(data_head + L_EXEC_TIME_OFFSET); skip_lines = uint4korr(data_head + L_SKIP_LINES_OFFSET); @@ -1565,21 +2779,17 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, db_len = (uint)data_head[L_DB_LEN_OFFSET]; num_fields = uint4korr(data_head + L_NUM_FIELDS_OFFSET); - int body_offset = ((buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ? - LOAD_HEADER_LEN + header_len : - get_data_body_offset()); - if ((int) event_len < body_offset) DBUG_RETURN(1); /* Sql_ex.init() on success returns the pointer to the first byte after the sql_ex structure, which is the start of field lengths array. */ - if (!(field_lens=(uchar*)sql_ex.init((char*)buf + body_offset, - buf_end, - buf[EVENT_TYPE_OFFSET] != LOAD_EVENT))) + if (!(field_lens= (uchar*)sql_ex.init((char*)buf + body_offset, + buf_end, + buf[EVENT_TYPE_OFFSET] != LOAD_EVENT))) DBUG_RETURN(1); - + data_len = event_len - body_offset; if (num_fields > data_len) // simple sanity check against corruption DBUG_RETURN(1); @@ -1592,6 +2802,7 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, fname = db + db_len + 1; fname_len = strlen(fname); // null termination is accomplished by the caller doing buf[event_len]=0 + DBUG_RETURN(0); } @@ -1601,25 +2812,25 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, */ #ifdef MYSQL_CLIENT -void Load_log_event::print(FILE* file, bool short_form, char* last_db) +void Load_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) { - print(file, short_form, last_db, 0); + print(file, print_event_info, 0); } -void Load_log_event::print(FILE* file, bool short_form, char* last_db, +void 
Load_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool commented) { DBUG_ENTER("Load_log_event::print"); - if (!short_form) + if (!print_event_info->short_form) { - print_header(file); + print_header(file, print_event_info); fprintf(file, "\tQuery\tthread_id=%ld\texec_time=%ld\n", thread_id, exec_time); } bool different_db= 1; - if (db && last_db) + if (db) { /* If the database is different from the one of the previous statement, we @@ -1627,19 +2838,20 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db, But if commented, the "use" is going to be commented so we should not update the last_db. */ - if ((different_db= memcmp(last_db, db, db_len + 1)) && + if ((different_db= memcmp(print_event_info->db, db, db_len + 1)) && !commented) - memcpy(last_db, db, db_len + 1); + memcpy(print_event_info->db, db, db_len + 1); } if (db && db[0] && different_db) - fprintf(file, "%suse %s;\n", + fprintf(file, "%suse %s%s\n", commented ? "# " : "", - db); + db, print_event_info->delimiter); if (flags & LOG_EVENT_THREAD_SPECIFIC_F) - fprintf(file,"%sSET @@session.pseudo_thread_id=%lu;\n", - commented ? "# " : "", (ulong)thread_id); + fprintf(file,"%sSET @@session.pseudo_thread_id=%lu%s\n", + commented ? "# " : "", (ulong)thread_id, + print_event_info->delimiter); fprintf(file, "%sLOAD DATA ", commented ? 
"# " : ""); if (check_fname_outside_temp_buf()) @@ -1691,7 +2903,7 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db, fputc(')', file); } - fprintf(file, ";\n"); + fprintf(file, "%s\n", print_event_info->delimiter); DBUG_VOID_RETURN; } #endif /* MYSQL_CLIENT */ @@ -1708,13 +2920,15 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db, #ifndef MYSQL_CLIENT void Load_log_event::set_fields(const char* affected_db, - List<Item> &field_list) + List<Item> &field_list, + Name_resolution_context *context) { uint i; const char* field = fields; for (i= 0; i < num_fields; i++) { - field_list.push_back(new Item_field(affected_db, table_name, field)); + field_list.push_back(new Item_field(context, + affected_db, table_name, field)); field+= field_lens[i] + 1; } } @@ -1751,9 +2965,8 @@ void Load_log_event::set_fields(const char* affected_db, int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, bool use_rli_only_for_errors) { - char *load_data_query= 0; - thd->db_length= db_len; - thd->db= (char*) rewrite_db(db, &thd->db_length); + const char *new_db= rewrite_db(db); + thd->set_db(new_db, strlen(new_db)); DBUG_ASSERT(thd->query == 0); thd->query_length= 0; // Should not be needed thd->query_error= 0; @@ -1765,15 +2978,12 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, mysql_init_query(thd, 0, 0); if (!use_rli_only_for_errors) { -#if MYSQL_VERSION_ID < 50000 - rli->future_group_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#else + /* Saved for InnoDB, see comment in Query_log_event::exec_event() */ rli->future_group_master_log_pos= log_pos; -#endif + DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); } - - /* + + /* We test replicate_*_db rules. Note that we have already prepared the file to load, even if we are going to ignore and delete it now. So it is possible that we did a lot of disk writes for nothing. 
In other words, a @@ -1799,7 +3009,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, { thd->set_time((time_t)when); VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query_id = query_id++; + thd->query_id = next_query_id(); VOID(pthread_mutex_unlock(&LOCK_thread_count)); /* Initing thd->row_count is not necessary in theory as this variable has no @@ -1807,12 +3017,12 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, "data truncated" warning but which is absorbed and never gets to the error log); still we init it to avoid a Valgrind message. */ - mysql_reset_errors(thd); + mysql_reset_errors(thd, 0); TABLE_LIST tables; bzero((char*) &tables,sizeof(tables)); - tables.db = thd->db; - tables.alias = tables.real_name = (char*)table_name; + tables.db= thd->strmake(thd->db, thd->db_length); + tables.alias = tables.table_name = (char*) table_name; tables.lock_type = TL_WRITE; tables.updating= 1; @@ -1826,22 +3036,30 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, else { char llbuff[22]; + char *end; enum enum_duplicates handle_dup; bool ignore= 0; + char *load_data_query; + /* - Make a simplified LOAD DATA INFILE query, for the information of the - user in SHOW PROCESSLIST. Note that db is known in the 'db' column. + Forge LOAD DATA INFILE query which will be used in SHOW PROCESS LIST + and written to slave's binlog if binlogging is on. */ - if ((load_data_query= (char *) my_alloca(18 + strlen(fname) + 14 + - strlen(tables.real_name) + 8))) + if (!(load_data_query= (char *)thd->alloc(get_query_buffer_length() + 1))) { - thd->query_length= (uint)(strxmov(load_data_query, - "LOAD DATA INFILE '", fname, - "' INTO TABLE `", tables.real_name, - "` <...>", NullS) - load_data_query); - thd->query= load_data_query; + /* + This will set thd->fatal_error in case of OOM. So we surely will notice + that something is wrong. 
+ */ + goto error; } + print_query(FALSE, load_data_query, &end, (char **)&thd->lex->fname_start, + (char **)&thd->lex->fname_end); + *end= 0; + thd->query_length= end - load_data_query; + thd->query= load_data_query; + if (sql_ex.opt_flags & REPLACE_FLAG) { handle_dup= DUP_REPLACE; @@ -1897,7 +3115,8 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, ex.skip_lines = skip_lines; List<Item> field_list; - set_fields(thd->db,field_list); + thd->lex->select_lex.context.resolve_in_table_list_only(&tables); + set_fields(tables.db, field_list, &thd->lex->select_lex.context); thd->variables.pseudo_thread_id= thread_id; if (net) { @@ -1908,9 +3127,14 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, */ thd->net.pkt_nr = net->pkt_nr; } - if (mysql_load(thd, &ex, &tables, field_list, handle_dup, ignore, net != 0, - TL_WRITE)) - thd->query_error = 1; + /* + It is safe to use tmp_list twice because we are not going to + update it inside mysql_load(). + */ + List<Item> tmp_list; + if (mysql_load(thd, &ex, &tables, field_list, tmp_list, tmp_list, + handle_dup, ignore, net != 0)) + thd->query_error= 1; if (thd->cuted_fields) { /* log_pos is the position of the LOAD event in the master log */ @@ -1936,20 +3160,20 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, if (net) skip_load_data_infile(net); } - + +error: thd->net.vio = 0; - char *save_db= thd->db; + const char *remember_db= thd->db; VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->db= 0; + thd->catalog= 0; + thd->set_db(NULL, 0); /* will free the current database */ thd->query= 0; - thd->query_length= thd->db_length= 0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); close_thread_tables(thd); - if (load_data_query) - my_afree(load_data_query); if (thd->query_error) { - /* this err/sql_errno code is copy-paste from send_error() */ + /* this err/sql_errno code is copy-paste from net_send_error() */ const char *err; int 
sql_errno; if ((err=thd->net.last_error)[0]) @@ -1961,7 +3185,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, } slave_print_error(rli,sql_errno,"\ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", - err, (char*)table_name, print_slave_db_safe(save_db)); + err, (char*)table_name, print_slave_db_safe(remember_db)); free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); return 1; } @@ -1971,7 +3195,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", { slave_print_error(rli,ER_UNKNOWN_ERROR, "\ Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'", - (char*)table_name, print_slave_db_safe(save_db)); + (char*)table_name, print_slave_db_safe(remember_db)); return 1; } @@ -1995,7 +3219,7 @@ void Rotate_log_event::pack_info(Protocol *protocol) String tmp(buf1, sizeof(buf1), log_cs); tmp.length(0); tmp.append(new_log_ident, ident_len); - tmp.append(";pos="); + tmp.append(STRING_WITH_LEN(";pos=")); tmp.append(llstr(pos,buf)); protocol->store(tmp.ptr(), tmp.length(), &my_charset_bin); } @@ -2007,17 +3231,17 @@ void Rotate_log_event::pack_info(Protocol *protocol) */ #ifdef MYSQL_CLIENT -void Rotate_log_event::print(FILE* file, bool short_form, char* last_db) +void Rotate_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) { char buf[22]; - if (short_form) - return; - print_header(file); + if (print_event_info->short_form) + return; + print_header(file, print_event_info); fprintf(file, "\tRotate to "); if (new_log_ident) my_fwrite(file, (byte*) new_log_ident, (uint)ident_len, - MYF(MY_NABP | MY_WME)); + MYF(MY_NABP | MY_WME)); fprintf(file, " pos: %s", llstr(pos, buf)); fputc('\n', file); fflush(file); @@ -2043,45 +3267,35 @@ Rotate_log_event::Rotate_log_event(THD* thd_arg, #ifndef DBUG_OFF char buff[22]; DBUG_ENTER("Rotate_log_event::Rotate_log_event(THD*,...)"); - DBUG_PRINT("enter",("new_log_ident %s pos %s flags %lu", new_log_ident_arg, - llstr(pos_arg, buff), 
flags)); + DBUG_PRINT("enter",("new_log_ident: %s pos: %s flags: %lu", new_log_ident_arg, + llstr(pos_arg, buff), (ulong) flags)); #endif if (flags & DUP_NAME) - new_log_ident= my_strdup_with_length((byte*) new_log_ident_arg, - ident_len, - MYF(MY_WME)); + new_log_ident= my_strdup_with_length(new_log_ident_arg, + ident_len, MYF(MY_WME)); DBUG_VOID_RETURN; } #endif -Rotate_log_event::Rotate_log_event(const char* buf, int event_len, - bool old_format) - :Log_event(buf, old_format), new_log_ident(0), flags(DUP_NAME) +Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event) + :Log_event(buf, description_event) ,new_log_ident(0), flags(DUP_NAME) { + DBUG_ENTER("Rotate_log_event::Rotate_log_event(char*,...)"); // The caller will ensure that event_len is what we have at EVENT_LEN_OFFSET - int header_size = (old_format) ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; + uint8 header_size= description_event->common_header_len; + uint8 post_header_len= description_event->post_header_len[ROTATE_EVENT-1]; uint ident_offset; - DBUG_ENTER("Rotate_log_event::Rotate_log_event(char*,...)"); - if (event_len < header_size) DBUG_VOID_RETURN; - buf += header_size; - if (old_format) - { - ident_len = (uint)(event_len - OLD_HEADER_LEN); - pos = 4; - ident_offset = 0; - } - else - { - ident_len = (uint)(event_len - ROTATE_EVENT_OVERHEAD); - pos = uint8korr(buf + R_POS_OFFSET); - ident_offset = ROTATE_HEADER_LEN; - } + pos = post_header_len ? 
uint8korr(buf + R_POS_OFFSET) : 4; + ident_len = (uint)(event_len - + (header_size+post_header_len)); + ident_offset = post_header_len; set_if_smaller(ident_len,FN_REFLEN-1); - new_log_ident= my_strdup_with_length((byte*) buf + ident_offset, + new_log_ident= my_strdup_with_length(buf + ident_offset, (uint) ident_len, MYF(MY_WME)); DBUG_VOID_RETURN; @@ -2089,30 +3303,32 @@ Rotate_log_event::Rotate_log_event(const char* buf, int event_len, /* - Rotate_log_event::write_data() + Rotate_log_event::write() */ -int Rotate_log_event::write_data(IO_CACHE* file) +#ifndef MYSQL_CLIENT +bool Rotate_log_event::write(IO_CACHE* file) { char buf[ROTATE_HEADER_LEN]; - DBUG_ASSERT(!(flags & ZERO_LEN)); // such an event cannot be written int8store(buf + R_POS_OFFSET, pos); - return (my_b_safe_write(file, (byte*)buf, ROTATE_HEADER_LEN) || - my_b_safe_write(file, (byte*)new_log_ident, (uint) ident_len)); + return (write_header(file, ROTATE_HEADER_LEN + ident_len) || + my_b_safe_write(file, (byte*)buf, ROTATE_HEADER_LEN) || + my_b_safe_write(file, (byte*)new_log_ident, (uint) ident_len)); } - +#endif /* Rotate_log_event::exec_event() - Got a rotate log even from the master + Got a rotate log event from the master IMPLEMENTATION This is mainly used so that we can later figure out the logname and position for the master. - We can't rotate the slave as this will cause infinitive rotations + We can't rotate the slave's BINlog as this will cause infinitive rotations in a A -> B -> A setup. + The NOTES below is a wrong comment which will disappear when 4.1 is merged. 
RETURN VALUES 0 ok @@ -2124,7 +3340,7 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli) DBUG_ENTER("Rotate_log_event::exec_event"); pthread_mutex_lock(&rli->data_lock); - rli->event_relay_log_pos += get_event_len(); + rli->event_relay_log_pos= my_b_tell(rli->cur_log); /* If we are in a transaction: the only normal case is when the I/O thread was copying a big transaction, then it was stopped and restarted: we have this @@ -2136,15 +3352,31 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli) COMMIT or ROLLBACK In that case, we don't want to touch the coordinates which correspond to the beginning of the transaction. + Starting from 5.0.0, there also are some rotates from the slave itself, in + the relay log. */ if (!(thd->options & OPTION_BEGIN)) { memcpy(rli->group_master_log_name, new_log_ident, ident_len+1); rli->notify_group_master_log_name_update(); - rli->group_master_log_pos = pos; - rli->group_relay_log_pos = rli->event_relay_log_pos; - DBUG_PRINT("info", ("group_master_log_pos: %lu", + rli->group_master_log_pos= pos; + rli->group_relay_log_pos= rli->event_relay_log_pos; + DBUG_PRINT("info", ("group_master_log_name: '%s' group_master_log_pos:\ +%lu", + rli->group_master_log_name, (ulong) rli->group_master_log_pos)); + /* + Reset thd->options and sql_mode etc, because this could be the signal of + a master's downgrade from 5.0 to 4.0. + However, no need to reset description_event_for_exec: indeed, if the next + master is 5.0 (even 5.0.1) we will soon get a Format_desc; if the next + master is 4.0 then the events are in the slave's format (conversion). 
+ */ + set_slave_thread_options(thd); + set_slave_thread_default_charset(thd, rli); + thd->variables.sql_mode= global_system_variables.sql_mode; + thd->variables.auto_increment_increment= + thd->variables.auto_increment_offset= 1; } pthread_mutex_unlock(&rli->data_lock); pthread_cond_broadcast(&rli->data_cond); @@ -2178,12 +3410,13 @@ void Intvar_log_event::pack_info(Protocol *protocol) Intvar_log_event::Intvar_log_event() */ -Intvar_log_event::Intvar_log_event(const char* buf, bool old_format) - :Log_event(buf, old_format) +Intvar_log_event::Intvar_log_event(const char* buf, + const Format_description_log_event* description_event) + :Log_event(buf, description_event) { - buf += (old_format) ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - type = buf[I_TYPE_OFFSET]; - val = uint8korr(buf+I_VAL_OFFSET); + buf+= description_event->common_header_len; + type= buf[I_TYPE_OFFSET]; + val= uint8korr(buf+I_VAL_OFFSET); } @@ -2202,16 +3435,19 @@ const char* Intvar_log_event::get_var_type_name() /* - Intvar_log_event::write_data() + Intvar_log_event::write() */ -int Intvar_log_event::write_data(IO_CACHE* file) +#ifndef MYSQL_CLIENT +bool Intvar_log_event::write(IO_CACHE* file) { - char buf[9]; - buf[I_TYPE_OFFSET] = type; + byte buf[9]; + buf[I_TYPE_OFFSET]= (byte) type; int8store(buf + I_VAL_OFFSET, val); - return my_b_safe_write(file, (byte*) buf, sizeof(buf)); + return (write_header(file, sizeof(buf)) || + my_b_safe_write(file, buf, sizeof(buf))); } +#endif /* @@ -2219,15 +3455,15 @@ int Intvar_log_event::write_data(IO_CACHE* file) */ #ifdef MYSQL_CLIENT -void Intvar_log_event::print(FILE* file, bool short_form, char* last_db) +void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) { char llbuff[22]; const char *msg; LINT_INIT(msg); - if (!short_form) + if (!print_event_info->short_form) { - print_header(file); + print_header(file, print_event_info); fprintf(file, "\tIntvar\n"); } @@ -2239,8 +3475,13 @@ void Intvar_log_event::print(FILE* file, bool 
short_form, char* last_db) case INSERT_ID_EVENT: msg="INSERT_ID"; break; + case INVALID_INT_EVENT: + default: // cannot happen + msg="INVALID_INT"; + break; } - fprintf(file, "%s=%s;\n", msg, llstr(val,llbuff)); + fprintf(file, "%s=%s%s\n", + msg, llstr(val,llbuff), print_event_info->delimiter); fflush(file); } #endif @@ -2261,7 +3502,7 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli) thd->next_insert_id = val; break; } - rli->inc_event_relay_log_pos(get_event_len()); + rli->inc_event_relay_log_pos(); return 0; } #endif @@ -2284,35 +3525,40 @@ void Rand_log_event::pack_info(Protocol *protocol) #endif -Rand_log_event::Rand_log_event(const char* buf, bool old_format) - :Log_event(buf, old_format) +Rand_log_event::Rand_log_event(const char* buf, + const Format_description_log_event* description_event) + :Log_event(buf, description_event) { - buf += (old_format) ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - seed1 = uint8korr(buf+RAND_SEED1_OFFSET); - seed2 = uint8korr(buf+RAND_SEED2_OFFSET); + buf+= description_event->common_header_len; + seed1= uint8korr(buf+RAND_SEED1_OFFSET); + seed2= uint8korr(buf+RAND_SEED2_OFFSET); } -int Rand_log_event::write_data(IO_CACHE* file) +#ifndef MYSQL_CLIENT +bool Rand_log_event::write(IO_CACHE* file) { - char buf[16]; + byte buf[16]; int8store(buf + RAND_SEED1_OFFSET, seed1); int8store(buf + RAND_SEED2_OFFSET, seed2); - return my_b_safe_write(file, (byte*) buf, sizeof(buf)); + return (write_header(file, sizeof(buf)) || + my_b_safe_write(file, buf, sizeof(buf))); } +#endif #ifdef MYSQL_CLIENT -void Rand_log_event::print(FILE* file, bool short_form, char* last_db) +void Rand_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) { char llbuff[22],llbuff2[22]; - if (!short_form) + if (!print_event_info->short_form) { - print_header(file); + print_header(file, print_event_info); fprintf(file, "\tRand\n"); } - fprintf(file, "SET @@RAND_SEED1=%s, @@RAND_SEED2=%s;\n", - llstr(seed1, llbuff),llstr(seed2, llbuff2)); + 
fprintf(file, "SET @@RAND_SEED1=%s, @@RAND_SEED2=%s%s\n", + llstr(seed1, llbuff),llstr(seed2, llbuff2), + print_event_info->delimiter); fflush(file); } #endif /* MYSQL_CLIENT */ @@ -2323,13 +3569,83 @@ int Rand_log_event::exec_event(struct st_relay_log_info* rli) { thd->rand.seed1= (ulong) seed1; thd->rand.seed2= (ulong) seed2; - rli->inc_event_relay_log_pos(get_event_len()); + rli->inc_event_relay_log_pos(); return 0; } #endif /* !MYSQL_CLIENT */ /************************************************************************** + Xid_log_event methods +**************************************************************************/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void Xid_log_event::pack_info(Protocol *protocol) +{ + char buf[128], *pos; + pos= strmov(buf, "COMMIT /* xid="); + pos= longlong10_to_str(xid, pos, 10); + pos= strmov(pos, " */"); + protocol->store(buf, (uint) (pos-buf), &my_charset_bin); +} +#endif + +/* + NOTE it's ok not to use int8store here, + as long as xid_t::set(ulonglong) and + xid_t::get_my_xid doesn't do it either + + we don't care about actual values of xids as long as + identical numbers compare identically +*/ + +Xid_log_event:: +Xid_log_event(const char* buf, + const Format_description_log_event *description_event) + :Log_event(buf, description_event) +{ + buf+= description_event->common_header_len; + memcpy((char*) &xid, buf, sizeof(xid)); +} + + +#ifndef MYSQL_CLIENT +bool Xid_log_event::write(IO_CACHE* file) +{ + return write_header(file, sizeof(xid)) || + my_b_safe_write(file, (byte*) &xid, sizeof(xid)); +} +#endif + + +#ifdef MYSQL_CLIENT +void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) +{ + if (!print_event_info->short_form) + { + char buf[64]; + longlong10_to_str(xid, buf, 10); + + print_header(file, print_event_info); + fprintf(file, "\tXid = %s\n", buf); + fflush(file); + } + fprintf(file, "COMMIT%s\n", print_event_info->delimiter); +} +#endif /* MYSQL_CLIENT */ + + +#if 
defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +int Xid_log_event::exec_event(struct st_relay_log_info* rli) +{ + /* For a slave Xid_log_event is COMMIT */ + mysql_log.write(thd,COM_QUERY,"COMMIT /* implicit, from Xid_log_event */"); + return end_trans(thd, COMMIT) || Log_event::exec_event(rli); +} +#endif /* !MYSQL_CLIENT */ + + +/************************************************************************** User_var_log_event methods **************************************************************************/ @@ -2360,6 +3676,16 @@ void User_var_log_event::pack_info(Protocol* protocol) buf= my_malloc(val_offset + 22, MYF(MY_WME)); event_len= longlong10_to_str(uint8korr(val), buf + val_offset,-10)-buf; break; + case DECIMAL_RESULT: + { + buf= my_malloc(val_offset + DECIMAL_MAX_STR_LENGTH, MYF(MY_WME)); + String str(buf+val_offset, DECIMAL_MAX_STR_LENGTH, &my_charset_bin); + my_decimal dec; + binary2my_decimal(E_DEC_FATAL_ERROR, val+2, &dec, val[0], val[1]); + my_decimal2string(E_DEC_FATAL_ERROR, &dec, 0, 0, 0, &str); + event_len= str.length() + val_offset; + break; + } case STRING_RESULT: /* 15 is for 'COLLATE' and other chars */ buf= my_malloc(event_len+val_len*2+1+2*MY_CS_NAME_SIZE+15, MYF(MY_WME)); @@ -2394,10 +3720,12 @@ void User_var_log_event::pack_info(Protocol* protocol) #endif /* !MYSQL_CLIENT */ -User_var_log_event::User_var_log_event(const char* buf, bool old_format) - :Log_event(buf, old_format) +User_var_log_event:: +User_var_log_event(const char* buf, + const Format_description_log_event* description_event) + :Log_event(buf, description_event) { - buf+= (old_format) ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; + buf+= description_event->common_header_len; name_len= uint4korr(buf); name= (char *) buf + UV_NAME_LEN_SIZE; buf+= UV_NAME_LEN_SIZE + name_len; @@ -2421,13 +3749,15 @@ User_var_log_event::User_var_log_event(const char* buf, bool old_format) } -int User_var_log_event::write_data(IO_CACHE* file) +#ifndef MYSQL_CLIENT +bool User_var_log_event::write(IO_CACHE* file) { char buf[UV_NAME_LEN_SIZE]; char buf1[UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE]; - char buf2[8], *pos= buf2; + char buf2[max(8, DECIMAL_MAX_FIELD_SIZE + 2)], *pos= buf2; uint buf1_length; + ulong event_length; int4store(buf, name_len); @@ -2440,8 +3770,6 @@ int User_var_log_event::write_data(IO_CACHE* file) { buf1[1]= type; int4store(buf1 + 2, charset_number); - int4store(buf1 + 2 + UV_CHARSET_NUMBER_SIZE, val_len); - buf1_length= 10; switch (type) { case REAL_RESULT: @@ -2450,6 +3778,16 @@ int User_var_log_event::write_data(IO_CACHE* file) case INT_RESULT: int8store(buf2, *(longlong*) val); break; + case DECIMAL_RESULT: + { + my_decimal *dec= (my_decimal *)val; + dec->fix_buffer_pointer(); + buf2[0]= (char)(dec->intg + dec->frac); + buf2[1]= (char)dec->frac; + decimal2bin((decimal_t*)val, buf2+2, buf2[0], buf2[1]); + val_len= decimal_bin_size(buf2[0], buf2[1]) + 2; + break; + } case STRING_RESULT: pos= val; break; @@ -2458,12 +3796,20 @@ int User_var_log_event::write_data(IO_CACHE* file) DBUG_ASSERT(1); return 0; } + int4store(buf1 + 2 + UV_CHARSET_NUMBER_SIZE, val_len); + buf1_length= 10; } - return (my_b_safe_write(file, (byte*) buf, sizeof(buf)) || + + /* Length of the whole event */ + event_length= sizeof(buf)+ name_len + buf1_length + val_len; + + return (write_header(file, event_length) || + my_b_safe_write(file, (byte*) buf, sizeof(buf)) || my_b_safe_write(file, (byte*) name, name_len) || my_b_safe_write(file, (byte*) buf1, buf1_length) || my_b_safe_write(file, (byte*) pos, val_len)); } +#endif /* @@ -2471,11 +3817,11 @@ 
int User_var_log_event::write_data(IO_CACHE* file) */ #ifdef MYSQL_CLIENT -void User_var_log_event::print(FILE* file, bool short_form, char* last_db) +void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) { - if (!short_form) + if (!print_event_info->short_form) { - print_header(file); + print_header(file, print_event_info); fprintf(file, "\tUser_var\n"); } @@ -2485,7 +3831,7 @@ void User_var_log_event::print(FILE* file, bool short_form, char* last_db) if (is_null) { - fprintf(file, ":=NULL;\n"); + fprintf(file, ":=NULL%s\n", print_event_info->delimiter); } else { @@ -2493,13 +3839,30 @@ void User_var_log_event::print(FILE* file, bool short_form, char* last_db) case REAL_RESULT: double real_val; float8get(real_val, val); - fprintf(file, ":=%.14g;\n", real_val); + fprintf(file, ":=%.14g%s\n", real_val, print_event_info->delimiter); break; case INT_RESULT: char int_buf[22]; longlong10_to_str(uint8korr(val), int_buf, -10); - fprintf(file, ":=%s;\n", int_buf); + fprintf(file, ":=%s%s\n", int_buf, print_event_info->delimiter); + break; + case DECIMAL_RESULT: + { + char str_buf[200]; + int str_len= sizeof(str_buf) - 1; + int precision= (int)val[0]; + int scale= (int)val[1]; + decimal_digit_t dec_buf[10]; + decimal_t dec; + dec.len= 10; + dec.buf= dec_buf; + + bin2decimal(val+2, &dec, precision, scale); + decimal2string(&dec, str_buf, &str_len, 0, 0, 0); + str_buf[str_len]= 0; + fprintf(file, ":=%s%s\n",str_buf, print_event_info->delimiter); break; + } case STRING_RESULT: { /* @@ -2533,9 +3896,10 @@ void User_var_log_event::print(FILE* file, bool short_form, char* last_db) Generate an unusable command (=> syntax error) is probably the best thing we can do here. 
*/ - fprintf(file, ":=???;\n"); + fprintf(file, ":=???%s\n", print_event_info->delimiter); else - fprintf(file, ":=_%s %s COLLATE `%s`;\n", cs->csname, hex_str, cs->name); + fprintf(file, ":=_%s %s COLLATE `%s`%s\n", + cs->csname, hex_str, cs->name, print_event_info->delimiter); my_afree(hex_str); } break; @@ -2576,7 +3940,7 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli) switch (type) { case REAL_RESULT: float8get(real_val, val); - it= new Item_real(real_val); + it= new Item_float(real_val); val= (char*) &real_val; // Pointer to value in native format val_len= 8; break; @@ -2586,6 +3950,14 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli) val= (char*) &int_val; // Pointer to value in native format val_len= 8; break; + case DECIMAL_RESULT: + { + Item_decimal *dec= new Item_decimal(val+2, val[0], val[1]); + it= dec; + val= (char *)dec->val_decimal(NULL); + val_len= sizeof(my_decimal); + break; + } case STRING_RESULT: it= new Item_string(val, val_len, charset); break; @@ -2600,16 +3972,16 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli) Item_func_set_user_var can't substitute something else on its place => 0 can be passed as last argument (reference on item) */ - e.fix_fields(thd, 0, 0); + e.fix_fields(thd, 0); /* A variable can just be considered as a table with a single record and with a single column. Thus, like a column value, it could always have IMPLICIT derivation. 
*/ - e.update_hash(val, val_len, type, charset, DERIVATION_IMPLICIT); + e.update_hash(val, val_len, type, charset, DERIVATION_IMPLICIT, 0); free_root(thd->mem_root,0); - rli->inc_event_relay_log_pos(get_event_len()); + rli->inc_event_relay_log_pos(); return 0; } #endif /* !MYSQL_CLIENT */ @@ -2621,11 +3993,11 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli) #ifdef HAVE_REPLICATION #ifdef MYSQL_CLIENT -void Unknown_log_event::print(FILE* file, bool short_form, char* last_db) +void Unknown_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) { - if (short_form) + if (print_event_info->short_form) return; - print_header(file); + print_header(file, print_event_info); fputc('\n', file); fprintf(file, "# %s", "Unknown event\n"); } @@ -2651,12 +4023,12 @@ void Slave_log_event::pack_info(Protocol *protocol) #ifndef MYSQL_CLIENT Slave_log_event::Slave_log_event(THD* thd_arg, struct st_relay_log_info* rli) - :Log_event(thd_arg, 0, 0), mem_pool(0), master_host(0) + :Log_event(thd_arg, 0, 0) , mem_pool(0), master_host(0) { DBUG_ENTER("Slave_log_event"); if (!rli->inited) // QQ When can this happen ? 
DBUG_VOID_RETURN; - + MASTER_INFO* mi = rli->mi; // TODO: re-write this better without holding both locks at the same time pthread_mutex_lock(&mi->data_lock); @@ -2673,7 +4045,7 @@ Slave_log_event::Slave_log_event(THD* thd_arg, memcpy(master_log, rli->group_master_log_name, master_log_len + 1); master_port = mi->port; master_pos = rli->group_master_log_pos; - DBUG_PRINT("info", ("master_log: %s pos: %d", master_log, + DBUG_PRINT("info", ("master_log: %s pos: %lu", master_log, (ulong) master_pos)); } else @@ -2692,12 +4064,12 @@ Slave_log_event::~Slave_log_event() #ifdef MYSQL_CLIENT -void Slave_log_event::print(FILE* file, bool short_form, char* last_db) +void Slave_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) { char llbuff[22]; - if (short_form) + if (print_event_info->short_form) return; - print_header(file); + print_header(file, print_event_info); fputc('\n', file); fprintf(file, "\ Slave: master_host: '%s' master_port: %d master_log: '%s' master_pos: %s\n", @@ -2712,13 +4084,18 @@ int Slave_log_event::get_data_size() } -int Slave_log_event::write_data(IO_CACHE* file) +#ifndef MYSQL_CLIENT +bool Slave_log_event::write(IO_CACHE* file) { + ulong event_length= get_data_size(); int8store(mem_pool + SL_MASTER_POS_OFFSET, master_pos); int2store(mem_pool + SL_MASTER_PORT_OFFSET, master_port); // log and host are already there - return my_b_safe_write(file, (byte*)mem_pool, get_data_size()); + + return (write_header(file, event_length) || + my_b_safe_write(file, (byte*) mem_pool, event_length)); } +#endif void Slave_log_event::init_from_mem_pool(int data_size) @@ -2738,12 +4115,13 @@ void Slave_log_event::init_from_mem_pool(int data_size) } -Slave_log_event::Slave_log_event(const char* buf, int event_len) - :Log_event(buf,0),mem_pool(0),master_host(0) +/* This code is not used, so has not been updated to be format-tolerant */ +Slave_log_event::Slave_log_event(const char* buf, uint event_len) + :Log_event(buf,0) /*unused event*/ 
,mem_pool(0),master_host(0) { - event_len -= LOG_EVENT_HEADER_LEN; - if (event_len < 0) + if (event_len < LOG_EVENT_HEADER_LEN) return; + event_len -= LOG_EVENT_HEADER_LEN; if (!(mem_pool = (char*) my_malloc(event_len + 1, MYF(MY_WME)))) return; memcpy(mem_pool, buf + LOG_EVENT_HEADER_LEN, event_len); @@ -2771,12 +4149,12 @@ int Slave_log_event::exec_event(struct st_relay_log_info* rli) */ #ifdef MYSQL_CLIENT -void Stop_log_event::print(FILE* file, bool short_form, char* last_db) +void Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) { - if (short_form) + if (print_event_info->short_form) return; - print_header(file); + print_header(file, print_event_info); fprintf(file, "\tStop\n"); fflush(file); } @@ -2786,14 +4164,14 @@ void Stop_log_event::print(FILE* file, bool short_form, char* last_db) /* Stop_log_event::exec_event() - The master stopped. + The master stopped. We used to clean up all temporary tables but this is useless as, as the - master has shut down properly, it has written all DROP TEMPORARY TABLE and DO - RELEASE_LOCK (prepared statements' deletion is TODO). + master has shut down properly, it has written all DROP TEMPORARY TABLE + (prepared statements' deletion is TODO only when we binlog prep stmts). We used to clean up slave_load_tmpdir, but this is useless as it has been cleared at the end of LOAD DATA INFILE. So we have nothing to do here. - The place were we must do this cleaning is in Start_log_event::exec_event(), + The place were we must do this cleaning is in Start_log_event_v3::exec_event(), not here. Because if we come here, the master was sane. */ @@ -2807,8 +4185,13 @@ int Stop_log_event::exec_event(struct st_relay_log_info* rli) could give false triggers in MASTER_POS_WAIT() that we have reached the target position when in fact we have not. 
*/ - rli->inc_group_relay_log_pos(get_event_len(), 0); - flush_relay_log_info(rli); + if (thd->options & OPTION_BEGIN) + rli->inc_event_relay_log_pos(); + else + { + rli->inc_group_relay_log_pos(0); + flush_relay_log_info(rli); + } return 0; } #endif /* !MYSQL_CLIENT */ @@ -2839,20 +4222,19 @@ Create_file_log_event(THD* thd_arg, sql_exchange* ex, sql_ex.force_new_format(); DBUG_VOID_RETURN; } -#endif /* !MYSQL_CLIENT */ /* Create_file_log_event::write_data_body() */ -int Create_file_log_event::write_data_body(IO_CACHE* file) +bool Create_file_log_event::write_data_body(IO_CACHE* file) { - int res; - if ((res = Load_log_event::write_data_body(file)) || fake_base) + bool res; + if ((res= Load_log_event::write_data_body(file)) || fake_base) return res; return (my_b_safe_write(file, (byte*) "", 1) || - my_b_safe_write(file, (byte*) block, block_len)); + my_b_safe_write(file, (byte*) block, block_len)); } @@ -2860,14 +4242,14 @@ int Create_file_log_event::write_data_body(IO_CACHE* file) Create_file_log_event::write_data_header() */ -int Create_file_log_event::write_data_header(IO_CACHE* file) +bool Create_file_log_event::write_data_header(IO_CACHE* file) { - int res; - if ((res = Load_log_event::write_data_header(file)) || fake_base) - return res; + bool res; byte buf[CREATE_FILE_HEADER_LEN]; + if ((res= Load_log_event::write_data_header(file)) || fake_base) + return res; int4store(buf + CF_FILE_ID_OFFSET, file_id); - return my_b_safe_write(file, buf, CREATE_FILE_HEADER_LEN); + return my_b_safe_write(file, buf, CREATE_FILE_HEADER_LEN) != 0; } @@ -2875,42 +4257,58 @@ int Create_file_log_event::write_data_header(IO_CACHE* file) Create_file_log_event::write_base() */ -int Create_file_log_event::write_base(IO_CACHE* file) +bool Create_file_log_event::write_base(IO_CACHE* file) { - int res; - fake_base = 1; // pretend we are Load event - res = write(file); - fake_base = 0; + bool res; + fake_base= 1; // pretend we are Load event + res= write(file); + fake_base= 0; return 
res; } +#endif /* !MYSQL_CLIENT */ /* Create_file_log_event ctor */ -Create_file_log_event::Create_file_log_event(const char* buf, int len, - bool old_format) - :Load_log_event(buf,0,old_format),fake_base(0),block(0),inited_from_old(0) +Create_file_log_event::Create_file_log_event(const char* buf, uint len, + const Format_description_log_event* description_event) + :Load_log_event(buf,0,description_event),fake_base(0),block(0),inited_from_old(0) { - int block_offset; - DBUG_ENTER("Create_file_log_event"); - - /* - We must make copy of 'buf' as this event may have to live over a - rotate log entry when used in mysqlbinlog - */ + DBUG_ENTER("Create_file_log_event::Create_file_log_event(char*,...)"); + uint block_offset; + uint header_len= description_event->common_header_len; + uint8 load_header_len= description_event->post_header_len[LOAD_EVENT-1]; + uint8 create_file_header_len= description_event->post_header_len[CREATE_FILE_EVENT-1]; if (!(event_buf= my_memdup((byte*) buf, len, MYF(MY_WME))) || - (copy_log_event(event_buf, len, old_format))) + copy_log_event(event_buf,len, + ((buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ? + load_header_len + header_len : + (fake_base ? (header_len+load_header_len) : + (header_len+load_header_len) + + create_file_header_len)), + description_event)) DBUG_VOID_RETURN; - - if (!old_format) + if (description_event->binlog_version!=1) { - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + - + LOAD_HEADER_LEN + CF_FILE_ID_OFFSET); - // + 1 for \0 terminating fname - block_offset = (LOG_EVENT_HEADER_LEN + Load_log_event::get_data_size() + - CREATE_FILE_HEADER_LEN + 1); + file_id= uint4korr(buf + + header_len + + load_header_len + CF_FILE_ID_OFFSET); + /* + Note that it's ok to use get_data_size() below, because it is computed + with values we have already read from this event (because we called + copy_log_event()); we are not using slave's format info to decode + master's format, we are really using master's format info. 
+ Anyway, both formats should be identical (except the common_header_len) + as these Load events are not changed between 4.0 and 5.0 (as logging of + LOAD DATA INFILE does not use Load_log_event in 5.0). + + The + 1 is for \0 terminating fname + */ + block_offset= (description_event->common_header_len + + Load_log_event::get_data_size() + + create_file_header_len + 1); if (len < block_offset) return; block = (char*)buf + block_offset; @@ -2930,19 +4328,20 @@ Create_file_log_event::Create_file_log_event(const char* buf, int len, */ #ifdef MYSQL_CLIENT -void Create_file_log_event::print(FILE* file, bool short_form, - char* last_db, bool enable_local) +void Create_file_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info, + bool enable_local) { - if (short_form) + if (print_event_info->short_form) { if (enable_local && check_fname_outside_temp_buf()) - Load_log_event::print(file, 1, last_db); + Load_log_event::print(file, print_event_info); return; } if (enable_local) { - Load_log_event::print(file, short_form, last_db, !check_fname_outside_temp_buf()); + Load_log_event::print(file, print_event_info, + !check_fname_outside_temp_buf()); /* That one is for "file_id: etc" below: in mysqlbinlog we want the #, in SHOW BINLOG EVENTS we don't. 
@@ -2954,10 +4353,9 @@ void Create_file_log_event::print(FILE* file, bool short_form, } -void Create_file_log_event::print(FILE* file, bool short_form, - char* last_db) +void Create_file_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) { - print(file,short_form,last_db,0); + print(file, print_event_info, 0); } #endif /* MYSQL_CLIENT */ @@ -2990,16 +4388,15 @@ void Create_file_log_event::pack_info(Protocol *protocol) #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) int Create_file_log_event::exec_event(struct st_relay_log_info* rli) { - char proc_info[17+FN_REFLEN+10], *fname_buf= proc_info+17; - char *p; + char proc_info[17+FN_REFLEN+10], *fname_buf; + char *ext; int fd = -1; IO_CACHE file; int error = 1; bzero((char*)&file, sizeof(file)); - p = slave_load_file_stem(fname_buf, file_id, server_id); - strmov(p, ".info"); // strmov takes less code than memcpy - strnmov(proc_info, "Making temp file ", 17); // no end 0 + fname_buf= strmov(proc_info, "Making temp file "); + ext= slave_load_file_stem(fname_buf, file_id, server_id, ".info"); thd->proc_info= proc_info; my_delete(fname_buf, MYF(0)); // old copy may exist already if ((fd= my_create(fname_buf, CREATE_MODE, @@ -3008,19 +4405,21 @@ int Create_file_log_event::exec_event(struct st_relay_log_info* rli) init_io_cache(&file, fd, IO_SIZE, WRITE_CACHE, (my_off_t)0, 0, MYF(MY_WME|MY_NABP))) { - slave_print_error(rli,my_errno, "Error in Create_file event: could not open file '%s'", fname_buf); + slave_print_error(rli,my_errno, + "Error in Create_file event: could not open file '%s'", + fname_buf); goto err; } // a trick to avoid allocating another buffer - strmov(p, ".data"); - fname = fname_buf; - fname_len = (uint)(p-fname) + 5; + fname= fname_buf; + fname_len= (uint) (strmov(ext, ".data") - fname); if (write_base(&file)) { - strmov(p, ".info"); // to have it right in the error message + strmov(ext, ".info"); // to have it right in the error message slave_print_error(rli,my_errno, - "Error in 
Create_file event: could not write to file '%s'", + "Error in Create_file event: could not write to file " + "'%s'", fname_buf); goto err; } @@ -3033,12 +4432,16 @@ int Create_file_log_event::exec_event(struct st_relay_log_info* rli) O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW, MYF(MY_WME))) < 0) { - slave_print_error(rli,my_errno, "Error in Create_file event: could not open file '%s'", fname_buf); + slave_print_error(rli,my_errno, + "Error in Create_file event: could not open file '%s'", + fname_buf); goto err; } if (my_write(fd, (byte*) block, block_len, MYF(MY_WME+MY_NABP))) { - slave_print_error(rli,my_errno, "Error in Create_file event: write to '%s' failed", fname_buf); + slave_print_error(rli,my_errno, + "Error in Create_file event: write to '%s' failed", + fname_buf); goto err; } error=0; // Everything is ok @@ -3078,30 +4481,38 @@ Append_block_log_event::Append_block_log_event(THD* thd_arg, const char* db_arg, Append_block_log_event ctor */ -Append_block_log_event::Append_block_log_event(const char* buf, int len) - :Log_event(buf, 0),block(0) +Append_block_log_event::Append_block_log_event(const char* buf, uint len, + const Format_description_log_event* description_event) + :Log_event(buf, description_event),block(0) { - DBUG_ENTER("Append_block_log_event"); - if ((uint)len < APPEND_BLOCK_EVENT_OVERHEAD) + DBUG_ENTER("Append_block_log_event::Append_block_log_event(char*,...)"); + uint8 common_header_len= description_event->common_header_len; + uint8 append_block_header_len= + description_event->post_header_len[APPEND_BLOCK_EVENT-1]; + uint total_header_len= common_header_len+append_block_header_len; + if (len < total_header_len) DBUG_VOID_RETURN; - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + AB_FILE_ID_OFFSET); - block = (char*)buf + APPEND_BLOCK_EVENT_OVERHEAD; - block_len = len - APPEND_BLOCK_EVENT_OVERHEAD; + file_id= uint4korr(buf + common_header_len + AB_FILE_ID_OFFSET); + block= (char*)buf + total_header_len; + block_len= len - total_header_len; 
DBUG_VOID_RETURN; } /* - Append_block_log_event::write_data() + Append_block_log_event::write() */ -int Append_block_log_event::write_data(IO_CACHE* file) +#ifndef MYSQL_CLIENT +bool Append_block_log_event::write(IO_CACHE* file) { byte buf[APPEND_BLOCK_HEADER_LEN]; int4store(buf + AB_FILE_ID_OFFSET, file_id); - return (my_b_safe_write(file, buf, APPEND_BLOCK_HEADER_LEN) || + return (write_header(file, APPEND_BLOCK_HEADER_LEN + block_len) || + my_b_safe_write(file, buf, APPEND_BLOCK_HEADER_LEN) || my_b_safe_write(file, (byte*) block, block_len)); } +#endif /* @@ -3109,15 +4520,15 @@ int Append_block_log_event::write_data(IO_CACHE* file) */ #ifdef MYSQL_CLIENT -void Append_block_log_event::print(FILE* file, bool short_form, - char* last_db) +void Append_block_log_event::print(FILE* file, + PRINT_EVENT_INFO* print_event_info) { - if (short_form) + if (print_event_info->short_form) return; - print_header(file); + print_header(file, print_event_info); fputc('\n', file); - fprintf(file, "#Append_block: file_id: %d block_len: %d\n", - file_id, block_len); + fprintf(file, "#%s: file_id: %d block_len: %d\n", + get_type_str(), file_id, block_len); } #endif /* MYSQL_CLIENT */ @@ -3136,33 +4547,57 @@ void Append_block_log_event::pack_info(Protocol *protocol) block_len)); protocol->store(buf, length, &my_charset_bin); } -#endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ /* + Append_block_log_event::get_create_or_append() +*/ + +int Append_block_log_event::get_create_or_append() const +{ + return 0; /* append to the file, fail if not exists */ +} + +/* Append_block_log_event::exec_event() */ -#if defined( HAVE_REPLICATION) && !defined(MYSQL_CLIENT) int Append_block_log_event::exec_event(struct st_relay_log_info* rli) { char proc_info[17+FN_REFLEN+10], *fname= proc_info+17; - char *p= slave_load_file_stem(fname, file_id, server_id); int fd; int error = 1; DBUG_ENTER("Append_block_log_event::exec_event"); - memcpy(p, ".data", 6); - strnmov(proc_info, "Making temp 
file ", 17); // no end 0 + fname= strmov(proc_info, "Making temp file "); + slave_load_file_stem(fname, file_id, server_id, ".data"); thd->proc_info= proc_info; - if ((fd = my_open(fname, O_WRONLY|O_APPEND|O_BINARY|O_NOFOLLOW, MYF(MY_WME))) < 0) + if (get_create_or_append()) + { + my_delete(fname, MYF(0)); // old copy may exist already + if ((fd= my_create(fname, CREATE_MODE, + O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW, + MYF(MY_WME))) < 0) + { + slave_print_error(rli, my_errno, + "Error in %s event: could not create file '%s'", + get_type_str(), fname); + goto err; + } + } + else if ((fd = my_open(fname, O_WRONLY | O_APPEND | O_BINARY | O_NOFOLLOW, + MYF(MY_WME))) < 0) { - slave_print_error(rli,my_errno, "Error in Append_block event: could not open file '%s'", fname); + slave_print_error(rli, my_errno, + "Error in %s event: could not open file '%s'", + get_type_str(), fname); goto err; } if (my_write(fd, (byte*) block, block_len, MYF(MY_WME+MY_NABP))) { - slave_print_error(rli,my_errno, "Error in Append_block event: write to '%s' failed", fname); + slave_print_error(rli, my_errno, + "Error in %s event: write to '%s' failed", + get_type_str(), fname); goto err; } error=0; @@ -3196,25 +4631,31 @@ Delete_file_log_event::Delete_file_log_event(THD *thd_arg, const char* db_arg, Delete_file_log_event ctor */ -Delete_file_log_event::Delete_file_log_event(const char* buf, int len) - :Log_event(buf, 0),file_id(0) +Delete_file_log_event::Delete_file_log_event(const char* buf, uint len, + const Format_description_log_event* description_event) + :Log_event(buf, description_event),file_id(0) { - if ((uint)len < DELETE_FILE_EVENT_OVERHEAD) + uint8 common_header_len= description_event->common_header_len; + uint8 delete_file_header_len= description_event->post_header_len[DELETE_FILE_EVENT-1]; + if (len < (uint)(common_header_len + delete_file_header_len)) return; - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + AB_FILE_ID_OFFSET); + file_id= uint4korr(buf + common_header_len + 
DF_FILE_ID_OFFSET); } /* - Delete_file_log_event::write_data() + Delete_file_log_event::write() */ -int Delete_file_log_event::write_data(IO_CACHE* file) +#ifndef MYSQL_CLIENT +bool Delete_file_log_event::write(IO_CACHE* file) { byte buf[DELETE_FILE_HEADER_LEN]; int4store(buf + DF_FILE_ID_OFFSET, file_id); - return my_b_safe_write(file, buf, DELETE_FILE_HEADER_LEN); + return (write_header(file, sizeof(buf)) || + my_b_safe_write(file, buf, sizeof(buf))); } +#endif /* @@ -3222,12 +4663,12 @@ int Delete_file_log_event::write_data(IO_CACHE* file) */ #ifdef MYSQL_CLIENT -void Delete_file_log_event::print(FILE* file, bool short_form, - char* last_db) +void Delete_file_log_event::print(FILE* file, + PRINT_EVENT_INFO* print_event_info) { - if (short_form) + if (print_event_info->short_form) return; - print_header(file); + print_header(file, print_event_info); fputc('\n', file); fprintf(file, "#Delete_file: file_id=%u\n", file_id); } @@ -3255,10 +4696,9 @@ void Delete_file_log_event::pack_info(Protocol *protocol) int Delete_file_log_event::exec_event(struct st_relay_log_info* rli) { char fname[FN_REFLEN+10]; - char *p= slave_load_file_stem(fname, file_id, server_id); - memcpy(p, ".data", 6); + char *ext= slave_load_file_stem(fname, file_id, server_id, ".data"); (void) my_delete(fname, MYF(MY_WME)); - memcpy(p, ".info", 6); + strmov(ext, ".info"); (void) my_delete(fname, MYF(MY_WME)); return Log_event::exec_event(rli); } @@ -3286,25 +4726,31 @@ Execute_load_log_event::Execute_load_log_event(THD *thd_arg, const char* db_arg, Execute_load_log_event ctor */ -Execute_load_log_event::Execute_load_log_event(const char* buf, int len) - :Log_event(buf, 0), file_id(0) +Execute_load_log_event::Execute_load_log_event(const char* buf, uint len, + const Format_description_log_event* description_event) + :Log_event(buf, description_event), file_id(0) { - if ((uint)len < EXEC_LOAD_EVENT_OVERHEAD) + uint8 common_header_len= description_event->common_header_len; + uint8 exec_load_header_len= 
description_event->post_header_len[EXEC_LOAD_EVENT-1]; + if (len < (uint)(common_header_len+exec_load_header_len)) return; - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + EL_FILE_ID_OFFSET); + file_id= uint4korr(buf + common_header_len + EL_FILE_ID_OFFSET); } /* - Execute_load_log_event::write_data() + Execute_load_log_event::write() */ -int Execute_load_log_event::write_data(IO_CACHE* file) +#ifndef MYSQL_CLIENT +bool Execute_load_log_event::write(IO_CACHE* file) { byte buf[EXEC_LOAD_HEADER_LEN]; int4store(buf + EL_FILE_ID_OFFSET, file_id); - return my_b_safe_write(file, buf, EXEC_LOAD_HEADER_LEN); + return (write_header(file, sizeof(buf)) || + my_b_safe_write(file, buf, sizeof(buf))); } +#endif /* @@ -3312,12 +4758,12 @@ int Execute_load_log_event::write_data(IO_CACHE* file) */ #ifdef MYSQL_CLIENT -void Execute_load_log_event::print(FILE* file, bool short_form, - char* last_db) +void Execute_load_log_event::print(FILE* file, + PRINT_EVENT_INFO* print_event_info) { - if (short_form) + if (print_event_info->short_form) return; - print_header(file); + print_header(file, print_event_info); fputc('\n', file); fprintf(file, "#Exec_load: file_id=%d\n", file_id); @@ -3345,26 +4791,31 @@ void Execute_load_log_event::pack_info(Protocol *protocol) int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) { char fname[FN_REFLEN+10]; - char *p= slave_load_file_stem(fname, file_id, server_id); + char *ext; int fd; - int error = 1; + int error= 1; IO_CACHE file; - Load_log_event* lev = 0; + Load_log_event *lev= 0; - memcpy(p, ".info", 6); - if ((fd = my_open(fname, O_RDONLY|O_BINARY|O_NOFOLLOW, MYF(MY_WME))) < 0 || + ext= slave_load_file_stem(fname, file_id, server_id, ".info"); + if ((fd = my_open(fname, O_RDONLY | O_BINARY | O_NOFOLLOW, + MYF(MY_WME))) < 0 || init_io_cache(&file, fd, IO_SIZE, READ_CACHE, (my_off_t)0, 0, MYF(MY_WME|MY_NABP))) { - slave_print_error(rli,my_errno, "Error in Exec_load event: could not open file '%s'", fname); + 
slave_print_error(rli,my_errno, + "Error in Exec_load event: could not open file '%s'", + fname); goto err; } if (!(lev = (Load_log_event*)Log_event::read_log_event(&file, - (pthread_mutex_t*)0, - (bool)0)) || + (pthread_mutex_t*)0, + rli->relay_log.description_event_for_exec)) || lev->get_type_code() != NEW_LOAD_EVENT) { - slave_print_error(rli,0, "Error in Exec_load event: file '%s' appears corrupted", fname); + slave_print_error(rli,0, + "Error in Exec_load event: file '%s' appears corrupted", + fname); goto err; } @@ -3376,15 +4827,7 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) mysql_load()). */ -#if MYSQL_VERSION_ID < 40100 - rli->future_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#elif MYSQL_VERSION_ID < 50000 - rli->future_group_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#else - rli->future_group_master_log_pos= log_pos; -#endif + rli->future_group_master_log_pos= log_pos; if (lev->exec_event(0,rli,1)) { /* @@ -3417,7 +4860,7 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) fd= -1; } (void) my_delete(fname, MYF(MY_WME)); - memcpy(p, ".data", 6); + memcpy(ext, ".data", 6); (void) my_delete(fname, MYF(MY_WME)); error = 0; @@ -3435,6 +4878,222 @@ err: /************************************************************************** + Begin_load_query_log_event methods +**************************************************************************/ + +#ifndef MYSQL_CLIENT +Begin_load_query_log_event:: +Begin_load_query_log_event(THD* thd_arg, const char* db_arg, char* block_arg, + uint block_len_arg, bool using_trans) + :Append_block_log_event(thd_arg, db_arg, block_arg, block_len_arg, + using_trans) +{ + file_id= thd_arg->file_id= mysql_bin_log.next_file_id(); +} +#endif + + +Begin_load_query_log_event:: +Begin_load_query_log_event(const char* buf, uint len, + const 
Format_description_log_event* desc_event) + :Append_block_log_event(buf, len, desc_event) +{ +} + + +#if defined( HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +int Begin_load_query_log_event::get_create_or_append() const +{ + return 1; /* create the file */ +} +#endif /* defined( HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ + + +/************************************************************************** + Execute_load_query_log_event methods +**************************************************************************/ + + +#ifndef MYSQL_CLIENT +Execute_load_query_log_event:: +Execute_load_query_log_event(THD* thd_arg, const char* query_arg, + ulong query_length_arg, uint fn_pos_start_arg, + uint fn_pos_end_arg, + enum_load_dup_handling dup_handling_arg, + bool using_trans, bool suppress_use): + Query_log_event(thd_arg, query_arg, query_length_arg, using_trans, + suppress_use), + file_id(thd_arg->file_id), fn_pos_start(fn_pos_start_arg), + fn_pos_end(fn_pos_end_arg), dup_handling(dup_handling_arg) +{ +} +#endif /* !MYSQL_CLIENT */ + + +Execute_load_query_log_event:: +Execute_load_query_log_event(const char* buf, uint event_len, + const Format_description_log_event* desc_event): + Query_log_event(buf, event_len, desc_event, EXECUTE_LOAD_QUERY_EVENT), + file_id(0), fn_pos_start(0), fn_pos_end(0) +{ + if (!Query_log_event::is_valid()) + return; + + buf+= desc_event->common_header_len; + + fn_pos_start= uint4korr(buf + ELQ_FN_POS_START_OFFSET); + fn_pos_end= uint4korr(buf + ELQ_FN_POS_END_OFFSET); + dup_handling= (enum_load_dup_handling)(*(buf + ELQ_DUP_HANDLING_OFFSET)); + + if (fn_pos_start > q_len || fn_pos_end > q_len || + dup_handling > LOAD_DUP_REPLACE) + return; + + file_id= uint4korr(buf + ELQ_FILE_ID_OFFSET); +} + + +ulong Execute_load_query_log_event::get_post_header_size_for_derived() +{ + return EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN; +} + + +#ifndef MYSQL_CLIENT +bool +Execute_load_query_log_event::write_post_header_for_derived(IO_CACHE* file) +{ + char 
buf[EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN]; + int4store(buf, file_id); + int4store(buf + 4, fn_pos_start); + int4store(buf + 4 + 4, fn_pos_end); + *(buf + 4 + 4 + 4)= (char)dup_handling; + return my_b_safe_write(file, (byte*) buf, EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN); +} +#endif + + +#ifdef MYSQL_CLIENT +void Execute_load_query_log_event::print(FILE* file, + PRINT_EVENT_INFO* print_event_info) +{ + print(file, print_event_info, 0); +} + + +void Execute_load_query_log_event::print(FILE* file, + PRINT_EVENT_INFO* print_event_info, + const char *local_fname) +{ + print_query_header(file, print_event_info); + + if (local_fname) + { + my_fwrite(file, (byte*) query, fn_pos_start, MYF(MY_NABP | MY_WME)); + fprintf(file, " LOCAL INFILE \'"); + fprintf(file, local_fname); + fprintf(file, "\'"); + if (dup_handling == LOAD_DUP_REPLACE) + fprintf(file, " REPLACE"); + fprintf(file, " INTO"); + my_fwrite(file, (byte*) query + fn_pos_end, q_len-fn_pos_end, + MYF(MY_NABP | MY_WME)); + fprintf(file, "%s\n", print_event_info->delimiter); + } + else + { + my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME)); + fprintf(file, "%s\n", print_event_info->delimiter); + } + + if (!print_event_info->short_form) + fprintf(file, "# file_id: %d \n", file_id); +} +#endif + + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +void Execute_load_query_log_event::pack_info(Protocol *protocol) +{ + char *buf, *pos; + if (!(buf= my_malloc(9 + db_len + q_len + 10 + 21, MYF(MY_WME)))) + return; + pos= buf; + if (db && db_len) + { + pos= strmov(buf, "use `"); + memcpy(pos, db, db_len); + pos= strmov(pos+db_len, "`; "); + } + if (query && q_len) + { + memcpy(pos, query, q_len); + pos+= q_len; + } + pos= strmov(pos, " ;file_id="); + pos= int10_to_str((long) file_id, pos, 10); + protocol->store(buf, pos-buf, &my_charset_bin); + my_free(buf, MYF(MY_ALLOW_ZERO_PTR)); +} + + +int +Execute_load_query_log_event::exec_event(struct st_relay_log_info* rli) +{ + char *p; + char *buf; + char *fname; + 
char *fname_end; + int error; + + /* Replace filename and LOCAL keyword in query before executing it */ + if (!(buf = my_malloc(q_len + 1 - (fn_pos_end - fn_pos_start) + + (FN_REFLEN + 10) + 10 + 8 + 5, MYF(MY_WME)))) + { + slave_print_error(rli, my_errno, "Not enough memory"); + return 1; + } + + p= buf; + memcpy(p, query, fn_pos_start); + p+= fn_pos_start; + fname= (p= strmake(p, STRING_WITH_LEN(" INFILE \'"))); + p= slave_load_file_stem(p, file_id, server_id, ".data"); + fname_end= p= strend(p); // Safer than p=p+5 + *(p++)='\''; + switch (dup_handling) { + case LOAD_DUP_IGNORE: + p= strmake(p, STRING_WITH_LEN(" IGNORE")); + break; + case LOAD_DUP_REPLACE: + p= strmake(p, STRING_WITH_LEN(" REPLACE")); + break; + default: + /* Ordinary load data */ + break; + } + p= strmake(p, STRING_WITH_LEN(" INTO")); + p= strmake(p, query+fn_pos_end, q_len-fn_pos_end); + + error= Query_log_event::exec_event(rli, buf, p-buf); + + /* Forging file name for deletion in same buffer */ + *fname_end= 0; + + /* + If there was an error the slave is going to stop, leave the + file so that we can re-execute this event at START SLAVE. 
+ */ + if (!error) + (void) my_delete(fname, MYF(MY_WME)); + + my_free(buf, MYF(MY_ALLOW_ZERO_PTR)); + return error; +} +#endif + + +/************************************************************************** sql_ex_info methods **************************************************************************/ @@ -3442,15 +5101,15 @@ err: sql_ex_info::write_data() */ -int sql_ex_info::write_data(IO_CACHE* file) +bool sql_ex_info::write_data(IO_CACHE* file) { if (new_format()) { - return (write_str(file, field_term, field_term_len) || - write_str(file, enclosed, enclosed_len) || - write_str(file, line_term, line_term_len) || - write_str(file, line_start, line_start_len) || - write_str(file, escaped, escaped_len) || + return (write_str(file, field_term, (uint) field_term_len) || + write_str(file, enclosed, (uint) enclosed_len) || + write_str(file, line_term, (uint) line_term_len) || + write_str(file, line_start, (uint) line_start_len) || + write_str(file, escaped, (uint) escaped_len) || my_b_safe_write(file,(byte*) &opt_flags,1)); } else @@ -3463,7 +5122,7 @@ int sql_ex_info::write_data(IO_CACHE* file) old_ex.escaped= *escaped; old_ex.opt_flags= opt_flags; old_ex.empty_flags=empty_flags; - return my_b_safe_write(file, (byte*) &old_ex, sizeof(old_ex)); + return my_b_safe_write(file, (byte*) &old_ex, sizeof(old_ex)) != 0; } } @@ -3485,11 +5144,11 @@ char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format) the case when we have old format because we will be reusing net buffer to read the actual file before we write out the Create_file event. 
*/ - if (read_str(buf, buf_end, field_term, field_term_len) || - read_str(buf, buf_end, enclosed, enclosed_len) || - read_str(buf, buf_end, line_term, line_term_len) || - read_str(buf, buf_end, line_start, line_start_len) || - read_str(buf, buf_end, escaped, escaped_len)) + if (read_str(&buf, buf_end, &field_term, &field_term_len) || + read_str(&buf, buf_end, &enclosed, &enclosed_len) || + read_str(&buf, buf_end, &line_term, &line_term_len) || + read_str(&buf, buf_end, &line_start, &line_start_len) || + read_str(&buf, buf_end, &escaped, &escaped_len)) return 0; opt_flags = *buf++; } diff --git a/sql/log_event.h b/sql/log_event.h index ec2d8993bca..a1e7adb6487 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1,15 +1,14 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - +/* Copyright (C) 2000-2006 MySQL AB + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - + the Free Software Foundation; version 2 of the License. + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ @@ -35,12 +34,42 @@ #define LOG_EVENT_OFFSET 4 -#define BINLOG_VERSION 3 +/* + 3 is MySQL 4.x; 4 is MySQL 5.0.0. + Compared to version 3, version 4 has: + - a different Start_log_event, which includes info about the binary log + (sizes of headers); this info is included for better compatibility if the + master's MySQL version is different from the slave's. 
+ - all events have a unique ID (the triplet (server_id, timestamp at server + start, other) to be sure an event is not executed more than once in a + multimaster setup, example: + M1 + / \ + v v + M2 M3 + \ / + v v + S + if a query is run on M1, it will arrive twice on S, so we need that S + remembers the last unique ID it has processed, to compare and know if the + event should be skipped or not. Example of ID: we already have the server id + (4 bytes), plus: + timestamp_when_the_master_started (4 bytes), a counter (a sequence number + which increments every time we write an event to the binlog) (3 bytes). + Q: how do we handle when the counter is overflowed and restarts from 0 ? + + - Query and Load (Create or Execute) events may have a more precise timestamp + (with microseconds), number of matched/affected/warnings rows + and fields of session variables: SQL_MODE, + FOREIGN_KEY_CHECKS, UNIQUE_CHECKS, SQL_AUTO_IS_NULL, the collations and + charsets, the PASSWORD() version (old/new/...). +*/ +#define BINLOG_VERSION 4 /* We could have used SERVER_VERSION_LENGTH, but this introduces an obscure dependency - if somebody decided to change SERVER_VERSION_LENGTH - this would have broken the replication protocol + this would break the replication protocol */ #define ST_SERVER_VER_LEN 50 @@ -49,6 +78,12 @@ TERMINATED etc). */ +/* + These are flags and structs to handle all the LOAD DATA INFILE options (LINES + TERMINATED etc). + DUMPFILE_FLAG is probably useless (DUMPFILE is a clause of SELECT, not of LOAD + DATA). 
+*/ #define DUMPFILE_FLAG 0x1 #define OPT_ENCLOSED_FLAG 0x2 #define REPLACE_FLAG 0x4 @@ -85,6 +120,7 @@ struct old_sql_ex ****************************************************************************/ struct sql_ex_info { + sql_ex_info() {} /* Remove gcc warning */ char* field_term; char* enclosed; char* line_term; @@ -92,18 +128,18 @@ struct sql_ex_info char* escaped; int cached_new_format; uint8 field_term_len,enclosed_len,line_term_len,line_start_len, escaped_len; - char opt_flags; + char opt_flags; char empty_flags; - + // store in new format even if old is possible - void force_new_format() { cached_new_format = 1;} + void force_new_format() { cached_new_format = 1;} int data_size() { return (new_format() ? field_term_len + enclosed_len + line_term_len + line_start_len + escaped_len + 6 : 7); } - int write_data(IO_CACHE* file); + bool write_data(IO_CACHE* file); char* init(char* buf,char* buf_end,bool use_new_format); bool new_format() { @@ -127,33 +163,59 @@ struct sql_ex_info See the #defines below for the format specifics. - The events which really update data are Query_log_event and - Load_log_event/Create_file_log_event/Execute_load_log_event (these 3 act - together to replicate LOAD DATA INFILE, with the help of - Append_block_log_event which prepares temporary files to load into the table). + The events which really update data are Query_log_event, + Execute_load_query_log_event and old Load_log_event and + Execute_load_log_event events (Execute_load_query is used together with + Begin_load_query and Append_block events to replicate LOAD DATA INFILE. + Create_file/Append_block/Execute_load (which includes Load_log_event) + were used to replicate LOAD DATA before the 5.0.3). ****************************************************************************/ #define LOG_EVENT_HEADER_LEN 19 /* the fixed header length */ #define OLD_HEADER_LEN 13 /* the fixed header length in 3.23 */ +/* + Fixed header length, where 4.x and 5.0 agree. 
That is, 5.0 may have a longer + header (it will for sure when we have the unique event's ID), but at least + the first 19 bytes are the same in 4.x and 5.0. So when we have the unique + event's ID, LOG_EVENT_HEADER_LEN will be something like 26, but + LOG_EVENT_MINIMAL_HEADER_LEN will remain 19. +*/ +#define LOG_EVENT_MINIMAL_HEADER_LEN 19 /* event-specific post-header sizes */ -#define QUERY_HEADER_LEN (4 + 4 + 1 + 2) +// where 3.23, 4.x and 5.0 agree +#define QUERY_HEADER_MINIMAL_LEN (4 + 4 + 1 + 2) +// where 5.0 differs: 2 for len of N-bytes vars. +#define QUERY_HEADER_LEN (QUERY_HEADER_MINIMAL_LEN + 2) #define LOAD_HEADER_LEN (4 + 4 + 4 + 1 +1 + 4) -#define START_HEADER_LEN (2 + ST_SERVER_VER_LEN + 4) -#define ROTATE_HEADER_LEN 8 +#define START_V3_HEADER_LEN (2 + ST_SERVER_VER_LEN + 4) +#define ROTATE_HEADER_LEN 8 // this is FROZEN (the Rotate post-header is frozen) #define CREATE_FILE_HEADER_LEN 4 #define APPEND_BLOCK_HEADER_LEN 4 #define EXEC_LOAD_HEADER_LEN 4 #define DELETE_FILE_HEADER_LEN 4 +#define FORMAT_DESCRIPTION_HEADER_LEN (START_V3_HEADER_LEN+1+LOG_EVENT_TYPES) +#define EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN (4 + 4 + 4 + 1) +#define EXECUTE_LOAD_QUERY_HEADER_LEN (QUERY_HEADER_LEN + EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN) /* Max number of possible extra bytes in a replication event compared to a - packet (i.e. a query) sent from client to master. + packet (i.e. 
a query) sent from client to master; + First, an auxiliary log_event status vars estimation: */ -#define MAX_LOG_EVENT_HEADER (LOG_EVENT_HEADER_LEN + /* write_header */ \ - QUERY_HEADER_LEN + /* write_data */ \ - NAME_LEN + 1) +#define MAX_SIZE_LOG_EVENT_STATUS (4 /* flags2 */ + \ + 8 /* sql mode */ + \ + 1 + 1 + 255 /* catalog */ + \ + 4 /* autoinc */ + \ + 6 /* charset */ + \ + MAX_TIME_ZONE_NAME_LENGTH) +#define MAX_LOG_EVENT_HEADER ( /* in order of Query_log_event::write */ \ + LOG_EVENT_HEADER_LEN + /* write_header */ \ + QUERY_HEADER_LEN + /* write_data */ \ + EXECUTE_LOAD_QUERY_EXTRA_HEADER_LEN + /*write_post_header_for_derived */ \ + MAX_SIZE_LOG_EVENT_STATUS + /* status */ \ + NAME_LEN + 1) /* Event header offsets; @@ -166,11 +228,12 @@ struct sql_ex_info #define LOG_POS_OFFSET 13 #define FLAGS_OFFSET 17 -/* start event post-header */ +/* start event post-header (for v3 and v4) */ #define ST_BINLOG_VER_OFFSET 0 #define ST_SERVER_VER_OFFSET 2 #define ST_CREATED_OFFSET (ST_SERVER_VER_OFFSET + ST_SERVER_VER_LEN) +#define ST_COMMON_HEADER_LEN_OFFSET (ST_CREATED_OFFSET + 4) /* slave event post-header (this event is never written) */ @@ -184,8 +247,32 @@ struct sql_ex_info #define Q_EXEC_TIME_OFFSET 4 #define Q_DB_LEN_OFFSET 8 #define Q_ERR_CODE_OFFSET 9 +#define Q_STATUS_VARS_LEN_OFFSET 11 #define Q_DATA_OFFSET QUERY_HEADER_LEN +/* these are codes, not offsets; not more than 256 values (1 byte). */ +#define Q_FLAGS2_CODE 0 +#define Q_SQL_MODE_CODE 1 +/* + Q_CATALOG_CODE is catalog with end zero stored; it is used only by MySQL + 5.0.x where 0<=x<=3. We have to keep it to be able to replicate these + old masters. +*/ +#define Q_CATALOG_CODE 2 +#define Q_AUTO_INCREMENT 3 +#define Q_CHARSET_CODE 4 +#define Q_TIME_ZONE_CODE 5 +/* + Q_CATALOG_NZ_CODE is catalog withOUT end zero stored; it is used by MySQL + 5.0.x where x>=4. Saves one byte in every Query_log_event in binlog, + compared to Q_CATALOG_CODE. 
The reason we didn't simply re-use + Q_CATALOG_CODE is that then a 5.0.3 slave of this 5.0.x (x>=4) master would + crash (segfault etc) because it would expect a 0 when there is none. +*/ +#define Q_CATALOG_NZ_CODE 6 + +#define Q_LC_TIME_NAMES_CODE 7 +#define Q_CHARSET_DATABASE_CODE 8 /* Intvar event post-header */ #define I_TYPE_OFFSET 0 @@ -236,15 +323,11 @@ struct sql_ex_info /* DF = "Delete File" */ #define DF_FILE_ID_OFFSET 0 -#define QUERY_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN) -#define QUERY_DATA_OFFSET (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN) -#define ROTATE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+ROTATE_HEADER_LEN) -#define LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+LOAD_HEADER_LEN) -#define CREATE_FILE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+\ - +LOAD_HEADER_LEN+CREATE_FILE_HEADER_LEN) -#define DELETE_FILE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+DELETE_FILE_HEADER_LEN) -#define EXEC_LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+EXEC_LOAD_HEADER_LEN) -#define APPEND_BLOCK_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+APPEND_BLOCK_HEADER_LEN) +/* ELQ = "Execute Load Query" */ +#define ELQ_FILE_ID_OFFSET QUERY_HEADER_LEN +#define ELQ_FN_POS_START_OFFSET ELQ_FILE_ID_OFFSET + 4 +#define ELQ_FN_POS_END_OFFSET ELQ_FILE_ID_OFFSET + 8 +#define ELQ_DUP_HANDLING_OFFSET ELQ_FILE_ID_OFFSET + 12 /* 4 bytes which all binlogs should begin with */ #define BINLOG_MAGIC "\xfe\x62\x69\x6e" @@ -257,20 +340,45 @@ struct sql_ex_info So they are now removed and their place may later be reused for other flags. Then one must remember that Rotate events in 4.x have LOG_EVENT_FORCED_ROTATE_F set, so one should not rely on the value of the - replacing flag when reading a Rotate event. + replacing flag when reading a Rotate event. I keep the defines here just to remember what they were. 
*/ #ifdef TO_BE_REMOVED #define LOG_EVENT_TIME_F 0x1 -#define LOG_EVENT_FORCED_ROTATE_F 0x2 +#define LOG_EVENT_FORCED_ROTATE_F 0x2 #endif -/* + +/* + This flag only makes sense for Format_description_log_event. It is set + when the event is written, and *reset* when a binlog file is + closed (yes, it's the only case when MySQL modifies already written + part of binlog). Thus it is a reliable indicator that binlog was + closed correctly. (Stop_log_event is not enough, there's always a + small chance that mysqld crashes in the middle of insert and end of + the binlog would look like a Stop_log_event). + + This flag is used to detect a restart after a crash, and to provide + "unbreakable" binlog. The problem is that on a crash storage engines + rollback automatically, while binlog does not. To solve this we use this + flag and automatically append ROLLBACK to every non-closed binlog (append + virtually, on reading, file itself is not changed). If this flag is found, + mysqlbinlog simply prints "ROLLBACK" Replication master does not abort on + binlog corruption, but takes it as EOF, and replication slave forces a + rollback in this case. + + Note, that old binlogs does not have this flag set, so we get a + a backward-compatible behaviour. +*/ + +#define LOG_EVENT_BINLOG_IN_USE_F 0x1 + +/* If the query depends on the thread (for example: TEMPORARY TABLE). Currently this is used by mysqlbinlog to know it must print SET @@PSEUDO_THREAD_ID=xx; before the query (it would not hurt to print it for every query but this would be slow). */ -#define LOG_EVENT_THREAD_SPECIFIC_F 0x4 +#define LOG_EVENT_THREAD_SPECIFIC_F 0x4 /* Suppress the generation of 'USE' statements before the actual @@ -285,15 +393,77 @@ struct sql_ex_info */ #define LOG_EVENT_SUPPRESS_USE_F 0x8 +/* + OPTIONS_WRITTEN_TO_BIN_LOG are the bits of thd->options which must be + written to the binlog. 
OPTIONS_WRITTEN_TO_BINLOG could be written + into the Format_description_log_event, so that if later we don't want + to replicate a variable we did replicate, or the contrary, it's + doable. But it should not be too hard to decide once for all of what + we replicate and what we don't, among the fixed 32 bits of + thd->options. + I (Guilhem) have read through every option's usage, and it looks like + OPTION_AUTO_IS_NULL and OPTION_NO_FOREIGN_KEYS are the only ones + which alter how the query modifies the table. It's good to replicate + OPTION_RELAXED_UNIQUE_CHECKS too because otherwise, the slave may + insert data slower than the master, in InnoDB. + OPTION_BIG_SELECTS is not needed (the slave thread runs with + max_join_size=HA_POS_ERROR) and OPTION_BIG_TABLES is not needed + either, as the manual says (because a too big in-memory temp table is + automatically written to disk). +*/ +#define OPTIONS_WRITTEN_TO_BIN_LOG (OPTION_AUTO_IS_NULL | \ +OPTION_NO_FOREIGN_KEY_CHECKS | OPTION_RELAXED_UNIQUE_CHECKS) + +#if OPTIONS_WRITTEN_TO_BIN_LOG != ((1L << 14) | (1L << 26) | (1L << 27)) +#error OPTIONS_WRITTEN_TO_BIN_LOG must NOT change their values! +#endif + enum Log_event_type { - UNKNOWN_EVENT= 0, START_EVENT= 1, QUERY_EVENT= 2, STOP_EVENT= 3, - ROTATE_EVENT= 4, INTVAR_EVENT= 5, LOAD_EVENT=6, SLAVE_EVENT= 7, - CREATE_FILE_EVENT= 8, APPEND_BLOCK_EVENT= 9, EXEC_LOAD_EVENT= 10, - DELETE_FILE_EVENT= 11, NEW_LOAD_EVENT= 12, RAND_EVENT= 13, - USER_VAR_EVENT= 14 + /* + Every time you update this enum (when you add a type), you have to + fix Format_description_log_event::Format_description_log_event(). 
+ */ + UNKNOWN_EVENT= 0, + START_EVENT_V3= 1, + QUERY_EVENT= 2, + STOP_EVENT= 3, + ROTATE_EVENT= 4, + INTVAR_EVENT= 5, + LOAD_EVENT= 6, + SLAVE_EVENT= 7, + CREATE_FILE_EVENT= 8, + APPEND_BLOCK_EVENT= 9, + EXEC_LOAD_EVENT= 10, + DELETE_FILE_EVENT= 11, + /* + NEW_LOAD_EVENT is like LOAD_EVENT except that it has a longer + sql_ex, allowing multibyte TERMINATED BY etc; both types share the + same class (Load_log_event) + */ + NEW_LOAD_EVENT= 12, + RAND_EVENT= 13, + USER_VAR_EVENT= 14, + FORMAT_DESCRIPTION_EVENT= 15, + XID_EVENT= 16, + BEGIN_LOAD_QUERY_EVENT= 17, + EXECUTE_LOAD_QUERY_EVENT= 18, + + /* + Add new events here - right above this comment! + Existing events (except ENUM_END_EVENT) should never change their numbers + */ + + ENUM_END_EVENT /* end marker */ }; +/* + The number of types we handle in Format_description_log_event (UNKNOWN_EVENT + is not to be handled, it does not exist in binlogs, it does not have a + format). +*/ +#define LOG_EVENT_TYPES (ENUM_END_EVENT-1) + enum Int_event_type { INVALID_INT_EVENT = 0, LAST_INSERT_ID_EVENT = 1, INSERT_ID_EVENT = 2 @@ -306,8 +476,67 @@ class MYSQL_LOG; class THD; #endif +class Format_description_log_event; + struct st_relay_log_info; +#ifdef MYSQL_CLIENT +/* + A structure for mysqlbinlog to know how to print events + + This structure is passed to the event's print() methods, + + There are two types of settings stored here: + 1. Last db, flags2, sql_mode etc comes from the last printed event. + They are stored so that only the necessary USE and SET commands + are printed. + 2. Other information on how to print the events, e.g. short_form, + hexdump_from. These are not dependent on the last event. +*/ +typedef struct st_print_event_info +{ + /* + Settings for database, sql_mode etc that comes from the last event + that was printed. + */ + // TODO: have the last catalog here ?? 
+ char db[FN_REFLEN+1]; // TODO: make this a LEX_STRING when thd->db is + bool flags2_inited; + uint32 flags2; + bool sql_mode_inited; + ulong sql_mode; /* must be same as THD.variables.sql_mode */ + ulong auto_increment_increment, auto_increment_offset; + bool charset_inited; + char charset[6]; // 3 variables, each of them storable in 2 bytes + char time_zone_str[MAX_TIME_ZONE_NAME_LENGTH]; + uint lc_time_names_number; + uint charset_database_number; + st_print_event_info() + :flags2_inited(0), sql_mode_inited(0), + auto_increment_increment(1),auto_increment_offset(1), charset_inited(0), + lc_time_names_number(0), charset_database_number(0) + { + /* + Currently we only use static PRINT_EVENT_INFO objects, so zeroed at + program's startup, but these explicit bzero() is for the day someone + creates dynamic instances. + */ + bzero(db, sizeof(db)); + bzero(charset, sizeof(charset)); + bzero(time_zone_str, sizeof(time_zone_str)); + strcpy(delimiter, ";"); + } + + /* Settings on how to print the events */ + bool short_form; + my_off_t hexdump_from; + uint8 common_header_len; + char delimiter[16]; + +} PRINT_EVENT_INFO; +#endif + + /***************************************************************************** Log_event class @@ -318,69 +547,83 @@ struct st_relay_log_info; class Log_event { public: - /* - The offset in the log where this event originally appeared (it is preserved - in relay logs, making SHOW SLAVE STATUS able to print coordinates of the - event in the master's binlog). Note: when a transaction is written by the - master to its binlog (wrapped in BEGIN/COMMIT) the log_pos of all the - queries it contains is the one of the BEGIN (this way, when one does SHOW - SLAVE STATUS it sees the offset of the BEGIN, which is logical as rollback - may occur), except the COMMIT query which has its real offset. + /* + The following type definition is to be used whenever data is placed + and manipulated in a common buffer. 
Use this typedef for buffers + that contain data containing binary and character data. + */ + typedef unsigned char Byte; + + /* + The offset in the log where this event originally appeared (it is + preserved in relay logs, making SHOW SLAVE STATUS able to print + coordinates of the event in the master's binlog). Note: when a + transaction is written by the master to its binlog (wrapped in + BEGIN/COMMIT) the log_pos of all the queries it contains is the + one of the BEGIN (this way, when one does SHOW SLAVE STATUS it + sees the offset of the BEGIN, which is logical as rollback may + occur), except the COMMIT query which has its real offset. */ my_off_t log_pos; - /* + /* A temp buffer for read_log_event; it is later analysed according to the event's type, and its content is distributed in the event-specific fields. */ - char *temp_buf; + char *temp_buf; /* - Timestamp on the master(for debugging and replication of NOW()/TIMESTAMP). - It is important for queries and LOAD DATA INFILE. This is set at the event's - creation time, except for Query and Load (et al.) events where this is set - at the query's execution time, which guarantees good replication (otherwise, - we could have a query and its event with different timestamps). + Timestamp on the master(for debugging and replication of + NOW()/TIMESTAMP). It is important for queries and LOAD DATA + INFILE. This is set at the event's creation time, except for Query + and Load (et al.) events where this is set at the query's + execution time, which guarantees good replication (otherwise, we + could have a query and its event with different timestamps). */ time_t when; /* The number of seconds the query took to run on the master. */ ulong exec_time; - /* - The master's server id (is preserved in the relay log; used to prevent from - infinite loops in circular replication). 
+ /* Number of bytes written by write() function */ + ulong data_written; + + /* + The master's server id (is preserved in the relay log; used to + prevent from infinite loops in circular replication). */ uint32 server_id; - uint cached_event_len; /* - Some 16 flags. Only one is really used now; look above for - LOG_EVENT_TIME_F, LOG_EVENT_FORCED_ROTATE_F, - LOG_EVENT_THREAD_SPECIFIC_F, and LOG_EVENT_SUPPRESS_USE_F for - notes. + Some 16 flags. Look above for LOG_EVENT_TIME_F, + LOG_EVENT_FORCED_ROTATE_F, LOG_EVENT_THREAD_SPECIFIC_F, and + LOG_EVENT_SUPPRESS_USE_F for notes. */ uint16 flags; bool cache_stmt; + #ifndef MYSQL_CLIENT THD* thd; - Log_event(THD* thd_arg, uint16 flags_arg, bool cache_stmt); Log_event(); + Log_event(THD* thd_arg, uint16 flags_arg, bool cache_stmt); /* - read_log_event() functions read an event from a binlog or relay log; used by - SHOW BINLOG EVENTS, the binlog_dump thread on the master (reads master's - binlog), the slave IO thread (reads the event sent by binlog_dump), the - slave SQL thread (reads the event from the relay log). + read_log_event() functions read an event from a binlog or relay + log; used by SHOW BINLOG EVENTS, the binlog_dump thread on the + master (reads master's binlog), the slave IO thread (reads the + event sent by binlog_dump), the slave SQL thread (reads the event + from the relay log). If mutex is 0, the read will proceed without + mutex. We need the description_event to be able to parse the + event (to know the post-header's size); in fact in read_log_event + we detect the event's type, then call the specific event's + constructor and pass description_event as an argument. 
*/ - // if mutex is 0, the read will proceed without mutex static Log_event* read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock, - bool old_format); + const Format_description_log_event *description_event); static int read_log_event(IO_CACHE* file, String* packet, pthread_mutex_t* log_lock); - /* set_log_pos() is used to fill log_pos with tell(log). */ - void set_log_pos(MYSQL_LOG* log); /* - init_show_field_list() prepares the column names and types for the output of - SHOW BINLOG EVENTS; it is used only by SHOW BINLOG EVENTS. + init_show_field_list() prepares the column names and types for the + output of SHOW BINLOG EVENTS; it is used only by SHOW BINLOG + EVENTS. */ static void init_show_field_list(List<Item>* field_list); #ifdef HAVE_REPLICATION @@ -401,13 +644,15 @@ public: return thd ? thd->db : 0; } #else - // avoid having to link mysqlbinlog against libpthread - static Log_event* read_log_event(IO_CACHE* file, bool old_format); + Log_event() : temp_buf(0) {} + /* avoid having to link mysqlbinlog against libpthread */ + static Log_event* read_log_event(IO_CACHE* file, + const Format_description_log_event *description_event); /* print*() functions are used by mysqlbinlog */ - virtual void print(FILE* file, bool short_form = 0, char* last_db = 0) = 0; + virtual void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0) = 0; void print_timestamp(FILE* file, time_t *ts = 0); - void print_header(FILE* file); -#endif + void print_header(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); +#endif static void *operator new(size_t size) { @@ -417,19 +662,25 @@ public: { my_free((gptr) ptr, MYF(MY_WME|MY_ALLOW_ZERO_PTR)); } - - int write(IO_CACHE* file); - int write_header(IO_CACHE* file); - virtual int write_data(IO_CACHE* file) - { return write_data_header(file) || write_data_body(file); } - virtual int write_data_header(IO_CACHE* file __attribute__((unused))) + +#ifndef MYSQL_CLIENT + bool write_header(IO_CACHE* file, ulong data_length); + virtual bool 
write(IO_CACHE* file) + { + return (write_header(file, get_data_size()) || + write_data_header(file) || + write_data_body(file)); + } + virtual bool write_data_header(IO_CACHE* file) { return 0; } - virtual int write_data_body(IO_CACHE* file __attribute__((unused))) + virtual bool write_data_body(IO_CACHE* file __attribute__((unused))) { return 0; } +#endif virtual Log_event_type get_type_code() = 0; - virtual bool is_valid() = 0; + virtual bool is_valid() const = 0; + virtual bool is_artificial_event() { return 0; } inline bool get_cache_stmt() { return cache_stmt; } - Log_event(const char* buf, bool old_format); + Log_event(const char* buf, const Format_description_log_event* description_event); virtual ~Log_event() { free_temp_buf();} void register_temp_buf(char* buf) { temp_buf = buf; } void free_temp_buf() @@ -440,19 +691,31 @@ public: temp_buf = 0; } } + /* + Get event length for simple events. For complicated events the length + is calculated during write() + */ virtual int get_data_size() { return 0;} - virtual int get_data_body_offset() { return 0; } - virtual int get_event_len() - { - return (cached_event_len ? cached_event_len : - (cached_event_len = LOG_EVENT_HEADER_LEN + get_data_size())); - } - static Log_event* read_log_event(const char* buf, int event_len, - const char **error, bool old_format); + static Log_event* read_log_event(const char* buf, uint event_len, + const char **error, + const Format_description_log_event + *description_event); /* returns the human readable name of the event's type */ const char* get_type_str(); }; +/* + One class for each type of event. + Two constructors for each class: + - one to create the event for logging (when the server acts as a master), + called after an update to the database is done, + which accepts parameters like the query, the database, the options for LOAD + DATA INFILE... 
+ - one to create the event from a packet (when the server acts as a slave), + called before reproducing the update, which accepts parameters (like a + buffer). Used to read from the master, from the relay log, and in + mysqlbinlog. This constructor must be format-tolerant. +*/ /***************************************************************************** @@ -464,9 +727,10 @@ public: class Query_log_event: public Log_event { protected: - char* data_buf; + Log_event::Byte* data_buf; public: const char* query; + const char* catalog; const char* db; /* If we already know the length of the query string @@ -477,13 +741,66 @@ public: uint32 db_len; uint16 error_code; ulong thread_id; - /* - For events created by Query_log_event::exec_event (and - Load_log_event::exec_event()) we need the *original* thread id, to be able - to log the event with the original (=master's) thread id (fix for - BUG#1686). + /* + For events created by Query_log_event::exec_event (and + Load_log_event::exec_event()) we need the *original* thread id, to be able + to log the event with the original (=master's) thread id (fix for + BUG#1686). */ ulong slave_proxy_id; + + /* + Binlog format 3 and 4 start to differ (as far as class members are + concerned) from here. + */ + + uint catalog_len; // <= 255 char; 0 means uninited + + /* + We want to be able to store a variable number of N-bit status vars: + (generally N=32; but N=64 for SQL_MODE) a user may want to log the number + of affected rows (for debugging) while another does not want to lose 4 + bytes in this. + The storage on disk is the following: + status_vars_len is part of the post-header, + status_vars are in the variable-length part, after the post-header, before + the db & query. + status_vars on disk is a sequence of pairs (code, value) where 'code' means + 'sql_mode', 'affected' etc. Sometimes 'value' must be a short string, so + its first byte is its length. 
For now the order of status vars is: + flags2 - sql_mode - catalog - autoinc - charset + We should add the same thing to Load_log_event, but in fact + LOAD DATA INFILE is going to be logged with a new type of event (logging of + the plain text query), so Load_log_event would be frozen, so no need. The + new way of logging LOAD DATA INFILE would use a derived class of + Query_log_event, so automatically benefit from the work already done for + status variables in Query_log_event. + */ + uint16 status_vars_len; + + /* + 'flags2' is a second set of flags (on top of those in Log_event), for + session variables. These are thd->options which is & against a mask + (OPTIONS_WRITTEN_TO_BINLOG). + flags2_inited helps make a difference between flags2==0 (3.23 or 4.x + master, we don't know flags2, so use the slave server's global options) and + flags2==0 (5.0 master, we know this has a meaning of flags all down which + must influence the query). + */ + bool flags2_inited; + bool sql_mode_inited; + bool charset_inited; + + uint32 flags2; + /* In connections sql_mode is 32 bits now but will be 64 bits soon */ + ulong sql_mode; + ulong auto_increment_increment, auto_increment_offset; + char charset[6]; + uint time_zone_len; /* 0 means uninited */ + const char *time_zone_str; + uint lc_time_names_number; /* 0 means en_US */ + uint charset_database_number; + #ifndef MYSQL_CLIENT Query_log_event(THD* thd_arg, const char* query_arg, ulong query_length, @@ -492,33 +809,60 @@ public: #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); + int exec_event(struct st_relay_log_info* rli, const char *query_arg, + uint32 q_len_arg); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print_query_header(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); #endif - Query_log_event(const char* buf, int event_len, bool 
old_format); + Query_log_event(); + Query_log_event(const char* buf, uint event_len, + const Format_description_log_event *description_event, + Log_event_type event_type); ~Query_log_event() { if (data_buf) - { my_free((gptr) data_buf, MYF(0)); - } } Log_event_type get_type_code() { return QUERY_EVENT; } - int write(IO_CACHE* file); - int write_data(IO_CACHE* file); // returns 0 on success, -1 on error - bool is_valid() { return query != 0; } - int get_data_size() - { - return (q_len + db_len + 2 - + 4 // thread_id - + 4 // exec_time - + 2 // error_code - ); - } +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); + virtual bool write_post_header_for_derived(IO_CACHE* file) { return FALSE; } +#endif + bool is_valid() const { return query != 0; } + + /* + Returns number of bytes additionaly written to post header by derived + events (so far it is only Execute_load_query event). + */ + virtual ulong get_post_header_size_for_derived() { return 0; } + /* Writes derived event-specific part of post header. */ +}; + + +/***************************************************************************** + + Muted Query Log Event class + + Pretends to Log SQL queries, but doesn't actually do so. + + ****************************************************************************/ +class Muted_query_log_event: public Query_log_event +{ +public: +#ifndef MYSQL_CLIENT + Muted_query_log_event(); + + bool write(IO_CACHE* file) { return(false); }; + virtual bool write_post_header_for_derived(IO_CACHE* file) { return FALSE; } +#else + Muted_query_log_event() {} +#endif }; + #ifdef HAVE_REPLICATION /***************************************************************************** @@ -526,6 +870,7 @@ public: Slave Log Event class Note that this class is currently not used at all; no code writes a Slave_log_event (though some code in repl_failsafe.cc reads Slave_log_event). + So it's not a problem if this code is not maintained. 
****************************************************************************/ class Slave_log_event: public Log_event @@ -541,20 +886,22 @@ public: int master_log_len; uint16 master_port; -#ifndef MYSQL_CLIENT +#ifndef MYSQL_CLIENT Slave_log_event(THD* thd_arg, struct st_relay_log_info* rli); void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); +#endif - Slave_log_event(const char* buf, int event_len); + Slave_log_event(const char* buf, uint event_len); ~Slave_log_event(); int get_data_size(); - bool is_valid() { return master_host != 0; } + bool is_valid() const { return master_host != 0; } Log_event_type get_type_code() { return SLAVE_EVENT; } - int write_data(IO_CACHE* file ); +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif }; #endif /* HAVE_REPLICATION */ @@ -567,13 +914,23 @@ public: ****************************************************************************/ class Load_log_event: public Log_event { +private: + uint get_query_buffer_length(); + void print_query(bool need_db, char *buf, char **end, + char **fn_start, char **fn_end); protected: - int copy_log_event(const char *buf, ulong event_len, bool old_format); + int copy_log_event(const char *buf, ulong event_len, + int body_offset, const Format_description_log_event* description_event); public: ulong thread_id; ulong slave_proxy_id; uint32 table_name_len; + /* + No need to have a catalog, as these events can only come from 4.x. + TODO: this may become false if Dmitri pushes his new LOAD DATA INFILE in + 5.0 only (not in 4.x). 
+ */ uint32 db_len; uint32 fname_len; uint32 num_fields; @@ -604,12 +961,13 @@ public: #ifndef MYSQL_CLIENT String field_lens_buf; String fields_buf; - + Load_log_event(THD* thd, sql_exchange* ex, const char* db_arg, const char* table_name_arg, List<Item>& fields_arg, enum enum_duplicates handle_dup, bool ignore, bool using_trans); - void set_fields(const char* db, List<Item> &fields_arg); + void set_fields(const char* db, List<Item> &fields_arg, + Name_resolution_context *context); const char* get_db() { return db; } #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); @@ -617,60 +975,76 @@ public: { return exec_event(thd->slave_net,rli,0); } - int exec_event(NET* net, struct st_relay_log_info* rli, + int exec_event(NET* net, struct st_relay_log_info* rli, bool use_rli_only_for_errors); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); - void print(FILE* file, bool short_form, char* last_db, bool commented); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info = 0); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool commented); #endif - Load_log_event(const char* buf, int event_len, bool old_format); + /* + Note that for all the events related to LOAD DATA (Load_log_event, + Create_file/Append/Exec/Delete, we pass description_event; however as + logging of LOAD DATA is going to be changed in 4.1 or 5.0, this is only used + for the common_header_len (post_header_len will not be changed). + */ + Load_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Load_log_event() {} Log_event_type get_type_code() { return sql_ex.new_format() ? 
NEW_LOAD_EVENT: LOAD_EVENT; } - int write_data_header(IO_CACHE* file); - int write_data_body(IO_CACHE* file); - bool is_valid() { return table_name != 0; } +#ifndef MYSQL_CLIENT + bool write_data_header(IO_CACHE* file); + bool write_data_body(IO_CACHE* file); +#endif + bool is_valid() const { return table_name != 0; } int get_data_size() { - return (table_name_len + 2 + db_len + 2 + fname_len - + 4 // thread_id - + 4 // exec_time - + 4 // skip_lines - + 4 // field block len + return (table_name_len + db_len + 2 + fname_len + + LOAD_HEADER_LEN + sql_ex.data_size() + field_block_len + num_fields); } - int get_data_body_offset() { return LOAD_EVENT_OVERHEAD; } }; extern char server_version[SERVER_VERSION_LENGTH]; /***************************************************************************** - Start Log Event class + Start Log Event_v3 class + + Start_log_event_v3 is the Start_log_event of binlog format 3 (MySQL 3.23 and + 4.x). + Format_description_log_event derives from Start_log_event_v3; it is the + Start_log_event of binlog format 4 (MySQL 5.0), that is, the event that + describes the other events' header/postheader lengths. This event is sent by + MySQL 5.0 whenever it starts sending a new binlog if the requested position + is >4 (otherwise if ==4 the event will be sent naturally). ****************************************************************************/ -class Start_log_event: public Log_event + +class Start_log_event_v3: public Log_event { public: - /* - If this event is at the start of the first binary log since server startup - 'created' should be the timestamp when the event (and the binary log) was - created. - In the other case (i.e. this event is at the start of a binary log created - by FLUSH LOGS or automatic rotation), 'created' should be 0. - This "trick" is used by MySQL >=4.0.14 slaves to know if they must drop the - stale temporary tables or not. 
- Note that when 'created'!=0, it is always equal to the event's timestamp; - indeed Start_log_event is written only in log.cc where the first - constructor below is called, in which 'created' is set to 'when'. - So in fact 'created' is a useless variable. When it is 0 - we can read the actual value from timestamp ('when') and when it is - non-zero we can read the same value from timestamp ('when'). Conclusion: + /* + If this event is at the start of the first binary log since server + startup 'created' should be the timestamp when the event (and the + binary log) was created. In the other case (i.e. this event is at + the start of a binary log created by FLUSH LOGS or automatic + rotation), 'created' should be 0. This "trick" is used by MySQL + >=4.0.14 slaves to know whether they must drop stale temporary + tables and whether they should abort unfinished transaction. + + Note that when 'created'!=0, it is always equal to the event's + timestamp; indeed Start_log_event is written only in log.cc where + the first constructor below is called, in which 'created' is set + to 'when'. So in fact 'created' is a useless variable. When it is + 0 we can read the actual value from timestamp ('when') and when it + is non-zero we can read the same value from timestamp + ('when'). Conclusion: - we use timestamp to print when the binlog was created. - we use 'created' only to know if this is a first binlog or not. In 3.23.57 we did not pay attention to this identity, so mysqlbinlog in @@ -680,30 +1054,92 @@ public: time_t created; uint16 binlog_version; char server_version[ST_SERVER_VER_LEN]; + /* + artifical_event is 1 in the case where this is a generated event that + should not case any cleanup actions. We handle this in the log by + setting log_event == 0 (for now). 
+ */ + bool artificial_event; #ifndef MYSQL_CLIENT - Start_log_event() :Log_event(), binlog_version(BINLOG_VERSION) - { - created = (time_t) when; - memcpy(server_version, ::server_version, ST_SERVER_VER_LEN); - } + Start_log_event_v3(); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif - - Start_log_event(const char* buf, bool old_format); - ~Start_log_event() {} - Log_event_type get_type_code() { return START_EVENT;} - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } + Start_log_event_v3() {} + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); +#endif + + Start_log_event_v3(const char* buf, + const Format_description_log_event* description_event); + ~Start_log_event_v3() {} + Log_event_type get_type_code() { return START_EVENT_V3;} +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif + bool is_valid() const { return 1; } + int get_data_size() + { + return START_V3_HEADER_LEN; //no variable-sized part + } + virtual bool is_artificial_event() { return artificial_event; } +}; + + +/* + For binlog version 4. + This event is saved by threads which read it, as they need it for future + use (to decode the ordinary events). +*/ + +class Format_description_log_event: public Start_log_event_v3 +{ +public: + /* + The size of the fixed header which _all_ events have + (for binlogs written by this version, this is equal to + LOG_EVENT_HEADER_LEN), except FORMAT_DESCRIPTION_EVENT and ROTATE_EVENT + (those have a header of size LOG_EVENT_MINIMAL_HEADER_LEN). 
+ */ + uint8 common_header_len; + uint8 number_of_event_types; + /* The list of post-headers' lengthes */ + uint8 *post_header_len; + uchar server_version_split[3]; + + Format_description_log_event(uint8 binlog_ver, const char* server_ver=0); + +#ifndef MYSQL_CLIENT +#ifdef HAVE_REPLICATION + int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ +#endif + + Format_description_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); + ~Format_description_log_event() { my_free((gptr)post_header_len, MYF(0)); } + Log_event_type get_type_code() { return FORMAT_DESCRIPTION_EVENT;} +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif + bool is_valid() const + { + return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN : + LOG_EVENT_MINIMAL_HEADER_LEN)) && + (post_header_len != NULL)); + } int get_data_size() { - return START_HEADER_LEN; + /* + The vector of post-header lengths is considered as part of the + post-header, because in a given version it never changes (contrary to the + query in a Query_log_event). 
+ */ + return FORMAT_DESCRIPTION_HEADER_LEN; } + void calc_server_version_split(); }; @@ -714,13 +1150,14 @@ public: Logs special variables such as auto_increment values ****************************************************************************/ + class Intvar_log_event: public Log_event { public: ulonglong val; uchar type; -#ifndef MYSQL_CLIENT +#ifndef MYSQL_CLIENT Intvar_log_event(THD* thd_arg,uchar type_arg, ulonglong val_arg) :Log_event(thd_arg,0,0),val(val_arg),type(type_arg) {} @@ -729,25 +1166,32 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); +#endif - Intvar_log_event(const char* buf, bool old_format); + Intvar_log_event(const char* buf, const Format_description_log_event* description_event); ~Intvar_log_event() {} Log_event_type get_type_code() { return INTVAR_EVENT;} const char* get_var_type_name(); int get_data_size() { return 9; /* sizeof(type) + sizeof(val) */;} - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif + bool is_valid() const { return 1; } }; + /***************************************************************************** Rand Log Event class - Logs random seed used by the next RAND(), and by PASSWORD() in 4.1. + Logs random seed used by the next RAND(), and by PASSWORD() in 4.1.0. + 4.1.1 does not need it (it's repeatable again) so this event needn't be + written in 4.1.1 for PASSWORD() (but the fact that it is written is just a + waste, it does not cause bugs). 
****************************************************************************/ + class Rand_log_event: public Log_event { public: @@ -763,15 +1207,54 @@ class Rand_log_event: public Log_event int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); #endif - Rand_log_event(const char* buf, bool old_format); + Rand_log_event(const char* buf, const Format_description_log_event* description_event); ~Rand_log_event() {} Log_event_type get_type_code() { return RAND_EVENT;} int get_data_size() { return 16; /* sizeof(ulonglong) * 2*/ } - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif + bool is_valid() const { return 1; } +}; + +/***************************************************************************** + + Xid Log Event class + + Logs xid of the transaction-to-be-committed in the 2pc protocol. + Has no meaning in replication, slaves ignore it. 
+ + ****************************************************************************/ +#ifdef MYSQL_CLIENT +typedef ulonglong my_xid; // this line is the same as in handler.h +#endif + +class Xid_log_event: public Log_event +{ + public: + my_xid xid; + +#ifndef MYSQL_CLIENT + Xid_log_event(THD* thd_arg, my_xid x): Log_event(thd_arg,0,0), xid(x) {} +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); + int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ +#else + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); +#endif + + Xid_log_event(const char* buf, const Format_description_log_event* description_event); + ~Xid_log_event() {} + Log_event_type get_type_code() { return XID_EVENT;} + int get_data_size() { return sizeof(xid); } +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif + bool is_valid() const { return 1; } }; /***************************************************************************** @@ -782,6 +1265,7 @@ class Rand_log_event: public Log_event written before the Query_log_event, to set the user variable. ****************************************************************************/ + class User_var_log_event: public Log_event { public: @@ -802,29 +1286,24 @@ public: void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); #endif - User_var_log_event(const char* buf, bool old_format); + User_var_log_event(const char* buf, const Format_description_log_event* description_event); ~User_var_log_event() {} Log_event_type get_type_code() { return USER_VAR_EVENT;} - int get_data_size() - { - return (is_null ? 
UV_NAME_LEN_SIZE + name_len + UV_VAL_IS_NULL : - UV_NAME_LEN_SIZE + name_len + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + - UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE + val_len); - } - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif + bool is_valid() const { return 1; } }; + /***************************************************************************** Stop Log Event class ****************************************************************************/ -#ifdef HAVE_REPLICATION - class Stop_log_event: public Log_event { public: @@ -833,39 +1312,36 @@ public: {} int exec_event(struct st_relay_log_info* rli); #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); +#endif - Stop_log_event(const char* buf, bool old_format): - Log_event(buf, old_format) + Stop_log_event(const char* buf, const Format_description_log_event* description_event): + Log_event(buf, description_event) {} ~Stop_log_event() {} Log_event_type get_type_code() { return STOP_EVENT;} - bool is_valid() { return 1; } + bool is_valid() const { return 1; } }; -#endif /* HAVE_REPLICATION */ - - /***************************************************************************** Rotate Log Event class - This will be depricated when we move to using sequence ids. + This will be deprecated when we move to using sequence ids. 
****************************************************************************/ + class Rotate_log_event: public Log_event { public: enum { - ZERO_LEN= 1, // if event should report 0 as its length DUP_NAME= 2 // if constructor should dup the string argument }; const char* new_log_ident; ulonglong pos; uint ident_len; uint flags; -#ifndef MYSQL_CLIENT +#ifndef MYSQL_CLIENT Rotate_log_event(THD* thd_arg, const char* new_log_ident_arg, uint ident_len_arg, ulonglong pos_arg, uint flags); @@ -874,36 +1350,31 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); #endif - Rotate_log_event(const char* buf, int event_len, bool old_format); + Rotate_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Rotate_log_event() { if (flags & DUP_NAME) my_free((gptr) new_log_ident, MYF(MY_ALLOW_ZERO_PTR)); } Log_event_type get_type_code() { return ROTATE_EVENT;} - virtual int get_event_len() - { - if (flags & ZERO_LEN) - return 0; - if (cached_event_len == 0) - cached_event_len= LOG_EVENT_HEADER_LEN + get_data_size(); - return cached_event_len; - } int get_data_size() { return ident_len + ROTATE_HEADER_LEN;} - bool is_valid() { return new_log_ident != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return new_log_ident != 0; } +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif }; + /* the classes below are for the new LOAD DATA INFILE logging */ /***************************************************************************** - Create File Log Event class - ****************************************************************************/ + class Create_file_log_event: public Load_log_event { protected: @@ -912,7 +1383,7 @@ protected: our Load part - used on the slave when writing event out to SQL_LOAD-*.info file */ - bool fake_base; + bool fake_base; 
public: char* block; const char *event_buf; @@ -932,11 +1403,12 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); - void print(FILE* file, bool short_form, char* last_db, bool enable_local); -#endif - - Create_file_log_event(const char* buf, int event_len, bool old_format); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool enable_local); +#endif + + Create_file_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Create_file_log_event() { my_free((char*) event_buf, MYF(MY_ALLOW_ZERO_PTR)); @@ -952,19 +1424,16 @@ public: Load_log_event::get_data_size() + 4 + 1 + block_len); } - int get_data_body_offset() - { - return (fake_base ? LOAD_EVENT_OVERHEAD: - LOAD_EVENT_OVERHEAD + CREATE_FILE_HEADER_LEN); - } - bool is_valid() { return inited_from_old || block != 0; } - int write_data_header(IO_CACHE* file); - int write_data_body(IO_CACHE* file); + bool is_valid() const { return inited_from_old || block != 0; } +#ifndef MYSQL_CLIENT + bool write_data_header(IO_CACHE* file); + bool write_data_body(IO_CACHE* file); /* Cut out Create_file extentions and write it as Load event - used on the slave */ - int write_base(IO_CACHE* file); + bool write_base(IO_CACHE* file); +#endif }; @@ -973,6 +1442,7 @@ public: Append Block Log Event class ****************************************************************************/ + class Append_block_log_event: public Log_event { public: @@ -980,14 +1450,15 @@ public: uint block_len; uint file_id; /* - 'db' is filled when the event is created in mysql_load() (the event needs to - have a 'db' member to be well filtered by binlog-*-db rules). 
'db' is not - written to the binlog (it's not used by Append_block_log_event::write()), so - it can't be read in the Append_block_log_event(const char* buf, int - event_len) constructor. - In other words, 'db' is used only for filtering by binlog-*-db rules. - Create_file_log_event is different: its 'db' (which is inherited from - Load_log_event) is written to the binlog and can be re-read. + 'db' is filled when the event is created in mysql_load() (the + event needs to have a 'db' member to be well filtered by + binlog-*-db rules). 'db' is not written to the binlog (it's not + used by Append_block_log_event::write()), so it can't be read in + the Append_block_log_event(const char* buf, int event_len) + constructor. In other words, 'db' is used only for filtering by + binlog-*-db rules. Create_file_log_event is different: it's 'db' + (which is inherited from Load_log_event) is written to the binlog + and can be re-read. */ const char* db; @@ -997,31 +1468,37 @@ public: #ifdef HAVE_REPLICATION int exec_event(struct st_relay_log_info* rli); void pack_info(Protocol* protocol); + virtual int get_create_or_append() const; #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); #endif - - Append_block_log_event(const char* buf, int event_len); + + Append_block_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Append_block_log_event() {} Log_event_type get_type_code() { return APPEND_BLOCK_EVENT;} int get_data_size() { return block_len + APPEND_BLOCK_HEADER_LEN ;} - bool is_valid() { return block != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return block != 0; } +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif const char* get_db() { return db; } }; + /***************************************************************************** Delete File Log Event class 
****************************************************************************/ + class Delete_file_log_event: public Log_event { public: uint file_id; const char* db; /* see comment in Append_block_log_event */ - + #ifndef MYSQL_CLIENT Delete_file_log_event(THD* thd, const char* db_arg, bool using_trans); #ifdef HAVE_REPLICATION @@ -1029,29 +1506,34 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); - void print(FILE* file, bool short_form, char* last_db, bool enable_local); -#endif - - Delete_file_log_event(const char* buf, int event_len); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info, bool enable_local); +#endif + + Delete_file_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Delete_file_log_event() {} Log_event_type get_type_code() { return DELETE_FILE_EVENT;} int get_data_size() { return DELETE_FILE_HEADER_LEN ;} - bool is_valid() { return file_id != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return file_id != 0; } +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif const char* get_db() { return db; } }; + /***************************************************************************** Execute Load Log Event class ****************************************************************************/ + class Execute_load_log_event: public Log_event { public: uint file_id; - const char* db; /* see comment in Append_block_log_event */ + const char* db; /* see comment in Append_block_log_event */ #ifndef MYSQL_CLIENT Execute_load_log_event(THD* thd, const char* db_arg, bool using_trans); @@ -1060,30 +1542,127 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif - - Execute_load_log_event(const 
char* buf, int event_len); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); +#endif + + Execute_load_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Execute_load_log_event() {} Log_event_type get_type_code() { return EXEC_LOAD_EVENT;} int get_data_size() { return EXEC_LOAD_HEADER_LEN ;} - bool is_valid() { return file_id != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return file_id != 0; } +#ifndef MYSQL_CLIENT + bool write(IO_CACHE* file); +#endif const char* get_db() { return db; } }; + +/*************************************************************************** + + Begin load query Log Event class + + Event for the first block of file to be loaded, its only difference from + Append_block event is that this event creates or truncates existing file + before writing data. + +****************************************************************************/ +class Begin_load_query_log_event: public Append_block_log_event +{ +public: +#ifndef MYSQL_CLIENT + Begin_load_query_log_event(THD* thd_arg, const char *db_arg, + char* block_arg, uint block_len_arg, + bool using_trans); +#ifdef HAVE_REPLICATION + Begin_load_query_log_event(THD* thd); + int get_create_or_append() const; +#endif /* HAVE_REPLICATION */ +#endif + Begin_load_query_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); + ~Begin_load_query_log_event() {} + Log_event_type get_type_code() { return BEGIN_LOAD_QUERY_EVENT; } +}; + + +/* + Elements of this enum describe how LOAD DATA handles duplicates. 
+*/ +enum enum_load_dup_handling { LOAD_DUP_ERROR= 0, LOAD_DUP_IGNORE, + LOAD_DUP_REPLACE }; + +/**************************************************************************** + + Execute load query Log Event class + + Event responsible for LOAD DATA execution, it similar to Query_log_event + but before executing the query it substitutes original filename in LOAD DATA + query with name of temporary file. + +****************************************************************************/ +class Execute_load_query_log_event: public Query_log_event +{ +public: + uint file_id; // file_id of temporary file + uint fn_pos_start; // pointer to the part of the query that should + // be substituted + uint fn_pos_end; // pointer to the end of this part of query + /* + We have to store type of duplicate handling explicitly, because + for LOAD DATA it also depends on LOCAL option. And this part + of query will be rewritten during replication so this information + may be lost... + */ + enum_load_dup_handling dup_handling; + +#ifndef MYSQL_CLIENT + Execute_load_query_log_event(THD* thd, const char* query_arg, + ulong query_length, uint fn_pos_start_arg, + uint fn_pos_end_arg, + enum_load_dup_handling dup_handling_arg, + bool using_trans, bool suppress_use); +#ifdef HAVE_REPLICATION + void pack_info(Protocol* protocol); + int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ +#else + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); + /* Prints the query as LOAD DATA LOCAL and with rewritten filename */ + void print(FILE* file, PRINT_EVENT_INFO* print_event_info, + const char *local_fname); +#endif + Execute_load_query_log_event(const char* buf, uint event_len, + const Format_description_log_event *description_event); + ~Execute_load_query_log_event() {} + + Log_event_type get_type_code() { return EXECUTE_LOAD_QUERY_EVENT; } + bool is_valid() const { return Query_log_event::is_valid() && file_id != 0; } + + ulong get_post_header_size_for_derived(); 
+#ifndef MYSQL_CLIENT + bool write_post_header_for_derived(IO_CACHE* file); +#endif + }; + + #ifdef MYSQL_CLIENT class Unknown_log_event: public Log_event { public: - Unknown_log_event(const char* buf, bool old_format): - Log_event(buf, old_format) + /* + Even if this is an unknown event, we still pass description_event to + Log_event's ctor, this way we can extract maximum information from the + event's header (the unique ID for example). + */ + Unknown_log_event(const char* buf, const Format_description_log_event* description_event): + Log_event(buf, description_event) {} ~Unknown_log_event() {} - void print(FILE* file, bool short_form= 0, char* last_db= 0); + void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); Log_event_type get_type_code() { return UNKNOWN_EVENT;} - bool is_valid() { return 1; } + bool is_valid() const { return 1; } }; -#endif +#endif char *str_to_hex(char *to, const char *from, uint len); #endif /* _log_event_h */ diff --git a/sql/matherr.c b/sql/matherr.c index ea0c15d2feb..4998d8b4961 100644 --- a/sql/matherr.c +++ b/sql/matherr.c @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/message.mc b/sql/message.mc new file mode 100644 index 00000000000..a1a7c8cff7e --- /dev/null +++ b/sql/message.mc @@ -0,0 +1,8 @@ +MessageId = 100 +Severity = Error +Facility = Application +SymbolicName = MSG_DEFAULT +Language = English +%1For more information, see Help and Support Center at http://www.mysql.com. 
+ + diff --git a/sql/mf_iocache.cc b/sql/mf_iocache.cc index 71c8d588de7..f237f15dbc9 100644 --- a/sql/mf_iocache.cc +++ b/sql/mf_iocache.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2004 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc new file mode 100644 index 00000000000..0ef1f9794ba --- /dev/null +++ b/sql/my_decimal.cc @@ -0,0 +1,253 @@ +/* Copyright (C) 2005-2006 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "mysql_priv.h" +#include <time.h> + + +#ifndef MYSQL_CLIENT +/* + report result of decimal operation + + SYNOPSIS + decimal_operation_results() + result decimal library return code (E_DEC_* see include/decimal.h) + + TODO + Fix error messages + + RETURN + result +*/ + +int decimal_operation_results(int result) +{ + switch (result) { + case E_DEC_OK: + break; + case E_DEC_TRUNCATED: + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + WARN_DATA_TRUNCATED, ER(WARN_DATA_TRUNCATED), + "", (long)-1); + break; + case E_DEC_OVERFLOW: + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_TRUNCATED_WRONG_VALUE, + ER(ER_TRUNCATED_WRONG_VALUE), + "DECIMAL", ""); + break; + case E_DEC_DIV_ZERO: + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_DIVISION_BY_ZERO, ER(ER_DIVISION_BY_ZERO)); + break; + case E_DEC_BAD_NUM: + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + "decimal", "", "", (long)-1); + break; + case E_DEC_OOM: + my_error(ER_OUT_OF_RESOURCES, MYF(0)); + break; + default: + DBUG_ASSERT(0); + } + return result; +} + + +/* + Converting decimal to string + + SYNOPSIS + my_decimal2string() + + return + E_DEC_OK + E_DEC_TRUNCATED + E_DEC_OVERFLOW + E_DEC_OOM +*/ + +int my_decimal2string(uint mask, const my_decimal *d, + uint fixed_prec, uint fixed_dec, + char filler, String *str) +{ + int length= (fixed_prec ? 
(fixed_prec + 1) : my_decimal_string_length(d)); + int result; + if (str->alloc(length)) + return check_result(mask, E_DEC_OOM); + result= decimal2string((decimal_t*) d, (char*) str->ptr(), + &length, (int)fixed_prec, fixed_dec, + filler); + str->length(length); + return check_result(mask, result); +} + + +/* + Convert from decimal to binary representation + + SYNOPSIS + my_decimal2binary() + mask error processing mask + d number for conversion + bin pointer to buffer where to write result + prec overall number of decimal digits + scale number of decimal digits after decimal point + + NOTE + Before conversion we round number if it need but produce truncation + error in this case + + RETURN + E_DEC_OK + E_DEC_TRUNCATED + E_DEC_OVERFLOW +*/ + +int my_decimal2binary(uint mask, const my_decimal *d, char *bin, int prec, + int scale) +{ + int err1= E_DEC_OK, err2; + my_decimal rounded; + my_decimal2decimal(d, &rounded); + rounded.frac= decimal_actual_fraction(&rounded); + if (scale < rounded.frac) + { + err1= E_DEC_TRUNCATED; + /* decimal_round can return only E_DEC_TRUNCATED */ + decimal_round(&rounded, &rounded, scale, HALF_UP); + } + err2= decimal2bin(&rounded, bin, prec, scale); + if (!err2) + err2= err1; + return check_result(mask, err2); +} + + +/* + Convert string for decimal when string can be in some multibyte charset + + SYNOPSIS + str2my_decimal() + mask error processing mask + from string to process + length length of given string + charset charset of given string + decimal_value buffer for result storing + + RESULT + E_DEC_OK + E_DEC_TRUNCATED + E_DEC_OVERFLOW + E_DEC_BAD_NUM + E_DEC_OOM +*/ + +int str2my_decimal(uint mask, const char *from, uint length, + CHARSET_INFO *charset, my_decimal *decimal_value) +{ + char *end, *from_end; + int err; + char buff[STRING_BUFFER_USUAL_SIZE]; + String tmp(buff, sizeof(buff), &my_charset_bin); + if (charset->mbminlen > 1) + { + uint dummy_errors; + tmp.copy(from, length, charset, &my_charset_latin1, &dummy_errors); + 
from= tmp.ptr(); + length= tmp.length(); + charset= &my_charset_bin; + } + from_end= end= (char*) from+length; + err= string2decimal((char *)from, (decimal_t*) decimal_value, &end); + if (end != from_end && !err) + { + /* Give warning if there is something other than end space */ + for ( ; end < from_end; end++) + { + if (!my_isspace(&my_charset_latin1, *end)) + { + err= E_DEC_TRUNCATED; + break; + } + } + } + check_result_and_overflow(mask, err, decimal_value); + return err; +} + + +my_decimal *date2my_decimal(TIME *ltime, my_decimal *dec) +{ + longlong date; + date = (ltime->year*100L + ltime->month)*100L + ltime->day; + if (ltime->time_type > MYSQL_TIMESTAMP_DATE) + date= ((date*100L + ltime->hour)*100L+ ltime->minute)*100L + ltime->second; + if (int2my_decimal(E_DEC_FATAL_ERROR, date, FALSE, dec)) + return dec; + if (ltime->second_part) + { + dec->buf[(dec->intg-1) / 9 + 1]= ltime->second_part * 1000; + dec->frac= 6; + } + return dec; +} + + +#ifndef DBUG_OFF +/* routines for debugging print */ + +/* print decimal */ +void +print_decimal(const my_decimal *dec) +{ + fprintf(DBUG_FILE, + "\nDecimal: sign: %d intg: %d frac: %d \n\ +%09d,%09d,%09d,%09d,%09d,%09d,%09d,%09d\n", + dec->sign(), dec->intg, dec->frac, + dec->buf[0], dec->buf[1], dec->buf[2], dec->buf[3], + dec->buf[4], dec->buf[5], dec->buf[6], dec->buf[7]); +} + + +/* print decimal with its binary representation */ +void +print_decimal_buff(const my_decimal *dec, const byte* ptr, int length) +{ + print_decimal(dec); + fprintf(DBUG_FILE, "Record: "); + for (int i= 0; i < length; i++) + { + fprintf(DBUG_FILE, "%02X ", (uint)((uchar *)ptr)[i]); + } + fprintf(DBUG_FILE, "\n"); +} + + +const char *dbug_decimal_as_string(char *buff, const my_decimal *val) +{ + int length= DECIMAL_MAX_STR_LENGTH; + if (!val) + return "NULL"; + (void)decimal2string((decimal_t*) val, buff, &length, 0,0,0); + return buff; +} + +#endif /*DBUG_OFF*/ + + +#endif /*MYSQL_CLIENT*/ diff --git a/sql/my_decimal.h b/sql/my_decimal.h new 
file mode 100644 index 00000000000..45270150d22 --- /dev/null +++ b/sql/my_decimal.h @@ -0,0 +1,391 @@ +/* Copyright (C) 2005-2006 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + It is interface module to fixed precision decimals library. + + Most functions use 'uint mask' as parameter, if during operation error + which fit in this mask is detected then it will be processed automatically + here. 
(errors are E_DEC_* constants, see include/decimal.h) + + Most function are just inline wrappers around library calls +*/ + +#ifndef my_decimal_h +#define my_decimal_h + +C_MODE_START +#include <decimal.h> +C_MODE_END + +#define DECIMAL_LONGLONG_DIGITS 22 +#define DECIMAL_LONG_DIGITS 10 +#define DECIMAL_LONG3_DIGITS 8 + +/* maximum length of buffer in our big digits (uint32) */ +#define DECIMAL_BUFF_LENGTH 9 +/* + maximum guaranteed precision of number in decimal digits (number of our + digits * number of decimal digits in one our big digit - number of decimal + digits in one our big digit decreased on 1 (because we always put decimal + point on the border of our big digits)) +*/ +#define DECIMAL_MAX_PRECISION ((DECIMAL_BUFF_LENGTH * 9) - 8*2) +#define DECIMAL_MAX_SCALE 30 +#define DECIMAL_NOT_SPECIFIED 31 + +/* + maximum length of string representation (number of maximum decimal + digits + 1 position for sign + 1 position for decimal point) +*/ +#define DECIMAL_MAX_STR_LENGTH (DECIMAL_MAX_PRECISION + 2) +/* + maximum size of packet length +*/ +#define DECIMAL_MAX_FIELD_SIZE DECIMAL_MAX_PRECISION + + +inline uint my_decimal_size(uint precision, uint scale) +{ + /* + Always allocate more space to allow library to put decimal point + where it want + */ + return decimal_size(precision, scale) + 1; +} + + +inline int my_decimal_int_part(uint precision, uint decimals) +{ + return precision - ((decimals == DECIMAL_NOT_SPECIFIED) ? 0 : decimals); +} + + +/* + my_decimal class limits 'decimal_t' type to what we need in MySQL + It contains internally all necessary space needed by the instance so + no extra memory is needed. 
One should call fix_buffer_pointer() function + when he moves my_decimal objects in memory +*/ + +class my_decimal :public decimal_t +{ + decimal_digit_t buffer[DECIMAL_BUFF_LENGTH]; + +public: + + void init() + { + len= DECIMAL_BUFF_LENGTH; + buf= buffer; +#if !defined (HAVE_purify) && !defined(DBUG_OFF) + /* Set buffer to 'random' value to find wrong buffer usage */ + for (uint i= 0; i < DECIMAL_BUFF_LENGTH; i++) + buffer[i]= i; +#endif + } + my_decimal() + { + init(); + } + void fix_buffer_pointer() { buf= buffer; } + + bool sign() const { return decimal_t::sign; } + void sign(bool s) { decimal_t::sign= s; } + uint precision() const { return intg + frac; } +}; + + +#ifndef DBUG_OFF +void print_decimal(const my_decimal *dec); +void print_decimal_buff(const my_decimal *dec, const byte* ptr, int length); +const char *dbug_decimal_as_string(char *buff, const my_decimal *val); +#else +#define dbug_decimal_as_string(A) NULL +#endif + +#ifndef MYSQL_CLIENT +int decimal_operation_results(int result); +#else +inline int decimal_operation_results(int result) +{ + return result; +} +#endif /*MYSQL_CLIENT*/ + +inline +void max_my_decimal(my_decimal *to, int precision, int frac) +{ + DBUG_ASSERT((precision <= DECIMAL_MAX_PRECISION)&& + (frac <= DECIMAL_MAX_SCALE)); + max_decimal(precision, frac, (decimal_t*) to); +} + +inline void max_internal_decimal(my_decimal *to) +{ + max_my_decimal(to, DECIMAL_MAX_PRECISION, 0); +} + +inline int check_result(uint mask, int result) +{ + if (result & mask) + decimal_operation_results(result); + return result; +} + +inline int check_result_and_overflow(uint mask, int result, my_decimal *val) +{ + if (check_result(mask, result) & E_DEC_OVERFLOW) + { + bool sign= val->sign(); + val->fix_buffer_pointer(); + max_internal_decimal(val); + val->sign(sign); + } + return result; +} + +inline uint my_decimal_length_to_precision(uint length, uint scale, + bool unsigned_flag) +{ + return (uint) (length - (scale>0 ? 1:0) - (unsigned_flag ? 
0:1)); +} + +inline uint32 my_decimal_precision_to_length(uint precision, uint8 scale, + bool unsigned_flag) +{ + set_if_smaller(precision, DECIMAL_MAX_PRECISION); + return (uint32)(precision + (scale>0 ? 1:0) + (unsigned_flag ? 0:1)); +} + +inline +int my_decimal_string_length(const my_decimal *d) +{ + return decimal_string_size(d); +} + + +inline +int my_decimal_max_length(const my_decimal *d) +{ + /* -1 because we do not count \0 */ + return decimal_string_size(d) - 1; +} + + +inline +int my_decimal_get_binary_size(uint precision, uint scale) +{ + return decimal_bin_size((int)precision, (int)scale); +} + + +inline +void my_decimal2decimal(const my_decimal *from, my_decimal *to) +{ + *to= *from; + to->fix_buffer_pointer(); +} + + +int my_decimal2binary(uint mask, const my_decimal *d, char *bin, int prec, + int scale); + + +inline +int binary2my_decimal(uint mask, const char *bin, my_decimal *d, int prec, + int scale) +{ + return check_result(mask, bin2decimal((char *)bin, (decimal_t*) d, prec, + scale)); +} + + +inline +int my_decimal_set_zero(my_decimal *d) +{ + decimal_make_zero(((decimal_t*) d)); + return 0; +} + + +inline +bool my_decimal_is_zero(const my_decimal *decimal_value) +{ + return decimal_is_zero((decimal_t*) decimal_value); +} + + +inline +int my_decimal_round(uint mask, const my_decimal *from, int scale, + bool truncate, my_decimal *to) +{ + return check_result(mask, decimal_round((decimal_t*) from, to, scale, + (truncate ? 
TRUNCATE : HALF_UP))); +} + + +inline +int my_decimal_floor(uint mask, const my_decimal *from, my_decimal *to) +{ + return check_result(mask, decimal_round((decimal_t*) from, to, 0, FLOOR)); +} + + +inline +int my_decimal_ceiling(uint mask, const my_decimal *from, my_decimal *to) +{ + return check_result(mask, decimal_round((decimal_t*) from, to, 0, CEILING)); +} + + +#ifndef MYSQL_CLIENT +int my_decimal2string(uint mask, const my_decimal *d, uint fixed_prec, + uint fixed_dec, char filler, String *str); +#endif + +inline +int my_decimal2int(uint mask, const my_decimal *d, my_bool unsigned_flag, + longlong *l) +{ + my_decimal rounded; + /* decimal_round can return only E_DEC_TRUNCATED */ + decimal_round((decimal_t*)d, &rounded, 0, HALF_UP); + return check_result(mask, (unsigned_flag ? + decimal2ulonglong(&rounded, (ulonglong *)l) : + decimal2longlong(&rounded, l))); +} + + +inline +int my_decimal2double(uint mask, const my_decimal *d, double *result) +{ + /* No need to call check_result as this will always succeed */ + return decimal2double((decimal_t*) d, result); +} + + +inline +int str2my_decimal(uint mask, const char *str, my_decimal *d, char **end) +{ + return check_result_and_overflow(mask, string2decimal(str,(decimal_t*)d,end), + d); +} + + +int str2my_decimal(uint mask, const char *from, uint length, + CHARSET_INFO *charset, my_decimal *decimal_value); + +#if defined(MYSQL_SERVER) || defined(EMBEDDED_LIBRARY) +inline +int string2my_decimal(uint mask, const String *str, my_decimal *d) +{ + return str2my_decimal(mask, str->ptr(), str->length(), str->charset(), d); +} + + +my_decimal *date2my_decimal(TIME *ltime, my_decimal *dec); + + +#endif /*defined(MYSQL_SERVER) || defined(EMBEDDED_LIBRARY) */ + +inline +int double2my_decimal(uint mask, double val, my_decimal *d) +{ + return check_result_and_overflow(mask, double2decimal(val, (decimal_t*)d), d); +} + + +inline +int int2my_decimal(uint mask, longlong i, my_bool unsigned_flag, my_decimal *d) +{ + return 
check_result(mask, (unsigned_flag ? + ulonglong2decimal((ulonglong)i, d) : + longlong2decimal(i, d))); +} + + +inline +void my_decimal_neg(decimal_t *arg) +{ + if (decimal_is_zero(arg)) + { + arg->sign= 0; + return; + } + decimal_neg(arg); +} + + +inline +int my_decimal_add(uint mask, my_decimal *res, const my_decimal *a, + const my_decimal *b) +{ + return check_result_and_overflow(mask, + decimal_add((decimal_t*)a,(decimal_t*)b,res), + res); +} + + +inline +int my_decimal_sub(uint mask, my_decimal *res, const my_decimal *a, + const my_decimal *b) +{ + return check_result_and_overflow(mask, + decimal_sub((decimal_t*)a,(decimal_t*)b,res), + res); +} + + +inline +int my_decimal_mul(uint mask, my_decimal *res, const my_decimal *a, + const my_decimal *b) +{ + return check_result_and_overflow(mask, + decimal_mul((decimal_t*)a,(decimal_t*)b,res), + res); +} + + +inline +int my_decimal_div(uint mask, my_decimal *res, const my_decimal *a, + const my_decimal *b, int div_scale_inc) +{ + return check_result_and_overflow(mask, + decimal_div((decimal_t*)a,(decimal_t*)b,res, + div_scale_inc), + res); +} + + +inline +int my_decimal_mod(uint mask, my_decimal *res, const my_decimal *a, + const my_decimal *b) +{ + return check_result_and_overflow(mask, + decimal_mod((decimal_t*)a,(decimal_t*)b,res), + res); +} + + +/* Returns -1 if a<b, 1 if a>b and 0 if a==b */ +inline +int my_decimal_cmp(const my_decimal *a, const my_decimal *b) +{ + return decimal_cmp((decimal_t*) a, (decimal_t*) b); +} + +#endif /*my_decimal_h*/ + diff --git a/sql/my_lock.c b/sql/my_lock.c index 7f47256703a..cbd00521a9b 100644 --- a/sql/my_lock.c +++ b/sql/my_lock.c @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) 
any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index e5ac91e1814..de567eacbeb 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -14,6 +13,18 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* + Mostly this file is used in the server. But a little part of it is used in + mysqlbinlog too (definition of SELECT_DISTINCT and others). + The consequence is that 90% of the file is wrapped in #ifndef MYSQL_CLIENT, + except the part which must be in the server and in the client. 
+*/ + +#ifndef MYSQL_PRIV_H_INCLUDED +#define MYSQL_PRIV_H_INCLUDED + +#ifndef MYSQL_CLIENT + #include <my_global.h> #include <mysql_version.h> #include <mysql_embed.h> @@ -25,6 +36,7 @@ #include <thr_lock.h> #include <my_base.h> /* Needed by field.h */ #include "sql_bitmap.h" +#include "sql_array.h" #ifdef __EMX__ #undef write /* remove pthread.h macro definition for EMX */ @@ -32,8 +44,26 @@ /* TODO convert all these three maps to Bitmap classes */ typedef ulonglong table_map; /* Used for table bits in join */ -typedef Bitmap<64> key_map; /* Used for finding keys */ +#if MAX_INDEXES <= 64 +typedef Bitmap<64> key_map; /* Used for finding keys */ +#else +typedef Bitmap<((MAX_INDEXES+7)/8*8)> key_map; /* Used for finding keys */ +#endif typedef ulong key_part_map; /* Used for finding key parts */ +typedef ulong nesting_map; /* Used for flags of nesting constructs */ +/* + Used to identify NESTED_JOIN structures within a join (applicable only to + structures that have not been simplified away and embed more the one + element) +*/ +typedef ulonglong nested_join_map; + +/* query_id */ +typedef ulonglong query_id_t; +extern query_id_t global_query_id; + +/* increment query_id and return it. 
*/ +inline query_id_t next_query_id() { return global_query_id++; } /* useful constants */ extern const key_map key_map_empty; @@ -55,7 +85,7 @@ char *sql_strmake_with_convert(const char *str, uint32 arg_length, CHARSET_INFO *from_cs, uint32 max_res_length, CHARSET_INFO *to_cs, uint32 *result_length); -void kill_one_thread(THD *thd, ulong id); +void kill_one_thread(THD *thd, ulong id, bool only_kill_query); bool net_request_file(NET* net, const char* fname); char* query_table_status(THD *thd,const char *db,const char *table_name); @@ -67,10 +97,33 @@ void net_set_read_timeout(NET *net, uint timeout); #define PREV_BITS(type,A) ((type) (((type) 1 << (A)) -1)) #define all_bits_set(A,B) ((A) & (B) != (B)) +#define WARN_DEPRECATED(Thd,Ver,Old,New) \ + do { \ + DBUG_ASSERT(strncmp(Ver, MYSQL_SERVER_VERSION, sizeof(Ver)-1) > 0); \ + if (((gptr)Thd) != NULL) \ + push_warning_printf(((THD *)Thd), MYSQL_ERROR::WARN_LEVEL_WARN, \ + ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX), \ + (Old), (Ver), (New)); \ + else \ + sql_print_warning("The syntax %s is deprecated and will be removed " \ + "in MySQL %s. 
Please use %s instead.", (Old), (Ver), (New)); \ + } while(0) + extern CHARSET_INFO *system_charset_info, *files_charset_info ; extern CHARSET_INFO *national_charset_info, *table_alias_charset; +enum Derivation +{ + DERIVATION_IGNORABLE= 5, + DERIVATION_COERCIBLE= 4, + DERIVATION_SYSCONST= 3, + DERIVATION_IMPLICIT= 2, + DERIVATION_NONE= 1, + DERIVATION_EXPLICIT= 0 +}; + + typedef struct my_locale_st { uint number; @@ -81,6 +134,17 @@ typedef struct my_locale_st TYPELIB *ab_month_names; TYPELIB *day_names; TYPELIB *ab_day_names; +#ifdef __cplusplus + my_locale_st(uint number_par, + const char *name_par, const char *descr_par, bool is_ascii_par, + TYPELIB *month_names_par, TYPELIB *ab_month_names_par, + TYPELIB *day_names_par, TYPELIB *ab_day_names_par) : + number(number_par), + name(name_par), description(descr_par), is_ascii(is_ascii_par), + month_names(month_names_par), ab_month_names(ab_month_names_par), + day_names(day_names_par), ab_day_names(ab_day_names_par) + {} +#endif } MY_LOCALE; extern MY_LOCALE my_locale_en_US; @@ -101,8 +165,20 @@ MY_LOCALE *my_locale_by_number(uint number); #define USER_VARS_HASH_SIZE 16 #define TABLE_OPEN_CACHE_MIN 64 #define TABLE_OPEN_CACHE_DEFAULT 64 -#define STACK_MIN_SIZE 8192 // Abort if less stack during eval. -#define STACK_BUFF_ALLOC 64 // For stack overrun checks + +/* + Value of 9236 discovered through binary search 2006-09-26 on Ubuntu Dapper + Drake, libc6 2.3.6-0ubuntu2, Linux kernel 2.6.15-27-686, on x86. (Added + 100 bytes as reasonable buffer against growth and other environments' + requirements.) + + Feel free to raise this by the smallest amount you can to get the + "execution_constants" test to pass. + */ +#define STACK_MIN_SIZE 12000 // Abort if less stack during eval. + +#define STACK_MIN_SIZE_FOR_OPEN 1024*80 +#define STACK_BUFF_ALLOC 256 // For stack overrun checks #ifndef MYSQLD_NET_RETRY_COUNT #define MYSQLD_NET_RETRY_COUNT 10 // Abort read after this many int. 
#endif @@ -124,7 +200,7 @@ MY_LOCALE *my_locale_by_number(uint number); The following parameters is to decide when to use an extra cache to optimise seeks when reading a big table in sorted order */ -#define MIN_FILE_LENGTH_TO_USE_ROW_CACHE (16L*1024*1024) +#define MIN_FILE_LENGTH_TO_USE_ROW_CACHE (10L*1024*1024) #define MIN_ROWS_TO_USE_TABLE_CACHE 100 #define MIN_ROWS_TO_USE_BULK_INSERT 100 @@ -136,6 +212,26 @@ MY_LOCALE *my_locale_by_number(uint number); #define TIME_FOR_COMPARE 5 // 5 compares == one read /* + Number of comparisons of table rowids equivalent to reading one row from a + table. +*/ +#define TIME_FOR_COMPARE_ROWID (TIME_FOR_COMPARE*2) + +/* + For sequential disk seeks the cost formula is: + DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST * #blocks_to_skip + + The cost of average seek + DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST*BLOCKS_IN_AVG_SEEK =1.0. +*/ +#define DISK_SEEK_BASE_COST ((double)0.5) + +#define BLOCKS_IN_AVG_SEEK 128 + +#define DISK_SEEK_PROP_COST ((double)0.5/BLOCKS_IN_AVG_SEEK) + + +/* Number of rows in a reference table when refereed through a not unique key. This value is only used when we don't know anything about the key distribution. 
@@ -160,6 +256,17 @@ MY_LOCALE *my_locale_by_number(uint number); #define FLUSH_TIME 0 /* Don't flush tables */ #define MAX_CONNECT_ERRORS 10 // errors before disabling host +#ifdef HAVE_INNOBASE_DB +#define IF_INNOBASE_DB(A, B) (A) +#else +#define IF_INNOBASE_DB(A, B) (B) +#endif +#ifdef __NETWARE__ +#define IF_NETWARE(A,B) (A) +#else +#define IF_NETWARE(A,B) (B) +#endif + #if defined(__WIN__) || defined(OS2) #define IF_WIN(A,B) (A) #undef FLUSH_TIME @@ -189,64 +296,92 @@ MY_LOCALE *my_locale_by_number(uint number); #define TEST_CORE_ON_SIGNAL 256 /* Give core if signal */ #define TEST_NO_STACKTRACE 512 #define TEST_SIGINT 1024 /* Allow sigint on threads */ -#define TEST_SYNCHRONIZATION 2048 /* get server to do sleep in some - places */ - -/* options for select set by the yacc parser (stored in lex->options) */ -#define SELECT_DISTINCT (1L << 0) -#define SELECT_STRAIGHT_JOIN (1L << 1) -#define SELECT_DESCRIBE (1L << 2) -#define SELECT_SMALL_RESULT (1L << 3) -#define SELECT_BIG_RESULT (1L << 4) -#define OPTION_FOUND_ROWS (1L << 5) -#define OPTION_TO_QUERY_CACHE (1L << 6) -#define SELECT_NO_JOIN_CACHE (1L << 7) /* Intern */ -#define OPTION_BIG_TABLES (1L << 8) /* for SQL OPTION */ -#define OPTION_BIG_SELECTS (1L << 9) /* for SQL OPTION */ -#define OPTION_LOG_OFF (1L << 10) -#define OPTION_UPDATE_LOG (1L << 11) /* update log flag */ -#define TMP_TABLE_ALL_COLUMNS (1L << 12) -#define OPTION_WARNINGS (1L << 13) -#define OPTION_AUTO_IS_NULL (1L << 14) -#define OPTION_FOUND_COMMENT (1L << 15) -#define OPTION_SAFE_UPDATES (1L << 16) -#define OPTION_BUFFER_RESULT (1L << 17) -#define OPTION_BIN_LOG (1L << 18) -#define OPTION_NOT_AUTOCOMMIT (1L << 19) -#define OPTION_BEGIN (1L << 20) -#define OPTION_TABLE_LOCK (1L << 21) -#define OPTION_QUICK (1L << 22) -#define OPTION_QUOTE_SHOW_CREATE (1L << 23) -#define OPTION_INTERNAL_SUBTRANSACTIONS (1L << 24) +#define TEST_SYNCHRONIZATION 2048 /* get server to do sleep in + some places */ +#endif + +/* + This is included in the server 
and in the client. + Options for select set by the yacc parser (stored in lex->options). + + XXX: + log_event.h defines OPTIONS_WRITTEN_TO_BIN_LOG to specify what THD + options list are written into binlog. These options can NOT change their + values, or it will break replication between version. + + context is encoded as following: + SELECT - SELECT_LEX_NODE::options + THD - THD::options + intern - neither. used only as + func(..., select_node->options | thd->options | OPTION_XXX, ...) + + TODO: separate three contexts above, move them to separate bitfields. +*/ + +#define SELECT_DISTINCT (ULL(1) << 0) // SELECT, user +#define SELECT_STRAIGHT_JOIN (ULL(1) << 1) // SELECT, user +#define SELECT_DESCRIBE (ULL(1) << 2) // SELECT, user +#define SELECT_SMALL_RESULT (ULL(1) << 3) // SELECT, user +#define SELECT_BIG_RESULT (ULL(1) << 4) // SELECT, user +#define OPTION_FOUND_ROWS (ULL(1) << 5) // SELECT, user +#define OPTION_TO_QUERY_CACHE (ULL(1) << 6) // SELECT, user +#define SELECT_NO_JOIN_CACHE (ULL(1) << 7) // intern +#define OPTION_BIG_TABLES (ULL(1) << 8) // THD, user +#define OPTION_BIG_SELECTS (ULL(1) << 9) // THD, user +#define OPTION_LOG_OFF (ULL(1) << 10) // THD, user +#define OPTION_UPDATE_LOG (ULL(1) << 11) // THD, user, unused +#define TMP_TABLE_ALL_COLUMNS (ULL(1) << 12) // SELECT, intern +#define OPTION_WARNINGS (ULL(1) << 13) // THD, user +#define OPTION_AUTO_IS_NULL (ULL(1) << 14) // THD, user, binlog +#define OPTION_FOUND_COMMENT (ULL(1) << 15) // SELECT, intern, parser +#define OPTION_SAFE_UPDATES (ULL(1) << 16) // THD, user +#define OPTION_BUFFER_RESULT (ULL(1) << 17) // SELECT, user +#define OPTION_BIN_LOG (ULL(1) << 18) // THD, user +#define OPTION_NOT_AUTOCOMMIT (ULL(1) << 19) // THD, user +#define OPTION_BEGIN (ULL(1) << 20) // THD, intern +#define OPTION_TABLE_LOCK (ULL(1) << 21) // THD, intern +#define OPTION_QUICK (ULL(1) << 22) // SELECT (for DELETE) +#define OPTION_QUOTE_SHOW_CREATE (ULL(1) << 23) // THD, user + +/* Thr following is used to 
detect a conflict with DISTINCT + in the user query has requested */ +#define SELECT_ALL (ULL(1) << 24) // SELECT, user, parser /* Set if we are updating a non-transaction safe table */ -#define OPTION_STATUS_NO_TRANS_UPDATE (1L << 25) +#define OPTION_STATUS_NO_TRANS_UPDATE (ULL(1) << 25) // THD, intern /* The following can be set when importing tables in a 'wrong order' to suppress foreign key checks */ -#define OPTION_NO_FOREIGN_KEY_CHECKS (1L << 26) +#define OPTION_NO_FOREIGN_KEY_CHECKS (ULL(1) << 26) // THD, user, binlog /* The following speeds up inserts to InnoDB tables by suppressing unique key checks in some cases */ -#define OPTION_RELAXED_UNIQUE_CHECKS (1L << 27) -#define SELECT_NO_UNLOCK (1L << 28) -/* Thr following is used to detect a conflict with DISTINCT - in the user query has requested */ -#define SELECT_ALL (1L << 29) - +#define OPTION_RELAXED_UNIQUE_CHECKS (ULL(1) << 27) // THD, user, binlog +#define SELECT_NO_UNLOCK (ULL(1) << 28) // SELECT, intern +#define OPTION_SCHEMA_TABLE (ULL(1) << 29) // SELECT, intern +/* Flag set if setup_tables already done */ +#define OPTION_SETUP_TABLES_DONE (ULL(1) << 30) // intern +/* If not set then the thread will ignore all warnings with level notes. */ +#define OPTION_SQL_NOTES (ULL(1) << 31) // THD, user /* Force the used temporary table to be a MyISAM table (because we will use fulltext functions when reading from it. */ -#define TMP_TABLE_FORCE_MYISAM (1L << 30) +#define TMP_TABLE_FORCE_MYISAM (ULL(1) << 32) + + +/* + Maximum length of time zone name that we support + (Time zone name is char(64) in db). mysqlbinlog needs it. +*/ +#define MAX_TIME_ZONE_NAME_LENGTH 72 -/* If set to 0, then the thread will ignore all warnings with level notes. 
- Set by executing SET SQL_NOTES=1 */ -#define OPTION_SQL_NOTES (1L << 31) +/* The rest of the file is included in the server only */ +#ifndef MYSQL_CLIENT /* Bits for different SQL modes modes (including ANSI mode) */ -#define MODE_REAL_AS_FLOAT 1 -#define MODE_PIPES_AS_CONCAT 2 -#define MODE_ANSI_QUOTES 4 +#define MODE_REAL_AS_FLOAT 1 +#define MODE_PIPES_AS_CONCAT 2 +#define MODE_ANSI_QUOTES 4 #define MODE_IGNORE_SPACE 8 #define MODE_NOT_USED 16 #define MODE_ONLY_FULL_GROUP_BY 32 @@ -258,12 +393,34 @@ MY_LOCALE *my_locale_by_number(uint number); #define MODE_DB2 2048 #define MODE_MAXDB 4096 #define MODE_NO_KEY_OPTIONS 8192 -#define MODE_NO_TABLE_OPTIONS 16384 -#define MODE_NO_FIELD_OPTIONS 32768 -#define MODE_MYSQL323 65536 -#define MODE_MYSQL40 (MODE_MYSQL323*2) -#define MODE_ANSI (MODE_MYSQL40*2) -#define MODE_NO_AUTO_VALUE_ON_ZERO (MODE_ANSI*2) +#define MODE_NO_TABLE_OPTIONS 16384 +#define MODE_NO_FIELD_OPTIONS 32768 +#define MODE_MYSQL323 65536 +#define MODE_MYSQL40 (MODE_MYSQL323*2) +#define MODE_ANSI (MODE_MYSQL40*2) +#define MODE_NO_AUTO_VALUE_ON_ZERO (MODE_ANSI*2) +#define MODE_NO_BACKSLASH_ESCAPES (MODE_NO_AUTO_VALUE_ON_ZERO*2) +#define MODE_STRICT_TRANS_TABLES (MODE_NO_BACKSLASH_ESCAPES*2) +#define MODE_STRICT_ALL_TABLES (MODE_STRICT_TRANS_TABLES*2) +#define MODE_NO_ZERO_IN_DATE (MODE_STRICT_ALL_TABLES*2) +#define MODE_NO_ZERO_DATE (MODE_NO_ZERO_IN_DATE*2) +#define MODE_INVALID_DATES (MODE_NO_ZERO_DATE*2) +#define MODE_ERROR_FOR_DIVISION_BY_ZERO (MODE_INVALID_DATES*2) +#define MODE_TRADITIONAL (MODE_ERROR_FOR_DIVISION_BY_ZERO*2) +#define MODE_NO_AUTO_CREATE_USER (MODE_TRADITIONAL*2) +#define MODE_HIGH_NOT_PRECEDENCE (MODE_NO_AUTO_CREATE_USER*2) +#define MODE_NO_ENGINE_SUBSTITUTION (MODE_HIGH_NOT_PRECEDENCE*2) +/* + Replication uses 8 bytes to store SQL_MODE in the binary log. 
The day you + use strictly more than 64 bits by adding one more define above, you should + contact the replication team because the replication code should then be + updated (to store more bytes on disk). + + NOTE: When adding new SQL_MODE types, make sure to also add them to + the scripts used for creating the MySQL system tables + in scripts/mysql_system_tables.sql and scripts/mysql_system_tables_fix.sql + +*/ #define RAID_BLOCK_SIZE 1024 @@ -277,7 +434,11 @@ MY_LOCALE *my_locale_by_number(uint number); #define UNCACHEABLE_EXPLAIN 8 /* Don't evaluate subqueries in prepare even if they're not correlated */ #define UNCACHEABLE_PREPARE 16 +/* For uncorrelated SELECT in an UNION with some correlated SELECTs */ +#define UNCACHEABLE_UNITED 32 +/* Used to check GROUP BY list in the MODE_ONLY_FULL_GROUP_BY mode */ +#define UNDEF_POS (-1) #ifdef EXTRA_DEBUG /* Sync points allow us to force the server to reach a certain line of code @@ -301,6 +462,10 @@ void debug_sync_point(const char* lock_name, uint lock_timeout); #define SHOW_LOG_STATUS_FREE "FREE" #define SHOW_LOG_STATUS_INUSE "IN USE" +struct st_table_list; +class String; +void view_store_options(THD *thd, st_table_list *table, String *buff); + /* Options to add_table_to_list() */ #define TL_OPTION_UPDATING 1 #define TL_OPTION_FORCE_INDEX 2 @@ -319,17 +484,19 @@ void debug_sync_point(const char* lock_name, uint lock_timeout); #define WEEK_YEAR 2 #define WEEK_FIRST_WEEKDAY 4 +#define STRING_BUFFER_USUAL_SIZE 80 + enum enum_parsing_place { NO_MATTER, IN_HAVING, SELECT_LIST, - IN_WHERE + IN_WHERE, + IN_ON }; struct st_table; class THD; -class Item_arena; /* Struct to handle simple linked lists */ @@ -338,6 +505,7 @@ typedef struct st_sql_list { byte *first; byte **next; + st_sql_list() {} /* Remove gcc warning */ inline void empty() { elements=0; @@ -362,11 +530,18 @@ typedef struct st_sql_list { first= save->first; elements+= save->elements; } + inline void push_back(struct st_sql_list *save) + { + if (save->first) + { 
+ *next= save->first; + next= save->next; + elements+= save->elements; + } + } } SQL_LIST; -uint nr_of_decimals(const char *str); /* Neaded by sql_string.h */ - extern pthread_key(THD*, THR_THD); inline THD *_current_thd(void) { @@ -374,39 +549,80 @@ inline THD *_current_thd(void) } #define current_thd _current_thd() +/* + External variables +*/ +extern ulong server_id, concurrency; + + +typedef my_bool (*qc_engine_callback)(THD *thd, char *table_key, + uint key_length, + ulonglong *engine_data); #include "sql_string.h" #include "sql_list.h" #include "sql_map.h" +#include "my_decimal.h" #include "handler.h" +#include "parse_file.h" #include "table.h" +#include "sql_error.h" #include "field.h" /* Field definitions */ #include "protocol.h" #include "sql_udf.h" class user_var_entry; +class Security_context; enum enum_var_type { - OPT_DEFAULT, OPT_SESSION, OPT_GLOBAL + OPT_DEFAULT= 0, OPT_SESSION, OPT_GLOBAL }; class sys_var; class Comp_creator; typedef Comp_creator* (*chooser_compare_func_creator)(bool invert); #include "item.h" +extern my_decimal decimal_zero; + /* sql_parse.cc */ void free_items(Item *item); void cleanup_items(Item *item); class THD; void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0); -int check_one_table_access(THD *thd, ulong privilege, +bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables); +bool check_single_table_access(THD *thd, ulong privilege, + TABLE_LIST *tables); +bool check_routine_access(THD *thd,ulong want_access,char *db,char *name, + bool is_proc, bool no_errors); +bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table); bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *table_list); -int multi_update_precheck(THD *thd, TABLE_LIST *tables); -int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count); -int update_precheck(THD *thd, TABLE_LIST *tables); -int delete_precheck(THD *thd, TABLE_LIST *tables); -int insert_precheck(THD *thd, TABLE_LIST 
*tables); -int create_table_precheck(THD *thd, TABLE_LIST *tables, - TABLE_LIST *create_table); +bool check_some_routine_access(THD *thd, const char *db, const char *name, bool is_proc); +bool multi_update_precheck(THD *thd, TABLE_LIST *tables); +bool multi_delete_precheck(THD *thd, TABLE_LIST *tables); +bool mysql_multi_update_prepare(THD *thd); +bool mysql_multi_delete_prepare(THD *thd); +bool mysql_insert_select_prepare(THD *thd); +bool update_precheck(THD *thd, TABLE_LIST *tables); +bool delete_precheck(THD *thd, TABLE_LIST *tables); +bool insert_precheck(THD *thd, TABLE_LIST *tables); +bool create_table_precheck(THD *thd, TABLE_LIST *tables, + TABLE_LIST *create_table); +int append_query_string(CHARSET_INFO *csinfo, + String const *from, String *to); + +void get_default_definer(THD *thd, LEX_USER *definer); +LEX_USER *create_default_definer(THD *thd); +LEX_USER *create_definer(THD *thd, LEX_STRING *user_name, LEX_STRING *host_name); +LEX_USER *get_current_user(THD *thd, LEX_USER *user); +bool check_string_length(LEX_STRING *str, + const char *err_msg, uint max_length); + +enum enum_mysql_completiontype { + ROLLBACK_RELEASE=-2, ROLLBACK=1, ROLLBACK_AND_CHAIN=7, + COMMIT_RELEASE=-1, COMMIT=0, COMMIT_AND_CHAIN=6 +}; + +int end_trans(THD *thd, enum enum_mysql_completiontype completion); + Item *negate_expression(THD *thd, Item *expr); #include "sql_class.h" #include "sql_acl.h" @@ -418,6 +634,8 @@ struct Query_cache_query_flags { unsigned int client_long_flag:1; unsigned int client_protocol_41:1; + unsigned int more_results_exists:1; + unsigned int pkt_nr; uint character_set_client_num; uint character_set_results_num; uint collation_connection_num; @@ -460,45 +678,50 @@ struct Query_cache_query_flags #define query_cache_invalidate_by_MyISAM_filename_ref NULL #endif /*HAVE_QUERY_CACHE*/ -int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent); -int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create); -int mysql_rm_db(THD *thd,char 
*db,bool if_exists, bool silent); +bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent); +bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create); +bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent); void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, ushort flags); -int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, - my_bool drop_temporary); +bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, + my_bool drop_temporary); int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, - bool drop_temporary, bool log_query); + bool drop_temporary, bool drop_view, bool log_query); int mysql_rm_table_part2_with_lock(THD *thd, TABLE_LIST *tables, bool if_exists, bool drop_temporary, bool log_query); int quick_rm_table(enum db_type base,const char *db, const char *table_name); +void close_cached_table(THD *thd, TABLE *table); bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list); -bool mysql_change_db(THD *thd,const char *name); +bool do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, + char *new_table_name, char *new_table_alias, + bool skip_error); +bool mysql_change_db(THD *thd,const char *name,bool no_access_check); void mysql_parse(THD *thd,char *inBuf,uint length); bool mysql_test_parse_for_slave(THD *thd,char *inBuf,uint length); bool is_update_query(enum enum_sql_command command); -bool alloc_query(THD *thd, char *packet, ulong packet_length); +bool alloc_query(THD *thd, const char *packet, uint packet_length); void mysql_init_select(LEX *lex); -void mysql_init_query(THD *thd, uchar *buf, uint length); void mysql_reset_thd_for_next_command(THD *thd); +void mysql_init_query(THD *thd, uchar *buf, uint length); bool mysql_new_select(LEX *lex, bool move_down); void create_select_for_variable(const char *var_name); void mysql_init_multi_delete(LEX *lex); -void fix_multi_delete_lex(LEX* lex); +bool multi_delete_set_locks_and_link_aux_tables(LEX *lex); void 
init_max_user_conn(void); void init_update_queries(void); void free_max_user_conn(void); -extern "C" pthread_handler_decl(handle_one_connection,arg); -extern "C" pthread_handler_decl(handle_bootstrap,arg); +pthread_handler_t handle_one_connection(void *arg); +pthread_handler_t handle_bootstrap(void *arg); void end_thread(THD *thd,bool put_in_cache); void flush_thread_cache(); -void mysql_execute_command(THD *thd); +bool mysql_execute_command(THD *thd); bool do_command(THD *thd); bool dispatch_command(enum enum_server_command command, THD *thd, char* packet, uint packet_length); void log_slow_statement(THD *thd); bool check_dup(const char *db, const char *name, TABLE_LIST *tables); +bool compare_record(TABLE *table, query_id_t query_id); bool table_cache_init(void); void table_cache_free(void); @@ -508,124 +731,133 @@ void close_connection(THD *thd, uint errcode, bool lock); bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, bool *write_to_binlog); bool check_access(THD *thd, ulong access, const char *db, ulong *save_priv, - bool no_grant, bool no_errors); + bool no_grant, bool no_errors, bool schema_db); bool check_table_access(THD *thd, ulong want_access, TABLE_LIST *tables, bool no_errors); bool check_global_access(THD *thd, ulong want_access); -int mysql_backup_table(THD* thd, TABLE_LIST* table_list); -int mysql_restore_table(THD* thd, TABLE_LIST* table_list); - -int mysql_checksum_table(THD* thd, TABLE_LIST* table_list, - HA_CHECK_OPT* check_opt); -int mysql_check_table(THD* thd, TABLE_LIST* table_list, - HA_CHECK_OPT* check_opt); -int mysql_repair_table(THD* thd, TABLE_LIST* table_list, - HA_CHECK_OPT* check_opt); -int mysql_analyze_table(THD* thd, TABLE_LIST* table_list, - HA_CHECK_OPT* check_opt); -int mysql_optimize_table(THD* thd, TABLE_LIST* table_list, - HA_CHECK_OPT* check_opt); -int mysql_assign_to_keycache(THD* thd, TABLE_LIST* table_list, - LEX_STRING *key_cache_name); -int mysql_preload_keys(THD* thd, TABLE_LIST* table_list); 
+bool mysql_backup_table(THD* thd, TABLE_LIST* table_list); +bool mysql_restore_table(THD* thd, TABLE_LIST* table_list); + +bool mysql_checksum_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); +bool mysql_check_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); +bool mysql_repair_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); +bool mysql_analyze_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); +bool mysql_optimize_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); +bool mysql_assign_to_keycache(THD* thd, TABLE_LIST* table_list, + LEX_STRING *key_cache_name); +bool mysql_preload_keys(THD* thd, TABLE_LIST* table_list); int reassign_keycache_tables(THD* thd, KEY_CACHE *src_cache, KEY_CACHE *dst_cache); +TABLE *create_virtual_tmp_table(THD *thd, List<create_field> &field_list); + +bool mysql_xa_recover(THD *thd); bool check_simple_select(); -SORT_FIELD * make_unireg_sortorder(ORDER *order, uint *length); +SORT_FIELD * make_unireg_sortorder(ORDER *order, uint *length, + SORT_FIELD *sortorder); int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, List<Item> &fields, List <Item> &all_fields, ORDER *order); int setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, List<Item> &fields, List<Item> &all_fields, ORDER *order, bool *hidden_group_fields); - -int handle_select(THD *thd, LEX *lex, select_result *result); -int mysql_select(THD *thd, Item ***rref_pointer_array, - TABLE_LIST *tables, uint wild_num, List<Item> &list, - COND *conds, uint og_num, ORDER *order, ORDER *group, - Item *having, ORDER *proc_param, ulong select_type, - select_result *result, SELECT_LEX_UNIT *unit, - SELECT_LEX *select_lex); +bool fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, + Item **ref_pointer_array); + +bool handle_select(THD *thd, LEX *lex, select_result *result, + ulong setup_tables_done_option); +bool mysql_select(THD *thd, Item 
***rref_pointer_array, + TABLE_LIST *tables, uint wild_num, List<Item> &list, + COND *conds, uint og_num, ORDER *order, ORDER *group, + Item *having, ORDER *proc_param, ulonglong select_type, + select_result *result, SELECT_LEX_UNIT *unit, + SELECT_LEX *select_lex); void free_underlaid_joins(THD *thd, SELECT_LEX *select); -int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, - select_result *result); +bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, + select_result *result); int mysql_explain_select(THD *thd, SELECT_LEX *sl, char const *type, select_result *result); -int mysql_union(THD *thd, LEX *lex, select_result *result, - SELECT_LEX_UNIT *unit); -int mysql_handle_derived(LEX *lex); +bool mysql_union(THD *thd, LEX *lex, select_result *result, + SELECT_LEX_UNIT *unit, ulong setup_tables_done_option); +bool mysql_handle_derived(LEX *lex, bool (*processor)(THD *thd, + LEX *lex, + TABLE_LIST *table)); +bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *t); +bool mysql_derived_filling(THD *thd, LEX *lex, TABLE_LIST *t); Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, - Item ***copy_func, Field **from_field, - bool group, bool modify_item, uint convert_blob_length, - bool make_copy_field); -int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, - List<create_field> &fields, - List<Key> &keys, uint &db_options, - handler *file, KEY *&key_info_buffer, - uint &key_count, int select_field_count); -int mysql_create_table(THD *thd,const char *db, const char *table_name, - HA_CREATE_INFO *create_info, + Item ***copy_func, Field **from_field, + Field **def_field, + bool group, bool modify_item, + bool table_cant_handle_bit_fields, + bool make_copy_field, + uint convert_blob_length); +void sp_prepare_create_field(THD *thd, create_field *sql_field); +int prepare_create_field(create_field *sql_field, + uint *blob_columns, + int *timestamps, int *timestamps_with_niladic, + uint table_flags); +bool mysql_create_table(THD 
*thd,const char *db, const char *table_name, + HA_CREATE_INFO *create_info, + Alter_info *alter_info, + bool tmp_table, uint select_field_count); + +bool mysql_alter_table(THD *thd, char *new_db, char *new_name, + HA_CREATE_INFO *create_info, + TABLE_LIST *table_list, Alter_info *alter_info, - bool tmp_table, uint select_field_count); - -TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, - const char *db, const char *name, - Alter_info *alter_info, - List<Item> *items, - MYSQL_LOCK **lock); -int mysql_alter_table(THD *thd, char *new_db, char *new_name, - HA_CREATE_INFO *create_info, - TABLE_LIST *table_list, - Alter_info *alter_info, - uint order_num, ORDER *order, - enum enum_duplicates handle_duplicates, - bool ignore); -int mysql_recreate_table(THD *thd, TABLE_LIST *table_list); -int mysql_create_like_table(THD *thd, TABLE_LIST *table, - HA_CREATE_INFO *create_info, - Table_ident *src_table); + uint order_num, ORDER *order, bool ignore); +bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list); +bool mysql_create_like_table(THD *thd, TABLE_LIST *table, + HA_CREATE_INFO *create_info, + Table_ident *src_table); bool mysql_rename_table(enum db_type base, const char *old_db, const char * old_name, const char *new_db, const char * new_name); -int mysql_prepare_update(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *update_table_list, - Item **conds, uint order_num, ORDER *order); +bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, + Item **conds, uint order_num, ORDER *order); int mysql_update(THD *thd,TABLE_LIST *tables,List<Item> &fields, List<Item> &values,COND *conds, - uint order_num, ORDER *order, ha_rows limit, + uint order_num, ORDER *order, ha_rows limit, enum enum_duplicates handle_duplicates, bool ignore); -int mysql_multi_update(THD *thd, TABLE_LIST *table_list, - List<Item> *fields, List<Item> *values, - COND *conds, ulong options, - enum enum_duplicates handle_duplicates, bool ignore, - SELECT_LEX_UNIT *unit, SELECT_LEX 
*select_lex); -int mysql_multi_update_lock(THD *thd, - TABLE_LIST *table_list, - List<Item> *fields, - SELECT_LEX *select_lex); -int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *insert_table_list, - TABLE_LIST *dup_table_list, TABLE *table, - List<Item> &fields, List_item *values, - List<Item> &update_fields, - List<Item> &update_values, enum_duplicates duplic); -int mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields, - List<List_item> &values, List<Item> &update_fields, - List<Item> &update_values, enum_duplicates flag, bool ignore); -int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds); -int mysql_delete(THD *thd, TABLE_LIST *table, COND *conds, SQL_LIST *order, - ha_rows rows, ulong options); -int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok); +bool mysql_multi_update(THD *thd, TABLE_LIST *table_list, + List<Item> *fields, List<Item> *values, + COND *conds, ulonglong options, + enum enum_duplicates handle_duplicates, bool ignore, + SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex); +bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, + List<Item> &fields, List_item *values, + List<Item> &update_fields, + List<Item> &update_values, enum_duplicates duplic, + COND **where, bool select_insert, + bool check_fields, bool abort_on_warning); +bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields, + List<List_item> &values, List<Item> &update_fields, + List<Item> &update_values, enum_duplicates flag, + bool ignore); +int check_that_all_fields_are_given_values(THD *thd, TABLE *entry, + TABLE_LIST *table_list); +void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table, + enum_duplicates duplic); +bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds); +bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, + SQL_LIST *order, ha_rows rows, ulonglong options, + bool reset_auto_increment); +bool mysql_truncate(THD *thd, TABLE_LIST 
*table_list, bool dont_send_ok); +bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create); TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update); -TABLE *open_table(THD *thd,const char *db,const char *table,const char *alias, - bool *refresh); -TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table); +TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT* mem, + bool *refresh, uint flags); +bool reopen_name_locked_table(THD* thd, TABLE_LIST* table); TABLE *find_locked_table(THD *thd, const char *db,const char *table_name); -bool reopen_table(TABLE *table,bool locked=0); +bool reopen_table(TABLE *table,bool locked); bool reopen_tables(THD *thd,bool get_locks,bool in_refresh); void close_old_data_files(THD *thd, TABLE *table, bool abort_locks, bool send_refresh); @@ -636,12 +868,29 @@ bool drop_locked_tables(THD *thd,const char *db, const char *table_name); void abort_locked_tables(THD *thd,const char *db, const char *table_name); void execute_init_command(THD *thd, sys_var_str *init_command_var, rw_lock_t *var_mutex); -extern const Field *not_found_field; -Field *find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, - TABLE_LIST **where, bool report_error); -Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, - bool check_grant,bool allow_rowid, - uint *cached_field_index_ptr); +extern Field *not_found_field; +extern Field *view_ref_found; + +enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND, + IGNORE_ERRORS, REPORT_EXCEPT_NON_UNIQUE, + IGNORE_EXCEPT_NON_UNIQUE}; +Field * +find_field_in_tables(THD *thd, Item_ident *item, + TABLE_LIST *first_table, TABLE_LIST *last_table, + Item **ref, find_item_error_report_type report_error, + bool check_privileges, bool register_tree_change); +Field * +find_field_in_table_ref(THD *thd, TABLE_LIST *table_list, + const char *name, uint length, + const char *item_name, const char *db_name, + const char 
*table_name, Item **ref, + bool check_privileges, bool allow_rowid, + uint *cached_field_index_ptr, + bool register_tree_change, TABLE_LIST **actual_table); +Field * +find_field_in_table(THD *thd, TABLE *table, const char *name, uint length, + bool allow_rowid, uint *cached_field_index_ptr); + #ifdef HAVE_OPENSSL #include <openssl/des.h> struct st_des_keyblock @@ -660,66 +909,78 @@ bool load_des_key_file(const char *file_name); #endif /* HAVE_OPENSSL */ /* sql_do.cc */ -int mysql_do(THD *thd, List<Item> &values); +bool mysql_do(THD *thd, List<Item> &values); + +/* sql_analyse.h */ +bool append_escaped(String *to_str, String *from_str); /* sql_show.cc */ -int mysqld_show_dbs(THD *thd,const char *wild); -int mysqld_show_open_tables(THD *thd,const char *wild); -int mysqld_show_tables(THD *thd,const char *db,const char *wild); -int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild); -int mysqld_show_fields(THD *thd,TABLE_LIST *table, const char *wild, - bool verbose); -int mysqld_show_keys(THD *thd, TABLE_LIST *table); -int mysqld_show_logs(THD *thd); +bool mysqld_show_open_tables(THD *thd,const char *wild); +bool mysqld_show_logs(THD *thd); void append_identifier(THD *thd, String *packet, const char *name, uint length); int get_quote_char_for_identifier(THD *thd, const char *name, uint length); void mysqld_list_fields(THD *thd,TABLE_LIST *table, const char *wild); -int mysqld_dump_create_info(THD *thd, TABLE *table, int fd = -1); -int mysqld_show_create(THD *thd, TABLE_LIST *table_list); -int mysqld_show_create_db(THD *thd, char *dbname, - const HA_CREATE_INFO *create); +int mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd); +bool mysqld_show_create(THD *thd, TABLE_LIST *table_list); +bool mysqld_show_create_db(THD *thd, char *dbname, + const HA_CREATE_INFO *create); void mysqld_list_processes(THD *thd,const char *user,bool verbose); int mysqld_show_status(THD *thd); int mysqld_show_variables(THD *thd,const char *wild); -int 
mysqld_show(THD *thd, const char *wild, show_var_st *variables, - enum enum_var_type value_type, - pthread_mutex_t *mutex); -int mysql_find_files(THD *thd,List<char> *files, const char *db, - const char *path, const char *wild, bool dir); -int mysqld_show_charsets(THD *thd,const char *wild); -int mysqld_show_collations(THD *thd,const char *wild); -int mysqld_show_storage_engines(THD *thd); -int mysqld_show_privileges(THD *thd); -int mysqld_show_column_types(THD *thd); -int mysqld_help (THD *thd, const char *text); +bool mysqld_show_storage_engines(THD *thd); +bool mysqld_show_privileges(THD *thd); +bool mysqld_show_column_types(THD *thd); +bool mysqld_help (THD *thd, const char *text); +void calc_sum_of_all_status(STATUS_VAR *to); + +void append_definer(THD *thd, String *buffer, const LEX_STRING *definer_user, + const LEX_STRING *definer_host); + + +/* information schema */ +extern LEX_STRING information_schema_name; +LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str, + const char* str, uint length, + bool allocate_lex_string); +ST_SCHEMA_TABLE *find_schema_table(THD *thd, const char* table_name); +ST_SCHEMA_TABLE *get_schema_table(enum enum_schema_tables schema_table_idx); +int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, + enum enum_schema_tables schema_table_idx); +int make_schema_select(THD *thd, SELECT_LEX *sel, + enum enum_schema_tables schema_table_idx); +int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list); +int fill_schema_user_privileges(THD *thd, TABLE_LIST *tables, COND *cond); +int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond); +int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond); +int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond); +bool get_schema_tables_result(JOIN *join, + enum enum_schema_table_state executed_place); +#define is_schema_db(X) \ + !my_strcasecmp(system_charset_info, information_schema_name.str, (X)) /* sql_prepare.cc 
*/ -int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, - LEX_STRING *name=NULL); + +void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length); void mysql_stmt_execute(THD *thd, char *packet, uint packet_length); -void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name); -void mysql_stmt_free(THD *thd, char *packet); +void mysql_stmt_close(THD *thd, char *packet); +void mysql_sql_stmt_prepare(THD *thd); +void mysql_sql_stmt_execute(THD *thd); +void mysql_sql_stmt_close(THD *thd); +void mysql_stmt_fetch(THD *thd, char *packet, uint packet_length); void mysql_stmt_reset(THD *thd, char *packet); void mysql_stmt_get_longdata(THD *thd, char *pos, ulong packet_length); - -/* sql_error.cc */ -MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, uint code, - const char *msg); -void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level, - uint code, const char *format, ...) - ATTRIBUTE_FORMAT(printf,4,5); -void mysql_reset_errors(THD *thd); -my_bool mysqld_show_warnings(THD *thd, ulong levels_to_show); +void reinit_stmt_before_use(THD *thd, LEX *lex); /* sql_handler.cc */ -int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen= 0); -int mysql_ha_close(THD *thd, TABLE_LIST *tables); -int mysql_ha_read(THD *, TABLE_LIST *,enum enum_ha_read_modes,char *, - List<Item> *,enum ha_rkey_function,Item *,ha_rows,ha_rows); +bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen); +bool mysql_ha_close(THD *thd, TABLE_LIST *tables); +bool mysql_ha_read(THD *, TABLE_LIST *,enum enum_ha_read_modes,char *, + List<Item> *,enum ha_rkey_function,Item *,ha_rows,ha_rows); int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags, bool is_locked); +void mysql_ha_mark_tables_for_reopen(THD *thd, TABLE *table); /* mysql_ha_flush mode_flags bits */ #define MYSQL_HA_CLOSE_FINAL 0x00 #define MYSQL_HA_REOPEN_ON_USAGE 0x01 @@ -736,42 +997,97 @@ bool add_field_to_list(THD *thd, char *field_name, enum 
enum_field_types type, char *change, List<String> *interval_list, CHARSET_INFO *cs, uint uint_geom_type); +create_field * new_create_field(THD *thd, char *field_name, enum_field_types type, + char *length, char *decimals, + uint type_modifier, + Item *default_value, Item *on_update_value, + LEX_STRING *comment, char *change, + List<String> *interval_list, CHARSET_INFO *cs, + uint uint_geom_type); void store_position_for_column(const char *name); -bool add_to_list(THD *thd, SQL_LIST &list,Item *group,bool asc=0); +bool add_to_list(THD *thd, SQL_LIST &list,Item *group,bool asc); +bool push_new_name_resolution_context(THD *thd, + TABLE_LIST *left_op, + TABLE_LIST *right_op); void add_join_on(TABLE_LIST *b,Item *expr); -void add_join_natural(TABLE_LIST *a,TABLE_LIST *b); +void add_join_natural(TABLE_LIST *a,TABLE_LIST *b,List<String> *using_fields, + SELECT_LEX *lex); bool add_proc_to_list(THD *thd, Item *item); TABLE *unlink_open_table(THD *thd,TABLE *list,TABLE *find); +void update_non_unique_table_error(TABLE_LIST *update, + const char *operation, + TABLE_LIST *duplicate); SQL_SELECT *make_select(TABLE *head, table_map const_tables, - table_map read_tables, COND *conds, int *error); -enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND, - IGNORE_ERRORS}; -extern const Item **not_found_item; + table_map read_tables, COND *conds, + bool allow_null_cond, int *error); +extern Item **not_found_item; + +/* + This enumeration type is used only by the function find_item_in_list + to return the info on how an item has been resolved against a list + of possibly aliased items. + The item can be resolved: + - against an alias name of the list's element (RESOLVED_AGAINST_ALIAS) + - against non-aliased field name of the list (RESOLVED_WITH_NO_ALIAS) + - against an aliased field name of the list (RESOLVED_BEHIND_ALIAS) + - ignoring the alias name in cases when SQL requires to ignore aliases + (e.g. 
when the resolved field reference contains a table name or + when the resolved item is an expression) (RESOLVED_IGNORING_ALIAS) +*/ +enum enum_resolution_type { + NOT_RESOLVED=0, + RESOLVED_IGNORING_ALIAS, + RESOLVED_BEHIND_ALIAS, + RESOLVED_WITH_NO_ALIAS, + RESOLVED_AGAINST_ALIAS +}; Item ** find_item_in_list(Item *item, List<Item> &items, uint *counter, find_item_error_report_type report_error, - bool *unaliased); + enum_resolution_type *resolution); bool get_key_map_from_key_list(key_map *map, TABLE *table, List<String> *index_list); -bool insert_fields(THD *thd,TABLE_LIST *tables, +bool insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, const char *table_name, - List_iterator<Item> *it); -bool setup_tables(TABLE_LIST *tables); + List_iterator<Item> *it, bool any_privileges); +bool setup_tables(THD *thd, Name_resolution_context *context, + List<TABLE_LIST> *from_clause, TABLE_LIST *tables, + Item **conds, TABLE_LIST **leaves, bool select_insert); +bool setup_tables_and_check_access (THD *thd, + Name_resolution_context *context, + List<TABLE_LIST> *from_clause, + TABLE_LIST *tables, Item **conds, + TABLE_LIST **leaves, + bool select_insert, + ulong want_access_first, + ulong want_access); int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, List<Item> *sum_func_list, uint wild_num); -int setup_fields(THD *thd, Item** ref_pointer_array, TABLE_LIST *tables, - List<Item> &item, bool set_query_id, - List<Item> *sum_func_list, bool allow_sum_func); -int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds); +bool setup_fields(THD *thd, Item** ref_pointer_array, + List<Item> &item, bool set_query_id, + List<Item> *sum_func_list, bool allow_sum_func); +inline bool setup_fields_with_no_wrap(THD *thd, Item **ref_pointer_array, + List<Item> &item, bool set_query_id, + List<Item> *sum_func_list, + bool allow_sum_func) +{ + bool res; + thd->lex->select_lex.no_wrap_view_item= TRUE; + res= setup_fields(thd, ref_pointer_array, item, 
set_query_id, sum_func_list, + allow_sum_func); + thd->lex->select_lex.no_wrap_view_item= FALSE; + return res; +} +int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, + COND **conds); int setup_ftfuncs(SELECT_LEX* select); int init_ftfuncs(THD *thd, SELECT_LEX* select, bool no_order); void wait_for_refresh(THD *thd); -int open_tables(THD *thd, TABLE_LIST *tables, uint *counter); +int open_tables(THD *thd, TABLE_LIST **tables, uint *counter, uint flags); int simple_open_n_lock_tables(THD *thd,TABLE_LIST *tables); -int open_and_lock_tables(THD *thd,TABLE_LIST *tables); -int open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables); -void relink_tables_for_derived(THD *thd); -int lock_tables(THD *thd, TABLE_LIST *tables, uint counter); +bool open_and_lock_tables(THD *thd,TABLE_LIST *tables); +bool open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables, uint flags); +int lock_tables(THD *thd, TABLE_LIST *tables, uint counter, bool *need_reopen); TABLE *open_temporary_table(THD *thd, const char *path, const char *db, const char *table_name, bool link_in_list); bool rm_temporary_table(enum db_type base, char *path); @@ -779,18 +1095,21 @@ void free_io_cache(TABLE *entry); void intern_close_table(TABLE *entry); bool close_thread_table(THD *thd, TABLE **table_ptr); void close_temporary_tables(THD *thd); -TABLE_LIST * find_table_in_list(TABLE_LIST *table, - const char *db_name, const char *table_name); -TABLE_LIST * find_real_table_in_list(TABLE_LIST *table, - const char *db_name, - const char *table_name); +void close_tables_for_reopen(THD *thd, TABLE_LIST **tables); +TABLE_LIST *find_table_in_list(TABLE_LIST *table, + st_table_list *TABLE_LIST::*link, + const char *db_name, + const char *table_name); +TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, + bool check_alias); TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name); bool close_temporary_table(THD *thd, const char *db, const char 
*table_name); -void close_temporary(TABLE *table, bool delete_table=1); +void close_temporary(TABLE *table, bool delete_table); bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db, const char *table_name); void remove_db_from_cache(const char *db); void flush_tables(); +bool is_equal(const LEX_STRING *a, const LEX_STRING *b); /* bits for last argument to remove_table_from_cache() */ #define RTFC_NO_FLAG 0x0000 @@ -802,19 +1121,47 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table, bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables); void copy_field_from_tmp_record(Field *field,int offset); -int fill_record(List<Item> &fields,List<Item> &values, bool ignore_errors); -int fill_record(Field **field,List<Item> &values, bool ignore_errors); -OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild); +bool fill_record(THD *thd, Field **field, List<Item> &values, + bool ignore_errors); +bool fill_record_n_invoke_before_triggers(THD *thd, List<Item> &fields, + List<Item> &values, + bool ignore_errors, + Table_triggers_list *triggers, + enum trg_event_type event); +bool fill_record_n_invoke_before_triggers(THD *thd, Field **field, + List<Item> &values, + bool ignore_errors, + Table_triggers_list *triggers, + enum trg_event_type event); +OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild); + +inline TABLE_LIST *find_table_in_global_list(TABLE_LIST *table, + const char *db_name, + const char *table_name) +{ + return find_table_in_list(table, &TABLE_LIST::next_global, + db_name, table_name); +} + +inline TABLE_LIST *find_table_in_local_list(TABLE_LIST *table, + const char *db_name, + const char *table_name) +{ + return find_table_in_list(table, &TABLE_LIST::next_local, + db_name, table_name); +} + /* sql_calc.cc */ bool eval_const_cond(COND *cond); /* sql_load.cc */ -int mysql_load(THD *thd,sql_exchange *ex, TABLE_LIST *table_list, - List<Item> &fields, enum enum_duplicates 
handle_duplicates, - bool ignore, - bool local_file,thr_lock_type lock_type); -int write_record(TABLE *table,COPY_INFO *info); +bool mysql_load(THD *thd, sql_exchange *ex, TABLE_LIST *table_list, + List<Item> &fields_vars, List<Item> &set_fields, + List<Item> &set_values_list, + enum enum_duplicates handle_duplicates, bool ignore, + bool local_file); +int write_record(THD *thd, TABLE *table, COPY_INFO *info); /* sql_manager.cc */ /* bits set in manager_status */ @@ -822,22 +1169,25 @@ int write_record(TABLE *table,COPY_INFO *info); extern ulong volatile manager_status; extern bool volatile manager_thread_in_use, mqh_used; extern pthread_t manager_thread; -extern "C" pthread_handler_decl(handle_manager, arg); +pthread_handler_t handle_manager(void *arg); /* sql_test.cc */ #ifndef DBUG_OFF void print_where(COND *cond,const char *info); void print_cached_tables(void); void TEST_filesort(SORT_FIELD *sortorder,uint s_length); +void print_plan(JOIN* join,uint idx, double record_count, double read_time, + double current_read_time, const char *info); #endif -void mysql_print_status(THD *thd); +void mysql_print_status(); /* key.cc */ int find_ref_key(TABLE *form,Field *field, uint *offset); -void key_copy(byte *key,TABLE *form,uint index,uint key_length); -void key_restore(TABLE *form,byte *key,uint index,uint key_length); +void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length); +void key_restore(byte *to_record, byte *from_key, KEY *key_info, + uint key_length); bool key_cmp_if_same(TABLE *form,const byte *key,uint index,uint key_length); void key_unpack(String *to,TABLE *form,uint index); -bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields); +bool is_key_used(TABLE *table, uint idx, List<Item> &fields); int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length); bool init_errmessage(void); @@ -852,14 +1202,12 @@ void sql_print_information(const char *format, ...) 
bool fn_format_relative_to_data_home(my_string to, const char *name, const char *dir, const char *extension); -bool open_log(MYSQL_LOG *log, const char *hostname, - const char *opt_name, const char *extension, - const char *index_file_name, - enum_log_type type, bool read_append, - bool no_auto_events, ulong max_size); +File open_binlog(IO_CACHE *log, const char *log_file_name, + const char **errmsg); /* mysqld.cc */ -extern void yyerror(const char*); +extern void MYSQLerror(const char*); +void refresh_status(THD *thd); my_bool mysql_rm_tmp_tables(void); /* item_func.cc */ @@ -874,11 +1222,14 @@ void unhex_type2(TYPELIB *lib); uint check_word(TYPELIB *lib, const char *val, const char *end, const char **end_of_word); -bool is_keyword(const char *name, uint len); +bool is_keyword(const char *name, uint len); #define MY_DB_OPT_FILE "db.opt" +bool check_db_dir_existence(const char *db_name); bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create); +bool load_db_opt_by_name(THD *thd, const char *db_name, + HA_CREATE_INFO *db_create_info); bool my_dbopt_init(void); void my_dbopt_cleanup(void); void my_dbopt_free(void); @@ -887,7 +1238,7 @@ void my_dbopt_free(void); External variables */ -extern time_t start_time; +extern time_t server_start_time; extern char *mysql_data_home,server_version[SERVER_VERSION_LENGTH], mysql_real_data_home[], *opt_mysql_tmpdir, mysql_charsets_dir[], def_ft_boolean_syntax[sizeof(ft_boolean_syntax)]; @@ -897,7 +1248,9 @@ extern const char *command_name[]; extern const char *first_keyword, *my_localhost, *delayed_user, *binary_keyword; extern const char **errmesg; /* Error messages */ extern const char *myisam_recover_options_str; -extern const char *in_left_expr_name, *in_additional_cond; +extern const char *in_left_expr_name, *in_additional_cond, *in_having_cond; +extern const char * const triggers_file_ext; +extern const char * const trigname_file_ext; extern Eq_creator eq_creator; extern Ne_creator ne_creator; extern Gt_creator 
gt_creator; @@ -907,56 +1260,49 @@ extern Le_creator le_creator; extern char language[FN_REFLEN], reg_ext[FN_EXTLEN]; extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN]; extern char pidfile_name[FN_REFLEN], system_time_zone[30], *opt_init_file; -extern char log_error_file[FN_REFLEN]; +extern char log_error_file[FN_REFLEN], *opt_tc_log_file; extern double log_10[32]; extern double log_01[32]; extern ulonglong log_10_int[20]; extern ulonglong keybuff_size; -extern ulong refresh_version,flush_version, thread_id,query_id,opened_tables; -extern ulong created_tmp_tables, created_tmp_disk_tables, bytes_sent; +extern ulonglong thd_startup_options; +extern ulong refresh_version,flush_version, thread_id; extern ulong binlog_cache_use, binlog_cache_disk_use; extern ulong aborted_threads,aborted_connects; extern ulong delayed_insert_timeout; extern ulong delayed_insert_limit, delayed_queue_size; extern ulong delayed_insert_threads, delayed_insert_writes; extern ulong delayed_rows_in_use,delayed_insert_errors; -extern ulong filesort_rows, filesort_range_count, filesort_scan_count; -extern ulong filesort_merge_passes; -extern ulong select_range_check_count, select_range_count, select_scan_count; -extern ulong select_full_range_join_count,select_full_join_count; extern ulong slave_open_temp_tables; extern ulong query_cache_size, query_cache_min_res_unit; -extern ulong thd_startup_options, slow_launch_threads, slow_launch_time; -extern ulong server_id, concurrency; -extern ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count; -extern ulong ha_read_key_count, ha_read_next_count, ha_read_prev_count; -extern ulong ha_read_first_count, ha_read_last_count; -extern ulong ha_read_rnd_count, ha_read_rnd_next_count, ha_discover_count; -extern ulong ha_commit_count, ha_rollback_count,table_cache_size; +extern ulong slow_launch_threads, slow_launch_time; +extern ulong table_cache_size; extern ulong max_connections,max_connect_errors, connect_timeout; extern ulong 
slave_net_timeout, slave_trans_retries; -extern ulong max_user_connections; +extern uint max_user_connections; +extern ulong what_to_log,flush_time; +extern ulong query_buff_size, thread_stack; extern ulong max_prepared_stmt_count, prepared_stmt_count; -extern ulong long_query_count, what_to_log,flush_time; -extern ulong query_buff_size, thread_stack,thread_stack_min; extern ulong binlog_cache_size, max_binlog_cache_size, open_files_limit; extern ulong max_binlog_size, max_relay_log_size; extern ulong rpl_recovery_rank, thread_cache_size; -extern ulong com_stat[(uint) SQLCOM_END], com_other, back_log; -extern ulong com_stmt_prepare, com_stmt_execute, com_stmt_send_long_data; -extern ulong com_stmt_reset, com_stmt_close; +extern ulong back_log; extern ulong specialflag, current_pid; extern ulong expire_logs_days, sync_binlog_period, sync_binlog_counter; -extern my_bool relay_log_purge, opt_innodb_safe_binlog; +extern ulong opt_tc_log_size, tc_log_max_pages_used, tc_log_page_size; +extern ulong tc_log_page_waits; +extern my_bool relay_log_purge, opt_innodb_safe_binlog, opt_innodb; extern uint test_flags,select_errors,ha_open_options; extern uint protocol_version, mysqld_port, dropping_tables; extern uint delay_key_write_options, lower_case_table_names; -extern bool opt_endinfo, using_udf_functions, locked_in_memory; +extern bool opt_endinfo, using_udf_functions; +extern my_bool locked_in_memory; extern bool opt_using_transactions, mysqld_embedded; extern bool using_update_log, opt_large_files, server_id_supplied; extern bool opt_log, opt_update_log, opt_bin_log, opt_slow_log, opt_error_log; +extern my_bool opt_log_queries_not_using_indexes; extern bool opt_disable_networking, opt_skip_show_db; -extern bool opt_character_set_client_handshake; +extern my_bool opt_character_set_client_handshake; extern bool volatile abort_loop, shutdown_in_progress, grant_option; extern uint volatile thread_count, thread_running, global_read_lock; extern my_bool opt_sql_bin_update, 
opt_safe_user_create, opt_no_mix_types; @@ -965,28 +1311,36 @@ extern my_bool opt_slave_compressed_protocol, use_temp_pool; extern my_bool opt_readonly, lower_case_file_system; extern my_bool opt_enable_named_pipe, opt_sync_frm, opt_allow_suspicious_udfs; extern my_bool opt_secure_auth; +extern char* opt_secure_file_priv; extern my_bool opt_log_slow_admin_statements; +extern my_bool sp_automatic_privileges, opt_noacl; +extern my_bool opt_old_style_user_limits, trust_function_creators; extern uint opt_crash_binlog_innodb; extern char *shared_memory_base_name, *mysqld_unix_port; -extern bool opt_enable_shared_memory; +extern my_bool opt_enable_shared_memory; extern char *default_tz_name; +extern my_bool opt_large_pages; +extern uint opt_large_page_size; -extern MYSQL_LOG mysql_log,mysql_update_log,mysql_slow_log,mysql_bin_log; +extern MYSQL_LOG mysql_log,mysql_slow_log,mysql_bin_log; extern FILE *bootstrap_file; +extern int bootstrap_error; extern FILE *stderror_file; extern pthread_key(MEM_ROOT**,THR_MALLOC); extern pthread_mutex_t LOCK_mysql_create_db,LOCK_Acl,LOCK_open, LOCK_thread_count,LOCK_mapped_file,LOCK_user_locks, LOCK_status, LOCK_error_log, LOCK_delayed_insert, LOCK_uuid_generator, LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone, - LOCK_slave_list, LOCK_active_mi, LOCK_manager, + LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_global_read_lock, LOCK_global_system_variables, LOCK_user_conn, - LOCK_prepared_stmt_count; + LOCK_prepared_stmt_count, + LOCK_bytes_sent, LOCK_bytes_received; #ifdef HAVE_OPENSSL extern pthread_mutex_t LOCK_des_key_file; #endif extern rw_lock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave; extern pthread_cond_t COND_refresh, COND_thread_count, COND_manager; +extern pthread_cond_t COND_global_read_lock; extern pthread_attr_t connection_attrib; extern I_List<THD> threads; extern I_List<NAMED_LIST> key_caches; @@ -994,12 +1348,9 @@ extern MY_BITMAP temp_pool; extern String my_empty_string; extern 
const String my_null_string; extern SHOW_VAR init_vars[],status_vars[], internal_vars[]; -extern SHOW_COMP_OPTION have_isam; -extern SHOW_COMP_OPTION have_innodb; -extern SHOW_COMP_OPTION have_berkeley_db; -extern SHOW_COMP_OPTION have_ndbcluster; extern struct system_variables global_system_variables; extern struct system_variables max_system_variables; +extern struct system_status_var global_status_var; extern struct rand_struct sql_rand; extern const char *opt_date_time_formats[]; @@ -1011,30 +1362,85 @@ extern TABLE *unused_tables; extern I_List<i_string> binlog_do_db, binlog_ignore_db; extern const char* any_db; extern struct my_option my_long_options[]; +extern const LEX_STRING view_type; /* optional things, have_* variables */ -extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db; -extern SHOW_COMP_OPTION have_example_db, have_archive_db, have_csv_db; -extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink; -extern SHOW_COMP_OPTION have_query_cache, have_berkeley_db, have_innodb; +#ifdef HAVE_INNOBASE_DB +extern handlerton innobase_hton; +#define have_innodb innobase_hton.state +#else +extern SHOW_COMP_OPTION have_innodb; +#endif +#ifdef HAVE_BERKELEY_DB +extern handlerton berkeley_hton; +#define have_berkeley_db berkeley_hton.state +#else +extern SHOW_COMP_OPTION have_berkeley_db; +#endif +#ifdef HAVE_EXAMPLE_DB +extern handlerton example_hton; +#define have_example_db example_hton.state +#else +extern SHOW_COMP_OPTION have_example_db; +#endif +#ifdef HAVE_ARCHIVE_DB +extern handlerton archive_hton; +#define have_archive_db archive_hton.state +#else +extern SHOW_COMP_OPTION have_archive_db; +#endif +#ifdef HAVE_CSV_DB +extern handlerton tina_hton; +#define have_csv_db tina_hton.state +#else +extern SHOW_COMP_OPTION have_csv_db; +#endif +#ifdef HAVE_FEDERATED_DB +extern handlerton federated_hton; +#define have_federated_db federated_hton.state +#else +extern SHOW_COMP_OPTION have_federated_db; +#endif +#ifdef HAVE_BLACKHOLE_DB +extern 
handlerton blackhole_hton; +#define have_blackhole_db blackhole_hton.state +#else +extern SHOW_COMP_OPTION have_blackhole_db; +#endif +#ifdef HAVE_NDBCLUSTER_DB +extern handlerton ndbcluster_hton; +#define have_ndbcluster ndbcluster_hton.state +#else +extern SHOW_COMP_OPTION have_ndbcluster; +#endif + +/* MRG_MYISAM handler is always built, but may be skipped */ +extern handlerton myisammrg_hton; +#define have_merge_db myisammrg_hton.state + +extern SHOW_COMP_OPTION have_isam; +extern SHOW_COMP_OPTION have_raid, have_ssl, have_symlink, have_dlopen; +extern SHOW_COMP_OPTION have_query_cache; extern SHOW_COMP_OPTION have_geometry, have_rtree_keys; extern SHOW_COMP_OPTION have_crypt; extern SHOW_COMP_OPTION have_compress; -extern SHOW_COMP_OPTION have_blackhole_db, have_merge_db; #ifndef __WIN__ extern pthread_t signal_thread; #endif #ifdef HAVE_OPENSSL -extern struct st_VioSSLAcceptorFd * ssl_acceptor_fd; +extern struct st_VioSSLFd * ssl_acceptor_fd; #endif /* HAVE_OPENSSL */ -MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **table, uint count, uint flags); -/* mysql_lock_tables() flags bits */ +MYSQL_LOCK *mysql_lock_tables(THD *thd, TABLE **table, uint count, + uint flags, bool *need_reopen); +/* mysql_lock_tables() and open_table() flags bits */ #define MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK 0x0001 #define MYSQL_LOCK_IGNORE_FLUSH 0x0002 +#define MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN 0x0004 +#define MYSQL_OPEN_IGNORE_LOCKED_TABLES 0x0008 void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock); void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock); @@ -1043,14 +1449,17 @@ void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table); void mysql_lock_abort(THD *thd, TABLE *table); bool mysql_lock_abort_for_thread(THD *thd, TABLE *table); MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b); -int mysql_lock_have_duplicate(THD *thd, TABLE *table, TABLE_LIST *tables); +TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle, + TABLE_LIST *haystack); 
bool lock_global_read_lock(THD *thd); void unlock_global_read_lock(THD *thd); -bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commit); +bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, + bool is_not_commit); void start_waiting_global_read_lock(THD *thd); -void make_global_read_lock_block_commit(THD *thd); -my_bool set_protect_against_global_read_lock(void); +bool make_global_read_lock_block_commit(THD *thd); +bool set_protect_against_global_read_lock(void); void unset_protect_against_global_read_lock(void); +void broadcast_refresh(void); /* Lock based on name */ int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list); @@ -1059,7 +1468,7 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list); bool wait_for_locked_table_names(THD *thd, TABLE_LIST *table_list); bool lock_table_names(THD *thd, TABLE_LIST *table_list); void unlock_table_names(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *last_table= 0); + TABLE_LIST *last_table); /* old unireg functions */ @@ -1078,31 +1487,28 @@ int rea_create_table(THD *thd, my_string file_name, uint key_count,KEY *key_info); int format_number(uint inputflag,uint max_length,my_string pos,uint length, my_string *errpos); -int openfrm(const char *name,const char *alias,uint filestat,uint prgflag, - uint ha_open_flags, TABLE *outparam); +int openfrm(THD *thd, const char *name,const char *alias,uint filestat, + uint prgflag, uint ha_open_flags, TABLE *outparam); int readfrm(const char *name, const void** data, uint* length); int writefrm(const char* name, const void* data, uint len); int closefrm(TABLE *table); -db_type get_table_type(const char *name); int read_string(File file, gptr *to, uint length); void free_blobs(TABLE *table); int set_zone(int nr,int min_zone,int max_zone); ulong convert_period_to_month(ulong period); ulong convert_month_to_period(ulong month); -uint calc_days_in_year(uint year); void get_date_from_daynr(long daynr,uint *year, uint *month, uint *day); 
-my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *not_exist); +my_time_t TIME_to_timestamp(THD *thd, const TIME *t, my_bool *not_exist); bool str_to_time_with_warn(const char *str,uint length,TIME *l_time); timestamp_type str_to_datetime_with_warn(const char *str, uint length, TIME *l_time, uint flags); -longlong number_to_TIME(longlong nr, TIME *time_res, bool fuzzy_date, - int *was_cut); void localtime_to_TIME(TIME *to, struct tm *from); void calc_time_from_sec(TIME *to, long seconds, long microseconds); void make_truncated_value_warning(THD *thd, const char *str_val, - uint str_length, timestamp_type time_type); + uint str_length, timestamp_type time_type, + const char *field_name); extern DATE_TIME_FORMAT *date_time_format_make(timestamp_type format_type, const char *format_str, uint format_length); @@ -1118,10 +1524,6 @@ void make_date(const DATE_TIME_FORMAT *format, const TIME *l_time, String *str); void make_time(const DATE_TIME_FORMAT *format, const TIME *l_time, String *str); -ulonglong TIME_to_ulonglong_datetime(const TIME *time); -ulonglong TIME_to_ulonglong_date(const TIME *time); -ulonglong TIME_to_ulonglong_time(const TIME *time); -ulonglong TIME_to_ulonglong(const TIME *time); int test_if_number(char *str,int *res,bool allow_wildcards); void change_byte(byte *,uint,char,char); @@ -1134,8 +1536,9 @@ void end_read_record(READ_RECORD *info); ha_rows filesort(THD *thd, TABLE *form,struct st_sort_field *sortorder, uint s_length, SQL_SELECT *select, ha_rows max_rows, ha_rows *examined_rows); -void filesort_free_buffers(TABLE *table); +void filesort_free_buffers(TABLE *table, bool full); void change_double_for_sort(double nr,byte *to); +double my_double_round(double value, int dec, bool truncate); int get_quick_record(SQL_SELECT *select); int calc_weekday(long daynr,bool sunday_first_day_of_week); uint calc_week(TIME *l_time, uint week_behaviour, uint *year); @@ -1147,7 +1550,7 @@ ulong make_new_entry(File file,uchar *fileinfo,TYPELIB *formnames, 
const char *newname); ulong next_io_size(ulong pos); void append_unescaped(String *res, const char *pos, uint length); -int create_frm(char *name, const char *db, const char *table, +int create_frm(THD *thd, char *name, const char *db, const char *table, uint reclength,uchar *fileinfo, HA_CREATE_INFO *create_info, uint keys); void update_create_info_from_table(HA_CREATE_INFO *info, TABLE *form); @@ -1176,8 +1579,8 @@ extern int sql_cache_hit(THD *thd, char *inBuf, uint length); /* item_func.cc */ Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name, LEX_STRING component); -int get_var_with_binlog(THD *thd, LEX_STRING &name, - user_var_entry **out_entry); +int get_var_with_binlog(THD *thd, enum_sql_command sql_command, + LEX_STRING &name, user_var_entry **out_entry); /* log.cc */ bool flush_error_log(void); @@ -1186,13 +1589,18 @@ void free_list(I_List <i_string_pair> *list); void free_list(I_List <i_string> *list); /* sql_yacc.cc */ -extern int yyparse(void *thd); +extern int MYSQLparse(void *thd); +#ifndef DBUG_OFF +extern void turn_parser_debug_on(); +#endif /* frm_crypt.cc */ #ifdef HAVE_CRYPTED_FRM SQL_CRYPT *get_crypt_for_frm(void); #endif +#include "sql_view.h" + /* Some inline functions for more speed */ inline bool add_item_to_list(THD *thd, Item *item) @@ -1219,13 +1627,14 @@ inline void mark_as_null_row(TABLE *table) { table->null_row=1; table->status|=STATUS_NULL_ROW; - bfill(table->null_flags,table->null_bytes,255); + bfill(table->null_flags,table->s->null_bytes,255); } inline void table_case_convert(char * name, uint length) { if (lower_case_table_names) - my_casedn(files_charset_info, name, length); + files_charset_info->cset->casedn(files_charset_info, + name, length, name, length); } inline const char *table_case_name(HA_CREATE_INFO *info, const char *name) @@ -1269,8 +1678,14 @@ inline void setup_table_map(TABLE *table, TABLE_LIST *table_list, uint tablenr) table->const_table= 0; table->null_row= 0; table->status= 
STATUS_NO_RECORD; - table->keys_in_use_for_query= table->keys_in_use; - table->maybe_null= test(table->outer_join= table_list->outer_join); + table->keys_in_use_for_query= table->s->keys_in_use; + table->maybe_null= table_list->outer_join; + TABLE_LIST *embedding= table_list->embedding; + while (!table->maybe_null && embedding) + { + table->maybe_null= embedding->outer_join; + embedding= embedding->embedding; + } table->tablenr= tablenr; table->map= (table_map) 1 << tablenr; table->force_index= table_list->force_index; @@ -1294,26 +1709,13 @@ inline int hexchar_to_int(char c) } /* - wrapper to use instead of mysql_bin_log.write when - query is generated by the server using system_charset encoding -*/ - -inline void write_binlog_with_system_charset(THD * thd, Query_log_event * qinfo) -{ - CHARSET_INFO * cs_save= thd->variables.character_set_client; - thd->variables.character_set_client= system_charset_info; - mysql_bin_log.write(qinfo); - thd->variables.character_set_client= cs_save; -} - -/* is_user_table() return true if the table was created explicitly */ inline bool is_user_table(TABLE * table) { - const char *name= table->real_name; + const char *name= table->s->table_name; return strncmp(name, tmp_file_prefix, tmp_file_prefix_length); } @@ -1325,9 +1727,13 @@ inline bool is_user_table(TABLE * table) #ifndef EMBEDDED_LIBRARY extern "C" void unireg_abort(int exit_code); void kill_delayed_threads(void); -bool check_stack_overrun(THD *thd,char *dummy); +bool check_stack_overrun(THD *thd, long margin, char *dummy); #else #define unireg_abort(exit_code) DBUG_RETURN(exit_code) inline void kill_delayed_threads(void) {} -#define check_stack_overrun(A, B) 0 +#define check_stack_overrun(A, B, C) 0 +#endif + +#endif /* MYSQL_CLIENT */ + #endif diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 20f20a0a86b..99d66134405 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the 
GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -22,6 +21,7 @@ #include "repl_failsafe.h" #include "stacktrace.h" #include "mysqld_suffix.h" +#include "mysys_err.h" #ifdef HAVE_BERKELEY_DB #include "ha_berkeley.h" #endif @@ -29,9 +29,6 @@ #include "ha_innodb.h" #endif #include "ha_myisam.h" -#ifdef HAVE_ISAM -#include "ha_isam.h" -#endif #ifdef HAVE_NDBCLUSTER_DB #include "ha_ndbcluster.h" #endif @@ -41,19 +38,11 @@ #else #define OPT_INNODB_DEFAULT 0 #endif -#ifdef HAVE_BERKLEY_DB -#define OPT_BDB_DEFAULT 1 -#else #define OPT_BDB_DEFAULT 0 -#endif -#ifdef HAVE_ISAM_DB -#define OPT_ISAM_DEFAULT 1 -#else -#define OPT_ISAM_DEFAULT 0 -#endif #ifdef HAVE_NDBCLUSTER_DB #define OPT_NDBCLUSTER_DEFAULT 0 -#if defined(NDB_SHM_TRANSPORTER) && MYSQL_VERSION_ID >= 50000 +#if defined(NOT_ENOUGH_TESTED) \ + && defined(NDB_SHM_TRANSPORTER) && MYSQL_VERSION_ID >= 50000 #define OPT_NDB_SHM_DEFAULT 1 #else #define OPT_NDB_SHM_DEFAULT 0 @@ -62,10 +51,15 @@ #define OPT_NDBCLUSTER_DEFAULT 0 #endif -#include <nisam.h> +#ifndef DEFAULT_SKIP_THREAD_PRIORITY +#define DEFAULT_SKIP_THREAD_PRIORITY 0 +#endif + #include <thr_alarm.h> #include <ft_global.h> #include <errmsg.h> +#include "sp_rcontext.h" +#include "sp_cache.h" #define mysqld_charset &my_charset_latin1 @@ -79,8 +73,10 @@ #define IF_PURIFY(A,B) (B) #endif -#ifndef INADDR_NONE -#define INADDR_NONE -1 // Error value from inet_addr +#if SIZEOF_CHARP == 4 +#define MAX_MEM_TABLE_SIZE ~(ulong) 0 +#else +#define MAX_MEM_TABLE_SIZE ~(ulonglong) 0 #endif /* stack traces are only supported on linux intel */ @@ -112,10 +108,11 @@ extern "C" { // Because of SCO 3.2V4.2 #ifdef HAVE_GRP_H #include <grp.h> #endif +#include <my_net.h> #if defined(OS2) # include 
<sys/un.h> -#elif !defined( __WIN__) +#elif !defined(__WIN__) # ifndef __NETWARE__ #include <sys/resource.h> # endif /* __NETWARE__ */ @@ -132,26 +129,17 @@ extern "C" { // Because of SCO 3.2V4.2 #include <sys/utsname.h> #endif /* __WIN__ */ -#ifdef HAVE_LIBWRAP -#include <tcpd.h> -#include <syslog.h> -#ifdef NEED_SYS_SYSLOG_H -#include <sys/syslog.h> -#endif /* NEED_SYS_SYSLOG_H */ -int allow_severity = LOG_INFO; -int deny_severity = LOG_WARNING; - -#endif /* HAVE_LIBWRAP */ +#include <my_libwrap.h> #ifdef HAVE_SYS_MMAN_H #include <sys/mman.h> #endif +#ifdef __NETWARE__ #define zVOLSTATE_ACTIVE 6 #define zVOLSTATE_DEACTIVE 2 #define zVOLSTATE_MAINTENANCE 3 -#ifdef __NETWARE__ #include <nks/netware.h> #include <nks/vm.h> #include <library.h> @@ -177,7 +165,7 @@ static void registerwithneb(); static void getvolumename(); static void getvolumeID(BYTE *volumeName); #endif /* __NETWARE__ */ - + #ifdef _AIX41 int initgroups(const char *,unsigned int); @@ -226,17 +214,67 @@ extern "C" int gethostname(char *name, int namelen); /* Constants */ const char *show_comp_option_name[]= {"YES", "NO", "DISABLED"}; -const char *sql_mode_names[] = +static const char *sql_mode_names[]= { "REAL_AS_FLOAT", "PIPES_AS_CONCAT", "ANSI_QUOTES", "IGNORE_SPACE", "?", "ONLY_FULL_GROUP_BY", "NO_UNSIGNED_SUBTRACTION", "NO_DIR_IN_CREATE", "POSTGRESQL", "ORACLE", "MSSQL", "DB2", "MAXDB", "NO_KEY_OPTIONS", "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS", "MYSQL323", "MYSQL40", "ANSI", - "NO_AUTO_VALUE_ON_ZERO", NullS + "NO_AUTO_VALUE_ON_ZERO", "NO_BACKSLASH_ESCAPES", "STRICT_TRANS_TABLES", + "STRICT_ALL_TABLES", + "NO_ZERO_IN_DATE", "NO_ZERO_DATE", "ALLOW_INVALID_DATES", + "ERROR_FOR_DIVISION_BY_ZERO", + "TRADITIONAL", "NO_AUTO_CREATE_USER", "HIGH_NOT_PRECEDENCE", + "NO_ENGINE_SUBSTITUTION", + NullS +}; +static const unsigned int sql_mode_names_len[]= +{ + /*REAL_AS_FLOAT*/ 13, + /*PIPES_AS_CONCAT*/ 15, + /*ANSI_QUOTES*/ 11, + /*IGNORE_SPACE*/ 12, + /*?*/ 1, + /*ONLY_FULL_GROUP_BY*/ 18, + 
/*NO_UNSIGNED_SUBTRACTION*/ 23, + /*NO_DIR_IN_CREATE*/ 16, + /*POSTGRESQL*/ 10, + /*ORACLE*/ 6, + /*MSSQL*/ 5, + /*DB2*/ 3, + /*MAXDB*/ 5, + /*NO_KEY_OPTIONS*/ 14, + /*NO_TABLE_OPTIONS*/ 16, + /*NO_FIELD_OPTIONS*/ 16, + /*MYSQL323*/ 8, + /*MYSQL40*/ 7, + /*ANSI*/ 4, + /*NO_AUTO_VALUE_ON_ZERO*/ 21, + /*NO_BACKSLASH_ESCAPES*/ 20, + /*STRICT_TRANS_TABLES*/ 19, + /*STRICT_ALL_TABLES*/ 17, + /*NO_ZERO_IN_DATE*/ 15, + /*NO_ZERO_DATE*/ 12, + /*ALLOW_INVALID_DATES*/ 19, + /*ERROR_FOR_DIVISION_BY_ZERO*/ 26, + /*TRADITIONAL*/ 11, + /*NO_AUTO_CREATE_USER*/ 19, + /*HIGH_NOT_PRECEDENCE*/ 19, + /*NO_ENGINE_SUBSTITUTION*/ 22 }; TYPELIB sql_mode_typelib= { array_elements(sql_mode_names)-1,"", - sql_mode_names, NULL }; + sql_mode_names, + (unsigned int *)sql_mode_names_len }; +static const char *tc_heuristic_recover_names[]= +{ + "COMMIT", "ROLLBACK", NullS +}; +static TYPELIB tc_heuristic_recover_typelib= +{ + array_elements(tc_heuristic_recover_names)-1,"", + tc_heuristic_recover_names, NULL +}; const char *first_keyword= "first", *binary_keyword= "BINARY"; const char *my_localhost= "localhost", *delayed_user= "DELAYED"; #if SIZEOF_OFF_T > 4 && defined(BIG_TABLES) @@ -250,87 +288,130 @@ bool opt_large_files= sizeof(my_off_t) > 4; /* Used with --help for detailed option */ -bool opt_help= 0; -bool opt_verbose= 0; +static my_bool opt_help= 0, opt_verbose= 0; -arg_cmp_func Arg_comparator::comparator_matrix[4][2] = +arg_cmp_func Arg_comparator::comparator_matrix[5][2] = {{&Arg_comparator::compare_string, &Arg_comparator::compare_e_string}, {&Arg_comparator::compare_real, &Arg_comparator::compare_e_real}, {&Arg_comparator::compare_int_signed, &Arg_comparator::compare_e_int}, - {&Arg_comparator::compare_row, &Arg_comparator::compare_e_row}}; + {&Arg_comparator::compare_row, &Arg_comparator::compare_e_row}, + {&Arg_comparator::compare_decimal, &Arg_comparator::compare_e_decimal}}; + +/* static variables */ + +static bool lower_case_table_names_used= 0; +static bool volatile 
select_thread_in_use, signal_thread_in_use; +static bool volatile ready_to_exit; +static my_bool opt_debugging= 0, opt_external_locking= 0, opt_console= 0; +static my_bool opt_bdb, opt_isam, opt_ndbcluster, opt_merge; +static my_bool opt_short_log_format= 0; +static uint kill_cached_threads, wake_thread; +static ulong killed_threads, thread_created; +static ulong max_used_connections; +static ulong my_bind_addr; /* the address we bind to */ +static volatile ulong cached_thread_count= 0; +static const char *sql_mode_str= "OFF"; +static char *mysqld_user, *mysqld_chroot, *log_error_file_ptr; +static char *opt_init_slave, *language_ptr, *opt_init_connect; +static char *default_character_set_name; +static char *character_set_filesystem_name; +static char *my_bind_addr_str; +static char *default_collation_name; +static char compiled_default_collation_name[]= MYSQL_DEFAULT_COLLATION_NAME; +static char mysql_data_home_buff[2]; +static I_List<THD> thread_cache; +#ifndef EMBEDDED_LIBRARY +static struct passwd *user_info; +static pthread_t select_thread; +static uint thr_kill_signal; +#endif + +static pthread_cond_t COND_thread_cache, COND_flush_thread_cache; + +#ifdef HAVE_BERKELEY_DB +static my_bool opt_sync_bdb_logs; +#endif /* Global variables */ bool opt_log, opt_update_log, opt_bin_log, opt_slow_log; +my_bool opt_log_queries_not_using_indexes= 0; bool opt_error_log= IF_WIN(1,0); bool opt_disable_networking=0, opt_skip_show_db=0; -bool opt_character_set_client_handshake= 1; -bool lower_case_table_names_used= 0; +my_bool opt_character_set_client_handshake= 1; bool server_id_supplied = 0; -bool opt_endinfo, using_udf_functions, locked_in_memory; +bool opt_endinfo, using_udf_functions; +my_bool locked_in_memory; bool opt_using_transactions, using_update_log; -bool volatile abort_loop, select_thread_in_use, signal_thread_in_use; -bool volatile ready_to_exit, shutdown_in_progress, grant_option; +bool volatile abort_loop; +bool volatile shutdown_in_progress, grant_option; 
my_bool opt_skip_slave_start = 0; // If set, slave is not autostarted my_bool opt_reckless_slave = 0; -my_bool opt_enable_named_pipe= 0, opt_debugging= 0; -my_bool opt_local_infile, opt_external_locking, opt_slave_compressed_protocol; +my_bool opt_enable_named_pipe= 0; +my_bool opt_local_infile, opt_slave_compressed_protocol; my_bool opt_safe_user_create = 0, opt_no_mix_types = 0; my_bool opt_show_slave_auth_info, opt_sql_bin_update = 0; my_bool opt_log_slave_updates= 0; -my_bool opt_console= 0, opt_bdb, opt_innodb, opt_isam, opt_ndbcluster; -my_bool opt_merge; +my_bool opt_innodb; +bool slave_warning_issued = false; + #ifdef HAVE_NDBCLUSTER_DB const char *opt_ndbcluster_connectstring= 0; +const char *opt_ndb_connectstring= 0; +char opt_ndb_constrbuf[1024]; +unsigned opt_ndb_constrbuf_len= 0; my_bool opt_ndb_shm, opt_ndb_optimized_node_selection; +ulong opt_ndb_cache_check_time; +const char *opt_ndb_mgmd; +ulong opt_ndb_nodeid; #endif my_bool opt_readonly, use_temp_pool, relay_log_purge; -my_bool opt_sync_bdb_logs, opt_sync_frm, opt_allow_suspicious_udfs; +my_bool opt_sync_frm, opt_allow_suspicious_udfs; my_bool opt_secure_auth= 0; -my_bool opt_short_log_format= 0; -my_bool opt_log_queries_not_using_indexes= 0; +char* opt_secure_file_priv= 0; my_bool opt_log_slow_admin_statements= 0; my_bool lower_case_file_system= 0; -my_bool opt_innodb_safe_binlog= 0; +my_bool opt_large_pages= 0; +uint opt_large_page_size= 0; +my_bool opt_old_style_user_limits= 0, trust_function_creators= 0; +/* + True if there is at least one per-hour limit for some user, so we should + check them before each query (and possibly reset counters when hour is + changed). False otherwise. +*/ volatile bool mqh_used = 0; +my_bool opt_noacl; +my_bool sp_automatic_privileges= 1; #ifdef HAVE_INITGROUPS static bool calling_initgroups= FALSE; /* Used in SIGSEGV handler. 
*/ #endif uint mysqld_port, test_flags, select_errors, dropping_tables, ha_open_options; +uint mysqld_port_timeout; uint delay_key_write_options, protocol_version; uint lower_case_table_names; -uint opt_crash_binlog_innodb; -uint volatile thread_count, thread_running, kill_cached_threads, wake_thread; -ulong back_log, connect_timeout, concurrency; -ulong server_id, thd_startup_options; -ulong table_cache_size, thread_stack, thread_stack_min, what_to_log; +uint tc_heuristic_recover= 0; +uint volatile thread_count, thread_running; +ulonglong thd_startup_options; +ulong back_log, connect_timeout, concurrency, server_id; +ulong table_cache_size, thread_stack, what_to_log; ulong query_buff_size, slow_launch_time, slave_open_temp_tables; ulong open_files_limit, max_binlog_size, max_relay_log_size; ulong slave_net_timeout, slave_trans_retries; ulong thread_cache_size=0, binlog_cache_size=0, max_binlog_cache_size=0; ulong query_cache_size=0; -ulong com_stat[(uint) SQLCOM_END], com_other; -ulong com_stmt_prepare, com_stmt_execute, com_stmt_send_long_data; -ulong com_stmt_close, com_stmt_reset; -ulong bytes_sent, bytes_received, net_big_packet_count; ulong refresh_version, flush_version; /* Increments on each reload */ -ulong query_id, long_query_count; -ulong aborted_threads, killed_threads, aborted_connects; +query_id_t global_query_id; +ulong aborted_threads, aborted_connects; ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size; ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use; -ulong delayed_insert_errors,flush_time, thread_created; -ulong filesort_rows, filesort_range_count, filesort_scan_count; -ulong filesort_merge_passes; -ulong select_range_check_count, select_range_count, select_scan_count; -ulong select_full_range_join_count,select_full_join_count; -ulong specialflag=0,opened_tables=0,created_tmp_tables=0, - created_tmp_disk_tables=0; +ulong delayed_insert_errors,flush_time; +ulong specialflag=0; ulong binlog_cache_use= 
0, binlog_cache_disk_use= 0; -ulong max_connections,max_used_connections, - max_connect_errors, max_user_connections = 0; +ulong max_connections, max_connect_errors; +uint max_user_connections= 0; /* Limit of the total number of prepared statements in the server. Is necessary to protect the server against out-of-memory attacks. @@ -351,21 +432,17 @@ ulong thread_id=1L,current_pid; ulong slow_launch_threads = 0, sync_binlog_period; ulong expire_logs_days = 0; ulong rpl_recovery_rank=0; -ulong my_bind_addr; /* the address we bind to */ -volatile ulong cached_thread_count= 0; double log_10[32]; /* 10 potences */ double log_01[32]; -time_t start_time; +time_t server_start_time; char mysql_home[FN_REFLEN], pidfile_name[FN_REFLEN], system_time_zone[30]; char *default_tz_name; char log_error_file[FN_REFLEN], glob_hostname[FN_REFLEN]; -char* log_error_file_ptr= log_error_file; char mysql_real_data_home[FN_REFLEN], language[FN_REFLEN], reg_ext[FN_EXTLEN], mysql_charsets_dir[FN_REFLEN], - *mysqld_user,*mysqld_chroot, *opt_init_file, - *opt_init_connect, *opt_init_slave, + *opt_init_file, *opt_tc_log_file, def_ft_boolean_syntax[sizeof(ft_boolean_syntax)]; const key_map key_map_empty(0); @@ -373,21 +450,20 @@ key_map key_map_full(0); // Will be initialized later const char *opt_date_time_formats[3]; -char compiled_default_collation_name[]= MYSQL_DEFAULT_COLLATION_NAME; -char *language_ptr, *default_collation_name, *default_character_set_name; -char mysql_data_home_buff[2], *mysql_data_home=mysql_real_data_home; -struct passwd *user_info; +char *mysql_data_home= mysql_real_data_home; char server_version[SERVER_VERSION_LENGTH]; char *mysqld_unix_port, *opt_mysql_tmpdir; -char *my_bind_addr_str; const char **errmesg; /* Error messages */ const char *myisam_recover_options_str="OFF"; const char *myisam_stats_method_str="nulls_unequal"; -const char *sql_mode_str="OFF"; + /* name of reference on left espression in rewritten IN subquery */ const char *in_left_expr_name= "<left 
expr>"; /* name of additional condition */ const char *in_additional_cond= "<IN COND>"; +const char *in_having_cond= "<IN HAVING>"; + +my_decimal decimal_zero; /* classes for comparation parsing/processing */ Eq_creator eq_creator; Ne_creator ne_creator; @@ -398,37 +474,38 @@ Le_creator le_creator; FILE *bootstrap_file; +int bootstrap_error; FILE *stderror_file=0; I_List<i_string_pair> replicate_rewrite_db; I_List<i_string> replicate_do_db, replicate_ignore_db; // allow the user to tell us which db to replicate and which to ignore I_List<i_string> binlog_do_db, binlog_ignore_db; -I_List<THD> threads,thread_cache; +I_List<THD> threads; I_List<NAMED_LIST> key_caches; struct system_variables global_system_variables; struct system_variables max_system_variables; +struct system_status_var global_status_var; MY_TMPDIR mysql_tmpdir_list; MY_BITMAP temp_pool; CHARSET_INFO *system_charset_info, *files_charset_info ; CHARSET_INFO *national_charset_info, *table_alias_charset; +CHARSET_INFO *character_set_filesystem; -SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster, - have_example_db, have_archive_db, have_csv_db, have_merge_db; -SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache; -SHOW_COMP_OPTION have_geometry, have_rtree_keys; +SHOW_COMP_OPTION have_isam; +SHOW_COMP_OPTION have_raid, have_ssl, have_symlink, have_query_cache; +SHOW_COMP_OPTION have_geometry, have_rtree_keys, have_dlopen; SHOW_COMP_OPTION have_crypt, have_compress; -SHOW_COMP_OPTION have_blackhole_db; /* Thread specific variables */ pthread_key(MEM_ROOT**,THR_MALLOC); pthread_key(THD*, THR_THD); pthread_mutex_t LOCK_mysql_create_db, LOCK_Acl, LOCK_open, LOCK_thread_count, - LOCK_mapped_file, LOCK_status, + LOCK_mapped_file, LOCK_status, LOCK_global_read_lock, LOCK_error_log, LOCK_uuid_generator, LOCK_delayed_insert, LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_bytes_sent, LOCK_bytes_received, @@ -446,12 +523,11 @@ pthread_mutex_t 
LOCK_prepared_stmt_count; pthread_mutex_t LOCK_des_key_file; #endif rw_lock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave; -pthread_cond_t COND_refresh,COND_thread_count, COND_slave_stopped, - COND_slave_start; -pthread_cond_t COND_thread_cache,COND_flush_thread_cache; +pthread_cond_t COND_refresh,COND_thread_count, COND_global_read_lock; pthread_t signal_thread; pthread_attr_t connection_attrib; -static uint thr_kill_signal; + +File_parser_dummy_hook file_parser_dummy_hook; /* replication parameters, if master_host is not NULL, we are a slave */ uint master_port= MYSQL_PORT, master_connect_retry = 60; @@ -467,17 +543,16 @@ char *master_ssl_ca, *master_ssl_capath, *master_ssl_cipher; /* Static variables */ static bool kill_in_progress, segfaulted; -static my_bool opt_do_pstack, opt_noacl, opt_bootstrap, opt_myisam_log; +static my_bool opt_do_pstack, opt_bootstrap, opt_myisam_log; static int cleanup_done; static ulong opt_specialflag, opt_myisam_block_size; static char *opt_logname, *opt_update_logname, *opt_binlog_index_name; -static char *opt_slow_logname; +static char *opt_slow_logname, *opt_tc_heuristic_recover; static char *mysql_home_ptr, *pidfile_name_ptr; static char **defaults_argv; static char *opt_bin_logname; static my_socket unix_sock,ip_sock; -static pthread_t select_thread; struct rand_struct sql_rand; // used by sql_class.cc:THD::THD() /* OS specific variables */ @@ -521,68 +596,75 @@ bool mysqld_embedded=1; static const char* default_dbug_option; #endif #ifdef HAVE_LIBWRAP -char *libwrapName= NULL; +const char *libwrapName= NULL; +int allow_severity = LOG_INFO; +int deny_severity = LOG_WARNING; #endif #ifdef HAVE_QUERY_CACHE -ulong query_cache_limit= 0; +static ulong query_cache_limit= 0; ulong query_cache_min_res_unit= QUERY_CACHE_MIN_RESULT_DATA_SIZE; Query_cache query_cache; #endif #ifdef HAVE_SMEM char *shared_memory_base_name= default_shared_memory_base_name; -bool opt_enable_shared_memory; +my_bool opt_enable_shared_memory; HANDLE 
smem_event_connect_request= 0; #endif +#define SSL_VARS_NOT_STATIC #include "sslopt-vars.h" #ifdef HAVE_OPENSSL #include <openssl/crypto.h> - +#ifndef HAVE_YASSL typedef struct CRYPTO_dynlock_value { rw_lock_t lock; } openssl_lock_t; -char *des_key_file; -struct st_VioSSLAcceptorFd *ssl_acceptor_fd; static openssl_lock_t *openssl_stdlocks; - static openssl_lock_t *openssl_dynlock_create(const char *, int); static void openssl_dynlock_destroy(openssl_lock_t *, const char *, int); static void openssl_lock_function(int, int, const char *, int); static void openssl_lock(int, openssl_lock_t *, const char *, int); static unsigned long openssl_id_function(); +#endif +char *des_key_file; +struct st_VioSSLFd *ssl_acceptor_fd; #endif /* HAVE_OPENSSL */ /* Function declarations */ -static void start_signal_handler(void); -extern "C" pthread_handler_decl(signal_hand, arg); +pthread_handler_t signal_hand(void *arg); static void mysql_init_variables(void); static void get_options(int argc,char **argv); static void set_server_version(void); static int init_thread_environment(); static char *get_relative_path(const char *path); static void fix_paths(void); -extern "C" pthread_handler_decl(handle_connections_sockets,arg); -extern "C" pthread_handler_decl(kill_server_thread,arg); -static int bootstrap(FILE *file); -static void close_server_sock(); +pthread_handler_t handle_connections_sockets(void *arg); +pthread_handler_t kill_server_thread(void *arg); +static void bootstrap(FILE *file); static bool read_init_file(char *file_name); #ifdef __NT__ -extern "C" pthread_handler_decl(handle_connections_namedpipes,arg); +pthread_handler_t handle_connections_namedpipes(void *arg); #endif #ifdef HAVE_SMEM -static pthread_handler_decl(handle_connections_shared_memory,arg); +pthread_handler_t handle_connections_shared_memory(void *arg); #endif -extern "C" pthread_handler_decl(handle_slave,arg); +pthread_handler_t handle_slave(void *arg); static ulong find_bit_type(const char *x, TYPELIB 
*bit_lib); static void clean_up(bool print_message); +static int test_if_case_insensitive(const char *dir_name); + +#ifndef EMBEDDED_LIBRARY +static void start_signal_handler(void); +static void close_server_sock(); static void clean_up_mutexes(void); static void wait_for_signal_thread_to_end(void); -static int test_if_case_insensitive(const char *dir_name); static void create_pid_file(); +#endif + #ifndef EMBEDDED_LIBRARY /**************************************************************************** @@ -604,14 +686,14 @@ static void close_connections(void) (void) pthread_mutex_lock(&LOCK_manager); if (manager_thread_in_use) { - DBUG_PRINT("quit",("killing manager thread: %lx",manager_thread)); + DBUG_PRINT("quit",("killing manager thread: 0x%lx",manager_thread)); (void) pthread_cond_signal(&COND_manager); } (void) pthread_mutex_unlock(&LOCK_manager); /* kill connection thread */ #if !defined(__WIN__) && !defined(__EMX__) && !defined(OS2) && !defined(__NETWARE__) - DBUG_PRINT("quit",("waiting for select thread: %lx",select_thread)); + DBUG_PRINT("quit",("waiting for select thread: 0x%lx",select_thread)); (void) pthread_mutex_lock(&LOCK_thread_count); while (select_thread_in_use) @@ -645,11 +727,11 @@ static void close_connections(void) /* Abort listening to new connections */ DBUG_PRINT("quit",("Closing sockets")); - if ( !opt_disable_networking ) + if (!opt_disable_networking ) { if (ip_sock != INVALID_SOCKET) { - (void) shutdown(ip_sock,2); + (void) shutdown(ip_sock, SHUT_RDWR); (void) closesocket(ip_sock); ip_sock= INVALID_SOCKET; } @@ -658,7 +740,7 @@ static void close_connections(void) if (hPipe != INVALID_HANDLE_VALUE && opt_enable_named_pipe) { HANDLE temp; - DBUG_PRINT( "quit", ("Closing named pipes") ); + DBUG_PRINT("quit", ("Closing named pipes") ); /* Create connection to the handle named pipe handler to break the loop */ if ((temp = CreateFile(pipe_name, @@ -681,7 +763,7 @@ static void close_connections(void) #ifdef HAVE_SYS_UN_H if (unix_sock != 
INVALID_SOCKET) { - (void) shutdown(unix_sock,2); + (void) shutdown(unix_sock, SHUT_RDWR); (void) closesocket(unix_sock); (void) unlink(mysqld_unix_port); unix_sock= INVALID_SOCKET; @@ -704,9 +786,10 @@ static void close_connections(void) DBUG_PRINT("quit",("Informing thread %ld that it's time to die", tmp->thread_id)); /* We skip slave threads on this first loop through. */ - if (tmp->slave_thread) continue; + if (tmp->slave_thread) + continue; - tmp->killed= 1; + tmp->killed= THD::KILL_CONNECTION; if (tmp->mysys_var) { tmp->mysys_var->abort=1; @@ -748,7 +831,9 @@ static void close_connections(void) { if (global_system_variables.log_warnings) sql_print_warning(ER(ER_FORCING_CLOSE),my_progname, - tmp->thread_id,tmp->user ? tmp->user : ""); + tmp->thread_id, + (tmp->main_security_ctx.user ? + tmp->main_security_ctx.user : "")); close_connection(tmp,0,0); } #endif @@ -768,7 +853,6 @@ static void close_connections(void) DBUG_PRINT("quit",("close_connections thread")); DBUG_VOID_RETURN; } -#endif /*EMBEDDED_LIBRARY*/ static void close_server_sock() @@ -781,7 +865,7 @@ static void close_server_sock() { ip_sock=INVALID_SOCKET; DBUG_PRINT("info",("calling shutdown on TCP/IP socket")); - VOID(shutdown(tmp_sock,2)); + VOID(shutdown(tmp_sock, SHUT_RDWR)); #if defined(__NETWARE__) /* The following code is disabled for normal systems as it causes MySQL @@ -796,7 +880,7 @@ static void close_server_sock() { unix_sock=INVALID_SOCKET; DBUG_PRINT("info",("calling shutdown on unix socket")); - VOID(shutdown(tmp_sock,2)); + VOID(shutdown(tmp_sock, SHUT_RDWR)); #if defined(__NETWARE__) /* The following code is disabled for normal systems as it may cause MySQL @@ -811,12 +895,14 @@ static void close_server_sock() #endif } +#endif /*EMBEDDED_LIBRARY*/ + void kill_mysql(void) { DBUG_ENTER("kill_mysql"); -#ifdef SIGNALS_DONT_BREAK_READ +#if defined(SIGNALS_DONT_BREAK_READ) && !defined(EMBEDDED_LIBRARY) abort_loop=1; // Break connection loops close_server_sock(); // Force accept to wake up 
#endif @@ -837,7 +923,7 @@ void kill_mysql(void) } #endif #elif defined(OS2) - pthread_cond_signal( &eventShutdown); // post semaphore + pthread_cond_signal(&eventShutdown); // post semaphore #elif defined(HAVE_PTHREAD_KILL) if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL)) { @@ -887,46 +973,55 @@ static void __cdecl kill_server(int sig_ptr) #define RETURN_FROM_KILL_SERVER DBUG_VOID_RETURN #endif { - int sig=(int) (long) sig_ptr; // This is passed a int DBUG_ENTER("kill_server"); #ifndef EMBEDDED_LIBRARY + int sig=(int) (long) sig_ptr; // This is passed a int // if there is a signal during the kill in progress, ignore the other if (kill_in_progress) // Safety RETURN_FROM_KILL_SERVER; kill_in_progress=TRUE; abort_loop=1; // This should be set if (sig != 0) // 0 is not a valid signal number - my_sigset(sig,SIG_IGN); + my_sigset(sig, SIG_IGN); /* purify inspected */ if (sig == MYSQL_KILL_SIGNAL || sig == 0) sql_print_information(ER(ER_NORMAL_SHUTDOWN),my_progname); else sql_print_error(ER(ER_GOT_SIGNAL),my_progname,sig); /* purecov: inspected */ -#if defined(HAVE_SMEM) && defined(__WIN__) - /* - Send event to smem_event_connect_request for aborting - */ - if (!SetEvent(smem_event_connect_request)) - { +#if defined(HAVE_SMEM) && defined(__WIN__) + /* + Send event to smem_event_connect_request for aborting + */ + if (!SetEvent(smem_event_connect_request)) + { DBUG_PRINT("error", ("Got error: %ld from SetEvent of smem_event_connect_request", - GetLastError())); + GetLastError())); } -#endif - +#endif + #if defined(__NETWARE__) || (defined(USE_ONE_SIGNAL_HAND) && !defined(__WIN__) && !defined(OS2)) my_thread_init(); // If this is a new thread #endif close_connections(); - if (sig != MYSQL_KILL_SIGNAL && sig != 0) + if (sig != MYSQL_KILL_SIGNAL && +#ifdef __WIN__ + sig != SIGINT && /* Bug#18235 */ +#endif + sig != 0) unireg_abort(1); /* purecov: inspected */ else unireg_end(); + #ifdef __NETWARE__ if (!event_flag) - pthread_join(select_thread, NULL); // wait for main 
thread + pthread_join(select_thread, NULL); // wait for main thread #endif /* __NETWARE__ */ +#if defined(__NETWARE__) || (defined(USE_ONE_SIGNAL_HAND) && !defined(__WIN__) && !defined(OS2)) + my_thread_end(); +#endif + pthread_exit(0); /* purecov: deadcode */ #endif /* EMBEDDED_LIBRARY */ @@ -935,7 +1030,7 @@ static void __cdecl kill_server(int sig_ptr) #if defined(USE_ONE_SIGNAL_HAND) || (defined(__NETWARE__) && defined(SIGNALS_DONT_BREAK_READ)) -extern "C" pthread_handler_decl(kill_server_thread,arg __attribute__((unused))) +pthread_handler_t kill_server_thread(void *arg __attribute__((unused))) { my_thread_init(); // Initialize new thread kill_server(0); @@ -1010,7 +1105,6 @@ void clean_up(bool print_message) mysql_log.cleanup(); mysql_slow_log.cleanup(); - mysql_update_log.cleanup(); mysql_bin_log.cleanup(); #ifdef HAVE_REPLICATION @@ -1035,6 +1129,9 @@ void clean_up(bool print_message) udf_free(); #endif (void) ha_panic(HA_PANIC_CLOSE); /* close all tables and logs */ + if (tc_log) + tc_log->close(); + xid_cache_free(); delete_elements(&key_caches, (void (*)(const char*, gptr)) free_key_cache); multi_keycache_free(); end_thr_alarm(1); /* Free allocated memory */ @@ -1058,6 +1155,7 @@ void clean_up(bool print_message) #endif x_free(opt_bin_logname); x_free(opt_relay_logname); + x_free(opt_secure_file_priv); bitmap_free(&temp_pool); free_max_user_conn(); #ifdef HAVE_REPLICATION @@ -1072,9 +1170,11 @@ void clean_up(bool print_message) if (ssl_acceptor_fd) { SSL_CTX_free(ssl_acceptor_fd->ssl_context); - my_free((gptr) ssl_acceptor_fd, MYF(MY_ALLOW_ZERO_PTR)); + my_free((gptr) ssl_acceptor_fd, MYF(0)); } #endif /* HAVE_OPENSSL */ + vio_end(); + #ifdef USE_REGEX my_regex_end(); #endif @@ -1085,7 +1185,9 @@ void clean_up(bool print_message) if (!opt_bootstrap) (void) my_delete(pidfile_name,MYF(0)); // This may not always exist #endif - x_free((gptr) my_errmsg[ERRMAPP]); /* Free messages */ + finish_client_errs(); + my_free((gptr) my_error_unregister(ER_ERROR_FIRST, 
ER_ERROR_LAST), + MYF(MY_WME | MY_FAE | MY_ALLOW_ZERO_PTR)); DBUG_PRINT("quit", ("Error messages freed")); /* Tell main we are ready */ (void) pthread_mutex_lock(&LOCK_thread_count); @@ -1102,6 +1204,8 @@ void clean_up(bool print_message) } /* clean_up */ +#ifndef EMBEDDED_LIBRARY + /* This is mainly needed when running with purify, but it's still nice to know that all child threads have died when mysqld exits @@ -1145,10 +1249,12 @@ static void clean_up_mutexes() (void) pthread_mutex_destroy(&LOCK_user_conn); #ifdef HAVE_OPENSSL (void) pthread_mutex_destroy(&LOCK_des_key_file); +#ifndef HAVE_YASSL for (int i= 0; i < CRYPTO_num_locks(); ++i) (void) rwlock_destroy(&openssl_stdlocks[i].lock); OPENSSL_free(openssl_stdlocks); #endif +#endif #ifdef HAVE_REPLICATION (void) pthread_mutex_destroy(&LOCK_rpl_status); (void) pthread_cond_destroy(&COND_rpl_status); @@ -1157,14 +1263,20 @@ static void clean_up_mutexes() (void) rwlock_destroy(&LOCK_sys_init_connect); (void) rwlock_destroy(&LOCK_sys_init_slave); (void) pthread_mutex_destroy(&LOCK_global_system_variables); + (void) pthread_mutex_destroy(&LOCK_global_read_lock); + (void) pthread_mutex_destroy(&LOCK_uuid_generator); (void) pthread_mutex_destroy(&LOCK_prepared_stmt_count); (void) pthread_cond_destroy(&COND_thread_count); (void) pthread_cond_destroy(&COND_refresh); + (void) pthread_cond_destroy(&COND_global_read_lock); (void) pthread_cond_destroy(&COND_thread_cache); (void) pthread_cond_destroy(&COND_flush_thread_cache); (void) pthread_cond_destroy(&COND_manager); } +#endif /*EMBEDDED_LIBRARY*/ + + /**************************************************************************** ** Init IP and UNIX socket ****************************************************************************/ @@ -1199,7 +1311,7 @@ static void set_ports() static struct passwd *check_user(const char *user) { #if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__) - struct passwd *user_info; + struct passwd *tmp_user_info; uid_t user_id= 
geteuid(); // Don't bother if we aren't superuser @@ -1207,12 +1319,14 @@ static struct passwd *check_user(const char *user) { if (user) { - // Don't give a warning, if real user is same as given with --user - user_info= getpwnam(user); - if ((!user_info || user_id != user_info->pw_uid) && + /* Don't give a warning, if real user is same as given with --user */ + /* purecov: begin tested */ + tmp_user_info= getpwnam(user); + if ((!tmp_user_info || user_id != tmp_user_info->pw_uid) && global_system_variables.log_warnings) sql_print_warning( "One can only use the --user switch if running as root\n"); + /* purecov: end */ } return NULL; } @@ -1225,23 +1339,25 @@ static struct passwd *check_user(const char *user) } return NULL; } + /* purecov: begin tested */ if (!strcmp(user,"root")) return NULL; // Avoid problem with dynamic libraries - if (!(user_info= getpwnam(user))) + if (!(tmp_user_info= getpwnam(user))) { // Allow a numeric uid to be used const char *pos; for (pos= user; my_isdigit(mysqld_charset,*pos); pos++) ; if (*pos) // Not numeric id goto err; - if (!(user_info= getpwuid(atoi(user)))) + if (!(tmp_user_info= getpwuid(atoi(user)))) goto err; else - return user_info; + return tmp_user_info; } else - return user_info; + return tmp_user_info; + /* purecov: end */ err: sql_print_error("Fatal error: Can't change to run as user '%s' ; Please check that the user exists!\n",user); @@ -1250,10 +1366,11 @@ err: return NULL; } -static void set_user(const char *user, struct passwd *user_info) +static void set_user(const char *user, struct passwd *user_info_arg) { + /* purecov: begin tested */ #if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__) - DBUG_ASSERT(user_info); + DBUG_ASSERT(user_info_arg != 0); #ifdef HAVE_INITGROUPS /* We can get a SIGSEGV when calling initgroups() on some systems when NSS @@ -1262,33 +1379,34 @@ static void set_user(const char *user, struct passwd *user_info) output a specific message to help the user resolve this problem. 
*/ calling_initgroups= TRUE; - initgroups((char*) user, user_info->pw_gid); + initgroups((char*) user, user_info_arg->pw_gid); calling_initgroups= FALSE; #endif - if (setgid(user_info->pw_gid) == -1) + if (setgid(user_info_arg->pw_gid) == -1) { sql_perror("setgid"); unireg_abort(1); } - if (setuid(user_info->pw_uid) == -1) + if (setuid(user_info_arg->pw_uid) == -1) { sql_perror("setuid"); unireg_abort(1); } #endif + /* purecov: end */ } -static void set_effective_user(struct passwd *user_info) +static void set_effective_user(struct passwd *user_info_arg) { #if !defined(__WIN__) && !defined(OS2) && !defined(__NETWARE__) - DBUG_ASSERT(user_info); - if (setregid((gid_t)-1, user_info->pw_gid) == -1) + DBUG_ASSERT(user_info_arg != 0); + if (setregid((gid_t)-1, user_info_arg->pw_gid) == -1) { sql_perror("setregid"); unireg_abort(1); } - if (setreuid((uid_t)-1, user_info->pw_uid) == -1) + if (setreuid((uid_t)-1, user_info_arg->pw_uid) == -1) { sql_perror("setreuid"); unireg_abort(1); @@ -1311,26 +1429,19 @@ static void set_root(const char *path) #endif } -static void server_init(void) +static void network_init(void) { struct sockaddr_in IPaddr; #ifdef HAVE_SYS_UN_H struct sockaddr_un UNIXaddr; #endif int arg=1; - DBUG_ENTER("server_init"); - -#ifdef __WIN__ - if ( !opt_disable_networking ) - { - WSADATA WsaData; - if (SOCKET_ERROR == WSAStartup (0x0101, &WsaData)) - { - my_message(0,"WSAStartup Failed\n",MYF(0)); - unireg_abort(1); - } - } -#endif /* __WIN__ */ + int ret; + uint waited; + uint this_wait; + uint retry; + DBUG_ENTER("network_init"); + LINT_INIT(ret); set_ports(); @@ -1356,8 +1467,26 @@ static void server_init(void) */ (void) setsockopt(ip_sock,SOL_SOCKET,SO_REUSEADDR,(char*)&arg,sizeof(arg)); #endif /* __WIN__ */ - if (bind(ip_sock, my_reinterpret_cast(struct sockaddr *) (&IPaddr), - sizeof(IPaddr)) < 0) + /* + Sometimes the port is not released fast enough when stopping and + restarting the server. 
This happens quite often with the test suite + on busy Linux systems. Retry to bind the address at these intervals: + Sleep intervals: 1, 2, 4, 6, 9, 13, 17, 22, ... + Retry at second: 1, 3, 7, 13, 22, 35, 52, 74, ... + Limit the sequence by mysqld_port_timeout (set --port-open-timeout=#). + */ + for (waited= 0, retry= 1; ; retry++, waited+= this_wait) + { + if (((ret= bind(ip_sock, my_reinterpret_cast(struct sockaddr *) (&IPaddr), + sizeof(IPaddr))) >= 0) || + (socket_errno != SOCKET_EADDRINUSE) || + (waited >= mysqld_port_timeout)) + break; + sql_print_information("Retrying bind on TCP/IP port %u", mysqld_port); + this_wait= retry * retry / 3 + 1; + sleep(this_wait); + } + if (ret < 0) { DBUG_PRINT("error",("Got error: %d from bind",socket_errno)); sql_perror("Can't start server: Bind on TCP/IP port"); @@ -1373,22 +1502,12 @@ static void server_init(void) } } - if ((user_info= check_user(mysqld_user))) - { -#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) - if (locked_in_memory) // getuid() == 0 here - set_effective_user(user_info); - else -#endif - set_user(mysqld_user, user_info); - } - #ifdef __NT__ /* create named pipe */ if (Service.IsNT() && mysqld_unix_port[0] && !opt_bootstrap && opt_enable_named_pipe) { - + pipe_name[sizeof(pipe_name)-1]= 0; /* Safety if too long string */ strxnmov(pipe_name, sizeof(pipe_name)-1, "\\\\.\\pipe\\", mysqld_unix_port, NullS); @@ -1405,7 +1524,7 @@ static void server_init(void) sql_perror("Can't start server : Set security descriptor"); unireg_abort(1); } - saPipeSecurity.nLength = sizeof( SECURITY_ATTRIBUTES ); + saPipeSecurity.nLength = sizeof(SECURITY_ATTRIBUTES); saPipeSecurity.lpSecurityDescriptor = &sdPipeDescriptor; saPipeSecurity.bInheritHandle = FALSE; if ((hPipe= CreateNamedPipe(pipe_name, @@ -1425,9 +1544,9 @@ static void server_init(void) FORMAT_MESSAGE_FROM_SYSTEM, NULL, error, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR) &lpMsgBuf, 0, NULL ); - MessageBox( NULL, (LPTSTR) lpMsgBuf, "Error from 
CreateNamedPipe", - MB_OK|MB_ICONINFORMATION ); - LocalFree( lpMsgBuf ); + MessageBox(NULL, (LPTSTR) lpMsgBuf, "Error from CreateNamedPipe", + MB_OK|MB_ICONINFORMATION); + LocalFree(lpMsgBuf); unireg_abort(1); } } @@ -1443,8 +1562,8 @@ static void server_init(void) if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1)) { - sql_print_error("The socket file path is too long (> %lu): %s", - sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port); + sql_print_error("The socket file path is too long (> %u): %s", + (uint) sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port); unireg_abort(1); } if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0) @@ -1481,17 +1600,6 @@ static void server_init(void) #endif /*!EMBEDDED_LIBRARY*/ -void yyerror(const char *s) -{ - THD *thd=current_thd; - char *yytext= (char*) thd->lex->tok_start; - /* "parse error" changed into "syntax error" between bison 1.75 and 1.875 */ - if (strcmp(s,"parse error") == 0 || strcmp(s,"syntax error") == 0) - s=ER(ER_SYNTAX_ERROR); - net_printf(thd,ER_PARSE_ERROR, s, yytext ? (char*) yytext : "", - thd->lex->yylineno); -} - #ifndef EMBEDDED_LIBRARY /* @@ -1517,11 +1625,11 @@ void close_connection(THD *thd, uint errcode, bool lock) errcode ? 
ER(errcode) : "")); if (lock) (void) pthread_mutex_lock(&LOCK_thread_count); - thd->killed=1; - if ((vio=thd->net.vio) != 0) + thd->killed= THD::KILL_CONNECTION; + if ((vio= thd->net.vio) != 0) { if (errcode) - send_error(thd, errcode, ER(errcode)); /* purecov: inspected */ + net_send_error(thd, errcode, ER(errcode)); /* purecov: inspected */ vio_close(vio); /* vio is freed in delete thd */ } if (lock) @@ -1571,6 +1679,7 @@ void end_thread(THD *thd, bool put_in_cache) wake_thread--; thd=thread_cache.get(); thd->real_id=pthread_self(); + thd->thread_stack= (char*) &thd; // For store_globals (void) thd->store_globals(); /* THD::mysys_var::abort is associated with physical thread rather @@ -1585,13 +1694,11 @@ void end_thread(THD *thd, bool put_in_cache) } } - DBUG_PRINT("info", ("sending a broadcast")) - /* Tell main we are ready */ (void) pthread_mutex_unlock(&LOCK_thread_count); /* It's safe to broadcast outside a lock (COND... is not deleted here) */ + DBUG_PRINT("signal", ("Broadcasting COND_thread_count")); (void) pthread_cond_broadcast(&COND_thread_count); - DBUG_PRINT("info", ("unlocked thread_count mutex")) #ifdef ONE_THREAD if (!(test_flags & TEST_NO_THREADS)) // For debugging under Linux #endif @@ -1603,17 +1710,6 @@ void end_thread(THD *thd, bool put_in_cache) } -/* Start a cached thread. LOCK_thread_count is locked on entry */ - -static void start_cached_thread(THD *thd) -{ - thread_cache.append(thd); - wake_thread++; - thread_count++; - pthread_cond_signal(&COND_thread_cache); -} - - void flush_thread_cache() { (void) pthread_mutex_lock(&LOCK_thread_count); @@ -1628,18 +1724,36 @@ void flush_thread_cache() } +/* + Aborts a thread nicely. Commes here on SIGPIPE + TODO: One should have to fix that thr_alarm know about this + thread too. 
+*/ + +#ifdef THREAD_SPECIFIC_SIGPIPE +extern "C" sig_handler abort_thread(int sig __attribute__((unused))) +{ + THD *thd=current_thd; + DBUG_ENTER("abort_thread"); + if (thd) + thd->killed= THD::KILL_CONNECTION; + DBUG_VOID_RETURN; +} +#endif + /****************************************************************************** Setup a signal thread with handles all signals. Because Linux doesn't support schemas use a mutex to check that the signal thread is ready before continuing ******************************************************************************/ + #if defined(__WIN__) || defined(OS2) static void init_signals(void) { int signals[] = {SIGINT,SIGILL,SIGFPE,SIGSEGV,SIGTERM,SIGABRT } ; for (uint i=0 ; i < sizeof(signals)/sizeof(int) ; i++) - signal( signals[i], kill_server) ; + signal(signals[i], kill_server) ; #if defined(__WIN__) signal(SIGBREAK,SIG_IGN); //ignore SIGBREAK for NT #else @@ -1663,18 +1777,18 @@ static void check_data_home(const char *path) // down server event callback void mysql_down_server_cb(void *, void *) { - event_flag = TRUE; + event_flag= TRUE; kill_server(0); } // destroy callback resources void mysql_cb_destroy(void *) -{ - UnRegisterEventNotification(eh); // cleanup down event notification +{ + UnRegisterEventNotification(eh); // cleanup down event notification NX_UNWRAP_INTERFACE(ref); - /* Deregister NSS volume deactivation event */ - NX_UNWRAP_INTERFACE(refneb); + /* Deregister NSS volume deactivation event */ + NX_UNWRAP_INTERFACE(refneb); if (neb_consumer_id) UnRegisterConsumer(neb_consumer_id, NULL); } @@ -1726,7 +1840,7 @@ static void registerwithneb() { ConsumerRegistrationInfo reg_info; - + /* Clear NEB registration structure */ bzero((char*) ®_info, sizeof(struct ConsumerRegistrationInfo)); @@ -1742,7 +1856,7 @@ static void registerwithneb() reg_info.CRIOwnerID= (LoadDefinitionStructure *)getnlmhandle(); reg_info.CRIConsumerESR= NULL; // No consumer ESR required reg_info.CRISecurityToken= 0; // No security token for the 
event - reg_info.CRIConsumerFlags= 0; // SMP_ENABLED_BIT; + reg_info.CRIConsumerFlags= 0; // SMP_ENABLED_BIT; reg_info.CRIFilterName= 0; // No event filtering reg_info.CRIFilterDataLength= 0; // No filtering data reg_info.CRIFilterData= 0; // No filtering data @@ -1767,7 +1881,7 @@ static void registerwithneb() Get the NSS volume ID of the MySQL Data volume. Volume ID is stored in a global variable */ - getvolumeID((BYTE*) datavolname); + getvolumeID((BYTE*) datavolname); } @@ -1798,7 +1912,6 @@ ulong neb_event_callback(struct EventBlock *eblock) nw_panic = TRUE; event_flag= TRUE; kill_server(0); - } } return 0; @@ -1833,7 +1946,7 @@ static void getvolumeID(BYTE *volumeName) strxmov(path, (const char *) ADMIN_VOL_PATH, (const char *) volumeName, NullS); - if ((status= zOpen(rootKey, zNSS_TASK, zNSPACE_LONG|zMODE_UTF8, + if ((status= zOpen(rootKey, zNSS_TASK, zNSPACE_LONG|zMODE_UTF8, (BYTE *) path, zRR_READ_ACCESS, &fileKey)) != zOK) { consoleprintf("\nGetNSSVolumeProperties - Failed to get file, status: %d\n.", (int) status); @@ -1841,7 +1954,7 @@ static void getvolumeID(BYTE *volumeName) } getInfoMask= zGET_IDS | zGET_VOLUME_INFO ; - if ((status= zGetInfo(fileKey, getInfoMask, sizeof(info), + if ((status= zGetInfo(fileKey, getInfoMask, sizeof(info), zINFO_VERSION_A, &info)) != zOK) { consoleprintf("\nGetNSSVolumeProperties - Failed in zGetInfo, status: %d\n.", (int) status); @@ -1856,7 +1969,7 @@ static void getvolumeID(BYTE *volumeName) datavolid.clockSeqLow= info.vol.volumeID.clockSeqLow; /* This is guranteed to be 6-byte length (but sizeof() would be better) */ memcpy(datavolid.node, info.vol.volumeID.node, (unsigned int) 6); - + exit: if (rootKey) zClose(rootKey); @@ -1875,6 +1988,7 @@ static void init_signals(void) } + static void start_signal_handler(void) { // Save vm id of this process @@ -1899,7 +2013,8 @@ static void check_data_home(const char *path) static void sig_reload(int signo) { // Flush everything - reload_acl_and_cache((THD*) 0,REFRESH_LOG, 
(TABLE_LIST*) 0, NULL); + bool not_used; + reload_acl_and_cache((THD*) 0,REFRESH_LOG, (TABLE_LIST*) 0, ¬_used); signal(signo, SIG_ACK); } @@ -1925,6 +2040,7 @@ static void init_signals(void) signal_thread = pthread_self(); } + static void start_signal_handler(void) {} @@ -1972,14 +2088,14 @@ or misconfigured. This error can also be caused by malfunctioning hardware.\n", We will try our best to scrape up some info that will hopefully help diagnose\n\ the problem, but since we have already crashed, something is definitely wrong\n\ and this may fail.\n\n"); - fprintf(stderr, "key_buffer_size=%lu\n", + fprintf(stderr, "key_buffer_size=%lu\n", (ulong) dflt_key_cache->key_cache_mem_size); - fprintf(stderr, "read_buffer_size=%ld\n", global_system_variables.read_buff_size); - fprintf(stderr, "max_used_connections=%ld\n", max_used_connections); - fprintf(stderr, "max_connections=%ld\n", max_connections); - fprintf(stderr, "threads_connected=%d\n", thread_count); + fprintf(stderr, "read_buffer_size=%ld\n", (long) global_system_variables.read_buff_size); + fprintf(stderr, "max_used_connections=%lu\n", max_used_connections); + fprintf(stderr, "max_connections=%lu\n", max_connections); + fprintf(stderr, "threads_connected=%u\n", thread_count); fprintf(stderr, "It is possible that mysqld could use up to \n\ -key_buffer_size + (read_buffer_size + sort_buffer_size)*max_connections = %ld K\n\ +key_buffer_size + (read_buffer_size + sort_buffer_size)*max_connections = %lu K\n\ bytes of memory\n", ((ulong) dflt_key_cache->key_cache_mem_size + (global_system_variables.read_buff_size + global_system_variables.sortbuff_size) * @@ -2010,7 +2126,7 @@ the thread stack. 
Please read http://www.mysql.com/doc/en/Linux.html\n\n", fprintf(stderr, "Trying to get some variables.\n\ Some pointers may be invalid and cause the dump to abort...\n"); safe_print_str("thd->query", thd->query, 1024); - fprintf(stderr, "thd->thread_id=%ld\n", thd->thread_id); + fprintf(stderr, "thd->thread_id=%lu\n", (ulong) thd->thread_id); } fprintf(stderr, "\ The manual page at http://www.mysql.com/doc/en/Crashing.html contains\n\ @@ -2056,6 +2172,8 @@ bugs.\n"); #define SA_NODEFER 0 #endif +#ifndef EMBEDDED_LIBRARY + static void init_signals(void) { sigset_t set; @@ -2134,7 +2252,6 @@ static void init_signals(void) } -#ifndef EMBEDDED_LIBRARY static void start_signal_handler(void) { int error; @@ -2147,8 +2264,16 @@ static void start_signal_handler(void) (void) pthread_attr_setdetachstate(&thr_attr,PTHREAD_CREATE_DETACHED); if (!(opt_specialflag & SPECIAL_NO_PRIOR)) my_pthread_attr_setprio(&thr_attr,INTERRUPT_PRIOR); +#if defined(__ia64__) || defined(__ia64) + /* + Peculiar things with ia64 platforms - it seems we only have half the + stack size in reality, so we have to double it here + */ + pthread_attr_setstacksize(&thr_attr,thread_stack*2); +#else pthread_attr_setstacksize(&thr_attr,thread_stack); #endif +#endif (void) pthread_mutex_lock(&LOCK_thread_count); if ((error=pthread_create(&signal_thread,&thr_attr,signal_hand,0))) @@ -2168,7 +2293,7 @@ static void start_signal_handler(void) /* This threads handles all signals and alarms */ /* ARGSUSED */ -extern "C" void *signal_hand(void *arg __attribute__((unused))) +pthread_handler_t signal_hand(void *arg __attribute__((unused))) { sigset_t set; int sig; @@ -2257,7 +2382,7 @@ extern "C" void *signal_hand(void *arg __attribute__((unused))) if (!(opt_specialflag & SPECIAL_NO_PRIOR)) my_pthread_attr_setprio(&connection_attrib,INTERRUPT_PRIOR); if (pthread_create(&tmp,&connection_attrib, kill_server_thread, - (void*) sig)) + (void*) &sig)) sql_print_error("Can't create thread to kill server"); #else 
kill_server((void*) sig); // MIT THREAD has a alarm thread @@ -2267,12 +2392,13 @@ extern "C" void *signal_hand(void *arg __attribute__((unused))) case SIGHUP: if (!abort_loop) { - mysql_print_status((THD*) 0); // Print some debug info + bool not_used; + mysql_print_status(); // Print some debug info reload_acl_and_cache((THD*) 0, (REFRESH_LOG | REFRESH_TABLES | REFRESH_FAST | - REFRESH_STATUS | REFRESH_GRANT | + REFRESH_GRANT | REFRESH_THREADS | REFRESH_HOSTS), - (TABLE_LIST*) 0, NULL); // Flush logs + (TABLE_LIST*) 0, ¬_used); // Flush logs } break; #ifdef USE_ONE_SIGNAL_HAND @@ -2289,29 +2415,52 @@ extern "C" void *signal_hand(void *arg __attribute__((unused))) } return(0); /* purecov: deadcode */ } -#endif /*!EMBEDDED_LIBRARY*/ static void check_data_home(const char *path) {} +#endif /*!EMBEDDED_LIBRARY*/ #endif /* __WIN__*/ /* - All global error messages are sent here where the first one is stored for - the client + All global error messages are sent here where the first one is stored + for the client */ /* ARGSUSED */ -extern "C" int my_message_sql(uint error, const char *str, myf MyFlags) +static int my_message_sql(uint error, const char *str, myf MyFlags) { THD *thd; DBUG_ENTER("my_message_sql"); - DBUG_PRINT("error", ("Message: '%s'", str)); + DBUG_PRINT("error", ("error: %u message: '%s'", error, str)); + /* + Put here following assertion when situation with EE_* error codes + will be fixed + DBUG_ASSERT(error != 0); + */ if ((thd= current_thd)) { /* + TODO: There are two exceptions mechanism (THD and sp_rcontext), + this could be improved by having a common stack of handlers. 
+ */ + if (thd->handle_error(error, + MYSQL_ERROR::WARN_LEVEL_ERROR)) + DBUG_RETURN(0); + + if (thd->spcont && + thd->spcont->handle_error(error, MYSQL_ERROR::WARN_LEVEL_ERROR, thd)) + { + DBUG_RETURN(0); + } + + thd->query_error= 1; // needed to catch query errors during replication + + if (!thd->no_warnings_for_error) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error, str); + /* thd->lex->current_select == 0 if lex structure is not inited (not query command (COM_QUERY)) */ @@ -2322,13 +2471,12 @@ extern "C" int my_message_sql(uint error, const char *str, myf MyFlags) (thd->lex->current_select ? thd->lex->current_select->no_error : 0), (int) thd->is_fatal_error)); - - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, error, str); } else { NET *net= &thd->net; net->report_error= 1; + query_cache_abort(net); if (!net->last_error[0]) // Return only first message { strmake(net->last_error, str, sizeof(net->last_error)-1); @@ -2341,6 +2489,21 @@ extern "C" int my_message_sql(uint error, const char *str, myf MyFlags) DBUG_RETURN(0); } + +#ifndef EMBEDDED_LIBRARY +static void *my_str_malloc_mysqld(size_t size) +{ + return my_malloc(size, MYF(MY_FAE)); +} + + +static void my_str_free_mysqld(void *ptr) +{ + my_free((gptr)ptr, MYF(MY_FAE)); +} +#endif /* EMBEDDED_LIBRARY */ + + #ifdef __WIN__ struct utsname @@ -2355,7 +2518,7 @@ int uname(struct utsname *a) } -extern "C" pthread_handler_decl(handle_shutdown,arg) +pthread_handler_t handle_shutdown(void *arg) { MSG msg; my_thread_init(); @@ -2384,15 +2547,15 @@ int STDCALL handle_kill(ulong ctrl_type) #ifdef OS2 -extern "C" pthread_handler_decl(handle_shutdown,arg) +pthread_handler_t handle_shutdown(void *arg) { my_thread_init(); // wait semaphore - pthread_cond_wait( &eventShutdown, NULL); + pthread_cond_wait(&eventShutdown, NULL); // close semaphore and kill server - pthread_cond_destroy( &eventShutdown); + pthread_cond_destroy(&eventShutdown); /* Exit main loop on main thread, so kill will be done from @@ -2409,43 
+2572,16 @@ extern "C" pthread_handler_decl(handle_shutdown,arg) #endif -const char *load_default_groups[]= { +static const char *load_default_groups[]= { #ifdef HAVE_NDBCLUSTER_DB "mysql_cluster", #endif -"mysqld","server",MYSQL_BASE_VERSION,0,0}; +"mysqld","server", MYSQL_BASE_VERSION, 0, 0}; + +#if defined(__WIN__) && !defined(EMBEDDED_LIBRARY) static const int load_default_groups_sz= sizeof(load_default_groups)/sizeof(load_default_groups[0]); - -bool open_log(MYSQL_LOG *log, const char *hostname, - const char *opt_name, const char *extension, - const char *index_file_name, - enum_log_type type, bool read_append, - bool no_auto_events, ulong max_size) -{ - char tmp[FN_REFLEN]; - if (!opt_name || !opt_name[0]) - { - /* - TODO: The following should be using fn_format(); We just need to - first change fn_format() to cut the file name if it's too long. - */ - strmake(tmp,hostname,FN_REFLEN-5); - strmov(fn_ext(tmp),extension); - opt_name=tmp; - } - // get rid of extension if the log is binary to avoid problems - if (type == LOG_BIN) - { - char *p = fn_ext(opt_name); - uint length=(uint) (p-opt_name); - strmake(tmp,opt_name,min(length,FN_REFLEN)); - opt_name=tmp; - } - return log->open(opt_name, type, 0, index_file_name, - (read_append) ? 
SEQ_READ_APPEND : WRITE_CACHE, - no_auto_events, max_size); -} +#endif /* @@ -2455,7 +2591,7 @@ bool open_log(MYSQL_LOG *log, const char *hostname, init_global_datetime_format() format_type What kind of format should be supported var_ptr Pointer to variable that should be updated - + NOTES The default value is taken from either opt_date_time_formats[] or the ISO format (ANSI SQL) @@ -2465,8 +2601,8 @@ bool open_log(MYSQL_LOG *log, const char *hostname, 1 error */ -bool init_global_datetime_format(timestamp_type format_type, - DATE_TIME_FORMAT **var_ptr) +static bool init_global_datetime_format(timestamp_type format_type, + DATE_TIME_FORMAT **var_ptr) { /* Get command line option */ const char *str= opt_date_time_formats[format_type]; @@ -2494,10 +2630,11 @@ static int init_common_variables(const char *conf_file_name, int argc, char **argv, const char **groups) { umask(((~my_umask) & 0666)); + my_decimal_set_zero(&decimal_zero); // set decimal_zero constant; tzset(); // Set tzname max_system_variables.pseudo_thread_id= (ulong)~0; - start_time=time((time_t*) 0); + server_start_time= time((time_t*) 0); if (init_thread_environment()) return 1; mysql_init_variables(); @@ -2513,20 +2650,20 @@ static int init_common_variables(const char *conf_file_name, int argc, #ifdef HAVE_TZNAME { struct tm tm_tmp; - localtime_r(&start_time,&tm_tmp); + localtime_r(&server_start_time,&tm_tmp); strmake(system_time_zone, tzname[tm_tmp.tm_isdst != 0 ? 1 : 0], sizeof(system_time_zone)-1); } #endif /* - We set SYSTEM time zone as reasonable default and + We set SYSTEM time zone as reasonable default and also for failure of my_tz_init() and bootstrap mode. If user explicitly set time zone with --default-time-zone option we will change this value in my_tz_init(). */ global_system_variables.time_zone= my_tz_SYSTEM; - + /* Init mutexes for the global MYSQL_LOG objects. 
As safe_mutex depends on what MY_INIT() does, we can't init the mutexes of @@ -2534,13 +2671,18 @@ static int init_common_variables(const char *conf_file_name, int argc, before MY_INIT(). So we do it here. */ mysql_log.init_pthread_objects(); - mysql_update_log.init_pthread_objects(); mysql_slow_log.init_pthread_objects(); mysql_bin_log.init_pthread_objects(); - - if (gethostname(glob_hostname,sizeof(glob_hostname)-4) < 0) - strmov(glob_hostname,"mysql"); - strmake(pidfile_name, glob_hostname, sizeof(pidfile_name)-5); + + if (gethostname(glob_hostname,sizeof(glob_hostname)) < 0) + { + strmake(glob_hostname, STRING_WITH_LEN("localhost")); + sql_print_warning("gethostname failed, using '%s' as hostname", + glob_hostname); + strmake(pidfile_name, STRING_WITH_LEN("mysql")); + } + else + strmake(pidfile_name, glob_hostname, sizeof(pidfile_name)-5); strmov(fn_ext(pidfile_name),".pid"); // Add proper extension load_defaults(conf_file_name, groups, &argc, &argv); @@ -2551,6 +2693,19 @@ static int init_common_variables(const char *conf_file_name, int argc, DBUG_PRINT("info",("%s Ver %s for %s on %s\n",my_progname, server_version, SYSTEM_TYPE,MACHINE_TYPE)); +#ifdef HAVE_LARGE_PAGES + /* Initialize large page size */ + if (opt_large_pages && (opt_large_page_size= my_get_large_page_size())) + { + my_use_large_pages= 1; + my_large_page_size= opt_large_page_size; +#ifdef HAVE_INNOBASE_DB + innobase_use_large_pages= 1; + innobase_large_page_size= opt_large_page_size; +#endif + } +#endif /* HAVE_LARGE_PAGES */ + /* connections and databases needs lots of files */ { uint files, wanted_files, max_open_files; @@ -2613,10 +2768,33 @@ static int init_common_variables(const char *conf_file_name, int argc, #ifdef USE_REGEX my_regex_init(&my_charset_latin1); #endif - if (!(default_charset_info= get_charset_by_csname(default_character_set_name, - MY_CS_PRIMARY, - MYF(MY_WME)))) - return 1; + /* + Process a comma-separated character set list and choose + the first available character set. 
This is mostly for + test purposes, to be able to start "mysqld" even if + the requested character set is not available (see bug#18743). + */ + for (;;) + { + char *next_character_set_name= strchr(default_character_set_name, ','); + if (next_character_set_name) + *next_character_set_name++= '\0'; + if (!(default_charset_info= + get_charset_by_csname(default_character_set_name, + MY_CS_PRIMARY, MYF(MY_WME)))) + { + if (next_character_set_name) + { + default_character_set_name= next_character_set_name; + default_collation_name= 0; // Ignore collation + } + else + return 1; // Eof of the list + } + else + break; + } + if (default_collation_name) { CHARSET_INFO *default_collation; @@ -2643,6 +2821,12 @@ static int init_common_variables(const char *conf_file_name, int argc, global_system_variables.character_set_client= default_charset_info; global_system_variables.collation_connection= default_charset_info; + if (!(character_set_filesystem= + get_charset_by_csname(character_set_filesystem_name, + MY_CS_PRIMARY, MYF(MY_WME)))) + return 1; + global_system_variables.character_set_filesystem= character_set_filesystem; + sys_init_connect.value_length= 0; if ((sys_init_connect.value= opt_init_connect)) sys_init_connect.value_length= strlen(opt_init_connect); @@ -2666,9 +2850,8 @@ static int init_common_variables(const char *conf_file_name, int argc, get corrupted if accesses with names of different case. 
*/ DBUG_PRINT("info", ("lower_case_table_names: %d", lower_case_table_names)); - if (!lower_case_table_names && - (lower_case_file_system= - (test_if_case_insensitive(mysql_real_data_home) == 1))) + lower_case_file_system= test_if_case_insensitive(mysql_real_data_home); + if (!lower_case_table_names && lower_case_file_system == 1) { if (lower_case_table_names_used) { @@ -2732,16 +2915,29 @@ static int init_thread_environment() (void) pthread_mutex_init(&LOCK_user_conn, MY_MUTEX_INIT_FAST); (void) pthread_mutex_init(&LOCK_active_mi, MY_MUTEX_INIT_FAST); (void) pthread_mutex_init(&LOCK_global_system_variables, MY_MUTEX_INIT_FAST); + (void) pthread_mutex_init(&LOCK_global_read_lock, MY_MUTEX_INIT_FAST); (void) pthread_mutex_init(&LOCK_prepared_stmt_count, MY_MUTEX_INIT_FAST); (void) pthread_mutex_init(&LOCK_uuid_generator, MY_MUTEX_INIT_FAST); #ifdef HAVE_OPENSSL (void) pthread_mutex_init(&LOCK_des_key_file,MY_MUTEX_INIT_FAST); +#ifndef HAVE_YASSL + openssl_stdlocks= (openssl_lock_t*) OPENSSL_malloc(CRYPTO_num_locks() * + sizeof(openssl_lock_t)); + for (int i= 0; i < CRYPTO_num_locks(); ++i) + (void) my_rwlock_init(&openssl_stdlocks[i].lock, NULL); + CRYPTO_set_dynlock_create_callback(openssl_dynlock_create); + CRYPTO_set_dynlock_destroy_callback(openssl_dynlock_destroy); + CRYPTO_set_dynlock_lock_callback(openssl_lock); + CRYPTO_set_locking_callback(openssl_lock_function); + CRYPTO_set_id_callback(openssl_id_function); +#endif #endif (void) my_rwlock_init(&LOCK_sys_init_connect, NULL); (void) my_rwlock_init(&LOCK_sys_init_slave, NULL); (void) my_rwlock_init(&LOCK_grant, NULL); (void) pthread_cond_init(&COND_thread_count,NULL); (void) pthread_cond_init(&COND_refresh,NULL); + (void) pthread_cond_init(&COND_global_read_lock,NULL); (void) pthread_cond_init(&COND_thread_cache,NULL); (void) pthread_cond_init(&COND_flush_thread_cache,NULL); (void) pthread_cond_init(&COND_manager,NULL); @@ -2749,6 +2945,7 @@ static int init_thread_environment() (void) 
pthread_mutex_init(&LOCK_rpl_status, MY_MUTEX_INIT_FAST); (void) pthread_cond_init(&COND_rpl_status, NULL); #endif + sp_cache_init(); /* Parameter for threads created for connections */ (void) pthread_attr_init(&connection_attrib); (void) pthread_attr_setdetachstate(&connection_attrib, @@ -2763,37 +2960,26 @@ static int init_thread_environment() sql_print_error("Can't create thread-keys"); return 1; } -#ifdef HAVE_OPENSSL - openssl_stdlocks= (openssl_lock_t*) OPENSSL_malloc(CRYPTO_num_locks() * - sizeof(openssl_lock_t)); - for (int i= 0; i < CRYPTO_num_locks(); ++i) - (void) my_rwlock_init(&openssl_stdlocks[i].lock, NULL); - CRYPTO_set_dynlock_create_callback(openssl_dynlock_create); - CRYPTO_set_dynlock_destroy_callback(openssl_dynlock_destroy); - CRYPTO_set_dynlock_lock_callback(openssl_lock); - CRYPTO_set_locking_callback(openssl_lock_function); - CRYPTO_set_id_callback(openssl_id_function); -#endif return 0; } -#ifdef HAVE_OPENSSL +#if defined(HAVE_OPENSSL) && !defined(HAVE_YASSL) static unsigned long openssl_id_function() -{ +{ return (unsigned long) pthread_self(); -} +} static openssl_lock_t *openssl_dynlock_create(const char *file, int line) -{ +{ openssl_lock_t *lock= new openssl_lock_t; my_rwlock_init(&lock->lock, NULL); return lock; } -static void openssl_dynlock_destroy(openssl_lock_t *lock, const char *file, +static void openssl_dynlock_destroy(openssl_lock_t *lock, const char *file, int line) { rwlock_destroy(&lock->lock); @@ -2813,7 +2999,7 @@ static void openssl_lock_function(int mode, int n, const char *file, int line) } -static void openssl_lock(int mode, openssl_lock_t *lock, const char *file, +static void openssl_lock(int mode, openssl_lock_t *lock, const char *file, int line) { int err; @@ -2847,6 +3033,8 @@ static void openssl_lock(int mode, openssl_lock_t *lock, const char *file, #endif /* HAVE_OPENSSL */ +#ifndef EMBEDDED_LIBRARY + static void init_ssl() { #ifdef HAVE_OPENSSL @@ -2856,15 +3044,23 @@ static void init_ssl() ssl_acceptor_fd= 
new_VioSSLAcceptorFd(opt_ssl_key, opt_ssl_cert, opt_ssl_ca, opt_ssl_capath, opt_ssl_cipher); - DBUG_PRINT("info",("ssl_acceptor_fd: %lx", (long) ssl_acceptor_fd)); + DBUG_PRINT("info",("ssl_acceptor_fd: 0x%lx", (long) ssl_acceptor_fd)); if (!ssl_acceptor_fd) + { opt_use_ssl = 0; + have_ssl= SHOW_OPTION_DISABLED; + } + } + else + { + have_ssl= SHOW_OPTION_DISABLED; } if (des_key_file) load_des_key_file(des_key_file); #endif /* HAVE_OPENSSL */ } +#endif /* EMBEDDED_LIBRARY */ static int init_server_components() { @@ -2876,7 +3072,7 @@ static int init_server_components() query_cache_set_min_res_unit(query_cache_min_res_unit); query_cache_init(); query_cache_resize(query_cache_size); - randominit(&sql_rand,(ulong) start_time,(ulong) start_time/2); + randominit(&sql_rand,(ulong) server_start_time,(ulong) server_start_time/2); reset_floating_point_exceptions(); init_thr_lock(); #ifdef HAVE_REPLICATION @@ -2884,41 +3080,69 @@ static int init_server_components() #endif /* Setup log files */ if (opt_log) - open_log(&mysql_log, glob_hostname, opt_logname, ".log", NullS, - LOG_NORMAL, 0, 0, 0); + mysql_log.open_query_log(opt_logname); if (opt_update_log) { - open_log(&mysql_update_log, glob_hostname, opt_update_logname, "", - NullS, LOG_NEW, 0, 0, 0); - using_update_log=1; - } - if (opt_slow_log) - open_log(&mysql_slow_log, glob_hostname, opt_slow_logname, "-slow.log", - NullS, LOG_NORMAL, 0, 0, 0); - - if (opt_bin_log) - { - /* If we fail to open binlog, it's going to hinder our recovery, so die */ - if (open_log(&mysql_bin_log, glob_hostname, opt_bin_logname, "-bin", - opt_binlog_index_name, LOG_BIN, 0, 0, max_binlog_size)) - unireg_abort(1); - using_update_log=1; -#ifdef HAVE_REPLICATION - if (expire_logs_days) + /* + Update log is removed since 5.0. But we still accept the option. + The idea is if the user already uses the binlog and the update log, + we completely ignore any option/variable related to the update log, like + if the update log did not exist. 
But if the user uses only the update + log, then we translate everything into binlog for him (with warnings). + Implementation of the above : + - If mysqld is started with --log-update and --log-bin, + ignore --log-update (print a warning), push a warning when SQL_LOG_UPDATE + is used, and turn off --sql-bin-update-same. + This will completely ignore SQL_LOG_UPDATE + - If mysqld is started with --log-update only, + change it to --log-bin (with the filename passed to log-update, + plus '-bin') (print a warning), push a warning when SQL_LOG_UPDATE is + used, and turn on --sql-bin-update-same. + This will translate SQL_LOG_UPDATE to SQL_LOG_BIN. + + Note that we tell the user that --sql-bin-update-same is deprecated and + does nothing, and we don't take into account if he used this option or + not; but internally we give this variable a value to have the behaviour + we want (i.e. have SQL_LOG_UPDATE influence SQL_LOG_BIN or not). + As sql-bin-update-same, log-update and log-bin cannot be changed by the + user after starting the server (they are not variables), the user will + not later interfere with the settings we do here. + */ + if (opt_bin_log) { - long purge_time= time(0) - expire_logs_days*24*60*60; - if (purge_time >= 0) - mysql_bin_log.purge_logs_before_date(purge_time); + opt_sql_bin_update= 0; + sql_print_error("The update log is no longer supported by MySQL in \ +version 5.0 and above. It is replaced by the binary log."); + } + else + { + opt_sql_bin_update= 1; + opt_bin_log= 1; + if (opt_update_logname) + { + /* as opt_bin_log==0, no need to free opt_bin_logname */ + if (!(opt_bin_logname= my_strdup(opt_update_logname, MYF(MY_WME)))) + exit(EXIT_OUT_OF_MEMORY); + sql_print_error("The update log is no longer supported by MySQL in \ +version 5.0 and above. It is replaced by the binary log. 
Now starting MySQL \ +with --log-bin='%s' instead.",opt_bin_logname); + } + else + sql_print_error("The update log is no longer supported by MySQL in \ +version 5.0 and above. It is replaced by the binary log. Now starting MySQL \ +with --log-bin instead."); } -#endif } - else if (opt_log_slave_updates) + if (opt_log_slave_updates && !opt_bin_log) { - sql_print_warning("\ -you need to use --log-bin to make --log-slave-updates work. \ -Now disabling --log-slave-updates."); + sql_print_warning("You need to use --log-bin to make " + "--log-slave-updates work."); + unireg_abort(1); } + if (opt_slow_log) + mysql_slow_log.open_slow_log(opt_slow_logname); + #ifdef HAVE_REPLICATION if (opt_log_slave_updates && replicate_same_server_id) { @@ -2933,7 +3157,7 @@ server."); if (opt_error_log) { if (!log_error_file_ptr[0]) - fn_format(log_error_file, glob_hostname, mysql_data_home, ".err", + fn_format(log_error_file, pidfile_name, mysql_data_home, ".err", MY_REPLACE_EXT); /* replace '.<domain>' by '.err', bug#4997 */ else fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err", @@ -2949,60 +3173,101 @@ server."); } } - if (opt_innodb_safe_binlog) + if (opt_bin_log) { - if (have_innodb != SHOW_OPTION_YES) - sql_print_warning("--innodb-safe-binlog is meaningful only if " - "the InnoDB storage engine is enabled in the server."); -#ifdef HAVE_INNOBASE_DB - if (innobase_flush_log_at_trx_commit != 1) - { - sql_print_warning("--innodb-safe-binlog is meaningful only if " - "innodb_flush_log_at_trx_commit is 1; now setting it " - "to 1."); - innobase_flush_log_at_trx_commit= 1; - } - if (innobase_unix_file_flush_method) + char buf[FN_REFLEN]; + const char *ln; + ln= mysql_bin_log.generate_name(opt_bin_logname, "-bin", 1, buf); + if (!opt_bin_logname && !opt_binlog_index_name) { /* - This option has so many values that it's hard to know which value is - good (especially "littlesync", and on Windows... see - srv/srv0start.c). 
+ User didn't give us info to name the binlog index file. + Picking `hostname`-bin.index like did in 4.x, causes replication to + fail if the hostname is changed later. So, we would like to instead + require a name. But as we don't want to break many existing setups, we + only give warning, not error. */ - sql_print_warning("--innodb-safe-binlog requires that " - "the innodb_flush_method actually synchronizes the " - "InnoDB log to disk; it is your responsibility " - "to verify that the method you chose does it."); + sql_print_warning("No argument was provided to --log-bin, and " + "--log-bin-index was not used; so replication " + "may break when this MySQL server acts as a " + "master and has his hostname changed!! Please " + "use '--log-bin=%s' to avoid this problem.", ln); } - if (sync_binlog_period != 1) + if (ln == buf) { - sql_print_warning("--innodb-safe-binlog is meaningful only if " - "the global sync_binlog variable is 1; now setting it " - "to 1."); - sync_binlog_period= 1; + my_free(opt_bin_logname, MYF(MY_ALLOW_ZERO_PTR)); + opt_bin_logname=my_strdup(buf, MYF(0)); } -#endif + if (mysql_bin_log.open_index_file(opt_binlog_index_name, ln)) + { + unireg_abort(1); + } + + /* + Used to specify which type of lock we need to use for queries of type + INSERT ... SELECT. This will change when we have row level logging. + */ + using_update_log=1; } + if (xid_cache_init()) + { + sql_print_error("Out of memory"); + unireg_abort(1); + } if (ha_init()) { sql_print_error("Can't init databases"); unireg_abort(1); } - if (opt_myisam_log) - (void) mi_log(1); /* - Now that InnoDB is initialized, we can know the last good binlog position - and cut the binlog if needed. This function does nothing if there was no - crash recovery by InnoDB. + Check that the default storage engine is actually available. 
*/ - if (opt_innodb_safe_binlog) + if (!ha_storage_engine_is_enabled((enum db_type) + global_system_variables.table_type)) + { + if (!opt_bootstrap) + { + sql_print_error("Default storage engine (%s) is not available", + ha_get_storage_engine((enum db_type) + global_system_variables.table_type)); + unireg_abort(1); + } + global_system_variables.table_type= DB_TYPE_MYISAM; + } + + tc_log= (total_ha_2pc > 1 ? (opt_bin_log ? + (TC_LOG *) &mysql_bin_log : + (TC_LOG *) &tc_log_mmap) : + (TC_LOG *) &tc_log_dummy); + + if (tc_log->open(opt_bin_log ? opt_bin_logname : opt_tc_log_file)) { - /* not fatal if fails (but print errors) */ - mysql_bin_log.cut_spurious_tail(); + sql_print_error("Can't init tc log"); + unireg_abort(1); } - mysql_bin_log.report_pos_in_innodb(); + + if (ha_recover(0)) + { + unireg_abort(1); + } + + if (opt_bin_log && mysql_bin_log.open(opt_bin_logname, LOG_BIN, 0, + WRITE_CACHE, 0, max_binlog_size, 0)) + unireg_abort(1); + +#ifdef HAVE_REPLICATION + if (opt_bin_log && expire_logs_days) + { + long purge_time= (long) (time(0) - expire_logs_days*24*60*60); + if (purge_time >= 0) + mysql_bin_log.purge_logs_before_date(purge_time); + } +#endif + + if (opt_myisam_log) + (void) mi_log(1); /* call ha_init_key_cache() on all key caches to init them */ process_key_caches(&ha_init_key_cache); @@ -3036,6 +3301,7 @@ server."); } +#ifndef EMBEDDED_LIBRARY static void create_maintenance_thread() { if ( @@ -3053,7 +3319,6 @@ static void create_maintenance_thread() static void create_shutdown_thread() { -#if !defined(EMBEDDED_LIBRARY) #ifdef __WIN__ hEventShutdown=CreateEvent(0, FALSE, FALSE, shutdown_event_name); pthread_t hThread; @@ -3069,9 +3334,9 @@ static void create_shutdown_thread() if (pthread_create(&hThread,&connection_attrib,handle_shutdown,0)) sql_print_warning("Can't create thread to handle shutdown requests"); #endif -#endif // EMBEDDED_LIBRARY } +#endif /* EMBEDDED_LIBRARY */ #if defined(__NT__) || defined(HAVE_SMEM) static void 
handle_connections_methods() @@ -3124,7 +3389,7 @@ static void handle_connections_methods() handler_count--; } } -#endif +#endif while (handler_count > 0) pthread_cond_wait(&COND_handler_count,&LOCK_thread_count); @@ -3151,11 +3416,11 @@ int win_main(int argc, char **argv) int main(int argc, char **argv) #endif { + MY_INIT(argv[0]); // init my_sys library & pthreads + /* ^^^ Nothing should be before this line! */ DEBUGGER_OFF; - MY_INIT(argv[0]); // init my_sys library & pthreads - /* Set signal used to kill MySQL */ #if defined(SIGUSR2) thr_kill_signal= thd_lib_detected == THD_LIB_LT ? SIGINT : SIGUSR2; @@ -3167,10 +3432,26 @@ int main(int argc, char **argv) if (_cust_check_startup()) { / * _cust_check_startup will report startup failure error * / - exit( 1 ); + exit(1); } #endif +#ifdef __WIN__ + /* + Before performing any socket operation (like retrieving hostname + in init_common_variables we have to call WSAStartup + */ + { + WSADATA WsaData; + if (SOCKET_ERROR == WSAStartup (0x0101, &WsaData)) + { + /* errors are not read yet, so we use english text here */ + my_message(ER_WSAS_FAILED, "WSAStartup Failed", MYF(0)); + unireg_abort(1); + } + } +#endif /* __WIN__ */ + if (init_common_variables(MYSQL_CONFIG_NAME, argc, argv, load_default_groups)) unireg_abort(1); // Will do exit @@ -3178,30 +3459,43 @@ int main(int argc, char **argv) init_signals(); if (!(opt_specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(),CONNECT_PRIOR); +#if defined(__ia64__) || defined(__ia64) + /* + Peculiar things with ia64 platforms - it seems we only have half the + stack size in reality, so we have to double it here + */ + pthread_attr_setstacksize(&connection_attrib,thread_stack*2); +#else pthread_attr_setstacksize(&connection_attrib,thread_stack); +#endif #ifdef HAVE_PTHREAD_ATTR_GETSTACKSIZE { /* Retrieve used stack size; Needed for checking stack overflows */ size_t stack_size= 0; pthread_attr_getstacksize(&connection_attrib, &stack_size); +#if defined(__ia64__) || 
defined(__ia64) + stack_size/= 2; +#endif /* We must check if stack_size = 0 as Solaris 2.9 can return 0 here */ if (stack_size && stack_size < thread_stack) { if (global_system_variables.log_warnings) - sql_print_warning("Asked for %ld thread stack, but got %ld", - thread_stack, stack_size); + sql_print_warning("Asked for %lu thread stack, but got %ld", + thread_stack, (long) stack_size); +#if defined(__ia64__) || defined(__ia64) + thread_stack= stack_size*2; +#else thread_stack= stack_size; +#endif } } #endif #ifdef __NETWARE__ /* Increasing stacksize of threads on NetWare */ - + pthread_attr_setstacksize(&connection_attrib, NW_THD_STACKSIZE); #endif - thread_stack_min=thread_stack - STACK_MIN_SIZE; - (void) thr_setconcurrency(concurrency); // 10 by default select_thread=pthread_self(); @@ -3224,7 +3518,17 @@ int main(int argc, char **argv) mysql_data_home= mysql_data_home_buff; mysql_data_home[0]=FN_CURLIB; // all paths are relative from here mysql_data_home[1]=0; - server_init(); + + if ((user_info= check_user(mysqld_user))) + { +#if defined(HAVE_MLOCKALL) && defined(MCL_CURRENT) + if (locked_in_memory) // getuid() == 0 here + set_effective_user(user_info); + else +#endif + set_user(mysqld_user, user_info); + } + if (opt_bin_log && !server_id) { @@ -3249,6 +3553,8 @@ we force server id to 2, but this MySQL server will not act as a slave."); if (init_server_components()) exit(1); + network_init(); + #ifdef __WIN__ if (!opt_console) { @@ -3259,10 +3565,16 @@ we force server id to 2, but this MySQL server will not act as a slave."); #endif /* + Initialize my_str_malloc() and my_str_free() + */ + my_str_malloc= &my_str_malloc_mysqld; + my_str_free= &my_str_free_mysqld; + + /* init signals & alarm After this we can't quit by a simple unireg_abort */ - error_handler_hook = my_message_sql; + error_handler_hook= my_message_sql; start_signal_handler(); // Creates pidfile if (mysql_rm_tmp_tables() || acl_init(opt_noacl) || my_tz_init((THD *)0, default_tz_name, 
opt_bootstrap)) @@ -3272,7 +3584,7 @@ we force server id to 2, but this MySQL server will not act as a slave."); #ifndef __NETWARE__ (void) pthread_kill(signal_thread, MYSQL_KILL_SIGNAL); #endif /* __NETWARE__ */ - + if (!opt_bootstrap) (void) my_delete(pidfile_name,MYF(MY_WME)); // Not needed anymore @@ -3304,9 +3616,9 @@ we force server id to 2, but this MySQL server will not act as a slave."); if (opt_bootstrap) { select_thread_in_use= 0; // Allow 'kill' to work - int error= bootstrap(stdin); + bootstrap(stdin); end_thr_alarm(1); // Don't allow alarms - unireg_abort(error ? 1 : 0); + unireg_abort(bootstrap_error ? 1 : 0); } if (opt_init_file) { @@ -3320,20 +3632,17 @@ we force server id to 2, but this MySQL server will not act as a slave."); create_shutdown_thread(); create_maintenance_thread(); - printf(ER(ER_READY),my_progname,server_version, - ((unix_sock == INVALID_SOCKET) ? (char*) "" : mysqld_unix_port), - mysqld_port, ""); - if (MYSQL_COMPILATION_COMMENT[0] != '\0') - fputs(" " MYSQL_COMPILATION_COMMENT, stdout); - - putchar('\n'); - fflush(stdout); + sql_print_information(ER(ER_STARTUP),my_progname,server_version, + ((unix_sock == INVALID_SOCKET) ? (char*) "" + : mysqld_unix_port), + mysqld_port, + MYSQL_COMPILATION_COMMENT); #if defined(__NT__) || defined(HAVE_SMEM) handle_connections_methods(); #else #ifdef __WIN__ - if ( !have_tcpip || opt_disable_networking) + if (!have_tcpip || opt_disable_networking) { sql_print_error("TCP/IP unavailable or disabled with --skip-networking; no available interfaces"); unireg_abort(1); @@ -3343,7 +3652,7 @@ we force server id to 2, but this MySQL server will not act as a slave."); #endif /* __NT__ */ /* (void) pthread_attr_destroy(&connection_attrib); */ - + DBUG_PRINT("quit",("Exiting main thread")); #ifndef __WIN__ @@ -3379,7 +3688,7 @@ we force server id to 2, but this MySQL server will not act as a slave."); wait_for_signal_thread_to_end(); clean_up_mutexes(); my_end(opt_endinfo ? 
MY_CHECK_ERROR | MY_GIVE_INFO : 0); - + exit(0); return(0); /* purecov: deadcode */ } @@ -3436,7 +3745,8 @@ default_service_handling(char **argv, const char *servicename, const char *displayname, const char *file_path, - const char *extra_opt) + const char *extra_opt, + const char *account_name) { char path_and_service[FN_REFLEN+FN_REFLEN+32], *pos, *end; end= path_and_service + sizeof(path_and_service)-3; @@ -3455,12 +3765,14 @@ default_service_handling(char **argv, if (Service.got_service_option(argv, "install")) { - Service.Install(1, servicename, displayname, path_and_service); + Service.Install(1, servicename, displayname, path_and_service, + account_name); return 0; } if (Service.got_service_option(argv, "install-manual")) { - Service.Install(0, servicename, displayname, path_and_service); + Service.Install(0, servicename, displayname, path_and_service, + account_name); return 0; } if (Service.got_service_option(argv, "remove")) @@ -3475,13 +3787,14 @@ default_service_handling(char **argv, int main(int argc, char **argv) { - /* When several instances are running on the same machine, we - need to have an unique named hEventShudown through the - application PID e.g.: MySQLShutdown1890; MySQLShutdown2342 - */ + /* + When several instances are running on the same machine, we + need to have an unique named hEventShudown through the + application PID e.g.: MySQLShutdown1890; MySQLShutdown2342 + */ int10_to_str((int) GetCurrentProcessId(),strmov(shutdown_event_name, - "MySQLShutdown"), 10); - + "MySQLShutdown"), 10); + /* Must be initialized early for comparison of service name */ system_charset_info= &my_charset_utf8_general_ci; @@ -3495,7 +3808,7 @@ int main(int argc, char **argv) if (argc == 2) { if (!default_service_handling(argv, MYSQL_SERVICENAME, MYSQL_SERVICENAME, - file_path, "")) + file_path, "", NULL)) return 0; if (Service.IsService(argv[1])) /* Start an optional service */ { @@ -3514,7 +3827,8 @@ int main(int argc, char **argv) } else if (argc == 3) /* 
install or remove any optional service */ { - if (!default_service_handling(argv, argv[2], argv[2], file_path, "")) + if (!default_service_handling(argv, argv[2], argv[2], file_path, "", + NULL)) return 0; if (Service.IsService(argv[2])) { @@ -3532,15 +3846,30 @@ int main(int argc, char **argv) return 0; } } - else if (argc == 4) + else if (argc == 4 || argc == 5) { /* - Install an optional service with optional config file - mysqld --install-manual mysqldopt --defaults-file=c:\miguel\my.ini + This may seem strange, because we handle --local-service while + preserving 4.1's behavior of allowing any one other argument that is + passed to the service on startup. (The assumption is that this is + --defaults-file=file, but that was not enforced in 4.1, so we don't + enforce it here.) */ - if (!default_service_handling(argv, argv[2], argv[2], file_path, - argv[3])) - return 0; + const char *extra_opt= NullS; + const char *account_name = NullS; + int index; + for (index = 3; index < argc; index++) + { + if (!strcmp(argv[index], "--local-service")) + account_name= "NT AUTHORITY\\LocalService"; + else + extra_opt= argv[index]; + } + + if (argc == 4 || account_name) + if (!default_service_handling(argv, argv[2], argv[2], file_path, + extra_opt, account_name)) + return 0; } else if (argc == 1 && Service.IsService(MYSQL_SERVICENAME)) { @@ -3564,16 +3893,15 @@ int main(int argc, char **argv) create MySQL privilege tables without having to start a full MySQL server. 
*/ -static int bootstrap(FILE *file) +static void bootstrap(FILE *file) { - int error= 0; DBUG_ENTER("bootstrap"); THD *thd= new THD; thd->bootstrap=1; my_net_init(&thd->net,(st_vio*) 0); thd->max_client_packet_length= thd->net.max_packet; - thd->master_access= ~(ulong)0; + thd->security_ctx->master_access= ~(ulong)0; thd->thread_id=thread_id++; thread_count++; @@ -3583,7 +3911,8 @@ static int bootstrap(FILE *file) (void*) thd)) { sql_print_warning("Can't create thread to handle bootstrap"); - DBUG_RETURN(-1); + bootstrap_error=-1; + DBUG_VOID_RETURN; } /* Wait for thread to die */ (void) pthread_mutex_lock(&LOCK_thread_count); @@ -3598,13 +3927,7 @@ static int bootstrap(FILE *file) handle_bootstrap((void *)thd); #endif - error= thd->is_fatal_error; -#ifndef EMBEDDED_LIBRARY - net_end(&thd->net); -#endif - thd->cleanup(); - delete thd; - DBUG_RETURN(error); + DBUG_VOID_RETURN; } @@ -3615,13 +3938,32 @@ static bool read_init_file(char *file_name) DBUG_PRINT("enter",("name: %s",file_name)); if (!(file=my_fopen(file_name,O_RDONLY,MYF(MY_WME)))) return(1); - bootstrap(file); /* Ignore errors from this */ + bootstrap(file); (void) my_fclose(file,MYF(MY_WME)); return 0; } #ifndef EMBEDDED_LIBRARY +/* + Create new thread to handle incoming connection. + + SYNOPSIS + create_new_thread() + thd in/out Thread handle of future thread. + + DESCRIPTION + This function will create new thread to handle the incoming + connection. If there are idle cached threads one will be used. + 'thd' will be pushed into 'threads'. + + In single-threaded mode (#define ONE_THREAD) connection will be + handled inside this function. 
+ + RETURN VALUE + none +*/ + static void create_new_thread(THD *thd) { NET *net=&thd->net; @@ -3644,11 +3986,12 @@ static void create_new_thread(THD *thd) thd->real_id=pthread_self(); // Keep purify happy /* Start a new thread to handle connection */ + thread_count++; + #ifdef ONE_THREAD if (test_flags & TEST_NO_THREADS) // For debugging under Linux { thread_cache_size=0; // Safety - thread_count++; threads.append(thd); thd->real_id=pthread_self(); (void) pthread_mutex_unlock(&LOCK_thread_count); @@ -3657,19 +4000,21 @@ static void create_new_thread(THD *thd) else #endif { + if (thread_count-delayed_insert_threads > max_used_connections) + max_used_connections=thread_count-delayed_insert_threads; + if (cached_thread_count > wake_thread) { - start_cached_thread(thd); + thread_cache.append(thd); + wake_thread++; + pthread_cond_signal(&COND_thread_cache); } else { int error; - thread_count++; thread_created++; threads.append(thd); - if (thread_count-delayed_insert_threads > max_used_connections) - max_used_connections=thread_count-delayed_insert_threads; - DBUG_PRINT("info",(("creating thread %d"), thd->thread_id)); + DBUG_PRINT("info",(("creating thread %lu"), thd->thread_id)); thd->connect_time = time(NULL); if ((error=pthread_create(&thd->real_id,&connection_attrib, handle_one_connection, @@ -3679,10 +4024,10 @@ static void create_new_thread(THD *thd) ("Can't create thread to handle request (error %d)", error)); thread_count--; - thd->killed=1; // Safety + thd->killed= THD::KILL_CONNECTION; // Safety (void) pthread_mutex_unlock(&LOCK_thread_count); statistic_increment(aborted_connects,&LOCK_status); - net_printf(thd,ER_CANT_CREATE_THREAD,error); + net_printf_error(thd, ER_CANT_CREATE_THREAD, error); (void) pthread_mutex_lock(&LOCK_thread_count); close_connection(thd,0,0); delete thd; @@ -3710,11 +4055,8 @@ inline void kill_broken_server() (!opt_disable_networking && ip_sock == INVALID_SOCKET)) { select_thread_in_use = 0; -#ifdef __NETWARE__ - 
kill_server(MYSQL_KILL_SIGNAL); /* never returns */ -#else - kill_server((void*)MYSQL_KILL_SIGNAL); /* never returns */ -#endif /* __NETWARE__ */ + /* The following call will never return */ + kill_server(IF_NETWARE(MYSQL_KILL_SIGNAL, (void*) MYSQL_KILL_SIGNAL)); } } #define MAYBE_BROKEN_SYSCALL kill_broken_server(); @@ -3725,8 +4067,7 @@ inline void kill_broken_server() /* Handle new connections and spawn new process to handle them */ #ifndef EMBEDDED_LIBRARY -extern "C" pthread_handler_decl(handle_connections_sockets, - arg __attribute__((unused))) +pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused))) { my_socket sock,new_sock; uint error_count=0; @@ -3812,7 +4153,7 @@ extern "C" pthread_handler_decl(handle_connections_sockets, size_socket length=sizeof(struct sockaddr_in); new_sock = accept(sock, my_reinterpret_cast(struct sockaddr *) (&cAddr), &length); -#ifdef __NETWARE__ +#ifdef __NETWARE__ // TODO: temporary fix, waiting for TCP/IP fix - DEFECT000303149 if ((new_sock == INVALID_SOCKET) && (socket_errno == EINVAL)) { @@ -3852,8 +4193,8 @@ extern "C" pthread_handler_decl(handle_connections_sockets, struct request_info req; signal(SIGCHLD, SIG_DFL); request_init(&req, RQ_DAEMON, libwrapName, RQ_FILE, new_sock, NULL); - fromhost(&req); - if (!hosts_access(&req)) + my_fromhost(&req); + if (!my_hosts_access(&req)) { /* This may be stupid but refuse() includes an exit(0) @@ -3861,7 +4202,7 @@ extern "C" pthread_handler_decl(handle_connections_sockets, clean_exit() - same stupid thing ... 
*/ syslog(deny_severity, "refused connect from %s", - eval_client(&req)); + my_eval_client(&req)); /* C++ sucks (the gibberish in front just translates the supplied @@ -3872,7 +4213,7 @@ extern "C" pthread_handler_decl(handle_connections_sockets, if (req.sink) ((void (*)(int))req.sink)(req.fd); - (void) shutdown(new_sock,2); + (void) shutdown(new_sock, SHUT_RDWR); (void) closesocket(new_sock); continue; } @@ -3887,7 +4228,7 @@ extern "C" pthread_handler_decl(handle_connections_sockets, if (getsockname(new_sock,&dummy, &dummyLen) < 0) { sql_perror("Error on new connection socket"); - (void) shutdown(new_sock,2); + (void) shutdown(new_sock, SHUT_RDWR); (void) closesocket(new_sock); continue; } @@ -3899,28 +4240,28 @@ extern "C" pthread_handler_decl(handle_connections_sockets, if (!(thd= new THD)) { - (void) shutdown(new_sock,2); + (void) shutdown(new_sock, SHUT_RDWR); VOID(closesocket(new_sock)); continue; } if (!(vio_tmp=vio_new(new_sock, sock == unix_sock ? VIO_TYPE_SOCKET : VIO_TYPE_TCPIP, - sock == unix_sock)) || + sock == unix_sock ? 
VIO_LOCALHOST: 0)) || my_net_init(&thd->net,vio_tmp)) { if (vio_tmp) vio_delete(vio_tmp); else { - (void) shutdown(new_sock,2); + (void) shutdown(new_sock, SHUT_RDWR); (void) closesocket(new_sock); } delete thd; continue; } if (sock == unix_sock) - thd->host=(char*) my_localhost; + thd->security_ctx->host=(char*) my_localhost; create_new_thread(thd); } @@ -3935,7 +4276,7 @@ extern "C" pthread_handler_decl(handle_connections_sockets, #ifdef __NT__ -extern "C" pthread_handler_decl(handle_connections_namedpipes,arg) +pthread_handler_t handle_connections_namedpipes(void *arg) { HANDLE hConnectedPipe; BOOL fConnected; @@ -3948,25 +4289,27 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg) while (!abort_loop) { /* wait for named pipe connection */ - fConnected = ConnectNamedPipe( hPipe, NULL ); + fConnected = ConnectNamedPipe(hPipe, NULL); if (abort_loop) break; if (!fConnected) fConnected = GetLastError() == ERROR_PIPE_CONNECTED; if (!fConnected) { - CloseHandle( hPipe ); - if ((hPipe = CreateNamedPipe(pipe_name, - PIPE_ACCESS_DUPLEX, - PIPE_TYPE_BYTE | - PIPE_READMODE_BYTE | - PIPE_WAIT, - PIPE_UNLIMITED_INSTANCES, - (int) global_system_variables.net_buffer_length, - (int) global_system_variables.net_buffer_length, - NMPWAIT_USE_DEFAULT_WAIT, - &saPipeSecurity )) == - INVALID_HANDLE_VALUE ) + CloseHandle(hPipe); + if ((hPipe= CreateNamedPipe(pipe_name, + PIPE_ACCESS_DUPLEX, + PIPE_TYPE_BYTE | + PIPE_READMODE_BYTE | + PIPE_WAIT, + PIPE_UNLIMITED_INSTANCES, + (int) global_system_variables. + net_buffer_length, + (int) global_system_variables. 
+ net_buffer_length, + NMPWAIT_USE_DEFAULT_WAIT, + &saPipeSecurity)) == + INVALID_HANDLE_VALUE) { sql_perror("Can't create new named pipe!"); break; // Abort @@ -3993,8 +4336,8 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg) if (!(thd = new THD)) { - DisconnectNamedPipe( hConnectedPipe ); - CloseHandle( hConnectedPipe ); + DisconnectNamedPipe(hConnectedPipe); + CloseHandle(hConnectedPipe); continue; } if (!(thd->net.vio = vio_new_win32pipe(hConnectedPipe)) || @@ -4004,8 +4347,8 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg) delete thd; continue; } - /* host name is unknown */ - thd->host = my_strdup(my_localhost,MYF(0)); /* Host is unknown */ + /* Host is unknown */ + thd->security_ctx->host= my_strdup(my_localhost, MYF(0)); create_new_thread(thd); } @@ -4019,17 +4362,16 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg) Thread of shared memory's service SYNOPSIS - pthread_handler_decl() - handle_connections_shared_memory Thread handle + handle_connections_shared_memory() arg Arguments of thread */ #ifdef HAVE_SMEM -pthread_handler_decl(handle_connections_shared_memory,arg) +pthread_handler_t handle_connections_shared_memory(void *arg) { /* file-mapping object, use for create shared memory */ HANDLE handle_connect_file_map= 0; - char *handle_connect_map= 0; // pointer on shared memory + char *handle_connect_map= 0; // pointer on shared memory HANDLE event_connect_answer= 0; ulong smem_buffer_length= shared_memory_buffer_length + 4; ulong connect_number= 1; @@ -4095,7 +4437,7 @@ pthread_handler_decl(handle_connections_shared_memory,arg) /* it can be after shutdown command */ - if (abort_loop) + if (abort_loop) goto error; HANDLE handle_client_file_map= 0; @@ -4196,7 +4538,7 @@ pthread_handler_decl(handle_connections_shared_memory,arg) errmsg= 0; goto errorconn; } - thd->host= my_strdup(my_localhost,MYF(0)); /* Host is unknown */ + thd->security_ctx->host= my_strdup(my_localhost, MYF(0)); /* Host is 
unknown */ create_new_thread(thd); connect_number++; continue; @@ -4210,7 +4552,7 @@ errorconn: NullS); sql_perror(buff); } - if (handle_client_file_map) + if (handle_client_file_map) CloseHandle(handle_client_file_map); if (handle_client_map) UnmapViewOfFile(handle_client_map); @@ -4255,8 +4597,8 @@ error: enum options_mysqld { - OPT_ISAM_LOG=256, OPT_SKIP_NEW, - OPT_SKIP_GRANT, OPT_SKIP_LOCK, + OPT_ISAM_LOG=256, OPT_SKIP_NEW, + OPT_SKIP_GRANT, OPT_SKIP_LOCK, OPT_ENABLE_LOCK, OPT_USE_LOCKING, OPT_SOCKET, OPT_UPDATE_LOG, OPT_BIN_LOG, OPT_SKIP_RESOLVE, @@ -4278,7 +4620,7 @@ enum options_mysqld OPT_MASTER_HOST, OPT_MASTER_USER, OPT_MASTER_PASSWORD, OPT_MASTER_PORT, OPT_MASTER_INFO_FILE, OPT_MASTER_CONNECT_RETRY, - OPT_MASTER_RETRY_COUNT, + OPT_MASTER_RETRY_COUNT, OPT_LOG_TC, OPT_LOG_TC_SIZE, OPT_MASTER_SSL, OPT_MASTER_SSL_KEY, OPT_MASTER_SSL_CERT, OPT_MASTER_SSL_CAPATH, OPT_MASTER_SSL_CIPHER, OPT_MASTER_SSL_CA, @@ -4292,7 +4634,7 @@ enum options_mysqld OPT_SAFEMALLOC_MEM_LIMIT, OPT_REPLICATE_DO_TABLE, OPT_REPLICATE_IGNORE_TABLE, OPT_REPLICATE_WILD_DO_TABLE, OPT_REPLICATE_WILD_IGNORE_TABLE, OPT_REPLICATE_SAME_SERVER_ID, - OPT_DISCONNECT_SLAVE_EVENT_COUNT, + OPT_DISCONNECT_SLAVE_EVENT_COUNT, OPT_TC_HEURISTIC_RECOVER, OPT_ABORT_SLAVE_EVENT_COUNT, OPT_INNODB_DATA_HOME_DIR, OPT_INNODB_DATA_FILE_PATH, @@ -4301,16 +4643,21 @@ enum options_mysqld OPT_INNODB_LOG_ARCHIVE, OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT, OPT_INNODB_FLUSH_METHOD, + OPT_INNODB_DOUBLEWRITE, + OPT_INNODB_CHECKSUMS, OPT_INNODB_FAST_SHUTDOWN, OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB, OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, + OPT_LOG_BIN_TRUST_FUNCTION_CREATORS, OPT_SAFE_SHOW_DB, OPT_INNODB_SAFE_BINLOG, OPT_INNODB, OPT_ISAM, - OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, OPT_NDB_USE_EXACT_COUNT, + OPT_ENGINE_CONDITION_PUSHDOWN, OPT_NDBCLUSTER, OPT_NDB_CONNECTSTRING, + OPT_NDB_USE_EXACT_COUNT, OPT_NDB_USE_TRANSACTIONS, OPT_NDB_FORCE_SEND, OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, - OPT_NDB_SHM, 
OPT_NDB_OPTIMIZED_NODE_SELECTION, + OPT_NDB_SHM, OPT_NDB_OPTIMIZED_NODE_SELECTION, OPT_NDB_CACHE_CHECK_TIME, + OPT_NDB_MGMD, OPT_NDB_NODEID, OPT_SKIP_SAFEMALLOC, - OPT_TEMP_POOL, OPT_TX_ISOLATION, + OPT_TEMP_POOL, OPT_TX_ISOLATION, OPT_COMPLETION_TYPE, OPT_SKIP_STACK_TRACE, OPT_SKIP_SYMLINKS, OPT_MAX_BINLOG_DUMP_EVENTS, OPT_SPORADIC_BINLOG_DUMP_FAIL, OPT_SAFE_USER_CREATE, OPT_SQL_MODE, @@ -4342,7 +4689,7 @@ enum options_mysqld OPT_MAX_SEEKS_FOR_KEY, OPT_MAX_TMP_TABLES, OPT_MAX_USER_CONNECTIONS, OPT_MAX_LENGTH_FOR_SORT_DATA, OPT_MAX_WRITE_LOCK_COUNT, OPT_BULK_INSERT_BUFFER_SIZE, - OPT_MAX_ERROR_COUNT, OPT_MYISAM_DATA_POINTER_SIZE, + OPT_MAX_ERROR_COUNT, OPT_MULTI_RANGE_COUNT, OPT_MYISAM_DATA_POINTER_SIZE, OPT_MYISAM_BLOCK_SIZE, OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE, OPT_MYISAM_MAX_SORT_FILE_SIZE, OPT_MYISAM_SORT_BUFFER_SIZE, OPT_MYISAM_STATS_METHOD, @@ -4352,7 +4699,8 @@ enum options_mysqld OPT_PRELOAD_BUFFER_SIZE, OPT_QUERY_CACHE_LIMIT, OPT_QUERY_CACHE_MIN_RES_UNIT, OPT_QUERY_CACHE_SIZE, OPT_QUERY_CACHE_TYPE, OPT_QUERY_CACHE_WLOCK_INVALIDATE, OPT_RECORD_BUFFER, - OPT_RECORD_RND_BUFFER, OPT_RELAY_LOG_SPACE_LIMIT, OPT_RELAY_LOG_PURGE, + OPT_RECORD_RND_BUFFER, OPT_DIV_PRECINCREMENT, OPT_RELAY_LOG_SPACE_LIMIT, + OPT_RELAY_LOG_PURGE, OPT_SLAVE_NET_TIMEOUT, OPT_SLAVE_COMPRESSED_PROTOCOL, OPT_SLOW_LAUNCH_TIME, OPT_SLAVE_TRANS_RETRIES, OPT_READONLY, OPT_DEBUGGING, OPT_SORT_BUFFER, OPT_TABLE_CACHE, @@ -4370,12 +4718,17 @@ enum options_mysqld OPT_INNODB_FILE_IO_THREADS, OPT_INNODB_LOCK_WAIT_TIMEOUT, OPT_INNODB_THREAD_CONCURRENCY, + OPT_INNODB_COMMIT_CONCURRENCY, OPT_INNODB_FORCE_RECOVERY, OPT_INNODB_STATUS_FILE, OPT_INNODB_MAX_DIRTY_PAGES_PCT, OPT_INNODB_TABLE_LOCKS, + OPT_INNODB_SUPPORT_XA, OPT_INNODB_OPEN_FILES, OPT_INNODB_AUTOEXTEND_INCREMENT, + OPT_INNODB_SYNC_SPIN_LOOPS, + OPT_INNODB_CONCURRENCY_TICKETS, + OPT_INNODB_THREAD_SLEEP_DELAY, OPT_BDB_CACHE_SIZE, OPT_BDB_LOG_BUFFER_SIZE, OPT_BDB_MAX_LOCK, @@ -4396,6 +4749,7 @@ enum options_mysqld OPT_GROUP_CONCAT_MAX_LEN, 
OPT_DEFAULT_COLLATION, OPT_CHARACTER_SET_CLIENT_HANDSHAKE, + OPT_CHARACTER_SET_FILESYSTEM, OPT_INIT_CONNECT, OPT_INIT_SLAVE, OPT_SECURE_AUTH, @@ -4404,8 +4758,22 @@ enum options_mysqld OPT_DATETIME_FORMAT, OPT_LOG_QUERIES_NOT_USING_INDEXES, OPT_DEFAULT_TIME_ZONE, + OPT_SYSDATE_IS_NOW, + OPT_OPTIMIZER_SEARCH_DEPTH, + OPT_OPTIMIZER_PRUNE_LEVEL, + OPT_UPDATABLE_VIEWS_WITH_LIMIT, + OPT_SP_AUTOMATIC_PRIVILEGES, + OPT_MAX_SP_RECURSION_DEPTH, + OPT_AUTO_INCREMENT, OPT_AUTO_INCREMENT_OFFSET, + OPT_ENABLE_LARGE_PAGES, + OPT_TIMED_MUTEXES, + OPT_OLD_STYLE_USER_LIMITS, OPT_LOG_SLOW_ADMIN_STATEMENTS, - OPT_MERGE + OPT_TABLE_LOCK_WAIT_TIMEOUT, + OPT_PORT_OPEN_TIMEOUT, + OPT_MERGE, + OPT_INNODB_ROLLBACK_ON_TIMEOUT, + OPT_SECURE_FILE_PRIV }; @@ -4413,7 +4781,7 @@ enum options_mysqld struct my_option my_long_options[] = { - {"help", '?', "Display this help and exit.", + {"help", '?', "Display this help and exit.", (gptr*) &opt_help, (gptr*) &opt_help, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION @@ -4422,8 +4790,6 @@ struct my_option my_long_options[] = (gptr*) &abort_slave_event_count, (gptr*) &abort_slave_event_count, 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif /* HAVE_REPLICATION */ - {"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax. This mode will also set transaction isolation level 'serializable'.", 0, 0, 0, - GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"allow-suspicious-udfs", OPT_ALLOW_SUSPICIOUS_UDFS, "Allows use of UDFs consisting of only one symbol xxx() " "without corresponding xxx_init() or xxx_deinit(). That also means " @@ -4431,6 +4797,22 @@ struct my_option my_long_options[] = "from libc.so", (gptr*) &opt_allow_suspicious_udfs, (gptr*) &opt_allow_suspicious_udfs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax. 
This mode will also set transaction isolation level 'serializable'.", 0, 0, 0, + GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"auto-increment-increment", OPT_AUTO_INCREMENT, + "Auto-increment columns are incremented by this", + (gptr*) &global_system_variables.auto_increment_increment, + (gptr*) &max_system_variables.auto_increment_increment, 0, GET_ULONG, + OPT_ARG, 1, 1, 65535, 0, 1, 0 }, + {"auto-increment-offset", OPT_AUTO_INCREMENT_OFFSET, + "Offset added to Auto-increment columns. Used when auto-increment-increment != 1", + (gptr*) &global_system_variables.auto_increment_offset, + (gptr*) &max_system_variables.auto_increment_offset, 0, GET_ULONG, OPT_ARG, + 1, 1, 65535, 0, 1, 0 }, + {"automatic-sp-privileges", OPT_SP_AUTOMATIC_PRIVILEGES, + "Creating and dropping stored procedures alters ACLs. Disable with --skip-automatic-sp-privileges.", + (gptr*) &sp_automatic_privileges, (gptr*) &sp_automatic_privileges, + 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"basedir", 'b', "Path to installation directory. All paths are usually resolved relative to this.", (gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG, @@ -4452,8 +4834,7 @@ Disable with --skip-bdb (will save memory).", "Don't try to recover Berkeley DB tables on start.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"bdb-no-sync", OPT_BDB_NOSYNC, - "Disable synchronously flushing logs. 
This option is deprecated, use --skip-sync-bdb-logs or sync-bdb-logs=0 instead", - // (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL, + "This option is deprecated, use --skip-sync-bdb-logs instead", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"bdb-shared-data", OPT_BDB_SHARED, "Start Berkeley DB in multi-process mode.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, @@ -4474,13 +4855,20 @@ Disable with --skip-bdb (will save memory).", {"binlog-ignore-db", OPT_BINLOG_IGNORE_DB, "Tells the master that updates to the given database should not be logged tothe binary log.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#ifndef DISABLE_GRANT_OPTIONS {"bootstrap", OPT_BOOTSTRAP, "Used by mysql installation scripts.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, +#endif {"character-set-client-handshake", OPT_CHARACTER_SET_CLIENT_HANDSHAKE, "Don't ignore client side character set value sent during handshake.", (gptr*) &opt_character_set_client_handshake, (gptr*) &opt_character_set_client_handshake, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, + {"character-set-filesystem", OPT_CHARACTER_SET_FILESYSTEM, + "Set the filesystem character set.", + (gptr*) &character_set_filesystem_name, + (gptr*) &character_set_filesystem_name, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"character-set-server", 'C', "Set the default character set.", (gptr*) &default_character_set_name, (gptr*) &default_character_set_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, @@ -4493,10 +4881,14 @@ Disable with --skip-bdb (will save memory).", {"collation-server", OPT_DEFAULT_COLLATION, "Set the default collation.", (gptr*) &default_collation_name, (gptr*) &default_collation_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + {"completion-type", OPT_COMPLETION_TYPE, "Default completion type.", + (gptr*) &global_system_variables.completion_type, + (gptr*) &max_system_variables.completion_type, 0, GET_ULONG, + REQUIRED_ARG, 0, 0, 2, 0, 1, 0}, {"concurrent-insert", 
OPT_CONCURRENT_INSERT, - "Use concurrent insert with MyISAM. Disable with --skip-concurrent-insert.", + "Use concurrent insert with MyISAM. Disable with --concurrent-insert=0", (gptr*) &myisam_concurrent_insert, (gptr*) &myisam_concurrent_insert, - 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, + 0, GET_LONG, OPT_ARG, 1, 0, 2, 0, 0, 0}, {"console", OPT_CONSOLE, "Write error output on screen; Don't remove the console window on windows.", (gptr*) &opt_console, (gptr*) &opt_console, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -4553,9 +4945,16 @@ Disable with --skip-bdb (will save memory).", {"enable-pstack", OPT_DO_PSTACK, "Print a symbolic stack trace on failure.", (gptr*) &opt_do_pstack, (gptr*) &opt_do_pstack, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"engine-condition-pushdown", + OPT_ENGINE_CONDITION_PUSHDOWN, + "Push supported query conditions to the storage engine.", + (gptr*) &global_system_variables.engine_condition_pushdown, + (gptr*) &global_system_variables.engine_condition_pushdown, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"exit-info", 'T', "Used for debugging; Use at your own risk!", 0, 0, 0, GET_LONG, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"external-locking", OPT_USE_LOCKING, "Use system (external) locking. With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running.", + {"external-locking", OPT_USE_LOCKING, "Use system (external) locking (disabled by default). With this option enabled you can run myisamchk to test (not repair) tables while the MySQL server is running. 
\ +Disable with --skip-external-locking.", (gptr*) &opt_external_locking, (gptr*) &opt_external_locking, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"flush", OPT_FLUSH, "Flush tables to disk between SQL commands.", 0, 0, 0, @@ -4566,12 +4965,20 @@ Disable with --skip-bdb (will save memory).", "Set up signals usable for debugging", (gptr*) &opt_debugging, (gptr*) &opt_debugging, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_LARGE_PAGES + {"large-pages", OPT_ENABLE_LARGE_PAGES, "Enable support for large pages. \ +Disable with --skip-large-pages.", + (gptr*) &opt_large_pages, (gptr*) &opt_large_pages, 0, GET_BOOL, NO_ARG, 0, 0, 0, + 0, 0, 0}, +#endif {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection", (gptr*) &opt_init_connect, (gptr*) &opt_init_connect, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#ifndef DISABLE_GRANT_OPTIONS {"init-file", OPT_INIT_FILE, "Read SQL commands from this file at startup.", (gptr*) &opt_init_file, (gptr*) &opt_init_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#endif {"init-rpl-role", OPT_INIT_RPL_ROLE, "Set the replication role.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"init-slave", OPT_INIT_SLAVE, "Command(s) that are executed when a slave connects to this master", @@ -4581,6 +4988,11 @@ Disable with --skip-bdb (will save memory).", Disable with --skip-innodb (will save memory).", (gptr*) &opt_innodb, (gptr*) &opt_innodb, 0, GET_BOOL, NO_ARG, OPT_INNODB_DEFAULT, 0, 0, 0, 0, 0}, +#ifdef HAVE_INNOBASE_DB + {"innodb_checksums", OPT_INNODB_CHECKSUMS, "Enable InnoDB checksums validation (enabled by default). 
\ +Disable with --skip-innodb-checksums.", (gptr*) &innobase_use_checksums, + (gptr*) &innobase_use_checksums, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, +#endif {"innodb_data_file_path", OPT_INNODB_DATA_FILE_PATH, "Path to individual files and their sizes.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -4589,18 +5001,32 @@ Disable with --skip-innodb (will save memory).", "The common part for InnoDB table spaces.", (gptr*) &innobase_data_home_dir, (gptr*) &innobase_data_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_doublewrite", OPT_INNODB_DOUBLEWRITE, "Enable InnoDB doublewrite buffer (enabled by default). \ +Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, + (gptr*) &innobase_use_doublewrite, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"innodb_fast_shutdown", OPT_INNODB_FAST_SHUTDOWN, - "Speeds up server shutdown process.", (gptr*) &innobase_fast_shutdown, - (gptr*) &innobase_fast_shutdown, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + "Speeds up the shutdown process of the InnoDB storage engine. Possible " + "values are 0, 1 (faster)" + /* + NetWare can't close unclosed files, can't automatically kill remaining + threads, etc, so on this OS we disable the crash-like InnoDB shutdown. 
+ */ +#ifndef __NETWARE__ + " or 2 (fastest - crash-like)" +#endif + ".", + (gptr*) &innobase_fast_shutdown, + (gptr*) &innobase_fast_shutdown, 0, GET_ULONG, OPT_ARG, 1, 0, + IF_NETWARE(1,2), 0, 0, 0}, {"innodb_file_per_table", OPT_INNODB_FILE_PER_TABLE, "Stores each InnoDB table to an .ibd file in the database dir.", (gptr*) &innobase_file_per_table, (gptr*) &innobase_file_per_table, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"innodb_flush_log_at_trx_commit", OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT, "Set to 0 (write and flush once per second), 1 (write and flush at each commit) or 2 (write at commit, flush once per second).", - (gptr*) &innobase_flush_log_at_trx_commit, - (gptr*) &innobase_flush_log_at_trx_commit, - 0, GET_UINT, OPT_ARG, 1, 0, 2, 0, 0, 0}, + (gptr*) &srv_flush_log_at_trx_commit, + (gptr*) &srv_flush_log_at_trx_commit, + 0, GET_ULONG, OPT_ARG, 1, 0, 2, 0, 0, 0}, {"innodb_flush_method", OPT_INNODB_FLUSH_METHOD, "With which method to flush data.", (gptr*) &innobase_unix_file_flush_method, (gptr*) &innobase_unix_file_flush_method, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, @@ -4627,21 +5053,29 @@ Disable with --skip-innodb (will save memory).", (gptr*) &srv_max_purge_lag, (gptr*) &srv_max_purge_lag, 0, GET_LONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1L, 0}, + {"innodb_rollback_on_timeout", OPT_INNODB_ROLLBACK_ON_TIMEOUT, + "Roll back the complete transaction on lock wait timeout, for 4.x compatibility (disabled by default)", + (gptr*) &innobase_rollback_on_timeout, (gptr*) &innobase_rollback_on_timeout, + 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"innodb_status_file", OPT_INNODB_STATUS_FILE, "Enable SHOW INNODB STATUS output in the innodb_status.<pid> file", (gptr*) &innobase_create_status_file, (gptr*) &innobase_create_status_file, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_support_xa", OPT_INNODB_SUPPORT_XA, + "Enable InnoDB support for the XA two-phase commit", + (gptr*) &global_system_variables.innodb_support_xa, + (gptr*) 
&global_system_variables.innodb_support_xa, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"innodb_table_locks", OPT_INNODB_TABLE_LOCKS, "Enable InnoDB locking in LOCK TABLES", (gptr*) &global_system_variables.innodb_table_locks, (gptr*) &global_system_variables.innodb_table_locks, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, #endif /* End HAVE_INNOBASE_DB */ - {"isam", OPT_ISAM, "Enable ISAM (if this version of MySQL supports it). \ -Disable with --skip-isam.", - (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, OPT_ISAM_DEFAULT, 0, 0, + {"isam", OPT_ISAM, "Obsolete. ISAM storage engine is no longer supported.", + (gptr*) &opt_isam, (gptr*) &opt_isam, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"language", 'L', + {"language", 'L', "Client error messages in given language. May be given as a full path.", (gptr*) &language_ptr, (gptr*) &language_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -4653,21 +5087,45 @@ Disable with --skip-isam.", {"log", 'l', "Log connections and queries to file.", (gptr*) &opt_logname, (gptr*) &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-bin", OPT_BIN_LOG, - "Log update queries in binary format.", + "Log update queries in binary format. Optional (but strongly recommended " + "to avoid replication problems if server's hostname changes) argument " + "should be the chosen location for the binary log files.", (gptr*) &opt_bin_logname, (gptr*) &opt_bin_logname, 0, GET_STR_ALLOC, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-bin-index", OPT_BIN_LOG_INDEX, "File that holds the names for last binary log files.", (gptr*) &opt_binlog_index_name, (gptr*) &opt_binlog_index_name, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"log-error", OPT_ERROR_LOG_FILE, "Log error file.", + /* + This option starts with "log-bin" to emphasize that it is specific of + binary logging. 
+ */ + {"log-bin-trust-function-creators", OPT_LOG_BIN_TRUST_FUNCTION_CREATORS, + "If equal to 0 (the default), then when --log-bin is used, creation of " + "a stored function is allowed only to users having the SUPER privilege and" + " only if this function may not break binary logging.", + (gptr*) &trust_function_creators, (gptr*) &trust_function_creators, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, +#ifndef TO_BE_REMOVED_IN_5_1_OR_6_0 + /* + In 5.0.6 we introduced the below option, then in 5.0.16 we renamed it to + log-bin-trust-function-creators but kept also the old name for + compatibility; the behaviour was also changed to apply only to functions + (and triggers). In a future release this old name could be removed. + */ + {"log-bin-trust-routine-creators", OPT_LOG_BIN_TRUST_FUNCTION_CREATORS, + "(deprecated) Use log-bin-trust-function-creators.", + (gptr*) &trust_function_creators, (gptr*) &trust_function_creators, 0, + GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, +#endif + {"log-error", OPT_ERROR_LOG_FILE, "Error log file.", (gptr*) &log_error_file_ptr, (gptr*) &log_error_file_ptr, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-isam", OPT_ISAM_LOG, "Log all MyISAM changes to file.", (gptr*) &myisam_log_filename, (gptr*) &myisam_log_filename, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-long-format", '0', - "Log some extra information to update log. Please note that this option is deprecated; see --log-short-format option.", + "Log some extra information to update log. Please note that this option is deprecated; see --log-short-format option.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"log-queries-not-using-indexes", OPT_LOG_QUERIES_NOT_USING_INDEXES, "Log queries that are executed without benefit of any index to the slow log if it is open.", @@ -4690,13 +5148,24 @@ Disable with --skip-isam.", "Log slow queries to this log file. Defaults logging to hostname-slow.log file. 
Must be enabled to activate other slow log options.", (gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, + {"log-tc", OPT_LOG_TC, + "Path to transaction coordinator log (used for transactions that affect " + "more than one storage engine, when binary log is disabled)", + (gptr*) &opt_tc_log_file, (gptr*) &opt_tc_log_file, 0, GET_STR, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_MMAP + {"log-tc-size", OPT_LOG_TC_SIZE, "Size of transaction coordinator log.", + (gptr*) &opt_tc_log_size, (gptr*) &opt_tc_log_size, 0, GET_ULONG, + REQUIRED_ARG, TC_LOG_MIN_SIZE, TC_LOG_MIN_SIZE, ~0L, 0, TC_LOG_PAGE_SIZE, 0}, +#endif {"log-update", OPT_UPDATE_LOG, - "Log updates to file.# where # is a unique number if not given.", + "The update log is deprecated since version 5.0, is replaced by the binary \ +log and this option justs turns on --log-bin instead.", (gptr*) &opt_update_logname, (gptr*) &opt_update_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"log-warnings", 'W', "Log some non-critical warnings to the error log file. Use this option twice or --log-warnings=2 if you also want 'Aborted connections' warnings.", + {"log-warnings", 'W', "Log some not critical warnings to the log file.", (gptr*) &global_system_variables.log_warnings, - (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, ~0L, + (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"low-priority-updates", OPT_LOW_PRIORITY_UPDATES, "INSERT/DELETE/UPDATE has lower priority than selects.", @@ -4767,7 +5236,7 @@ master-ssl", (gptr*) &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"merge", OPT_MERGE, "Enable Merge storage engine. 
Disable with \ --skip-merge.", - (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0}, + (gptr*) &opt_merge, (gptr*) &opt_merge, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"myisam-recover", OPT_MYISAM_RECOVER, "Syntax: myisam-recover[=option[,option...]], where option can be DEFAULT, BACKUP, FORCE or QUICK.", (gptr*) &myisam_recover_options_str, (gptr*) &myisam_recover_options_str, 0, @@ -4779,9 +5248,19 @@ Disable with --skip-ndbcluster (will save memory).", #ifdef HAVE_NDBCLUSTER_DB {"ndb-connectstring", OPT_NDB_CONNECTSTRING, "Connect string for ndbcluster.", - (gptr*) &opt_ndbcluster_connectstring, - (gptr*) &opt_ndbcluster_connectstring, + (gptr*) &opt_ndb_connectstring, + (gptr*) &opt_ndb_connectstring, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"ndb-mgmd-host", OPT_NDB_MGMD, + "Set host and port for ndb_mgmd. Syntax: hostname[:port]", + (gptr*) &opt_ndb_mgmd, + (gptr*) &opt_ndb_mgmd, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"ndb-nodeid", OPT_NDB_NODEID, + "Nodeid for this mysqlserver in the cluster.", + (gptr*) &opt_ndb_nodeid, + (gptr*) &opt_ndb_nodeid, + 0, GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ndb-autoincrement-prefetch-sz", OPT_NDB_AUTOINCREMENT_PREFETCH_SZ, "Specify number of autoincrement values that are prefetched.", (gptr*) &global_system_variables.ndb_autoincrement_prefetch_sz, @@ -4809,6 +5288,17 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &global_system_variables.ndb_use_exact_count, (gptr*) &global_system_variables.ndb_use_exact_count, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb-use-transactions", OPT_NDB_USE_TRANSACTIONS, + "Use transactions for large inserts, if enabled then large " + "inserts will be split into several smaller transactions", + (gptr*) &global_system_variables.ndb_use_transactions, + (gptr*) &global_system_variables.ndb_use_transactions, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + {"ndb_use_transactions", OPT_NDB_USE_TRANSACTIONS, + "same as 
--ndb-use-transactions.", + (gptr*) &global_system_variables.ndb_use_transactions, + (gptr*) &global_system_variables.ndb_use_transactions, + 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"ndb-shm", OPT_NDB_SHM, "Use shared memory connections when available.", (gptr*) &opt_ndb_shm, @@ -4819,6 +5309,10 @@ Disable with --skip-ndbcluster (will save memory).", (gptr*) &opt_ndb_optimized_node_selection, (gptr*) &opt_ndb_optimized_node_selection, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, + { "ndb-cache-check-time", OPT_NDB_CACHE_CHECK_TIME, + "A dedicated thread is created to, at the given millisecons interval, invalidate the query cache if another MySQL server in the cluster has changed the data in the database.", + (gptr*) &opt_ndb_cache_check_time, (gptr*) &opt_ndb_cache_check_time, 0, GET_ULONG, REQUIRED_ARG, + 0, 0, LONG_TIMEOUT, 0, 1, 0}, #endif {"new", 'n', "Use very new possible 'unsafe' functions.", (gptr*) &global_system_variables.new_mode, @@ -4838,11 +5332,19 @@ Disable with --skip-ndbcluster (will save memory).", "Only use one thread (for debugging under Linux).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif + {"old-style-user-limits", OPT_OLD_STYLE_USER_LIMITS, + "Enable old-style user limits (before 5.0.3 user resources were counted per each user+host vs. per account)", + (gptr*) &opt_old_style_user_limits, (gptr*) &opt_old_style_user_limits, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"pid-file", OPT_PID_FILE, "Pid file used by safe_mysqld.", (gptr*) &pidfile_name_ptr, (gptr*) &pidfile_name_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"port", 'P', "Port number to use for connection.", (gptr*) &mysqld_port, (gptr*) &mysqld_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"port-open-timeout", OPT_PORT_OPEN_TIMEOUT, + "Maximum time in seconds to wait for the port to become free. 
" + "(Default: no wait)", (gptr*) &mysqld_port_timeout, + (gptr*) &mysqld_port_timeout, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"relay-log", OPT_RELAY_LOG, "The location and name to use for relay logs.", (gptr*) &opt_relay_logname, (gptr*) &opt_relay_logname, 0, @@ -4872,12 +5374,6 @@ thread is in the relay logs.", {"replicate-rewrite-db", OPT_REPLICATE_REWRITE_DB, "Updates to a database with a different name than the original. Example: replicate-rewrite-db=master_db_name->slave_db_name.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"replicate-wild-do-table", OPT_REPLICATE_WILD_DO_TABLE, - "Tells the slave thread to restrict replication to the tables that match the specified wildcard pattern. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-do-table=foo%.bar% will replicate only updates to tables in all databases that start with foo and whose table names start with bar.", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, - {"replicate-wild-ignore-table", OPT_REPLICATE_WILD_IGNORE_TABLE, - "Tells the slave thread to not replicate to the tables that match the given wildcard pattern. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% will not do updates to tables in databases that start with foo and whose table names start with bar.", - 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION {"replicate-same-server-id", OPT_REPLICATE_SAME_SERVER_ID, "In replication, if set to 1, do not skip events having our server id. 
\ @@ -4887,6 +5383,12 @@ Can't be set to 1 if --log-slave-updates is used.", (gptr*) &replicate_same_server_id, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif + {"replicate-wild-do-table", OPT_REPLICATE_WILD_DO_TABLE, + "Tells the slave thread to restrict replication to the tables that match the specified wildcard pattern. To specify more than one table, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-do-table=foo%.bar% will replicate only updates to tables in all databases that start with foo and whose table names start with bar.", + 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"replicate-wild-ignore-table", OPT_REPLICATE_WILD_IGNORE_TABLE, + "Tells the slave thread to not replicate to the tables that match the given wildcard pattern. To specify more than one table to ignore, use the directive multiple times, once for each table. This will work for cross-database updates. Example: replicate-wild-ignore-table=foo%.bar% will not do updates to tables in databases that start with foo and whose table names start with bar.", + 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, // In replication, we may need to tell the other servers how to connect {"report-host", OPT_REPORT_HOST, "Hostname or IP of the slave to be reported to to the master during slave registration. Will appear in the output of SHOW SLAVE HOSTS. Leave unset if you do not want the slave to register itself with the master. Note that it is not sufficient for the master to simply read the IP of the slave off the socket once the slave connects. 
Due to NAT and other routing issues, that IP may not be valid for connecting to the slave from the master or other hosts.", @@ -4921,6 +5423,10 @@ Can't be set to 1 if --log-slave-updates is used.", {"secure-auth", OPT_SECURE_AUTH, "Disallow authentication for accounts that have old (pre-4.1) passwords.", (gptr*) &opt_secure_auth, (gptr*) &opt_secure_auth, 0, GET_BOOL, NO_ARG, my_bool(0), 0, 0, 0, 0, 0}, + {"secure-file-priv", OPT_SECURE_FILE_PRIV, + "Limit LOAD DATA, SELECT ... OUTFILE, and LOAD_FILE() to files within specified directory", + (gptr*) &opt_secure_file_priv, (gptr*) &opt_secure_file_priv, 0, + GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"server-id", OPT_SERVER_ID, "Uniquely identifies the server instance in the community of replication partners.", (gptr*) &server_id, (gptr*) &server_id, 0, GET_ULONG, REQUIRED_ARG, 0, 0, 0, @@ -4942,10 +5448,12 @@ Can't be set to 1 if --log-slave-updates is used.", "Show user and password in SHOW SLAVE HOSTS on this master", (gptr*) &opt_show_slave_auth_info, (gptr*) &opt_show_slave_auth_info, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, +#ifndef DISABLE_GRANT_OPTIONS {"skip-grant-tables", OPT_SKIP_GRANT, "Start without grant tables. This gives all users FULL ACCESS to all tables!", (gptr*) &opt_noacl, (gptr*) &opt_noacl, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, +#endif {"skip-host-cache", OPT_SKIP_HOST_CACHE, "Don't cache host names.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-locking", OPT_SKIP_LOCK, @@ -4978,8 +5486,8 @@ Can't be set to 1 if --log-slave-updates is used.", {"skip-symlink", OPT_SKIP_SYMLINKS, "Don't allow symlinking of tables. Deprecated option. 
Use --skip-symbolic-links instead.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"skip-thread-priority", OPT_SKIP_PRIOR, - "Don't give threads different priorities.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, - 0, 0, 0, 0, 0}, + "Don't give threads different priorities.", 0, 0, 0, GET_NO_ARG, NO_ARG, + DEFAULT_SKIP_THREAD_PRIORITY, 0, 0, 0, 0, 0}, #ifdef HAVE_REPLICATION {"slave-load-tmpdir", OPT_SLAVE_LOAD_TMPDIR, "The location where the slave should put its temporary files when \ @@ -5001,9 +5509,9 @@ replicating a LOAD DATA INFILE command.", 0}, #endif /* HAVE_REPLICATION */ {"sql-bin-update-same", OPT_SQL_BIN_UPDATE_SAME, - "If set, setting SQL_LOG_BIN to a value will automatically set SQL_LOG_UPDATE to the same value and vice versa.", - (gptr*) &opt_sql_bin_update, (gptr*) &opt_sql_bin_update, 0, GET_BOOL, - NO_ARG, 0, 0, 0, 0, 0, 0}, + "The update log is deprecated since version 5.0, is replaced by the binary \ +log and this option does nothing anymore.", + 0, 0, 0, GET_DISABLED, NO_ARG, 0, 0, 0, 0, 0, 0}, {"sql-mode", OPT_SQL_MODE, "Syntax: sql-mode=option[,option[,option...]] where option can be one of: REAL_AS_FLOAT, PIPES_AS_CONCAT, ANSI_QUOTES, IGNORE_SPACE, ONLY_FULL_GROUP_BY, NO_UNSIGNED_SUBTRACTION.", (gptr*) &sql_mode_str, (gptr*) &sql_mode_str, 0, GET_STR, REQUIRED_ARG, 0, @@ -5019,13 +5527,25 @@ replicating a LOAD DATA INFILE command.", {"symbolic-links", 's', "Enable symbolic link support.", (gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG, IF_PURIFY(0,1), 0, 0, 0, 0, 0}, + {"sysdate-is-now", OPT_SYSDATE_IS_NOW, + "Non-default option to alias SYSDATE() to NOW() to make it safe-replicable. Since 5.0, SYSDATE() returns a `dynamic' value different for different invocations, even within the same statement.", + (gptr*) &global_system_variables.sysdate_is_now, + 0, 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, + {"tc-heuristic-recover", OPT_TC_HEURISTIC_RECOVER, + "Decision to use in heuristic recover process. 
Possible values are COMMIT or ROLLBACK.", + (gptr*) &opt_tc_heuristic_recover, (gptr*) &opt_tc_heuristic_recover, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"temp-pool", OPT_TEMP_POOL, "Using this option will cause most temporary files created to use a small set of names, rather than a unique name for each new file.", (gptr*) &use_temp_pool, (gptr*) &use_temp_pool, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, + {"timed_mutexes", OPT_TIMED_MUTEXES, + "Specify whether to time mutexes (only InnoDB mutexes are currently supported)", + (gptr*) &timed_mutexes, (gptr*) &timed_mutexes, 0, GET_BOOL, NO_ARG, 0, + 0, 0, 0, 0, 0}, {"tmpdir", 't', "Path for temporary files. Several paths may be specified, separated by a " -#if defined( __WIN__) || defined(OS2) || defined(__NETWARE__) +#if defined(__WIN__) || defined(OS2) || defined(__NETWARE__) "semicolon (;)" #else "colon (:)" @@ -5038,6 +5558,11 @@ replicating a LOAD DATA INFILE command.", 0, 0, 0, 0, 0}, {"use-symbolic-links", 's', "Enable symbolic link support. Deprecated option; use --symbolic-links instead.", (gptr*) &my_use_symdir, (gptr*) &my_use_symdir, 0, GET_BOOL, NO_ARG, + /* + The system call realpath() produces warnings under valgrind and + purify. These are not suppressed: instead we disable symlinks + option if compiled with valgrind support. 
+ */ IF_PURIFY(0,1), 0, 0, 0, 0, 0}, {"user", 'u', "Run mysqld daemon as user.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -5085,12 +5610,6 @@ replicating a LOAD DATA INFILE command.", "The number of seconds the mysqld server is waiting for a connect packet before responding with 'Bad handshake'.", (gptr*) &connect_timeout, (gptr*) &connect_timeout, 0, GET_ULONG, REQUIRED_ARG, CONNECT_TIMEOUT, 2, LONG_TIMEOUT, 0, 1, 0 }, -#ifdef HAVE_REPLICATION - {"crash_binlog_innodb", OPT_CRASH_BINLOG_INNODB, - "Used only for testing, to crash when writing Nth event to binlog.", - (gptr*) &opt_crash_binlog_innodb, (gptr*) &opt_crash_binlog_innodb, - 0, GET_UINT, REQUIRED_ARG, 0, 0, ~(uint)0, 0, 1, 0}, -#endif { "date_format", OPT_DATE_FORMAT, "The DATE format (For future).", (gptr*) &opt_date_time_formats[MYSQL_TIMESTAMP_DATE], @@ -5118,6 +5637,11 @@ replicating a LOAD DATA INFILE command.", "What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.", (gptr*) &delayed_queue_size, (gptr*) &delayed_queue_size, 0, GET_ULONG, REQUIRED_ARG, DELAYED_QUEUE_SIZE, 1, ~0L, 0, 1, 0}, + {"div_precision_increment", OPT_DIV_PRECINCREMENT, + "Precision of the result of '/' operator will be increased on that value.", + (gptr*) &global_system_variables.div_precincrement, + (gptr*) &max_system_variables.div_precincrement, 0, GET_ULONG, + REQUIRED_ARG, 4, 0, DECIMAL_MAX_SCALE, 0, 0, 0}, {"expire_logs_days", OPT_EXPIRE_LOGS_DAYS, "If non-zero, binary logs will be purged after expire_logs_days " "days; possible purges happen at startup and at binary log rotation.", @@ -5171,7 +5695,18 @@ replicating a LOAD DATA INFILE command.", {"innodb_buffer_pool_size", OPT_INNODB_BUFFER_POOL_SIZE, "The size of the memory buffer InnoDB uses to cache data and indexes of its tables.", (gptr*) &innobase_buffer_pool_size, (gptr*) &innobase_buffer_pool_size, 0, - GET_LONG, 
REQUIRED_ARG, 8*1024*1024L, 1024*1024L, ~0L, 0, 1024*1024L, 0}, + GET_LL, REQUIRED_ARG, 8*1024*1024L, 1024*1024L, LONGLONG_MAX, 0, + 1024*1024L, 0}, + {"innodb_commit_concurrency", OPT_INNODB_COMMIT_CONCURRENCY, + "Helps in performance tuning in heavily concurrent environments.", + (gptr*) &srv_commit_concurrency, (gptr*) &srv_commit_concurrency, + 0, GET_LONG, REQUIRED_ARG, 0, 0, 1000, 0, 1, 0}, + {"innodb_concurrency_tickets", OPT_INNODB_CONCURRENCY_TICKETS, + "Number of times a thread is allowed to enter InnoDB within the same \ + SQL query after it has once got the ticket", + (gptr*) &srv_n_free_tickets_to_enter, + (gptr*) &srv_n_free_tickets_to_enter, + 0, GET_LONG, REQUIRED_ARG, 500L, 1L, ~0L, 0, 1L, 0}, {"innodb_file_io_threads", OPT_INNODB_FILE_IO_THREADS, "Number of file I/O threads in InnoDB.", (gptr*) &innobase_file_io_threads, (gptr*) &innobase_file_io_threads, 0, GET_LONG, REQUIRED_ARG, 4, 4, 64, 0, @@ -5189,9 +5724,10 @@ replicating a LOAD DATA INFILE command.", (gptr*) &innobase_log_buffer_size, (gptr*) &innobase_log_buffer_size, 0, GET_LONG, REQUIRED_ARG, 1024*1024L, 256*1024L, ~0L, 0, 1024, 0}, {"innodb_log_file_size", OPT_INNODB_LOG_FILE_SIZE, - "Size of each log file in a log group in megabytes.", + "Size of each log file in a log group.", (gptr*) &innobase_log_file_size, (gptr*) &innobase_log_file_size, 0, - GET_LONG, REQUIRED_ARG, 5*1024*1024L, 1*1024*1024L, ~0L, 0, 1024*1024L, 0}, + GET_LL, REQUIRED_ARG, 5*1024*1024L, 1*1024*1024L, LONGLONG_MAX, 0, + 1024*1024L, 0}, {"innodb_log_files_in_group", OPT_INNODB_LOG_FILES_IN_GROUP, "Number of log files in the log group. InnoDB writes to the files in a circular fashion. 
Value 3 is recommended here.", (gptr*) &innobase_log_files_in_group, (gptr*) &innobase_log_files_in_group, @@ -5205,30 +5741,23 @@ replicating a LOAD DATA INFILE command.", "How many files at the maximum InnoDB keeps open at the same time.", (gptr*) &innobase_open_files, (gptr*) &innobase_open_files, 0, GET_LONG, REQUIRED_ARG, 300L, 10L, ~0L, 0, 1L, 0}, -#ifdef HAVE_REPLICATION - /* - Disabled for the 4.1.3 release. Disabling just this paragraph of code is - enough, as then user can't set it to 1 so it will always be ignored in the - rest of code. - */ -#if MYSQL_VERSION_ID >= 40103 - /* - innodb_safe_binlog is not a variable, just an option. Does not make - sense to make it a variable, as it is only used at startup (and so the - value would be lost at next startup, so setting it on the fly would have no - effect). - */ - {"innodb_safe_binlog", OPT_INNODB_SAFE_BINLOG, - "After a crash recovery by InnoDB, truncate the binary log after the last " - "not-rolled-back statement/transaction.", - (gptr*) &opt_innodb_safe_binlog, (gptr*) &opt_innodb_safe_binlog, - 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, -#endif -#endif + {"innodb_sync_spin_loops", OPT_INNODB_SYNC_SPIN_LOOPS, + "Count of spin-loop rounds in InnoDB mutexes", + (gptr*) &srv_n_spin_wait_rounds, + (gptr*) &srv_n_spin_wait_rounds, + 0, GET_LONG, REQUIRED_ARG, 20L, 0L, ~0L, 0, 1L, 0}, {"innodb_thread_concurrency", OPT_INNODB_THREAD_CONCURRENCY, - "Helps in performance tuning in heavily concurrent environments.", - (gptr*) &innobase_thread_concurrency, (gptr*) &innobase_thread_concurrency, - 0, GET_LONG, REQUIRED_ARG, 8, 1, 1000, 0, 1, 0}, + "Helps in performance tuning in heavily concurrent environments. " + "Sets the maximum number of threads allowed inside InnoDB. 
Value 0" + " will disable the thread throttling.", + (gptr*) &srv_thread_concurrency, (gptr*) &srv_thread_concurrency, + 0, GET_LONG, REQUIRED_ARG, 8, 0, 1000, 0, 1, 0}, + {"innodb_thread_sleep_delay", OPT_INNODB_THREAD_SLEEP_DELAY, + "Time of innodb thread sleeping before joining InnoDB queue (usec). Value 0" + " disable a sleep", + (gptr*) &srv_thread_sleep_delay, + (gptr*) &srv_thread_sleep_delay, + 0, GET_LONG, REQUIRED_ARG, 10000L, 0L, ~0L, 0, 1L, 0}, #endif /* HAVE_INNOBASE_DB */ {"interactive_timeout", OPT_INTERACTIVE_TIMEOUT, "The number of seconds the server waits for activity on an interactive connection before closing it.", @@ -5246,13 +5775,13 @@ replicating a LOAD DATA INFILE command.", (gptr*) &dflt_key_cache_var.param_buff_size, (gptr*) 0, 0, (GET_ULL | GET_ASK_ADDR), - REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, UINT_MAX32, MALLOC_OVERHEAD, + REQUIRED_ARG, KEY_CACHE_SIZE, MALLOC_OVERHEAD, ~(ulong) 0, MALLOC_OVERHEAD, IO_SIZE, 0}, {"key_cache_age_threshold", OPT_KEY_CACHE_AGE_THRESHOLD, "This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. 
This specifies the percentage ratio of that number of hits to the total number of blocks in key cache", (gptr*) &dflt_key_cache_var.param_age_threshold, (gptr*) 0, - 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, + 0, (GET_ULONG | GET_ASK_ADDR), REQUIRED_ARG, 300, 100, ~0L, 0, 100, 0}, {"key_cache_block_size", OPT_KEY_CACHE_BLOCK_SIZE, "The default size of key cache blocks", @@ -5317,8 +5846,9 @@ The minimum value for this variable is 4096.", {"max_heap_table_size", OPT_MAX_HEP_TABLE_SIZE, "Don't allow creation of heap tables bigger than this.", (gptr*) &global_system_variables.max_heap_table_size, - (gptr*) &max_system_variables.max_heap_table_size, 0, GET_ULONG, - REQUIRED_ARG, 16*1024*1024L, 16384, ~0L, MALLOC_OVERHEAD, 1024, 0}, + (gptr*) &max_system_variables.max_heap_table_size, 0, GET_ULL, + REQUIRED_ARG, 16*1024*1024L, 16384, MAX_MEM_TABLE_SIZE, + MALLOC_OVERHEAD, 1024, 0}, {"max_join_size", OPT_MAX_JOIN_SIZE, "Joins that are probably going to read more than max_join_size records return an error.", (gptr*) &global_system_variables.max_join_size, @@ -5347,6 +5877,11 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.max_sort_length, (gptr*) &max_system_variables.max_sort_length, 0, GET_ULONG, REQUIRED_ARG, 1024, 4, 8192*1024L, 0, 1, 0}, + {"max_sp_recursion_depth", OPT_MAX_SP_RECURSION_DEPTH, + "Maximum stored procedure recursion depth. 
(discussed with docs).", + (gptr*) &global_system_variables.max_sp_recursion_depth, + (gptr*) &max_system_variables.max_sp_recursion_depth, 0, GET_ULONG, + OPT_ARG, 0, 0, 255, 0, 1, 0 }, {"max_tmp_tables", OPT_MAX_TMP_TABLES, "Maximum number of temporary tables a client can keep open at a time.", (gptr*) &global_system_variables.max_tmp_tables, @@ -5354,12 +5889,17 @@ The minimum value for this variable is 4096.", REQUIRED_ARG, 32, 1, ~0L, 0, 1, 0}, {"max_user_connections", OPT_MAX_USER_CONNECTIONS, "The maximum number of active connections for a single user (0 = no limit).", - (gptr*) &max_user_connections, (gptr*) &max_user_connections, 0, GET_ULONG, - REQUIRED_ARG, 0, 1, ~0L, 0, 1, 0}, + (gptr*) &max_user_connections, (gptr*) &max_user_connections, 0, GET_UINT, + REQUIRED_ARG, 0, 1, ~0, 0, 1, 0}, {"max_write_lock_count", OPT_MAX_WRITE_LOCK_COUNT, "After this many write locks, allow some read locks to run in between.", (gptr*) &max_write_lock_count, (gptr*) &max_write_lock_count, 0, GET_ULONG, REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0}, + {"multi_range_count", OPT_MULTI_RANGE_COUNT, + "Number of key ranges to request at once.", + (gptr*) &global_system_variables.multi_range_count, + (gptr*) &max_system_variables.multi_range_count, 0, + GET_ULONG, REQUIRED_ARG, 256, 1, ~0L, 0, 1, 0}, {"myisam_block_size", OPT_MYISAM_BLOCK_SIZE, "Block size to be used for MyISAM index pages.", (gptr*) &opt_myisam_block_size, @@ -5370,9 +5910,9 @@ The minimum value for this variable is 4096.", "Default pointer size to be used for MyISAM tables.", (gptr*) &myisam_data_pointer_size, (gptr*) &myisam_data_pointer_size, 0, GET_ULONG, REQUIRED_ARG, - 4, 2, 7, 0, 1, 0}, + 6, 2, 7, 0, 1, 0}, {"myisam_max_extra_sort_file_size", OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE, - "Used to help MySQL to decide when to use the slow but safe key cache index create method.", + "Deprecated option", (gptr*) &global_system_variables.myisam_max_extra_sort_file_size, (gptr*) 
&max_system_variables.myisam_max_extra_sort_file_size, 0, GET_ULL, REQUIRED_ARG, (ulonglong) MI_MAX_TEMP_LENGTH, @@ -5423,6 +5963,16 @@ The minimum value for this variable is 4096.", "If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). If this value is 0 then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of files.", (gptr*) &open_files_limit, (gptr*) &open_files_limit, 0, GET_ULONG, REQUIRED_ARG, 0, 0, OS_FILE_LIMIT, 0, 1, 0}, + {"optimizer_prune_level", OPT_OPTIMIZER_PRUNE_LEVEL, + "Controls the heuristic(s) applied during query optimization to prune less-promising partial plans from the optimizer search space. Meaning: 0 - do not apply any heuristic, thus perform exhaustive search; 1 - prune plans based on number of retrieved rows.", + (gptr*) &global_system_variables.optimizer_prune_level, + (gptr*) &max_system_variables.optimizer_prune_level, + 0, GET_ULONG, OPT_ARG, 1, 0, 1, 0, 1, 0}, + {"optimizer_search_depth", OPT_OPTIMIZER_SEARCH_DEPTH, + "Maximum depth of search performed by the query optimizer. Values larger than the number of relations in a query result in better query plans, but take longer to compile a query. Smaller values than the number of tables in a relation result in faster optimization, but may produce very bad query plans. 
If set to 0, the system will automatically pick a reasonable value; if set to MAX_TABLES+2, the optimizer will switch to the original find_best (used for testing/comparison).", + (gptr*) &global_system_variables.optimizer_search_depth, + (gptr*) &max_system_variables.optimizer_search_depth, + 0, GET_ULONG, OPT_ARG, MAX_TABLES+1, 0, MAX_TABLES+2, 0, 1, 0}, {"preload_buffer_size", OPT_PRELOAD_BUFFER_SIZE, "The size of the buffer that is allocated when preloading indexes", (gptr*) &global_system_variables.preload_buff_size, @@ -5475,9 +6025,10 @@ The minimum value for this variable is 4096.", "Each thread that does a sequential scan allocates a buffer of this size for each table it scans. If you do many sequential scans, you may want to increase this value.", (gptr*) &global_system_variables.read_buff_size, (gptr*) &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG, - 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ~0L, MALLOC_OVERHEAD, IO_SIZE, 0}, + 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, SSIZE_MAX, MALLOC_OVERHEAD, IO_SIZE, + 0}, {"read_only", OPT_READONLY, - "Make all tables readonly, with the exception for replication (slave) threads and users with the SUPER privilege", + "Make all non-temporary tables read-only, with the exception for replication (slave) threads and users with the SUPER privilege", (gptr*) &opt_readonly, (gptr*) &opt_readonly, 0, GET_BOOL, NO_ARG, 0, 0, 1, 0, 1, 0}, @@ -5486,12 +6037,12 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.read_rnd_buff_size, (gptr*) &max_system_variables.read_rnd_buff_size, 0, GET_ULONG, REQUIRED_ARG, 256*1024L, IO_SIZE*2+MALLOC_OVERHEAD, - ~0L, MALLOC_OVERHEAD, IO_SIZE, 0}, + SSIZE_MAX, MALLOC_OVERHEAD, IO_SIZE, 0}, {"record_buffer", OPT_RECORD_BUFFER, "Alias for read_buffer_size", (gptr*) &global_system_variables.read_buff_size, (gptr*) &max_system_variables.read_buff_size,0, GET_ULONG, REQUIRED_ARG, - 128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, ~0L, MALLOC_OVERHEAD, IO_SIZE, 0}, + 
128*1024L, IO_SIZE*2+MALLOC_OVERHEAD, SSIZE_MAX, MALLOC_OVERHEAD, IO_SIZE, 0}, #ifdef HAVE_REPLICATION {"relay_log_purge", OPT_RELAY_LOG_PURGE, "0 = do not purge relay logs. 1 = purge them as soon as they are no more needed.", @@ -5517,7 +6068,7 @@ The minimum value for this variable is 4096.", "it failed with a deadlock or elapsed lock wait timeout, " "before giving up and stopping.", (gptr*) &slave_trans_retries, (gptr*) &slave_trans_retries, 0, - GET_ULONG, REQUIRED_ARG, 0L, 0L, (longlong) ULONG_MAX, 0, 1, 0}, + GET_ULONG, REQUIRED_ARG, 10L, 0L, (longlong) ULONG_MAX, 0, 1, 0}, #endif /* HAVE_REPLICATION */ {"slow_launch_time", OPT_SLOW_LAUNCH_TIME, "If creating the thread takes longer than this value (in seconds), the Slow_launch_threads counter will be incremented.", @@ -5531,40 +6082,27 @@ The minimum value for this variable is 4096.", 1, 0}, #ifdef HAVE_BERKELEY_DB {"sync-bdb-logs", OPT_BDB_SYNC, - "Synchronously flush logs. Enabled by default", + "Synchronously flush Berkeley DB logs. Enabled by default", (gptr*) &opt_sync_bdb_logs, (gptr*) &opt_sync_bdb_logs, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, #endif /* HAVE_BERKELEY_DB */ {"sync-binlog", OPT_SYNC_BINLOG, - "Sync the binlog to disk after every #th event. \ -#=0 (the default) does no sync. 
Syncing slows MySQL down", - (gptr*) &sync_binlog_period, - (gptr*) &sync_binlog_period, 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1, - 0}, -#ifdef DOES_NOTHING_YET - {"sync-replication", OPT_SYNC_REPLICATION, - "Enable synchronous replication", - (gptr*) &global_system_variables.sync_replication, - (gptr*) &global_system_variables.sync_replication, - 0, GET_ULONG, REQUIRED_ARG, 0, 0, 1, 0, 1, 0}, - {"sync-replication-slave-id", OPT_SYNC_REPLICATION_SLAVE_ID, - "Synchronous replication is wished for this slave", - (gptr*) &global_system_variables.sync_replication_slave_id, - (gptr*) &global_system_variables.sync_replication_slave_id, - 0, GET_ULONG, REQUIRED_ARG, 0, 0, ~0L, 0, 1, 0}, - {"sync-replication-timeout", OPT_SYNC_REPLICATION_TIMEOUT, - "Synchronous replication timeout", - (gptr*) &global_system_variables.sync_replication_timeout, - (gptr*) &global_system_variables.sync_replication_timeout, - 0, GET_ULONG, REQUIRED_ARG, 10, 0, ~0L, 0, 1, 0}, -#endif - {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default", + "Synchronously flush binary log to disk after every #th event. " + "Use 0 (default) to disable synchronous flushing.", + (gptr*) &sync_binlog_period, (gptr*) &sync_binlog_period, 0, GET_ULONG, + REQUIRED_ARG, 0, 0, ~0L, 0, 1, 0}, + {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default.", (gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"table_cache", OPT_TABLE_CACHE, "The number of open tables for all threads.", (gptr*) &table_cache_size, (gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, TABLE_OPEN_CACHE_DEFAULT, 1, 512*1024L, 0, 1, 0}, + {"table_lock_wait_timeout", OPT_TABLE_LOCK_WAIT_TIMEOUT, "Timeout in " + "seconds to wait for a table level lock before returning an error. 
Used" + " only if the connection has active cursors.", + (gptr*) &table_lock_wait_timeout, (gptr*) &table_lock_wait_timeout, + 0, GET_ULONG, REQUIRED_ARG, 50, 1, 1024 * 1024 * 1024, 0, 1, 0}, {"thread_cache_size", OPT_THREAD_CACHE_SIZE, "How many threads we should keep in a cache for reuse.", (gptr*) &thread_cache_size, (gptr*) &thread_cache_size, 0, GET_ULONG, @@ -5585,8 +6123,8 @@ The minimum value for this variable is 4096.", {"tmp_table_size", OPT_TMP_TABLE_SIZE, "If an in-memory temporary table exceeds this size, MySQL will automatically convert it to an on-disk MyISAM table.", (gptr*) &global_system_variables.tmp_table_size, - (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULONG, - REQUIRED_ARG, 32*1024*1024L, 1024, ~0L, 0, 1, 0}, + (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULL, + REQUIRED_ARG, 32*1024*1024L, 1024, MAX_MEM_TABLE_SIZE, 0, 1, 0}, {"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE, "Allocation block size for transactions to be stored in binary log", (gptr*) &global_system_variables.trans_alloc_block_size, @@ -5597,6 +6135,11 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.trans_prealloc_size, (gptr*) &max_system_variables.trans_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0}, + {"updatable_views_with_limit", OPT_UPDATABLE_VIEWS_WITH_LIMIT, + "1 = YES = Don't issue an error message (warning only) if a VIEW without presence of a key of the underlying table is used in queries with a LIMIT clause for updating. 
0 = NO = Prohibit update of a VIEW, which does not contain a key of the underlying table and the query uses a LIMIT clause (usually get from GUI tools).", + (gptr*) &global_system_variables.updatable_views_with_limit, + (gptr*) &max_system_variables.updatable_views_with_limit, + 0, GET_ULONG, REQUIRED_ARG, 1, 0, 1, 0, 1, 0}, {"wait_timeout", OPT_WAIT_TIMEOUT, "The number of seconds the server waits for activity on a connection before closing it.", (gptr*) &global_system_variables.net_wait_timeout, @@ -5607,179 +6150,190 @@ The minimum value for this variable is 4096.", }; +/* + Variables shown by SHOW STATUS in alphabetical order +*/ + struct show_var_st status_vars[]= { {"Aborted_clients", (char*) &aborted_threads, SHOW_LONG}, {"Aborted_connects", (char*) &aborted_connects, SHOW_LONG}, {"Binlog_cache_disk_use", (char*) &binlog_cache_disk_use, SHOW_LONG}, {"Binlog_cache_use", (char*) &binlog_cache_use, SHOW_LONG}, - {"Bytes_received", (char*) &bytes_received, SHOW_LONG}, - {"Bytes_sent", (char*) &bytes_sent, SHOW_LONG}, - {"Com_admin_commands", (char*) &com_other, SHOW_LONG}, - {"Com_alter_db", (char*) (com_stat+(uint) SQLCOM_ALTER_DB),SHOW_LONG}, - {"Com_alter_table", (char*) (com_stat+(uint) SQLCOM_ALTER_TABLE),SHOW_LONG}, - {"Com_analyze", (char*) (com_stat+(uint) SQLCOM_ANALYZE),SHOW_LONG}, - {"Com_backup_table", (char*) (com_stat+(uint) SQLCOM_BACKUP_TABLE),SHOW_LONG}, - {"Com_begin", (char*) (com_stat+(uint) SQLCOM_BEGIN),SHOW_LONG}, - {"Com_change_db", (char*) (com_stat+(uint) SQLCOM_CHANGE_DB),SHOW_LONG}, - {"Com_change_master", (char*) (com_stat+(uint) SQLCOM_CHANGE_MASTER),SHOW_LONG}, - {"Com_check", (char*) (com_stat+(uint) SQLCOM_CHECK),SHOW_LONG}, - {"Com_checksum", (char*) (com_stat+(uint) SQLCOM_CHECKSUM),SHOW_LONG}, - {"Com_commit", (char*) (com_stat+(uint) SQLCOM_COMMIT),SHOW_LONG}, - {"Com_create_db", (char*) (com_stat+(uint) SQLCOM_CREATE_DB),SHOW_LONG}, - {"Com_create_function", (char*) (com_stat+(uint) SQLCOM_CREATE_FUNCTION),SHOW_LONG}, - 
{"Com_create_index", (char*) (com_stat+(uint) SQLCOM_CREATE_INDEX),SHOW_LONG}, - {"Com_create_table", (char*) (com_stat+(uint) SQLCOM_CREATE_TABLE),SHOW_LONG}, - {"Com_dealloc_sql", (char*) (com_stat+(uint) - SQLCOM_DEALLOCATE_PREPARE), SHOW_LONG}, - {"Com_delete", (char*) (com_stat+(uint) SQLCOM_DELETE),SHOW_LONG}, - {"Com_delete_multi", (char*) (com_stat+(uint) SQLCOM_DELETE_MULTI),SHOW_LONG}, - {"Com_do", (char*) (com_stat+(uint) SQLCOM_DO),SHOW_LONG}, - {"Com_drop_db", (char*) (com_stat+(uint) SQLCOM_DROP_DB),SHOW_LONG}, - {"Com_drop_function", (char*) (com_stat+(uint) SQLCOM_DROP_FUNCTION),SHOW_LONG}, - {"Com_drop_index", (char*) (com_stat+(uint) SQLCOM_DROP_INDEX),SHOW_LONG}, - {"Com_drop_table", (char*) (com_stat+(uint) SQLCOM_DROP_TABLE),SHOW_LONG}, - {"Com_drop_user", (char*) (com_stat+(uint) SQLCOM_DROP_USER),SHOW_LONG}, - {"Com_execute_sql", (char*) (com_stat+(uint) SQLCOM_EXECUTE), - SHOW_LONG}, - {"Com_flush", (char*) (com_stat+(uint) SQLCOM_FLUSH),SHOW_LONG}, - {"Com_grant", (char*) (com_stat+(uint) SQLCOM_GRANT),SHOW_LONG}, - {"Com_ha_close", (char*) (com_stat+(uint) SQLCOM_HA_CLOSE),SHOW_LONG}, - {"Com_ha_open", (char*) (com_stat+(uint) SQLCOM_HA_OPEN),SHOW_LONG}, - {"Com_ha_read", (char*) (com_stat+(uint) SQLCOM_HA_READ),SHOW_LONG}, - {"Com_help", (char*) (com_stat+(uint) SQLCOM_HELP),SHOW_LONG}, - {"Com_insert", (char*) (com_stat+(uint) SQLCOM_INSERT),SHOW_LONG}, - {"Com_insert_select", (char*) (com_stat+(uint) SQLCOM_INSERT_SELECT),SHOW_LONG}, - {"Com_kill", (char*) (com_stat+(uint) SQLCOM_KILL),SHOW_LONG}, - {"Com_load", (char*) (com_stat+(uint) SQLCOM_LOAD),SHOW_LONG}, - {"Com_load_master_data", (char*) (com_stat+(uint) SQLCOM_LOAD_MASTER_DATA),SHOW_LONG}, - {"Com_load_master_table", (char*) (com_stat+(uint) SQLCOM_LOAD_MASTER_TABLE),SHOW_LONG}, - {"Com_lock_tables", (char*) (com_stat+(uint) SQLCOM_LOCK_TABLES),SHOW_LONG}, - {"Com_optimize", (char*) (com_stat+(uint) SQLCOM_OPTIMIZE),SHOW_LONG}, - {"Com_preload_keys", (char*) (com_stat+(uint) 
SQLCOM_PRELOAD_KEYS),SHOW_LONG}, - {"Com_prepare_sql", (char*) (com_stat+(uint) SQLCOM_PREPARE), - SHOW_LONG}, - {"Com_purge", (char*) (com_stat+(uint) SQLCOM_PURGE),SHOW_LONG}, - {"Com_purge_before_date", (char*) (com_stat+(uint) SQLCOM_PURGE_BEFORE),SHOW_LONG}, - {"Com_rename_table", (char*) (com_stat+(uint) SQLCOM_RENAME_TABLE),SHOW_LONG}, - {"Com_repair", (char*) (com_stat+(uint) SQLCOM_REPAIR),SHOW_LONG}, - {"Com_replace", (char*) (com_stat+(uint) SQLCOM_REPLACE),SHOW_LONG}, - {"Com_replace_select", (char*) (com_stat+(uint) SQLCOM_REPLACE_SELECT),SHOW_LONG}, - {"Com_reset", (char*) (com_stat+(uint) SQLCOM_RESET),SHOW_LONG}, - {"Com_restore_table", (char*) (com_stat+(uint) SQLCOM_RESTORE_TABLE),SHOW_LONG}, - {"Com_revoke", (char*) (com_stat+(uint) SQLCOM_REVOKE),SHOW_LONG}, - {"Com_revoke_all", (char*) (com_stat+(uint) SQLCOM_REVOKE_ALL),SHOW_LONG}, - {"Com_rollback", (char*) (com_stat+(uint) SQLCOM_ROLLBACK),SHOW_LONG}, - {"Com_savepoint", (char*) (com_stat+(uint) SQLCOM_SAVEPOINT),SHOW_LONG}, - {"Com_select", (char*) (com_stat+(uint) SQLCOM_SELECT),SHOW_LONG}, - {"Com_set_option", (char*) (com_stat+(uint) SQLCOM_SET_OPTION),SHOW_LONG}, - {"Com_show_binlog_events", (char*) (com_stat+(uint) SQLCOM_SHOW_BINLOG_EVENTS),SHOW_LONG}, - {"Com_show_binlogs", (char*) (com_stat+(uint) SQLCOM_SHOW_BINLOGS),SHOW_LONG}, - {"Com_show_charsets", (char*) (com_stat+(uint) SQLCOM_SHOW_CHARSETS),SHOW_LONG}, - {"Com_show_collations", (char*) (com_stat+(uint) SQLCOM_SHOW_COLLATIONS),SHOW_LONG}, - {"Com_show_column_types", (char*) (com_stat+(uint) SQLCOM_SHOW_COLUMN_TYPES),SHOW_LONG}, - {"Com_show_create_db", (char*) (com_stat+(uint) SQLCOM_SHOW_CREATE_DB),SHOW_LONG}, - {"Com_show_create_table", (char*) (com_stat+(uint) SQLCOM_SHOW_CREATE),SHOW_LONG}, - {"Com_show_databases", (char*) (com_stat+(uint) SQLCOM_SHOW_DATABASES),SHOW_LONG}, - {"Com_show_errors", (char*) (com_stat+(uint) SQLCOM_SHOW_ERRORS),SHOW_LONG}, - {"Com_show_fields", (char*) (com_stat+(uint) 
SQLCOM_SHOW_FIELDS),SHOW_LONG}, - {"Com_show_grants", (char*) (com_stat+(uint) SQLCOM_SHOW_GRANTS),SHOW_LONG}, - {"Com_show_innodb_status", (char*) (com_stat+(uint) SQLCOM_SHOW_INNODB_STATUS),SHOW_LONG}, - {"Com_show_keys", (char*) (com_stat+(uint) SQLCOM_SHOW_KEYS),SHOW_LONG}, - {"Com_show_logs", (char*) (com_stat+(uint) SQLCOM_SHOW_LOGS),SHOW_LONG}, - {"Com_show_master_status", (char*) (com_stat+(uint) SQLCOM_SHOW_MASTER_STAT),SHOW_LONG}, - {"Com_show_ndb_status", (char*) (com_stat+(uint) SQLCOM_SHOW_NDBCLUSTER_STATUS),SHOW_LONG}, - {"Com_show_new_master", (char*) (com_stat+(uint) SQLCOM_SHOW_NEW_MASTER),SHOW_LONG}, - {"Com_show_open_tables", (char*) (com_stat+(uint) SQLCOM_SHOW_OPEN_TABLES),SHOW_LONG}, - {"Com_show_privileges", (char*) (com_stat+(uint) SQLCOM_SHOW_PRIVILEGES),SHOW_LONG}, - {"Com_show_processlist", (char*) (com_stat+(uint) SQLCOM_SHOW_PROCESSLIST),SHOW_LONG}, - {"Com_show_slave_hosts", (char*) (com_stat+(uint) SQLCOM_SHOW_SLAVE_HOSTS),SHOW_LONG}, - {"Com_show_slave_status", (char*) (com_stat+(uint) SQLCOM_SHOW_SLAVE_STAT),SHOW_LONG}, - {"Com_show_status", (char*) (com_stat+(uint) SQLCOM_SHOW_STATUS),SHOW_LONG}, - {"Com_show_storage_engines", (char*) (com_stat+(uint) SQLCOM_SHOW_STORAGE_ENGINES),SHOW_LONG}, - {"Com_show_tables", (char*) (com_stat+(uint) SQLCOM_SHOW_TABLES),SHOW_LONG}, - {"Com_show_variables", (char*) (com_stat+(uint) SQLCOM_SHOW_VARIABLES),SHOW_LONG}, - {"Com_show_warnings", (char*) (com_stat+(uint) SQLCOM_SHOW_WARNS),SHOW_LONG}, - {"Com_slave_start", (char*) (com_stat+(uint) SQLCOM_SLAVE_START),SHOW_LONG}, - {"Com_slave_stop", (char*) (com_stat+(uint) SQLCOM_SLAVE_STOP),SHOW_LONG}, - {"Com_stmt_close", (char*) &com_stmt_close, SHOW_LONG}, - {"Com_stmt_execute", (char*) &com_stmt_execute, SHOW_LONG}, - {"Com_stmt_prepare", (char*) &com_stmt_prepare, SHOW_LONG}, - {"Com_stmt_reset", (char*) &com_stmt_reset, SHOW_LONG}, - {"Com_stmt_send_long_data", (char*) &com_stmt_send_long_data, SHOW_LONG}, - {"Com_truncate", (char*) 
(com_stat+(uint) SQLCOM_TRUNCATE),SHOW_LONG}, - {"Com_unlock_tables", (char*) (com_stat+(uint) SQLCOM_UNLOCK_TABLES),SHOW_LONG}, - {"Com_update", (char*) (com_stat+(uint) SQLCOM_UPDATE),SHOW_LONG}, - {"Com_update_multi", (char*) (com_stat+(uint) SQLCOM_UPDATE_MULTI),SHOW_LONG}, + {"Bytes_received", (char*) offsetof(STATUS_VAR, bytes_received), SHOW_LONG_STATUS}, + {"Bytes_sent", (char*) offsetof(STATUS_VAR, bytes_sent), SHOW_LONG_STATUS}, + {"Com_admin_commands", (char*) offsetof(STATUS_VAR, com_other), SHOW_LONG_STATUS}, + {"Com_alter_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_DB]), SHOW_LONG_STATUS}, + {"Com_alter_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_TABLE]), SHOW_LONG_STATUS}, + {"Com_analyze", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ANALYZE]), SHOW_LONG_STATUS}, + {"Com_backup_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BACKUP_TABLE]), SHOW_LONG_STATUS}, + {"Com_begin", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BEGIN]), SHOW_LONG_STATUS}, + {"Com_change_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_DB]), SHOW_LONG_STATUS}, + {"Com_change_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_MASTER]), SHOW_LONG_STATUS}, + {"Com_check", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECK]), SHOW_LONG_STATUS}, + {"Com_checksum", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECKSUM]), SHOW_LONG_STATUS}, + {"Com_commit", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_COMMIT]), SHOW_LONG_STATUS}, + {"Com_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_DB]), SHOW_LONG_STATUS}, + {"Com_create_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_FUNCTION]), SHOW_LONG_STATUS}, + {"Com_create_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_INDEX]), SHOW_LONG_STATUS}, + {"Com_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_TABLE]), SHOW_LONG_STATUS}, + 
{"Com_create_user", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_USER]), SHOW_LONG_STATUS}, + {"Com_dealloc_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DEALLOCATE_PREPARE]), SHOW_LONG_STATUS}, + {"Com_delete", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE]), SHOW_LONG_STATUS}, + {"Com_delete_multi", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE_MULTI]), SHOW_LONG_STATUS}, + {"Com_do", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DO]), SHOW_LONG_STATUS}, + {"Com_drop_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_DB]), SHOW_LONG_STATUS}, + {"Com_drop_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_FUNCTION]), SHOW_LONG_STATUS}, + {"Com_drop_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_INDEX]), SHOW_LONG_STATUS}, + {"Com_drop_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_TABLE]), SHOW_LONG_STATUS}, + {"Com_drop_user", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_USER]), SHOW_LONG_STATUS}, + {"Com_execute_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EXECUTE]), SHOW_LONG_STATUS}, + {"Com_flush", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_FLUSH]), SHOW_LONG_STATUS}, + {"Com_grant", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_GRANT]), SHOW_LONG_STATUS}, + {"Com_ha_close", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_CLOSE]), SHOW_LONG_STATUS}, + {"Com_ha_open", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_OPEN]), SHOW_LONG_STATUS}, + {"Com_ha_read", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_READ]), SHOW_LONG_STATUS}, + {"Com_help", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HELP]), SHOW_LONG_STATUS}, + {"Com_insert", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT]), SHOW_LONG_STATUS}, + {"Com_insert_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT_SELECT]), SHOW_LONG_STATUS}, + {"Com_kill", (char*) offsetof(STATUS_VAR, 
com_stat[(uint) SQLCOM_KILL]), SHOW_LONG_STATUS}, + {"Com_load", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD]), SHOW_LONG_STATUS}, + {"Com_load_master_data", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_DATA]), SHOW_LONG_STATUS}, + {"Com_load_master_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_TABLE]), SHOW_LONG_STATUS}, + {"Com_lock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOCK_TABLES]), SHOW_LONG_STATUS}, + {"Com_optimize", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_OPTIMIZE]), SHOW_LONG_STATUS}, + {"Com_preload_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PRELOAD_KEYS]), SHOW_LONG_STATUS}, + {"Com_prepare_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PREPARE]), SHOW_LONG_STATUS}, + {"Com_purge", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE]), SHOW_LONG_STATUS}, + {"Com_purge_before_date", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE_BEFORE]), SHOW_LONG_STATUS}, + {"Com_rename_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RENAME_TABLE]), SHOW_LONG_STATUS}, + {"Com_repair", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPAIR]), SHOW_LONG_STATUS}, + {"Com_replace", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE]), SHOW_LONG_STATUS}, + {"Com_replace_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE_SELECT]), SHOW_LONG_STATUS}, + {"Com_reset", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESET]), SHOW_LONG_STATUS}, + {"Com_restore_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESTORE_TABLE]), SHOW_LONG_STATUS}, + {"Com_revoke", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE]), SHOW_LONG_STATUS}, + {"Com_revoke_all", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE_ALL]), SHOW_LONG_STATUS}, + {"Com_rollback", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ROLLBACK]), SHOW_LONG_STATUS}, + {"Com_savepoint", (char*) 
offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SAVEPOINT]), SHOW_LONG_STATUS}, + {"Com_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SELECT]), SHOW_LONG_STATUS}, + {"Com_set_option", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SET_OPTION]), SHOW_LONG_STATUS}, + {"Com_show_binlog_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOG_EVENTS]), SHOW_LONG_STATUS}, + {"Com_show_binlogs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOGS]), SHOW_LONG_STATUS}, + {"Com_show_charsets", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CHARSETS]), SHOW_LONG_STATUS}, + {"Com_show_collations", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLLATIONS]), SHOW_LONG_STATUS}, + {"Com_show_column_types", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLUMN_TYPES]), SHOW_LONG_STATUS}, + {"Com_show_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_DB]), SHOW_LONG_STATUS}, + {"Com_show_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE]), SHOW_LONG_STATUS}, + {"Com_show_databases", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_DATABASES]), SHOW_LONG_STATUS}, + {"Com_show_errors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ERRORS]), SHOW_LONG_STATUS}, + {"Com_show_fields", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FIELDS]), SHOW_LONG_STATUS}, + {"Com_show_grants", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_GRANTS]), SHOW_LONG_STATUS}, + {"Com_show_innodb_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_INNODB_STATUS]), SHOW_LONG_STATUS}, + {"Com_show_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_KEYS]), SHOW_LONG_STATUS}, + {"Com_show_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_LOGS]), SHOW_LONG_STATUS}, + {"Com_show_master_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_MASTER_STAT]), SHOW_LONG_STATUS}, + 
{"Com_show_ndb_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NDBCLUSTER_STATUS]), SHOW_LONG_STATUS}, + {"Com_show_new_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NEW_MASTER]), SHOW_LONG_STATUS}, + {"Com_show_open_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_OPEN_TABLES]), SHOW_LONG_STATUS}, + {"Com_show_privileges", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PRIVILEGES]), SHOW_LONG_STATUS}, + {"Com_show_processlist", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROCESSLIST]), SHOW_LONG_STATUS}, + {"Com_show_slave_hosts", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_HOSTS]), SHOW_LONG_STATUS}, + {"Com_show_slave_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_STAT]), SHOW_LONG_STATUS}, + {"Com_show_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS]), SHOW_LONG_STATUS}, + {"Com_show_storage_engines", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STORAGE_ENGINES]), SHOW_LONG_STATUS}, + {"Com_show_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLES]), SHOW_LONG_STATUS}, + {"Com_show_triggers", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TRIGGERS]), SHOW_LONG_STATUS}, + {"Com_show_variables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_VARIABLES]), SHOW_LONG_STATUS}, + {"Com_show_warnings", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_WARNS]), SHOW_LONG_STATUS}, + {"Com_slave_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS}, + {"Com_slave_stop", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_STOP]), SHOW_LONG_STATUS}, + {"Com_stmt_close", (char*) offsetof(STATUS_VAR, com_stmt_close), SHOW_LONG_STATUS}, + {"Com_stmt_execute", (char*) offsetof(STATUS_VAR, com_stmt_execute), SHOW_LONG_STATUS}, + {"Com_stmt_fetch", (char*) offsetof(STATUS_VAR, com_stmt_fetch), SHOW_LONG_STATUS}, + 
{"Com_stmt_prepare", (char*) offsetof(STATUS_VAR, com_stmt_prepare), SHOW_LONG_STATUS}, + {"Com_stmt_reset", (char*) offsetof(STATUS_VAR, com_stmt_reset), SHOW_LONG_STATUS}, + {"Com_stmt_send_long_data", (char*) offsetof(STATUS_VAR, com_stmt_send_long_data), SHOW_LONG_STATUS}, + {"Com_truncate", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_TRUNCATE]), SHOW_LONG_STATUS}, + {"Com_unlock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UNLOCK_TABLES]), SHOW_LONG_STATUS}, + {"Com_update", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE]), SHOW_LONG_STATUS}, + {"Com_update_multi", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE_MULTI]), SHOW_LONG_STATUS}, + {"Com_xa_commit", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_COMMIT]),SHOW_LONG_STATUS}, + {"Com_xa_end", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_END]),SHOW_LONG_STATUS}, + {"Com_xa_prepare", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_PREPARE]),SHOW_LONG_STATUS}, + {"Com_xa_recover", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_RECOVER]),SHOW_LONG_STATUS}, + {"Com_xa_rollback", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_ROLLBACK]),SHOW_LONG_STATUS}, + {"Com_xa_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_XA_START]),SHOW_LONG_STATUS}, + {"Compression", (char*) 0, SHOW_NET_COMPRESSION}, {"Connections", (char*) &thread_id, SHOW_LONG_CONST}, - {"Created_tmp_disk_tables", (char*) &created_tmp_disk_tables,SHOW_LONG}, + {"Created_tmp_disk_tables", (char*) offsetof(STATUS_VAR, created_tmp_disk_tables), SHOW_LONG_STATUS}, {"Created_tmp_files", (char*) &my_tmp_file_created, SHOW_LONG}, - {"Created_tmp_tables", (char*) &created_tmp_tables, SHOW_LONG}, + {"Created_tmp_tables", (char*) offsetof(STATUS_VAR, created_tmp_tables), SHOW_LONG_STATUS}, {"Delayed_errors", (char*) &delayed_insert_errors, SHOW_LONG}, {"Delayed_insert_threads", (char*) &delayed_insert_threads, SHOW_LONG_CONST}, {"Delayed_writes", (char*) 
&delayed_insert_writes, SHOW_LONG}, {"Flush_commands", (char*) &refresh_version, SHOW_LONG_CONST}, - {"Handler_commit", (char*) &ha_commit_count, SHOW_LONG}, - {"Handler_delete", (char*) &ha_delete_count, SHOW_LONG}, - {"Handler_discover", (char*) &ha_discover_count, SHOW_LONG}, - {"Handler_read_first", (char*) &ha_read_first_count, SHOW_LONG}, - {"Handler_read_key", (char*) &ha_read_key_count, SHOW_LONG}, - {"Handler_read_next", (char*) &ha_read_next_count, SHOW_LONG}, - {"Handler_read_prev", (char*) &ha_read_prev_count, SHOW_LONG}, - {"Handler_read_rnd", (char*) &ha_read_rnd_count, SHOW_LONG}, - {"Handler_read_rnd_next", (char*) &ha_read_rnd_next_count, SHOW_LONG}, - {"Handler_rollback", (char*) &ha_rollback_count, SHOW_LONG}, - {"Handler_update", (char*) &ha_update_count, SHOW_LONG}, - {"Handler_write", (char*) &ha_write_count, SHOW_LONG}, - {"Key_blocks_not_flushed", (char*) &dflt_key_cache_var.global_blocks_changed, - SHOW_KEY_CACHE_LONG}, - {"Key_blocks_unused", (char*) &dflt_key_cache_var.blocks_unused, - SHOW_KEY_CACHE_CONST_LONG}, - {"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used, - SHOW_KEY_CACHE_CONST_LONG}, - {"Key_read_requests", (char*) &dflt_key_cache_var.global_cache_r_requests, - SHOW_KEY_CACHE_LONGLONG}, - {"Key_reads", (char*) &dflt_key_cache_var.global_cache_read, - SHOW_KEY_CACHE_LONGLONG}, - {"Key_write_requests", (char*) &dflt_key_cache_var.global_cache_w_requests, - SHOW_KEY_CACHE_LONGLONG}, - {"Key_writes", (char*) &dflt_key_cache_var.global_cache_write, - SHOW_KEY_CACHE_LONGLONG}, + {"Handler_commit", (char*) offsetof(STATUS_VAR, ha_commit_count), SHOW_LONG_STATUS}, + {"Handler_delete", (char*) offsetof(STATUS_VAR, ha_delete_count), SHOW_LONG_STATUS}, + {"Handler_discover", (char*) offsetof(STATUS_VAR, ha_discover_count), SHOW_LONG_STATUS}, + {"Handler_prepare", (char*) offsetof(STATUS_VAR, ha_prepare_count), SHOW_LONG_STATUS}, + {"Handler_read_first", (char*) offsetof(STATUS_VAR, ha_read_first_count), SHOW_LONG_STATUS}, + 
{"Handler_read_key", (char*) offsetof(STATUS_VAR, ha_read_key_count), SHOW_LONG_STATUS}, + {"Handler_read_next", (char*) offsetof(STATUS_VAR, ha_read_next_count), SHOW_LONG_STATUS}, + {"Handler_read_prev", (char*) offsetof(STATUS_VAR, ha_read_prev_count), SHOW_LONG_STATUS}, + {"Handler_read_rnd", (char*) offsetof(STATUS_VAR, ha_read_rnd_count), SHOW_LONG_STATUS}, + {"Handler_read_rnd_next", (char*) offsetof(STATUS_VAR, ha_read_rnd_next_count), SHOW_LONG_STATUS}, + {"Handler_rollback", (char*) offsetof(STATUS_VAR, ha_rollback_count), SHOW_LONG_STATUS}, + {"Handler_savepoint", (char*) offsetof(STATUS_VAR, ha_savepoint_count), SHOW_LONG_STATUS}, + {"Handler_savepoint_rollback",(char*) offsetof(STATUS_VAR, ha_savepoint_rollback_count), SHOW_LONG_STATUS}, + {"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), SHOW_LONG_STATUS}, + {"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), SHOW_LONG_STATUS}, +#ifdef HAVE_INNOBASE_DB + {"Innodb_", (char*) &innodb_status_variables, SHOW_VARS}, +#endif /*HAVE_INNOBASE_DB*/ + {"Key_blocks_not_flushed", (char*) &dflt_key_cache_var.global_blocks_changed, SHOW_KEY_CACHE_LONG}, + {"Key_blocks_unused", (char*) &dflt_key_cache_var.blocks_unused, SHOW_KEY_CACHE_CONST_LONG}, + {"Key_blocks_used", (char*) &dflt_key_cache_var.blocks_used, SHOW_KEY_CACHE_CONST_LONG}, + {"Key_read_requests", (char*) &dflt_key_cache_var.global_cache_r_requests, SHOW_KEY_CACHE_LONGLONG}, + {"Key_reads", (char*) &dflt_key_cache_var.global_cache_read, SHOW_KEY_CACHE_LONGLONG}, + {"Key_write_requests", (char*) &dflt_key_cache_var.global_cache_w_requests, SHOW_KEY_CACHE_LONGLONG}, + {"Key_writes", (char*) &dflt_key_cache_var.global_cache_write, SHOW_KEY_CACHE_LONGLONG}, + {"Last_query_cost", (char*) offsetof(STATUS_VAR, last_query_cost), SHOW_DOUBLE_STATUS}, {"Max_used_connections", (char*) &max_used_connections, SHOW_LONG}, +#ifdef HAVE_NDBCLUSTER_DB + {"Ndb_", (char*) &ndb_status_variables, SHOW_VARS}, +#endif /*HAVE_NDBCLUSTER_DB*/ 
{"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_CONST}, {"Open_files", (char*) &my_file_opened, SHOW_LONG_CONST}, {"Open_streams", (char*) &my_stream_opened, SHOW_LONG_CONST}, {"Open_tables", (char*) 0, SHOW_OPENTABLES}, - {"Opened_tables", (char*) &opened_tables, SHOW_LONG}, + {"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS}, {"Prepared_stmt_count", (char*) &prepared_stmt_count, SHOW_LONG_CONST}, #ifdef HAVE_QUERY_CACHE - {"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, - SHOW_LONG_CONST}, - {"Qcache_free_memory", (char*) &query_cache.free_memory, - SHOW_LONG_CONST}, + {"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_CONST}, + {"Qcache_free_memory", (char*) &query_cache.free_memory, SHOW_LONG_CONST}, {"Qcache_hits", (char*) &query_cache.hits, SHOW_LONG}, {"Qcache_inserts", (char*) &query_cache.inserts, SHOW_LONG}, {"Qcache_lowmem_prunes", (char*) &query_cache.lowmem_prunes, SHOW_LONG}, {"Qcache_not_cached", (char*) &query_cache.refused, SHOW_LONG}, {"Qcache_queries_in_cache", (char*) &query_cache.queries_in_cache, SHOW_LONG_CONST}, - {"Qcache_total_blocks", (char*) &query_cache.total_blocks, - SHOW_LONG_CONST}, + {"Qcache_total_blocks", (char*) &query_cache.total_blocks, SHOW_LONG_CONST}, #endif /*HAVE_QUERY_CACHE*/ {"Questions", (char*) 0, SHOW_QUESTION}, {"Rpl_status", (char*) 0, SHOW_RPL_STATUS}, - {"Select_full_join", (char*) &select_full_join_count, SHOW_LONG}, - {"Select_full_range_join", (char*) &select_full_range_join_count, SHOW_LONG}, - {"Select_range", (char*) &select_range_count, SHOW_LONG}, - {"Select_range_check", (char*) &select_range_check_count, SHOW_LONG}, - {"Select_scan", (char*) &select_scan_count, SHOW_LONG}, + {"Select_full_join", (char*) offsetof(STATUS_VAR, select_full_join_count), SHOW_LONG_STATUS}, + {"Select_full_range_join", (char*) offsetof(STATUS_VAR, select_full_range_join_count), SHOW_LONG_STATUS}, + {"Select_range", (char*) 
offsetof(STATUS_VAR, select_range_count), SHOW_LONG_STATUS}, + {"Select_range_check", (char*) offsetof(STATUS_VAR, select_range_check_count), SHOW_LONG_STATUS}, + {"Select_scan", (char*) offsetof(STATUS_VAR, select_scan_count), SHOW_LONG_STATUS}, {"Slave_open_temp_tables", (char*) &slave_open_temp_tables, SHOW_LONG}, {"Slave_retried_transactions",(char*) 0, SHOW_SLAVE_RETRIED_TRANS}, {"Slave_running", (char*) 0, SHOW_SLAVE_RUNNING}, {"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG}, - {"Slow_queries", (char*) &long_query_count, SHOW_LONG}, - {"Sort_merge_passes", (char*) &filesort_merge_passes, SHOW_LONG}, - {"Sort_range", (char*) &filesort_range_count, SHOW_LONG}, - {"Sort_rows", (char*) &filesort_rows, SHOW_LONG}, - {"Sort_scan", (char*) &filesort_scan_count, SHOW_LONG}, + {"Slow_queries", (char*) offsetof(STATUS_VAR, long_query_count), SHOW_LONG_STATUS}, + {"Sort_merge_passes", (char*) offsetof(STATUS_VAR, filesort_merge_passes), SHOW_LONG_STATUS}, + {"Sort_range", (char*) offsetof(STATUS_VAR, filesort_range_count), SHOW_LONG_STATUS}, + {"Sort_rows", (char*) offsetof(STATUS_VAR, filesort_rows), SHOW_LONG_STATUS}, + {"Sort_scan", (char*) offsetof(STATUS_VAR, filesort_scan_count), SHOW_LONG_STATUS}, #ifdef HAVE_OPENSSL {"Ssl_accept_renegotiates", (char*) 0, SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE}, {"Ssl_accepts", (char*) 0, SHOW_SSL_CTX_SESS_ACCEPT}, @@ -5807,6 +6361,11 @@ struct show_var_st status_vars[]= { #endif /* HAVE_OPENSSL */ {"Table_locks_immediate", (char*) &locks_immediate, SHOW_LONG}, {"Table_locks_waited", (char*) &locks_waited, SHOW_LONG}, +#ifdef HAVE_MMAP + {"Tc_log_max_pages_used", (char*) &tc_log_max_pages_used, SHOW_LONG}, + {"Tc_log_page_size", (char*) &tc_log_page_size, SHOW_LONG}, + {"Tc_log_page_waits", (char*) &tc_log_page_waits, SHOW_LONG}, +#endif {"Threads_cached", (char*) &cached_thread_count, SHOW_LONG_CONST}, {"Threads_connected", (char*) &thread_count, SHOW_INT_CONST}, {"Threads_created", (char*) &thread_created, 
SHOW_LONG_CONST}, @@ -5891,10 +6450,13 @@ static void mysql_init_variables(void) /* Things reset to zero */ opt_skip_slave_start= opt_reckless_slave = 0; mysql_home[0]= pidfile_name[0]= log_error_file[0]= 0; - opt_log= opt_update_log= opt_bin_log= opt_slow_log= 0; + opt_log= opt_update_log= opt_slow_log= 0; + opt_bin_log= 0; opt_disable_networking= opt_skip_show_db=0; - opt_logname= opt_update_logname= opt_binlog_index_name= opt_slow_logname=0; + opt_logname= opt_update_logname= opt_binlog_index_name= opt_slow_logname= 0; + opt_tc_log_file= (char *)"tc.log"; // no hostname in tc_log file name ! opt_secure_auth= 0; + opt_secure_file_priv= 0; opt_bootstrap= opt_myisam_log= 0; mqh_used= 0; segfaulted= kill_in_progress= 0; @@ -5904,20 +6466,15 @@ static void mysql_init_variables(void) test_flags= select_errors= dropping_tables= ha_open_options=0; thread_count= thread_running= kill_cached_threads= wake_thread=0; slave_open_temp_tables= 0; - com_other= 0; cached_thread_count= 0; - bytes_sent= bytes_received= 0; opt_endinfo= using_udf_functions= 0; opt_using_transactions= using_update_log= 0; abort_loop= select_thread_in_use= signal_thread_in_use= 0; ready_to_exit= shutdown_in_progress= grant_option= 0; - long_query_count= aborted_threads= aborted_connects= 0; + aborted_threads= aborted_connects= 0; delayed_insert_threads= delayed_insert_writes= delayed_rows_in_use= 0; delayed_insert_errors= thread_created= 0; - filesort_rows= filesort_range_count= filesort_scan_count= 0; - filesort_merge_passes= select_range_check_count= select_range_count= 0; - select_scan_count= select_full_range_join_count= select_full_join_count= 0; - specialflag= opened_tables= created_tmp_tables= created_tmp_disk_tables= 0; + specialflag= 0; binlog_cache_use= binlog_cache_disk_use= 0; max_used_connections= slow_launch_threads = 0; mysqld_user= mysqld_chroot= opt_init_file= opt_bin_logname = 0; @@ -5925,7 +6482,8 @@ static void mysql_init_variables(void) errmesg= 0; mysqld_unix_port= 
opt_mysql_tmpdir= my_bind_addr_str= NullS; bzero((gptr) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list)); - bzero((gptr) &com_stat, sizeof(com_stat)); + bzero((char *) &global_status_var, sizeof(global_status_var)); + opt_large_pages= 0; key_map_full.set_all(); /* Character sets */ @@ -5933,6 +6491,7 @@ static void mysql_init_variables(void) files_charset_info= &my_charset_utf8_general_ci; national_charset_info= &my_charset_utf8_general_ci; table_alias_charset= &my_charset_bin; + character_set_filesystem= &my_charset_bin; opt_date_time_formats[0]= opt_date_time_formats[1]= opt_date_time_formats[2]= 0; @@ -5951,7 +6510,7 @@ static void mysql_init_variables(void) protocol_version= PROTOCOL_VERSION; what_to_log= ~ (1L << (uint) COM_TIME); refresh_version= flush_version= 1L; /* Increments on each reload */ - query_id= thread_id= 1L; + global_query_id= thread_id= 1L; strmov(server_version, MYSQL_SERVER_VERSION); myisam_recover_options_str= sql_mode_str= "OFF"; myisam_stats_method_str= "nulls_unequal"; @@ -5983,7 +6542,7 @@ static void mysql_init_variables(void) master_password= master_host= 0; master_info_file= (char*) "master.info", relay_log_info_file= (char*) "relay-log.info"; - master_ssl_key= master_ssl_cert= master_ssl_ca= + master_ssl_key= master_ssl_cert= master_ssl_ca= master_ssl_capath= master_ssl_cipher= 0; report_user= report_password = report_host= 0; /* TO BE DELETED */ opt_relay_logname= opt_relaylog_index_name= 0; @@ -5993,6 +6552,7 @@ static void mysql_init_variables(void) default_character_set_name= (char*) MYSQL_DEFAULT_CHARSET_NAME; default_collation_name= compiled_default_collation_name; sys_charset_system.value= (char*) system_charset_info->csname; + character_set_filesystem_name= (char*) "binary"; /* Set default values for some option variables */ @@ -6003,7 +6563,7 @@ static void mysql_init_variables(void) global_system_variables.max_join_size= (ulonglong) HA_POS_ERROR; max_system_variables.max_join_size= (ulonglong) HA_POS_ERROR; 
global_system_variables.old_passwords= 0; - + /* Default behavior for 4.1 and 5.0 is to treat NULL values as unequal when collecting index statistics for MyISAM tables. @@ -6026,17 +6586,13 @@ static void mysql_init_variables(void) #else have_innodb=SHOW_OPTION_NO; #endif -#ifdef HAVE_ISAM - have_isam=SHOW_OPTION_YES; -#else have_isam=SHOW_OPTION_NO; -#endif #ifdef HAVE_EXAMPLE_DB have_example_db= SHOW_OPTION_YES; #else have_example_db= SHOW_OPTION_NO; #endif -#ifdef HAVE_ARCHIVE_DB +#if defined(HAVE_ARCHIVE_DB) have_archive_db= SHOW_OPTION_YES; #else have_archive_db= SHOW_OPTION_NO; @@ -6046,6 +6602,11 @@ static void mysql_init_variables(void) #else have_blackhole_db= SHOW_OPTION_NO; #endif +#ifdef HAVE_FEDERATED_DB + have_federated_db= SHOW_OPTION_YES; +#else + have_federated_db= SHOW_OPTION_NO; +#endif #ifdef HAVE_CSV_DB have_csv_db= SHOW_OPTION_YES; #else @@ -6062,15 +6623,20 @@ static void mysql_init_variables(void) have_raid=SHOW_OPTION_NO; #endif #ifdef HAVE_OPENSSL - have_openssl=SHOW_OPTION_YES; + have_ssl=SHOW_OPTION_YES; #else - have_openssl=SHOW_OPTION_NO; + have_ssl=SHOW_OPTION_NO; #endif #ifdef HAVE_BROKEN_REALPATH have_symlink=SHOW_OPTION_NO; #else have_symlink=SHOW_OPTION_YES; #endif +#ifdef HAVE_DLOPEN + have_dlopen=SHOW_OPTION_YES; +#else + have_dlopen=SHOW_OPTION_NO; +#endif #ifdef HAVE_QUERY_CACHE have_query_cache=SHOW_OPTION_YES; #else @@ -6127,7 +6693,7 @@ static void mysql_init_variables(void) } -extern "C" my_bool +static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument) { @@ -6203,7 +6769,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), opt_update_log=1; break; case (int) OPT_BIN_LOG: - opt_bin_log=1; + opt_bin_log= test(argument != disabled_my_option); break; case (int) OPT_ERROR_LOG_FILE: opt_error_log= 1; @@ -6338,6 +6904,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), delay_key_write_options= (uint) 
DELAY_KEY_WRITE_NONE; myisam_concurrent_insert=0; myisam_recover_options= HA_RECOVER_NONE; + sp_automatic_privileges=0; my_use_symdir=0; ha_open_options&= ~(HA_OPEN_ABORT_IF_CRASHED | HA_OPEN_DELAY_KEY_WRITE); #ifdef HAVE_QUERY_CACHE @@ -6419,14 +6986,34 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case (int) OPT_STANDALONE: /* Dummy option for NT */ break; #endif + /* + The following change issues a deprecation warning if the slave + configuration is specified either in the my.cnf file or on + the command-line. See BUG#21490. + */ + case OPT_MASTER_HOST: + case OPT_MASTER_USER: + case OPT_MASTER_PASSWORD: + case OPT_MASTER_PORT: + case OPT_MASTER_CONNECT_RETRY: + case OPT_MASTER_SSL: + case OPT_MASTER_SSL_KEY: + case OPT_MASTER_SSL_CERT: + case OPT_MASTER_SSL_CAPATH: + case OPT_MASTER_SSL_CIPHER: + case OPT_MASTER_SSL_CA: + if (!slave_warning_issued) //only show the warning once + { + slave_warning_issued = true; + WARN_DEPRECATED(NULL, "5.2", "for replication startup options", + "'CHANGE MASTER'"); + } + break; case OPT_CONSOLE: if (opt_console) opt_error_log= 0; // Force logs to stdout break; case (int) OPT_FLUSH: -#ifdef HAVE_ISAM - nisam_flush=1; -#endif myisam_flush=1; flush_time=0; // No auto flush break; @@ -6486,6 +7073,11 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), global_system_variables.tx_isolation= (type-1); break; } + case OPT_MERGE: + if (opt_merge) + have_merge_db= SHOW_OPTION_YES; + else + have_merge_db= SHOW_OPTION_DISABLED; #ifdef HAVE_BERKELEY_DB case OPT_BDB_NOSYNC: /* Deprecated option */ @@ -6511,7 +7103,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *end; uint length= strlen(argument); long value= my_strntol(&my_charset_latin1, argument, length, 10, &end, &err); - if (test_if_int(argument,(uint) length, end, &my_charset_latin1)) + if (end == argument+length) berkeley_lock_scan_time= value; else { @@ -6534,17 +7126,6 @@ 
get_one_option(int optid, const struct my_option *opt __attribute__((unused)), have_berkeley_db= SHOW_OPTION_DISABLED; #endif break; - case OPT_MERGE: - have_merge_db= opt_merge ? SHOW_OPTION_YES : SHOW_OPTION_DISABLED; - break; - case OPT_ISAM: -#ifdef HAVE_ISAM - if (opt_isam) - have_isam= SHOW_OPTION_YES; - else - have_isam= SHOW_OPTION_DISABLED; -#endif - break; case OPT_NDBCLUSTER: #ifdef HAVE_NDBCLUSTER_DB if (opt_ndbcluster) @@ -6553,6 +7134,31 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), have_ndbcluster= SHOW_OPTION_DISABLED; #endif break; +#ifdef HAVE_NDBCLUSTER_DB + case OPT_NDB_MGMD: + case OPT_NDB_NODEID: + { + int len= my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len, + sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len, + "%s%s%s",opt_ndb_constrbuf_len > 0 ? ",":"", + optid == OPT_NDB_NODEID ? "nodeid=" : "", + argument); + opt_ndb_constrbuf_len+= len; + } + /* fall through to add the connectstring to the end + * and set opt_ndbcluster_connectstring + */ + case OPT_NDB_CONNECTSTRING: + if (opt_ndb_connectstring && opt_ndb_connectstring[0]) + my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len, + sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len, + "%s%s", opt_ndb_constrbuf_len > 0 ? ",":"", + opt_ndb_connectstring); + else + opt_ndb_constrbuf[opt_ndb_constrbuf_len]= 0; + opt_ndbcluster_connectstring= opt_ndb_constrbuf; + break; +#endif case OPT_INNODB: #ifdef HAVE_INNOBASE_DB if (opt_innodb) @@ -6570,9 +7176,6 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case OPT_INNODB_LOG_ARCHIVE: innobase_log_archive= argument ? test(atoi(argument)) : 1; break; - case OPT_INNODB_FAST_SHUTDOWN: - innobase_fast_shutdown= argument ? 
test(atoi(argument)) : 1; - break; #endif /* HAVE_INNOBASE_DB */ case OPT_MYISAM_RECOVER: { @@ -6594,10 +7197,28 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), ha_open_options|=HA_OPEN_ABORT_IF_CRASHED; break; } + case OPT_CONCURRENT_INSERT: + /* The following code is mainly here to emulate old behavior */ + if (!argument) /* --concurrent-insert */ + myisam_concurrent_insert= 1; + else if (argument == disabled_my_option) + myisam_concurrent_insert= 0; /* --skip-concurrent-insert */ + break; + case OPT_TC_HEURISTIC_RECOVER: + { + if ((tc_heuristic_recover=find_type(argument, + &tc_heuristic_recover_typelib, 2)) <=0) + { + fprintf(stderr, "Unknown option to tc-heuristic-recover: %s\n",argument); + exit(1); + } + } case OPT_MYISAM_STATS_METHOD: { - int method; ulong method_conv; + int method; + LINT_INIT(method_conv); + myisam_stats_method_str= argument; if ((method=find_type(argument, &myisam_stats_method_typelib, 2)) <= 0) { @@ -6654,7 +7275,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), } /* Initiates DEBUG - but no debugging here ! */ -extern "C" gptr * +static gptr * mysql_getopt_value(const char *keyname, uint key_length, const struct my_option *option) { @@ -6683,7 +7304,7 @@ mysql_getopt_value(const char *keyname, uint key_length, } -void option_error_reporter(enum loglevel level, const char *format, ...) +static void option_error_reporter(enum loglevel level, const char *format, ...) { va_list args; va_start(args, format); @@ -6724,22 +7345,6 @@ static void get_options(int argc,char **argv) !opt_slow_log) sql_print_warning("options --log-slow-admin-statements and --log-queries-not-using-indexes have no effect if --log-slow-queries is not set"); - /* - Check that the default storage engine is actually available. 
- */ - if (!ha_storage_engine_is_enabled((enum db_type) - global_system_variables.table_type)) - { - if (!opt_bootstrap) - { - sql_print_error("Default storage engine (%s) is not available", - ha_get_storage_engine((enum db_type) - global_system_variables.table_type)); - exit(1); - } - global_system_variables.table_type= DB_TYPE_MYISAM; - } - if (argc > 0) { fprintf(stderr, "%s: Too many arguments (first extra is '%s').\nUse --help to get a list of available options\n", my_progname, *argv); @@ -6789,8 +7394,6 @@ static void get_options(int argc,char **argv) my_default_record_cache_size=global_system_variables.read_buff_size; myisam_max_temp_length= (my_off_t) global_system_variables.myisam_max_sort_file_size; - myisam_max_extra_temp_length= - (my_off_t) global_system_variables.myisam_max_extra_sort_file_size; /* Set global variables based on startup options */ myisam_block_size=(uint) 1 << my_bit_log2(opt_myisam_block_size); @@ -6903,6 +7506,7 @@ static void fix_paths(void) CHARSET_DIR, NullS); } (void) my_load_path(mysql_charsets_dir, mysql_charsets_dir, buff); + convert_dirname(mysql_charsets_dir, mysql_charsets_dir, NullS); charsets_dir=mysql_charsets_dir; if (init_tmpdir(&mysql_tmpdir_list, opt_mysql_tmpdir)) @@ -6914,6 +7518,16 @@ static void fix_paths(void) exit(1); } #endif /* HAVE_REPLICATION */ + /* + Convert the secure-file-priv option to system format, allowing + a quick strcmp to check if read or write is in an allowed dir + */ + if (opt_secure_file_priv) + { + convert_dirname(buff, opt_secure_file_priv, NullS); + my_free(opt_secure_file_priv, MYF(0)); + opt_secure_file_priv= my_strdup(buff, MYF(MY_FAE)); + } } @@ -7020,6 +7634,8 @@ static int test_if_case_insensitive(const char *dir_name) /* Create file to store pid number */ +#ifndef EMBEDDED_LIBRARY + static void create_pid_file() { File file; @@ -7037,20 +7653,79 @@ static void create_pid_file() (void) my_close(file, MYF(0)); } sql_perror("Can't start server: can't create PID file"); - exit(1); + 
exit(1); } +#endif /* EMBEDDED_LIBRARY */ + +/* Clear most status variables */ +void refresh_status(THD *thd) +{ + pthread_mutex_lock(&LOCK_status); + + /* Add thread's status variabes to global status */ + add_to_status(&global_status_var, &thd->status_var); + + /* Reset thread's status variables */ + bzero((char*) &thd->status_var, sizeof(thd->status_var)); + + /* Reset some global variables */ + for (struct show_var_st *ptr=status_vars; ptr->name; ptr++) + { + if (ptr->type == SHOW_LONG) + *(ulong*) ptr->value= 0; + } + + /* Reset the counters of all key caches (default and named). */ + process_key_caches(reset_key_cache_counters); + pthread_mutex_unlock(&LOCK_status); + + /* + Set max_used_connections to the number of currently open + connections. Lock LOCK_thread_count out of LOCK_status to avoid + deadlocks. Status reset becomes not atomic, but status data is + not exact anyway. + */ + pthread_mutex_lock(&LOCK_thread_count); + max_used_connections= thread_count-delayed_insert_threads; + pthread_mutex_unlock(&LOCK_thread_count); +} + + +/***************************************************************************** + Instantiate have_xyx for missing storage engines +*****************************************************************************/ +#undef have_berkeley_db +#undef have_innodb +#undef have_ndbcluster +#undef have_example_db +#undef have_archive_db +#undef have_csv_db +#undef have_federated_db +#undef have_partition_db +#undef have_blackhole_db + +SHOW_COMP_OPTION have_berkeley_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_innodb= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_ndbcluster= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_example_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_archive_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_csv_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_federated_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_partition_db= SHOW_OPTION_NO; +SHOW_COMP_OPTION have_blackhole_db= SHOW_OPTION_NO; 
/***************************************************************************** Instantiate templates *****************************************************************************/ -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION /* Used templates */ template class I_List<THD>; template class I_List_iterator<THD>; template class I_List<i_string>; template class I_List<i_string_pair>; template class I_List<NAMED_LIST>; -FIX_GCC_LINKING_PROBLEM +template class I_List<Statement>; +template class I_List_iterator<Statement>; #endif diff --git a/sql/mysqld_suffix.h b/sql/mysqld_suffix.h index 405c5d855b7..b348f272db1 100644 --- a/sql/mysqld_suffix.h +++ b/sql/mysqld_suffix.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/net_serv.cc b/sql/net_serv.cc index a5a05d381cd..ef929bc67f0 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -52,6 +51,10 @@ #include <signal.h> #include <errno.h> +#ifdef __NETWARE__ +#include <sys/select.h> +#endif + #ifdef EMBEDDED_LIBRARY #undef MYSQL_SERVER #undef MYSQL_CLIENT @@ -72,7 +75,7 @@ /* The following is because alarms doesn't work on windows. 
*/ #define NO_ALARM #endif - + #ifndef NO_ALARM #include "my_pthread.h" void sql_print_error(const char *format,...); @@ -83,7 +86,6 @@ void sql_print_error(const char *format,...); #include "thr_alarm.h" #ifdef MYSQL_SERVER -#define USE_QUERY_CACHE /* The following variables/functions should really not be declared extern, but as it's hard to include mysql_priv.h here, we have to @@ -92,12 +94,19 @@ void sql_print_error(const char *format,...); extern uint test_flags; extern ulong bytes_sent, bytes_received, net_big_packet_count; extern pthread_mutex_t LOCK_bytes_sent , LOCK_bytes_received; +#ifndef MYSQL_INSTANCE_MANAGER +#ifdef HAVE_QUERY_CACHE +#define USE_QUERY_CACHE +extern void query_cache_init_query(NET *net); extern void query_cache_insert(NET *net, const char *packet, ulong length); -#else -#undef statistic_add -#undef statistic_increment -#define statistic_add(A,B,C) -#define statistic_increment(A,B) +#endif // HAVE_QUERY_CACHE +#define update_statistics(A) A +#endif /* MYSQL_INSTANCE_MANGER */ +#endif /* defined(MYSQL_SERVER) && !defined(MYSQL_INSTANCE_MANAGER) */ + +#if !defined(MYSQL_SERVER) || defined(MYSQL_INSTANCE_MANAGER) +#define update_statistics(A) +#define thd_increment_bytes_sent(N) #endif #define TEST_BLOCKING 8 @@ -118,7 +127,7 @@ my_bool my_net_init(NET *net, Vio* vio) DBUG_RETURN(1); net->buff_end=net->buff+net->max_packet; net->vio = vio; - net->no_send_ok = 0; + net->no_send_ok= net->no_send_eof= net->no_send_error= 0; net->error=0; net->return_errno=0; net->return_status=0; net->pkt_nr=net->compress_pkt_nr=0; net->write_pos=net->read_pos = net->buff; @@ -126,7 +135,11 @@ my_bool my_net_init(NET *net, Vio* vio) net->compress=0; net->reading_or_writing=0; net->where_b = net->remain_in_buf=0; net->last_errno=0; - net->query_cache_query=0; +#ifdef USE_QUERY_CACHE + query_cache_init_query(net); +#else + net->query_cache_query= 0; +#endif net->report_error= 0; if (vio != 0) /* If real connection */ @@ -191,30 +204,136 @@ my_bool 
net_realloc(NET *net, ulong length) DBUG_RETURN(0); } - /* Remove unwanted characters from connection */ + +/* + Check if there is any data to be read from the socket + + SYNOPSIS + net_data_is_ready() + sd socket descriptor + + DESCRIPTION + Check if there is any data to be read from the socket. + + RETURN VALUES + 0 No data to read + 1 Data or EOF to read + -1 Don't know if data is ready or not +*/ + +#if !defined(EMBEDDED_LIBRARY) + +static int net_data_is_ready(my_socket sd) +{ +#ifdef HAVE_POLL + struct pollfd ufds; + int res; + + ufds.fd= sd; + ufds.events= POLLIN | POLLPRI; + if (!(res= poll(&ufds, 1, 0))) + return 0; + if (res < 0 || !(ufds.revents & (POLLIN | POLLPRI))) + return 0; + return 1; +#else + fd_set sfds; + struct timeval tv; + int res; + +#ifndef __WIN__ + /* Windows uses an _array_ of 64 fd's as default, so it's safe */ + if (sd >= FD_SETSIZE) + return -1; +#define NET_DATA_IS_READY_CAN_RETURN_MINUS_ONE +#endif + + FD_ZERO(&sfds); + FD_SET(sd, &sfds); + + tv.tv_sec= tv.tv_usec= 0; + + if ((res= select(sd+1, &sfds, NULL, NULL, &tv)) < 0) + return 0; + else + return test(res ? FD_ISSET(sd, &sfds) : 0); +#endif /* HAVE_POLL */ +} + +#endif /* EMBEDDED_LIBRARY */ + +/* + Remove unwanted characters from connection + and check if disconnected + + SYNOPSIS + net_clear() + net NET handler + + DESCRIPTION + Read from socket until there is nothing more to read. Discard + what is read. + + If there is anything when to read 'net_clear' is called this + normally indicates an error in the protocol. + + When connection is properly closed (for TCP it means with + a FIN packet), then select() considers a socket "ready to read", + in the sense that there's EOF to read, but read() returns 0. 
+ +*/ void net_clear(NET *net) { +#if !defined(EMBEDDED_LIBRARY) + int count, ready; +#endif DBUG_ENTER("net_clear"); -#if !defined(EXTRA_DEBUG) && !defined(EMBEDDED_LIBRARY) + +#if !defined(EMBEDDED_LIBRARY) + while((ready= net_data_is_ready(net->vio->sd)) > 0) + { + /* The socket is ready */ + if ((count= vio_read(net->vio, (char*) (net->buff), + (uint32) net->max_packet)) > 0) + { + DBUG_PRINT("info",("skipped %d bytes from file: %s", + count, vio_description(net->vio))); +#if defined(EXTRA_DEBUG) && (MYSQL_VERSION_ID < 51000) + fprintf(stderr,"skipped %d bytes from file: %s\n", + count, vio_description(net->vio)); +#endif + } + else + { + DBUG_PRINT("info",("socket ready but only EOF to read - disconnected")); + net->error= 2; + break; + } + } +#ifdef NET_DATA_IS_READY_CAN_RETURN_MINUS_ONE + /* 'net_data_is_ready' returned "don't know" */ + if (ready == -1) { - int count; /* One may get 'unused' warn */ + /* Read unblocking to clear net */ my_bool old_mode; if (!vio_blocking(net->vio, FALSE, &old_mode)) { - while ((count = vio_read(net->vio, (char*) (net->buff), - (uint32) net->max_packet)) > 0) + while ((count= vio_read(net->vio, (char*) (net->buff), + (uint32) net->max_packet)) > 0) DBUG_PRINT("info",("skipped %d bytes from file: %s", count, vio_description(net->vio))); vio_blocking(net->vio, TRUE, &old_mode); } } -#endif /* EXTRA_DEBUG */ +#endif +#endif net->pkt_nr=net->compress_pkt_nr=0; /* Ready for new command */ net->write_pos=net->buff; DBUG_VOID_RETURN; } + /* Flush write_buffer if not empty. 
*/ my_bool net_flush(NET *net) @@ -445,9 +564,8 @@ net_real_write(NET *net,const char *packet,ulong len) my_bool net_blocking = vio_is_blocking(net->vio); DBUG_ENTER("net_real_write"); -#if defined(MYSQL_SERVER) && defined(HAVE_QUERY_CACHE) - if (net->query_cache_query != 0) - query_cache_insert(net, packet, len); +#if defined(MYSQL_SERVER) && defined(USE_QUERY_CACHE) + query_cache_insert(net, packet, len); #endif if (net->error == 2) @@ -484,7 +602,10 @@ net_real_write(NET *net,const char *packet,ulong len) } #endif /* HAVE_COMPRESS */ - /* DBUG_DUMP("net",packet,len); */ +#ifdef DEBUG_DATA_PACKETS + DBUG_DUMP("data",packet,len); +#endif + #ifndef NO_ALARM thr_alarm_init(&alarmed); if (net_blocking) @@ -554,7 +675,7 @@ net_real_write(NET *net,const char *packet,ulong len) break; } pos+=length; - statistic_add(bytes_sent,length,&LOCK_bytes_sent); + update_statistics(thd_increment_bytes_sent(length)); } #ifndef __WIN__ end: @@ -625,7 +746,7 @@ static my_bool my_net_skip_rest(NET *net, uint32 remain, thr_alarm_t *alarmed, DBUG_PRINT("enter",("bytes_to_skip: %u", (uint) remain)); /* The following is good for debugging */ - statistic_increment(net_big_packet_count,&LOCK_bytes_received); + update_statistics(thd_increment_net_big_packet_count(1)); if (!thr_alarm_in_use(alarmed)) { @@ -641,7 +762,7 @@ static my_bool my_net_skip_rest(NET *net, uint32 remain, thr_alarm_t *alarmed, uint length= min(remain, net->max_packet); if (net_safe_read(net, (char*) net->buff, length, alarmed)) DBUG_RETURN(1); - statistic_add(bytes_received, length, &LOCK_bytes_received); + update_statistics(thd_increment_bytes_received(length)); remain -= (uint32) length; } if (old != MAX_PACKET_LENGTH) @@ -697,7 +818,7 @@ my_real_read(NET *net, ulong *complen) { my_bool interrupted = vio_should_retry(net->vio); - DBUG_PRINT("info",("vio_read returned %d, errno: %d", + DBUG_PRINT("info",("vio_read returned %ld, errno: %d", length, vio_errno(net->vio))); #if (!defined(__WIN__) && !defined(__EMX__) && 
!defined(OS2)) || defined(MYSQL_SERVER) /* @@ -766,7 +887,7 @@ my_real_read(NET *net, ulong *complen) } remain -= (uint32) length; pos+= (ulong) length; - statistic_add(bytes_received,(ulong) length,&LOCK_bytes_received); + update_statistics(thd_increment_bytes_received(length)); } if (i == 0) { /* First parts is packet length */ diff --git a/sql/nt_servc.h b/sql/nt_servc.h index 6d74eaccea2..a3c12569114 100644 --- a/sql/nt_servc.h +++ b/sql/nt_servc.h @@ -48,8 +48,9 @@ class NTService //service install / un-install - BOOL Install(int startType,LPCSTR szInternName,LPCSTR szDisplayName,LPCSTR szFullPath, - LPCSTR szAccountName=NULL,LPCSTR szPassword=NULL); + BOOL Install(int startType,LPCSTR szInternName,LPCSTR szDisplayName, + LPCSTR szFullPath, LPCSTR szAccountName=NULL, + LPCSTR szPassword=NULL); BOOL SeekStatus(LPCSTR szInternName, int OperationType); BOOL Remove(LPCSTR szInternName); BOOL IsService(LPCSTR ServiceName); diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 01b366077b0..c3aa1f52556 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -23,13 +22,25 @@ */ +/* + Classes in this file are used in the following way: + 1. For a selection condition a tree of SEL_IMERGE/SEL_TREE/SEL_ARG objects + is created. #of rows in table and index statistics are ignored at this + step. + 2. Created SEL_TREE and index stats data are used to construct a + TABLE_READ_PLAN-derived object (TRP_*). 
Several 'candidate' table read + plans may be created. + 3. The least expensive table read plan is used to create a tree of + QUICK_SELECT_I-derived objects which are later used for row retrieval. + QUICK_RANGEs are also created in this step. +*/ + #ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation #endif #include "mysql_priv.h" #include <m_ctype.h> -#include <nisam.h> #include "sql_select.h" #ifndef EXTRA_DEBUG @@ -37,6 +48,11 @@ #define test_use_count(A) {} #endif +/* + Convert double value to #rows. Currently this does floor(), and we + might consider using round() instead. +*/ +#define double2rows(x) ((ha_rows)(x)) static int sel_cmp(Field *f,char *a,char *b,uint8 a_flag,uint8 b_flag); @@ -262,6 +278,8 @@ public: } inline void merge_flags(SEL_ARG *arg) { maybe_flag|=arg->maybe_flag; } inline void maybe_smaller() { maybe_flag=1; } + /* Return true iff it's a single-point null interval */ + inline bool is_null_interval() { return maybe_null && max_value[0] == 1; } inline int cmp_min_to_min(SEL_ARG* arg) { return sel_cmp(field,min_value, arg->min_value, min_flag, arg->min_flag); @@ -354,8 +372,7 @@ public: min_value=arg->max_value; min_flag=arg->max_flag & NEAR_MAX ? 
0 : NEAR_MIN; } - void store(uint length,char **min_key,uint min_key_flag, - char **max_key, uint max_key_flag) + void store_min(uint length,char **min_key,uint min_key_flag) { if ((min_flag & GEOM_FLAG) || (!(min_flag & NO_MIN_RANGE) && @@ -370,6 +387,11 @@ public: memcpy(*min_key,min_value,length); (*min_key)+= length; } + } + void store(uint length,char **min_key,uint min_key_flag, + char **max_key, uint max_key_flag) + { + store_min(length, min_key, min_key_flag); if (!(max_flag & NO_MAX_RANGE) && !(max_key_flag & (NO_MAX_RANGE | NEAR_MAX))) { @@ -454,33 +476,84 @@ public: SEL_ARG *clone_tree(struct st_qsel_param *param); }; +class SEL_IMERGE; + class SEL_TREE :public Sql_alloc { public: enum Type { IMPOSSIBLE, ALWAYS, MAYBE, KEY, KEY_SMALLER } type; SEL_TREE(enum Type type_arg) :type(type_arg) {} - SEL_TREE() :type(KEY) { bzero((char*) keys,sizeof(keys));} + SEL_TREE() :type(KEY) + { + keys_map.clear_all(); + bzero((char*) keys,sizeof(keys)); + } SEL_ARG *keys[MAX_KEY]; + key_map keys_map; /* bitmask of non-NULL elements in keys */ + + /* + Possible ways to read rows using index_merge. The list is non-empty only + if type==KEY. Currently can be non empty only if keys_map.is_clear_all(). 
+ */ + List<SEL_IMERGE> merges; + + /* The members below are filled/used only after get_mm_tree is done */ + key_map ror_scans_map; /* bitmask of ROR scan-able elements in keys */ + uint n_ror_scans; /* number of set bits in ror_scans_map */ + + struct st_ror_scan_info **ror_scans; /* list of ROR key scans */ + struct st_ror_scan_info **ror_scans_end; /* last ROR scan */ + /* Note that #records for each key scan is stored in table->quick_rows */ }; typedef struct st_qsel_param { THD *thd; TABLE *table; - KEY_PART *key_parts,*key_parts_end,*key[MAX_KEY]; - MEM_ROOT *mem_root; + KEY_PART *key_parts,*key_parts_end; + KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */ + MEM_ROOT *mem_root, *old_root; table_map prev_tables,read_tables,current_table; - uint baseflag, keys, max_key_part, range_count; + uint baseflag, max_key_part, range_count; + + uint keys; /* number of keys used in the query */ + + /* used_key_no -> table_key_no translation table */ uint real_keynr[MAX_KEY]; + char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH], max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; bool quick; // Don't calulate possible keys COND *cond; + + uint fields_bitmap_size; + MY_BITMAP needed_fields; /* bitmask of fields needed by the query */ + MY_BITMAP tmp_covered_fields; + + key_map *needed_reg; /* ptr to SQL_SELECT::needed_reg */ + + uint *imerge_cost_buff; /* buffer for index_merge cost estimates */ + uint imerge_cost_buff_size; /* size of the buffer */ + + /* TRUE if last checked tree->key can be used for ROR-scan */ + bool is_ror_scan; + /* Number of ranges in the last checked tree->key */ + uint n_ranges; + uint8 first_null_comp; /* first null component if any, 0 - otherwise */ /* Number of SEL_ARG objects allocated by SEL_ARG::clone_tree operations */ uint alloced_sel_args; } PARAM; +class TABLE_READ_PLAN; + class TRP_RANGE; + class TRP_ROR_INTERSECT; + class TRP_ROR_UNION; + class TRP_ROR_INDEX_MERGE; + class TRP_GROUP_MIN_MAX; + +struct st_ror_scan_info; + static 
SEL_TREE * get_mm_parts(PARAM *param,COND *cond_func,Field *field, Item_func::Functype type,Item *value, Item_result cmp_type); @@ -488,33 +561,276 @@ static SEL_ARG *get_mm_leaf(PARAM *param,COND *cond_func,Field *field, KEY_PART *key_part, Item_func::Functype type,Item *value); static SEL_TREE *get_mm_tree(PARAM *param,COND *cond); + +static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts); static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree); static ha_rows check_quick_keys(PARAM *param,uint index,SEL_ARG *key_tree, char *min_key,uint min_key_flag, char *max_key, uint max_key_flag); -static QUICK_SELECT *get_quick_select(PARAM *param,uint index, - SEL_ARG *key_tree); +QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index, + SEL_ARG *key_tree, + MEM_ROOT *alloc = NULL); +static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, + bool index_read_must_be_used, + double read_time); +static +TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree, + double read_time, + bool *are_all_covering); +static +TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param, + SEL_TREE *tree, + double read_time); +static +TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, + double read_time); +static +TRP_GROUP_MIN_MAX *get_best_group_min_max(PARAM *param, SEL_TREE *tree); +static double get_index_only_read_time(const PARAM* param, ha_rows records, + int keynr); + #ifndef DBUG_OFF -static void print_quick(QUICK_SELECT *quick,const key_map* needed_reg); +static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map, + const char *msg); +static void print_ror_scans_arr(TABLE *table, const char *msg, + struct st_ror_scan_info **start, + struct st_ror_scan_info **end); +static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg); #endif + static SEL_TREE *tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_TREE *tree_or(PARAM 
*param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_ARG *sel_add(SEL_ARG *key1,SEL_ARG *key2); static SEL_ARG *key_or(PARAM *param, SEL_ARG *key1,SEL_ARG *key2); static SEL_ARG *key_and(PARAM *param, SEL_ARG *key1,SEL_ARG *key2,uint clone_flag); static bool get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1); -static bool get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, +bool get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, SEL_ARG *key_tree,char *min_key,uint min_key_flag, char *max_key,uint max_key_flag); static bool eq_tree(SEL_ARG* a,SEL_ARG *b); static SEL_ARG null_element(SEL_ARG::IMPOSSIBLE); -static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length); +static bool null_part_in_key(KEY_PART *key_part, const char *key, + uint length); +bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param); + + +/* + SEL_IMERGE is a list of possible ways to do index merge, i.e. it is + a condition in the following form: + (t_1||t_2||...||t_N) && (next) + + where all t_i are SEL_TREEs, next is another SEL_IMERGE and no pair + (t_i,t_j) contains SEL_ARGS for the same index. + + SEL_TREE contained in SEL_IMERGE always has merges=NULL. + + This class relies on memory manager to do the cleanup. 
+*/ + +class SEL_IMERGE : public Sql_alloc +{ + enum { PREALLOCED_TREES= 10}; +public: + SEL_TREE *trees_prealloced[PREALLOCED_TREES]; + SEL_TREE **trees; /* trees used to do index_merge */ + SEL_TREE **trees_next; /* last of these trees */ + SEL_TREE **trees_end; /* end of allocated space */ + + SEL_ARG ***best_keys; /* best keys to read in SEL_TREEs */ + + SEL_IMERGE() : + trees(&trees_prealloced[0]), + trees_next(trees), + trees_end(trees + PREALLOCED_TREES) + {} + int or_sel_tree(PARAM *param, SEL_TREE *tree); + int or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree); + int or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge); +}; + + +/* + Add SEL_TREE to this index_merge without any checks, + + NOTES + This function implements the following: + (x_1||...||x_N) || t = (x_1||...||x_N||t), where x_i, t are SEL_TREEs + + RETURN + 0 - OK + -1 - Out of memory. +*/ + +int SEL_IMERGE::or_sel_tree(PARAM *param, SEL_TREE *tree) +{ + if (trees_next == trees_end) + { + const int realloc_ratio= 2; /* Double size for next round */ + uint old_elements= (trees_end - trees); + uint old_size= sizeof(SEL_TREE**) * old_elements; + uint new_size= old_size * realloc_ratio; + SEL_TREE **new_trees; + if (!(new_trees= (SEL_TREE**)alloc_root(param->mem_root, new_size))) + return -1; + memcpy(new_trees, trees, old_size); + trees= new_trees; + trees_next= trees + old_elements; + trees_end= trees + old_elements * realloc_ratio; + } + *(trees_next++)= tree; + return 0; +} + + +/* + Perform OR operation on this SEL_IMERGE and supplied SEL_TREE new_tree, + combining new_tree with one of the trees in this SEL_IMERGE if they both + have SEL_ARGs for the same key. + + SYNOPSIS + or_sel_tree_with_checks() + param PARAM from SQL_SELECT::test_quick_select + new_tree SEL_TREE with type KEY or KEY_SMALLER. 
+ + NOTES + This does the following: + (t_1||...||t_k)||new_tree = + either + = (t_1||...||t_k||new_tree) + or + = (t_1||....||(t_j|| new_tree)||...||t_k), + + where t_i, y are SEL_TREEs. + new_tree is combined with the first t_j it has a SEL_ARG on common + key with. As a consequence of this, choice of keys to do index_merge + read may depend on the order of conditions in WHERE part of the query. + + RETURN + 0 OK + 1 One of the trees was combined with new_tree to SEL_TREE::ALWAYS, + and (*this) should be discarded. + -1 An error occurred. +*/ + +int SEL_IMERGE::or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree) +{ + for (SEL_TREE** tree = trees; + tree != trees_next; + tree++) + { + if (sel_trees_can_be_ored(*tree, new_tree, param)) + { + *tree = tree_or(param, *tree, new_tree); + if (!*tree) + return 1; + if (((*tree)->type == SEL_TREE::MAYBE) || + ((*tree)->type == SEL_TREE::ALWAYS)) + return 1; + /* SEL_TREE::IMPOSSIBLE is impossible here */ + return 0; + } + } + + /* New tree cannot be combined with any of existing trees. */ + return or_sel_tree(param, new_tree); +} + + +/* + Perform OR operation on this index_merge and supplied index_merge list. + + RETURN + 0 - OK + 1 - One of conditions in result is always TRUE and this SEL_IMERGE + should be discarded. + -1 - An error occurred +*/ + +int SEL_IMERGE::or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge) +{ + for (SEL_TREE** tree= imerge->trees; + tree != imerge->trees_next; + tree++) + { + if (or_sel_tree_with_checks(param, *tree)) + return 1; + } + return 0; +} + + +/* + Perform AND operation on two index_merge lists and store result in *im1. +*/ + +inline void imerge_list_and_list(List<SEL_IMERGE> *im1, List<SEL_IMERGE> *im2) +{ + im1->concat(im2); +} + + +/* + Perform OR operation on 2 index_merge lists, storing result in first list. + + NOTES + The following conversion is implemented: + (a_1 &&...&& a_N)||(b_1 &&...&& b_K) = AND_i,j(a_i || b_j) => + => (a_1||b_1). + + i.e. 
all conjuncts except the first one are currently dropped. + This is done to avoid producing N*K ways to do index_merge. + + If (a_1||b_1) produce a condition that is always TRUE, NULL is returned + and index_merge is discarded (while it is actually possible to try + harder). + + As a consequence of this, choice of keys to do index_merge read may depend + on the order of conditions in WHERE part of the query. + + RETURN + 0 OK, result is stored in *im1 + other Error, both passed lists are unusable +*/ + +int imerge_list_or_list(PARAM *param, + List<SEL_IMERGE> *im1, + List<SEL_IMERGE> *im2) +{ + SEL_IMERGE *imerge= im1->head(); + im1->empty(); + im1->push_back(imerge); + + return imerge->or_sel_imerge_with_checks(param, im2->head()); +} + + +/* + Perform OR operation on index_merge list and key tree. + + RETURN + 0 OK, result is stored in *im1. + other Error +*/ + +int imerge_list_or_tree(PARAM *param, + List<SEL_IMERGE> *im1, + SEL_TREE *tree) +{ + SEL_IMERGE *imerge; + List_iterator<SEL_IMERGE> it(*im1); + while ((imerge= it++)) + { + if (imerge->or_sel_tree_with_checks(param, tree)) + it.remove(); + } + return im1->is_empty(); +} /*************************************************************************** -** Basic functions for SQL_SELECT and QUICK_SELECT +** Basic functions for SQL_SELECT and QUICK_RANGE_SELECT ***************************************************************************/ /* make a select from mysql info @@ -524,13 +840,16 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length); */ SQL_SELECT *make_select(TABLE *head, table_map const_tables, - table_map read_tables, COND *conds, int *error) + table_map read_tables, COND *conds, + bool allow_null_cond, + int *error) { SQL_SELECT *select; DBUG_ENTER("make_select"); *error=0; - if (!conds) + + if (!conds && !allow_null_cond) DBUG_RETURN(0); if (!(select= new SQL_SELECT)) { @@ -570,7 +889,7 @@ void SQL_SELECT::cleanup() free_cond=0; delete cond; cond= 0; - } + } 
close_cached_file(&file); } @@ -582,11 +901,29 @@ SQL_SELECT::~SQL_SELECT() #undef index // Fix for Unixware 7 -QUICK_SELECT::QUICK_SELECT(THD *thd, TABLE *table, uint key_nr, bool no_alloc) - :dont_free(0),sorted(0),error(0),index(key_nr),max_used_key_length(0), - used_key_parts(0), head(table), it(ranges),range(0) +QUICK_SELECT_I::QUICK_SELECT_I() + :max_used_key_length(0), + used_key_parts(0) +{} + +QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr, + bool no_alloc, MEM_ROOT *parent_alloc) + :dont_free(0),error(0),free_file(0),in_range(0),cur_range(NULL),last_range(0) { - if (!no_alloc) + sorted= 0; + index= key_nr; + head= table; + key_part_info= head->key_info[index].key_part; + my_init_dynamic_array(&ranges, sizeof(QUICK_RANGE*), 16, 16); + + /* 'thd' is not accessible in QUICK_RANGE_SELECT::reset(). */ + multi_range_bufsiz= thd->variables.read_rnd_buff_size; + multi_range_count= thd->variables.multi_range_count; + multi_range_length= 0; + multi_range= NULL; + multi_range_buff= NULL; + + if (!no_alloc && !parent_alloc) { // Allocates everything through the internal memroot init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); @@ -594,21 +931,470 @@ QUICK_SELECT::QUICK_SELECT(THD *thd, TABLE *table, uint key_nr, bool no_alloc) } else bzero((char*) &alloc,sizeof(alloc)); - file=head->file; - record=head->record[0]; - init(); + file= head->file; + record= head->record[0]; } -QUICK_SELECT::~QUICK_SELECT() + +int QUICK_RANGE_SELECT::init() { + DBUG_ENTER("QUICK_RANGE_SELECT::init"); + + if (file->inited != handler::NONE) + file->ha_index_or_rnd_end(); + DBUG_RETURN(error= file->ha_index_init(index)); +} + + +void QUICK_RANGE_SELECT::range_end() +{ + if (file->inited != handler::NONE) + file->ha_index_or_rnd_end(); +} + + +QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT() +{ + DBUG_ENTER("QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT"); if (!dont_free) { - if (file->inited) - file->ha_index_end(); + /* file is NULL for CPK scan on covering 
ROR-intersection */ + if (file) + { + range_end(); + if (head->key_read) + { + head->key_read= 0; + file->extra(HA_EXTRA_NO_KEYREAD); + } + if (free_file) + { + DBUG_PRINT("info", ("Freeing separate handler 0x%lx (free: %d)", (long) file, + free_file)); + file->reset(); + file->external_lock(current_thd, F_UNLCK); + file->close(); + } + } + delete_dynamic(&ranges); /* ranges are allocated in alloc */ free_root(&alloc,MYF(0)); } + if (multi_range) + my_free((char*) multi_range, MYF(0)); + if (multi_range_buff) + my_free((char*) multi_range_buff, MYF(0)); + DBUG_VOID_RETURN; +} + + +QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT(THD *thd_param, + TABLE *table) + :pk_quick_select(NULL), thd(thd_param) +{ + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT"); + index= MAX_KEY; + head= table; + bzero(&read_record, sizeof(read_record)); + init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); + DBUG_VOID_RETURN; +} + +int QUICK_INDEX_MERGE_SELECT::init() +{ + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::init"); + DBUG_RETURN(0); +} + +int QUICK_INDEX_MERGE_SELECT::reset() +{ + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::reset"); + DBUG_RETURN(read_keys_and_merge()); +} + +bool +QUICK_INDEX_MERGE_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick_sel_range) +{ + /* + Save quick_select that does scan on clustered primary key as it will be + processed separately. 
+ */ + if (head->file->primary_key_is_clustered() && + quick_sel_range->index == head->s->primary_key) + pk_quick_select= quick_sel_range; + else + return quick_selects.push_back(quick_sel_range); + return 0; +} + +QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT() +{ + List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects); + QUICK_RANGE_SELECT* quick; + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT"); + quick_it.rewind(); + while ((quick= quick_it++)) + quick->file= NULL; + quick_selects.delete_elements(); + delete pk_quick_select; + free_root(&alloc,MYF(0)); + DBUG_VOID_RETURN; +} + + +QUICK_ROR_INTERSECT_SELECT::QUICK_ROR_INTERSECT_SELECT(THD *thd_param, + TABLE *table, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc) + : cpk_quick(NULL), thd(thd_param), need_to_fetch_row(retrieve_full_rows), + scans_inited(FALSE) +{ + index= MAX_KEY; + head= table; + record= head->record[0]; + if (!parent_alloc) + init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); + else + bzero(&alloc, sizeof(MEM_ROOT)); + last_rowid= (byte*)alloc_root(parent_alloc? parent_alloc : &alloc, + head->file->ref_length); +} + + +/* + Do post-constructor initialization. + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::init() + + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_INTERSECT_SELECT::init() +{ + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::init"); + /* Check if last_rowid was successfully allocated in ctor */ + DBUG_RETURN(!last_rowid); +} + + +/* + Initialize this quick select to be a ROR-merged scan. + + SYNOPSIS + QUICK_RANGE_SELECT::init_ror_merged_scan() + reuse_handler If TRUE, use head->file, otherwise create a separate + handler object + + NOTES + This function creates and prepares for subsequent use a separate handler + object if it can't reuse head->file. The reason for this is that during + ROR-merge several key scans are performed simultaneously, and a single + handler is only capable of preserving context of a single key scan. 
+ + In ROR-merge the quick select doing merge does full records retrieval, + merged quick selects read only keys. + + RETURN + 0 ROR child scan initialized, ok to use. + 1 error +*/ + +int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) +{ + handler *save_file= file; + DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan"); + + if (reuse_handler) + { + DBUG_PRINT("info", ("Reusing handler %p", file)); + if (!head->no_keyread) + { + head->key_read= 1; + file->extra(HA_EXTRA_KEYREAD); + } + if (file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) || + init() || reset()) + { + DBUG_RETURN(1); + } + DBUG_RETURN(0); + } + + /* Create a separate handler object for this quick select */ + if (free_file) + { + /* already have own 'handler' object. */ + DBUG_RETURN(0); + } + + THD *thd= current_thd; + if (!(file= head->file->clone(thd->mem_root))) + { + /* Caller will free the memory */ + goto failure; + } + if (file->external_lock(thd, F_RDLCK)) + goto failure; + if (!head->no_keyread) + { + head->key_read= 1; + file->extra(HA_EXTRA_KEYREAD); + } + if (file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY) || + init() || reset()) + { + file->external_lock(thd, F_UNLCK); + file->close(); + goto failure; + } + free_file= TRUE; + last_rowid= file->ref; + DBUG_RETURN(0); + +failure: + file= save_file; + DBUG_RETURN(1); } + +/* + Initialize this quick select to be a part of a ROR-merged scan. + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan() + reuse_handler If TRUE, use head->file, otherwise create separate + handler object. 
+ RETURN + 0 OK + other error code +*/ +int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler) +{ + List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects); + QUICK_RANGE_SELECT* quick; + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan"); + + /* Initialize all merged "children" quick selects */ + DBUG_ASSERT(!need_to_fetch_row || reuse_handler); + if (!need_to_fetch_row && reuse_handler) + { + quick= quick_it++; + /* + There is no use of this->file. Use it for the first of merged range + selects. + */ + if (quick->init_ror_merged_scan(TRUE)) + DBUG_RETURN(1); + quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS); + } + while ((quick= quick_it++)) + { + if (quick->init_ror_merged_scan(FALSE)) + DBUG_RETURN(1); + quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS); + /* All merged scans share the same record buffer in intersection. */ + quick->record= head->record[0]; + } + + if (need_to_fetch_row && head->file->ha_rnd_init(1)) + { + DBUG_PRINT("error", ("ROR index_merge rnd_init call failed")); + DBUG_RETURN(1); + } + DBUG_RETURN(0); +} + + +/* + Initialize quick select for row retrieval. + SYNOPSIS + reset() + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_INTERSECT_SELECT::reset() +{ + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::reset"); + if (!scans_inited && init_ror_merged_scan(TRUE)) + DBUG_RETURN(1); + scans_inited= TRUE; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + QUICK_RANGE_SELECT *quick; + while ((quick= it++)) + quick->reset(); + DBUG_RETURN(0); +} + + +/* + Add a merged quick select to this ROR-intersection quick select. + + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::push_quick_back() + quick Quick select to be added. The quick select must return + rows in rowid order. + NOTES + This call can only be made before init() is called. + + RETURN + FALSE OK + TRUE Out of memory. 
+*/ + +bool +QUICK_ROR_INTERSECT_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick) +{ + return quick_selects.push_back(quick); +} + +QUICK_ROR_INTERSECT_SELECT::~QUICK_ROR_INTERSECT_SELECT() +{ + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::~QUICK_ROR_INTERSECT_SELECT"); + quick_selects.delete_elements(); + delete cpk_quick; + free_root(&alloc,MYF(0)); + if (need_to_fetch_row && head->file->inited != handler::NONE) + head->file->ha_rnd_end(); + DBUG_VOID_RETURN; +} + + +QUICK_ROR_UNION_SELECT::QUICK_ROR_UNION_SELECT(THD *thd_param, + TABLE *table) + : thd(thd_param), scans_inited(FALSE) +{ + index= MAX_KEY; + head= table; + rowid_length= table->file->ref_length; + record= head->record[0]; + init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); + thd_param->mem_root= &alloc; +} + + +/* + Do post-constructor initialization. + SYNOPSIS + QUICK_ROR_UNION_SELECT::init() + + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_UNION_SELECT::init() +{ + DBUG_ENTER("QUICK_ROR_UNION_SELECT::init"); + if (init_queue(&queue, quick_selects.elements, 0, + FALSE , QUICK_ROR_UNION_SELECT::queue_cmp, + (void*) this)) + { + bzero(&queue, sizeof(QUEUE)); + DBUG_RETURN(1); + } + + if (!(cur_rowid= (byte*)alloc_root(&alloc, 2*head->file->ref_length))) + DBUG_RETURN(1); + prev_rowid= cur_rowid + head->file->ref_length; + DBUG_RETURN(0); +} + + +/* + Comparison function to be used QUICK_ROR_UNION_SELECT::queue priority + queue. + + SYNPOSIS + QUICK_ROR_UNION_SELECT::queue_cmp() + arg Pointer to QUICK_ROR_UNION_SELECT + val1 First merged select + val2 Second merged select +*/ + +int QUICK_ROR_UNION_SELECT::queue_cmp(void *arg, byte *val1, byte *val2) +{ + QUICK_ROR_UNION_SELECT *self= (QUICK_ROR_UNION_SELECT*)arg; + return self->head->file->cmp_ref(((QUICK_SELECT_I*)val1)->last_rowid, + ((QUICK_SELECT_I*)val2)->last_rowid); +} + + +/* + Initialize quick select for row retrieval. 
+ SYNOPSIS + reset() + + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_UNION_SELECT::reset() +{ + QUICK_SELECT_I *quick; + int error; + DBUG_ENTER("QUICK_ROR_UNION_SELECT::reset"); + have_prev_rowid= FALSE; + if (!scans_inited) + { + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (quick->init_ror_merged_scan(FALSE)) + DBUG_RETURN(1); + } + scans_inited= TRUE; + } + queue_remove_all(&queue); + /* + Initialize scans for merged quick selects and put all merged quick + selects into the queue. + */ + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (quick->reset()) + DBUG_RETURN(1); + if ((error= quick->get_next())) + { + if (error == HA_ERR_END_OF_FILE) + continue; + DBUG_RETURN(error); + } + quick->save_last_pos(); + queue_insert(&queue, (byte*)quick); + } + + if (head->file->ha_rnd_init(1)) + { + DBUG_PRINT("error", ("ROR index_merge rnd_init call failed")); + DBUG_RETURN(1); + } + + DBUG_RETURN(0); +} + + +bool +QUICK_ROR_UNION_SELECT::push_quick_back(QUICK_SELECT_I *quick_sel_range) +{ + return quick_selects.push_back(quick_sel_range); +} + +QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT() +{ + DBUG_ENTER("QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT"); + delete_queue(&queue); + quick_selects.delete_elements(); + if (head->file->inited != handler::NONE) + head->file->ha_rnd_end(); + free_root(&alloc,MYF(0)); + DBUG_VOID_RETURN; +} + + QUICK_RANGE::QUICK_RANGE() :min_key(0),max_key(0),min_length(0),max_length(0), flag(NO_MIN_RANGE | NO_MAX_RANGE) @@ -820,7 +1606,7 @@ uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit) if (!ord->asc) return MAX_KEY; - for (idx= 0; idx < table->keys; idx++) + for (idx= 0; idx < table->s->keys; idx++) { if (!(table->keys_in_use_for_query.is_set(idx))) continue; @@ -871,27 +1657,279 @@ uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit) /* - Test if a key can be used in different ranges + Table rows retrieval plan. 
Range optimizer creates QUICK_SELECT_I-derived + objects from table read plans. +*/ +class TABLE_READ_PLAN +{ +public: + /* + Plan read cost, with or without cost of full row retrieval, depending + on plan creation parameters. + */ + double read_cost; + ha_rows records; /* estimate of #rows to be examined */ + + /* + If TRUE, the scan returns rows in rowid order. This is used only for + scans that can be both ROR and non-ROR. + */ + bool is_ror; + + /* + Create quick select for this plan. + SYNOPSIS + make_quick() + param Parameter from test_quick_select + retrieve_full_rows If TRUE, created quick select will do full record + retrieval. + parent_alloc Memory pool to use, if any. + + NOTES + retrieve_full_rows is ignored by some implementations. + + RETURN + created quick select + NULL on any error. + */ + virtual QUICK_SELECT_I *make_quick(PARAM *param, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc=NULL) = 0; + + /* Table read plans are allocated on MEM_ROOT and are never deleted */ + static void *operator new(size_t size, MEM_ROOT *mem_root) + { return (void*) alloc_root(mem_root, (uint) size); } + static void operator delete(void *ptr,size_t size) { TRASH(ptr, size); } + static void operator delete(void *ptr, MEM_ROOT *mem_root) { /* Never called */ } + virtual ~TABLE_READ_PLAN() {} /* Remove gcc warning */ + +}; + +class TRP_ROR_INTERSECT; +class TRP_ROR_UNION; +class TRP_INDEX_MERGE; + + +/* + Plan for a QUICK_RANGE_SELECT scan. + TRP_RANGE::make_quick ignores retrieve_full_rows parameter because + QUICK_RANGE_SELECT doesn't distinguish between 'index only' scans and full + record retrieval scans. 
+*/ + +class TRP_RANGE : public TABLE_READ_PLAN +{ +public: + SEL_ARG *key; /* set of intervals to be used in "range" method retrieval */ + uint key_idx; /* key number in PARAM::key */ + + TRP_RANGE(SEL_ARG *key_arg, uint idx_arg) + : key(key_arg), key_idx(idx_arg) + {} + virtual ~TRP_RANGE() {} /* Remove gcc warning */ + + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc) + { + DBUG_ENTER("TRP_RANGE::make_quick"); + QUICK_RANGE_SELECT *quick; + if ((quick= get_quick_select(param, key_idx, key, parent_alloc))) + { + quick->records= records; + quick->read_time= read_cost; + } + DBUG_RETURN(quick); + } +}; + + +/* Plan for QUICK_ROR_INTERSECT_SELECT scan. */ + +class TRP_ROR_INTERSECT : public TABLE_READ_PLAN +{ +public: + TRP_ROR_INTERSECT() {} /* Remove gcc warning */ + virtual ~TRP_ROR_INTERSECT() {} /* Remove gcc warning */ + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + + /* Array of pointers to ROR range scans used in this intersection */ + struct st_ror_scan_info **first_scan; + struct st_ror_scan_info **last_scan; /* End of the above array */ + struct st_ror_scan_info *cpk_scan; /* Clustered PK scan, if there is one */ + bool is_covering; /* TRUE if no row retrieval phase is necessary */ + double index_scan_costs; /* SUM(cost(index_scan)) */ +}; + + +/* + Plan for QUICK_ROR_UNION_SELECT scan. + QUICK_ROR_UNION_SELECT always retrieves full rows, so retrieve_full_rows + is ignored by make_quick. +*/ + +class TRP_ROR_UNION : public TABLE_READ_PLAN +{ +public: + TRP_ROR_UNION() {} /* Remove gcc warning */ + virtual ~TRP_ROR_UNION() {} /* Remove gcc warning */ + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + TABLE_READ_PLAN **first_ror; /* array of ptrs to plans for merged scans */ + TABLE_READ_PLAN **last_ror; /* end of the above array */ +}; + + +/* + Plan for QUICK_INDEX_MERGE_SELECT scan. 
+ QUICK_ROR_INTERSECT_SELECT always retrieves full rows, so retrieve_full_rows + is ignored by make_quick. +*/ + +class TRP_INDEX_MERGE : public TABLE_READ_PLAN +{ +public: + TRP_INDEX_MERGE() {} /* Remove gcc warning */ + virtual ~TRP_INDEX_MERGE() {} /* Remove gcc warning */ + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + TRP_RANGE **range_scans; /* array of ptrs to plans of merged scans */ + TRP_RANGE **range_scans_end; /* end of the array */ +}; + +/* + Plan for a QUICK_GROUP_MIN_MAX_SELECT scan. +*/ + +class TRP_GROUP_MIN_MAX : public TABLE_READ_PLAN +{ +private: + bool have_min, have_max; + KEY_PART_INFO *min_max_arg_part; + uint group_prefix_len; + uint used_key_parts; + uint group_key_parts; + KEY *index_info; + uint index; + uint key_infix_len; + byte key_infix[MAX_KEY_LENGTH]; + SEL_TREE *range_tree; /* Represents all range predicates in the query. */ + SEL_ARG *index_tree; /* The SEL_ARG sub-tree corresponding to index_info. */ + uint param_idx; /* Index of used key in param->key. */ + /* Number of records selected by the ranges in index_tree. 
*/ +public: + ha_rows quick_prefix_records; +public: + TRP_GROUP_MIN_MAX(bool have_min_arg, bool have_max_arg, + KEY_PART_INFO *min_max_arg_part_arg, + uint group_prefix_len_arg, uint used_key_parts_arg, + uint group_key_parts_arg, KEY *index_info_arg, + uint index_arg, uint key_infix_len_arg, + byte *key_infix_arg, + SEL_TREE *tree_arg, SEL_ARG *index_tree_arg, + uint param_idx_arg, ha_rows quick_prefix_records_arg) + : have_min(have_min_arg), have_max(have_max_arg), + min_max_arg_part(min_max_arg_part_arg), + group_prefix_len(group_prefix_len_arg), used_key_parts(used_key_parts_arg), + group_key_parts(group_key_parts_arg), index_info(index_info_arg), + index(index_arg), key_infix_len(key_infix_len_arg), range_tree(tree_arg), + index_tree(index_tree_arg), param_idx(param_idx_arg), + quick_prefix_records(quick_prefix_records_arg) + { + if (key_infix_len) + memcpy(this->key_infix, key_infix_arg, key_infix_len); + } + virtual ~TRP_GROUP_MIN_MAX() {} /* Remove gcc warning */ + + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); +}; + + +/* + Fill param->needed_fields with bitmap of fields used in the query. SYNOPSIS - SQL_SELECT::test_quick_select(thd,keys_to_use, prev_tables, - limit, force_quick_range) + fill_used_fields_bitmap() + param Parameter from test_quick_select function. - Updates the following in the select parameter: - needed_reg - Bits for keys with may be used if all prev regs are read - quick - Parameter to use when reading records. - In the table struct the following information is updated: - quick_keys - Which keys can be used - quick_rows - How many rows the key matches + NOTES + Clustered PK members are not put into the bitmap as they are implicitly + present in all keys (and it is impossible to avoid reading them). + RETURN + 0 Ok + 1 Out of memory. 
+*/ - RETURN VALUES - -1 if impossible select - 0 if can't use quick_select - 1 if found usable range +static int fill_used_fields_bitmap(PARAM *param) +{ + TABLE *table= param->table; + param->fields_bitmap_size= (table->s->fields/8 + 1); + uchar *tmp; + uint pk; + param->tmp_covered_fields.bitmap= 0; + if (!(tmp= (uchar*)alloc_root(param->mem_root,param->fields_bitmap_size)) || + bitmap_init(¶m->needed_fields, tmp, param->fields_bitmap_size*8, + FALSE)) + return 1; + + bitmap_clear_all(¶m->needed_fields); + for (uint i= 0; i < table->s->fields; i++) + { + if (param->thd->query_id == table->field[i]->query_id) + bitmap_set_bit(¶m->needed_fields, i+1); + } + + pk= param->table->s->primary_key; + if (param->table->file->primary_key_is_clustered() && pk != MAX_KEY) + { + /* The table uses clustered PK and it is not internally generated */ + KEY_PART_INFO *key_part= param->table->key_info[pk].key_part; + KEY_PART_INFO *key_part_end= key_part + + param->table->key_info[pk].key_parts; + for (;key_part != key_part_end; ++key_part) + { + bitmap_clear_bit(¶m->needed_fields, key_part->fieldnr); + } + } + return 0; +} + + +/* + Test if a key can be used in different ranges + + SYNOPSIS + SQL_SELECT::test_quick_select() + thd Current thread + keys_to_use Keys to use for range retrieval + prev_tables Tables assumed to be already read when the scan is + performed (but not read at the moment of this call) + limit Query limit + force_quick_range Prefer to use range (instead of full table scan) even + if it is more expensive. + + NOTES + Updates the following in the select parameter: + needed_reg - Bits for keys with may be used if all prev regs are read + quick - Parameter to use when reading records. + + In the table struct the following information is updated: + quick_keys - Which keys can be used + quick_rows - How many rows the key matches + + TODO + Check if this function really needs to modify keys_to_use, and change the + code to pass it by reference if it doesn't. 
+ + In addition to force_quick_range other means can be (an usually are) used + to make this function prefer range over full table scan. Figure out if + force_quick_range is really needed. - TODO - check if the function really needs to modify keys_to_use, and change the - code to pass it by reference if not + RETURN + -1 if impossible select (i.e. certainly no rows will be selected) + 0 if can't use quick_select + 1 if found usable ranges and quick select has been successfully created. */ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, @@ -900,28 +1938,29 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, { uint idx; double scan_time; - DBUG_ENTER("test_quick_select"); + DBUG_ENTER("SQL_SELECT::test_quick_select"); DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu", - keys_to_use.to_ulonglong(), (ulong) prev_tables, + (ulong) keys_to_use.to_ulonglong(), (ulong) prev_tables, (ulong) const_tables)); - + DBUG_PRINT("info", ("records: %lu", (ulong) head->file->records)); delete quick; quick=0; - needed_reg.clear_all(); quick_keys.clear_all(); - if (!cond || (specialflag & SPECIAL_SAFE_MODE) && ! force_quick_range || + needed_reg.clear_all(); + quick_keys.clear_all(); + if ((specialflag & SPECIAL_SAFE_MODE) && ! 
force_quick_range || !limit) DBUG_RETURN(0); /* purecov: inspected */ if (keys_to_use.is_clear_all()) DBUG_RETURN(0); - records=head->file->records; + records= head->file->records; if (!records) records++; /* purecov: inspected */ - scan_time=(double) records / TIME_FOR_COMPARE+1; - read_time=(double) head->file->scan_time()+ scan_time + 1.1; + scan_time= (double) records / TIME_FOR_COMPARE + 1; + read_time= (double) head->file->scan_time() + scan_time + 1.1; if (head->force_index) scan_time= read_time= DBL_MAX; if (limit < records) - read_time=(double) records+scan_time+1; // Force to use index + read_time= (double) records + scan_time + 1; // Force to use index else if (read_time <= 2.0 && !force_quick_range) DBUG_RETURN(0); /* No need for quick select */ @@ -930,8 +1969,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, keys_to_use.intersect(head->keys_in_use_for_query); if (!keys_to_use.is_clear_all()) { - MEM_ROOT *old_root,alloc; - SEL_TREE *tree; + MEM_ROOT alloc; + SEL_TREE *tree= NULL; KEY_PART *key_parts; KEY *key_info; PARAM param; @@ -945,22 +1984,30 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, param.table=head; param.keys=0; param.mem_root= &alloc; + param.old_root= thd->mem_root; + param.needed_reg= &needed_reg; + param.imerge_cost_buff_size= 0; + thd->no_errors=1; // Don't warn about NULL init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); - if (!(param.key_parts = (KEY_PART*) alloc_root(&alloc, - sizeof(KEY_PART)* - head->key_parts))) + if (!(param.key_parts= (KEY_PART*) alloc_root(&alloc, + sizeof(KEY_PART)* + head->s->key_parts)) || + fill_used_fields_bitmap(¶m)) { thd->no_errors=0; free_root(&alloc,MYF(0)); // Return memory & allocator DBUG_RETURN(0); // Can't use range } key_parts= param.key_parts; - old_root= thd->mem_root; thd->mem_root= &alloc; + /* + Make an array with description of all key parts of all table keys. + This is used in get_mm_parts function. 
+ */ key_info= head->key_info; - for (idx=0 ; idx < head->keys ; idx++, key_info++) + for (idx=0 ; idx < head->s->keys ; idx++, key_info++) { KEY_PART_INFO *key_part_info; if (!keys_to_use.is_set(idx)) @@ -981,88 +2028,150 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, key_parts->null_bit= key_part_info->null_bit; key_parts->image_type = (key_info->flags & HA_SPATIAL) ? Field::itMBR : Field::itRAW; - key_parts->flag= key_part_info->key_part_flag; + key_parts->flag= (uint8) key_part_info->key_part_flag; } param.real_keynr[param.keys++]=idx; } param.key_parts_end=key_parts; param.alloced_sel_args= 0; - if ((tree=get_mm_tree(¶m,cond))) + /* Calculate cost of full index read for the shortest covering index */ + if (!head->used_keys.is_clear_all()) + { + int key_for_use= find_shortest_key(head, &head->used_keys); + double key_read_time= (get_index_only_read_time(¶m, records, + key_for_use) + + (double) records / TIME_FOR_COMPARE); + DBUG_PRINT("info", ("'all'+'using index' scan will be using key %d, " + "read time %g", key_for_use, key_read_time)); + if (key_read_time < read_time) + read_time= key_read_time; + } + + TABLE_READ_PLAN *best_trp= NULL; + TRP_GROUP_MIN_MAX *group_trp; + double best_read_time= read_time; + + if (cond) + { + if ((tree= get_mm_tree(¶m,cond))) + { + if (tree->type == SEL_TREE::IMPOSSIBLE) + { + records=0L; /* Return -1 from this function. */ + read_time= (double) HA_POS_ERROR; + goto free_mem; + } + if (tree->type != SEL_TREE::KEY && + tree->type != SEL_TREE::KEY_SMALLER) + goto free_mem; + } + } + + /* + Try to construct a QUICK_GROUP_MIN_MAX_SELECT. + Notice that it can be constructed no matter if there is a range tree. 
+ */ + group_trp= get_best_group_min_max(¶m, tree); + if (group_trp && group_trp->read_cost < best_read_time) + { + best_trp= group_trp; + best_read_time= best_trp->read_cost; + } + + if (tree) { - if (tree->type == SEL_TREE::IMPOSSIBLE) + /* + It is possible to use a range-based quick select (but it might be + slower than 'all' table scan). + */ + if (tree->merges.is_empty()) { - records=0L; // Return -1 from this function - read_time= (double) HA_POS_ERROR; + TRP_RANGE *range_trp; + TRP_ROR_INTERSECT *rori_trp; + bool can_build_covering= FALSE; + + /* Get best 'range' plan and prepare data for making other plans */ + if ((range_trp= get_key_scans_params(¶m, tree, FALSE, + best_read_time))) + { + best_trp= range_trp; + best_read_time= best_trp->read_cost; + } + + /* + Simultaneous key scans and row deletes on several handler + objects are not allowed so don't use ROR-intersection for + table deletes. + */ + if ((thd->lex->sql_command != SQLCOM_DELETE)) +#ifdef NOT_USED + if ((thd->lex->sql_command != SQLCOM_UPDATE)) +#endif + { + /* + Get best non-covering ROR-intersection plan and prepare data for + building covering ROR-intersection. + */ + if ((rori_trp= get_best_ror_intersect(¶m, tree, best_read_time, + &can_build_covering))) + { + best_trp= rori_trp; + best_read_time= best_trp->read_cost; + /* + Try constructing covering ROR-intersect only if it looks possible + and worth doing. + */ + if (!rori_trp->is_covering && can_build_covering && + (rori_trp= get_best_covering_ror_intersect(¶m, tree, + best_read_time))) + best_trp= rori_trp; + } + } } - else if (tree->type == SEL_TREE::KEY || - tree->type == SEL_TREE::KEY_SMALLER) + else { - SEL_ARG **key,**end,**best_key=0; + /* Try creating index_merge/ROR-union scan. 
*/ + SEL_IMERGE *imerge; + TABLE_READ_PLAN *best_conj_trp= NULL, *new_conj_trp; + LINT_INIT(new_conj_trp); /* no empty index_merge lists possible */ + + DBUG_PRINT("info",("No range reads possible," + " trying to construct index_merge")); + List_iterator_fast<SEL_IMERGE> it(tree->merges); + while ((imerge= it++)) + { + new_conj_trp= get_best_disjunct_quick(¶m, imerge, best_read_time); + if (!best_conj_trp || (new_conj_trp && new_conj_trp->read_cost < + best_conj_trp->read_cost)) + best_conj_trp= new_conj_trp; + } + if (best_conj_trp) + best_trp= best_conj_trp; + } + } + thd->mem_root= param.old_root; - for (idx=0,key=tree->keys, end=key+param.keys ; - key != end ; - key++,idx++) - { - ha_rows found_records; - double found_read_time; - if (*key) - { - uint keynr= param.real_keynr[idx]; - if ((*key)->type == SEL_ARG::MAYBE_KEY || - (*key)->maybe_flag) - needed_reg.set_bit(keynr); - - found_records=check_quick_select(¶m, idx, *key); - if (found_records != HA_POS_ERROR && found_records > 2 && - head->used_keys.is_set(keynr) && - (head->file->index_flags(keynr, param.max_key_part, 1) & - HA_KEYREAD_ONLY)) - { - /* - We can resolve this by only reading through this key. - Assume that we will read trough the whole key range - and that all key blocks are half full (normally things are - much better). 
- */ - uint keys_per_block= (head->file->block_size/2/ - (head->key_info[keynr].key_length+ - head->file->ref_length) + 1); - found_read_time=((double) (found_records+keys_per_block-1)/ - (double) keys_per_block); - } - else - found_read_time= (head->file->read_time(keynr, - param.range_count, - found_records)+ - (double) found_records / TIME_FOR_COMPARE); - DBUG_PRINT("info",("read_time: %g found_read_time: %g", - read_time, found_read_time)); - if (read_time > found_read_time && found_records != HA_POS_ERROR) - { - read_time=found_read_time; - records=found_records; - best_key=key; - } - } - } - if (best_key && records) - { - if ((quick=get_quick_select(¶m,(uint) (best_key-tree->keys), - *best_key))) - { - quick->records=records; - quick->read_time=read_time; - } - } + /* If we got a read plan, create a quick select from it. */ + if (best_trp) + { + records= best_trp->records; + if (!(quick= best_trp->make_quick(¶m, TRUE)) || quick->init()) + { + delete quick; + quick= NULL; } } + + free_mem: free_root(&alloc,MYF(0)); // Return memory & allocator - thd->mem_root= old_root; + thd->mem_root= param.old_root; thd->no_errors=0; } - DBUG_EXECUTE("info",print_quick(quick,&needed_reg);); + + DBUG_EXECUTE("info", print_quick(quick, &needed_reg);); + /* Assume that if the user is using 'limit' we will only need to scan limit rows if we are using a key @@ -1070,11 +2179,1837 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, DBUG_RETURN(records ? test(quick) : -1); } + +/* + Get cost of 'sweep' full records retrieval. 
+ SYNOPSIS + get_sweep_read_cost() + param Parameter from test_quick_select + records # of records to be retrieved + RETURN + cost of sweep +*/ + +double get_sweep_read_cost(const PARAM *param, ha_rows records) +{ + double result; + DBUG_ENTER("get_sweep_read_cost"); + if (param->table->file->primary_key_is_clustered()) + { + result= param->table->file->read_time(param->table->s->primary_key, + records, records); + } + else + { + double n_blocks= + ceil(ulonglong2double(param->table->file->data_file_length) / IO_SIZE); + double busy_blocks= + n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(records))); + if (busy_blocks < 1.0) + busy_blocks= 1.0; + DBUG_PRINT("info",("sweep: nblocks: %g, busy_blocks: %g", n_blocks, + busy_blocks)); + /* + Disabled: Bail out if # of blocks to read is bigger than # of blocks in + table data file. + if (max_cost != DBL_MAX && (busy_blocks+index_reads_cost) >= n_blocks) + return 1; + */ + JOIN *join= param->thd->lex->select_lex.join; + if (!join || join->tables == 1) + { + /* No join, assume reading is done in one 'sweep' */ + result= busy_blocks*(DISK_SEEK_BASE_COST + + DISK_SEEK_PROP_COST*n_blocks/busy_blocks); + } + else + { + /* + Possibly this is a join with source table being non-last table, so + assume that disk seeks are random here. + */ + result= busy_blocks; + } + } + DBUG_PRINT("return",("cost: %g", result)); + DBUG_RETURN(result); +} + + +/* + Get best plan for a SEL_IMERGE disjunctive expression. + SYNOPSIS + get_best_disjunct_quick() + param Parameter from check_quick_select function + imerge Expression to use + read_time Don't create scans with cost > read_time + + NOTES + index_merge cost is calculated as follows: + index_merge_cost = + cost(index_reads) + (see #1) + cost(rowid_to_row_scan) + (see #2) + cost(unique_use) (see #3) + + 1. 
cost(index_reads) =SUM_i(cost(index_read_i)) + For non-CPK scans, + cost(index_read_i) = {cost of ordinary 'index only' scan} + For CPK scan, + cost(index_read_i) = {cost of non-'index only' scan} + + 2. cost(rowid_to_row_scan) + If table PK is clustered then + cost(rowid_to_row_scan) = + {cost of ordinary clustered PK scan with n_ranges=n_rows} + + Otherwise, we use the following model to calculate costs: + We need to retrieve n_rows rows from file that occupies n_blocks blocks. + We assume that offsets of rows we need are independent variates with + uniform distribution in [0..max_file_offset] range. + + We'll denote block as "busy" if it contains row(s) we need to retrieve + and "empty" if doesn't contain rows we need. + + Probability that a block is empty is (1 - 1/n_blocks)^n_rows (this + applies to any block in file). Let x_i be a variate taking value 1 if + block #i is empty and 0 otherwise. + + Then E(x_i) = (1 - 1/n_blocks)^n_rows; + + E(n_empty_blocks) = E(sum(x_i)) = sum(E(x_i)) = + = n_blocks * ((1 - 1/n_blocks)^n_rows) = + ~= n_blocks * exp(-n_rows/n_blocks). + + E(n_busy_blocks) = n_blocks*(1 - (1 - 1/n_blocks)^n_rows) = + ~= n_blocks * (1 - exp(-n_rows/n_blocks)). + + Average size of "hole" between neighbor non-empty blocks is + E(hole_size) = n_blocks/E(n_busy_blocks). + + The total cost of reading all needed blocks in one "sweep" is: + + E(n_busy_blocks)* + (DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST*n_blocks/E(n_busy_blocks)). + + 3. Cost of Unique use is calculated in Unique::get_use_cost function. + + ROR-union cost is calculated in the same way index_merge, but instead of + Unique a priority queue is used. + + RETURN + Created read plan + NULL - Out of memory or no read scan could be built. 
+*/ + +static +TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, + double read_time) +{ + SEL_TREE **ptree; + TRP_INDEX_MERGE *imerge_trp= NULL; + uint n_child_scans= imerge->trees_next - imerge->trees; + TRP_RANGE **range_scans; + TRP_RANGE **cur_child; + TRP_RANGE **cpk_scan= NULL; + bool imerge_too_expensive= FALSE; + double imerge_cost= 0.0; + ha_rows cpk_scan_records= 0; + ha_rows non_cpk_scan_records= 0; + bool pk_is_clustered= param->table->file->primary_key_is_clustered(); + bool all_scans_ror_able= TRUE; + bool all_scans_rors= TRUE; + uint unique_calc_buff_size; + TABLE_READ_PLAN **roru_read_plans; + TABLE_READ_PLAN **cur_roru_plan; + double roru_index_costs; + ha_rows roru_total_records; + double roru_intersect_part= 1.0; + DBUG_ENTER("get_best_disjunct_quick"); + DBUG_PRINT("info", ("Full table scan cost: %g", read_time)); + + if (!(range_scans= (TRP_RANGE**)alloc_root(param->mem_root, + sizeof(TRP_RANGE*)* + n_child_scans))) + DBUG_RETURN(NULL); + /* + Collect best 'range' scan for each of disjuncts, and, while doing so, + analyze possibility of ROR scans. Also calculate some values needed by + other parts of the code. + */ + for (ptree= imerge->trees, cur_child= range_scans; + ptree != imerge->trees_next; + ptree++, cur_child++) + { + DBUG_EXECUTE("info", print_sel_tree(param, *ptree, &(*ptree)->keys_map, + "tree in SEL_IMERGE");); + if (!(*cur_child= get_key_scans_params(param, *ptree, TRUE, read_time))) + { + /* + One of index scans in this index_merge is more expensive than entire + table read for another available option. The entire index_merge (and + any possible ROR-union) will be more expensive then, too. We continue + here only to update SQL_SELECT members. 
+ */ + imerge_too_expensive= TRUE; + } + if (imerge_too_expensive) + continue; + + imerge_cost += (*cur_child)->read_cost; + all_scans_ror_able &= ((*ptree)->n_ror_scans > 0); + all_scans_rors &= (*cur_child)->is_ror; + if (pk_is_clustered && + param->real_keynr[(*cur_child)->key_idx] == + param->table->s->primary_key) + { + cpk_scan= cur_child; + cpk_scan_records= (*cur_child)->records; + } + else + non_cpk_scan_records += (*cur_child)->records; + } + + DBUG_PRINT("info", ("index_merge scans cost %g", imerge_cost)); + if (imerge_too_expensive || (imerge_cost > read_time) || + (non_cpk_scan_records+cpk_scan_records >= param->table->file->records) && + read_time != DBL_MAX) + { + /* + Bail out if it is obvious that both index_merge and ROR-union will be + more expensive + */ + DBUG_PRINT("info", ("Sum of index_merge scans is more expensive than " + "full table scan, bailing out")); + DBUG_RETURN(NULL); + } + if (all_scans_rors) + { + roru_read_plans= (TABLE_READ_PLAN**)range_scans; + goto skip_to_ror_scan; + } + if (cpk_scan) + { + /* + Add one ROWID comparison for each row retrieved on non-CPK scan. 
(it + is done in QUICK_RANGE_SELECT::row_in_ranges) + */ + imerge_cost += non_cpk_scan_records / TIME_FOR_COMPARE_ROWID; + } + + /* Calculate cost(rowid_to_row_scan) */ + imerge_cost += get_sweep_read_cost(param, non_cpk_scan_records); + DBUG_PRINT("info",("index_merge cost with rowid-to-row scan: %g", + imerge_cost)); + if (imerge_cost > read_time) + goto build_ror_index_merge; + + /* Add Unique operations cost */ + unique_calc_buff_size= + Unique::get_cost_calc_buff_size(non_cpk_scan_records, + param->table->file->ref_length, + param->thd->variables.sortbuff_size); + if (param->imerge_cost_buff_size < unique_calc_buff_size) + { + if (!(param->imerge_cost_buff= (uint*)alloc_root(param->mem_root, + unique_calc_buff_size))) + DBUG_RETURN(NULL); + param->imerge_cost_buff_size= unique_calc_buff_size; + } + + imerge_cost += + Unique::get_use_cost(param->imerge_cost_buff, non_cpk_scan_records, + param->table->file->ref_length, + param->thd->variables.sortbuff_size); + DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)", + imerge_cost, read_time)); + if (imerge_cost < read_time) + { + if ((imerge_trp= new (param->mem_root)TRP_INDEX_MERGE)) + { + imerge_trp->read_cost= imerge_cost; + imerge_trp->records= non_cpk_scan_records + cpk_scan_records; + imerge_trp->records= min(imerge_trp->records, + param->table->file->records); + imerge_trp->range_scans= range_scans; + imerge_trp->range_scans_end= range_scans + n_child_scans; + read_time= imerge_cost; + } + } + +build_ror_index_merge: + if (!all_scans_ror_able || param->thd->lex->sql_command == SQLCOM_DELETE) + DBUG_RETURN(imerge_trp); + + /* Ok, it is possible to build a ROR-union, try it. 
*/ + bool dummy; + if (!(roru_read_plans= + (TABLE_READ_PLAN**)alloc_root(param->mem_root, + sizeof(TABLE_READ_PLAN*)* + n_child_scans))) + DBUG_RETURN(imerge_trp); +skip_to_ror_scan: + roru_index_costs= 0.0; + roru_total_records= 0; + cur_roru_plan= roru_read_plans; + + /* Find 'best' ROR scan for each of trees in disjunction */ + for (ptree= imerge->trees, cur_child= range_scans; + ptree != imerge->trees_next; + ptree++, cur_child++, cur_roru_plan++) + { + /* + Assume the best ROR scan is the one that has cheapest full-row-retrieval + scan cost. + Also accumulate index_only scan costs as we'll need them to calculate + overall index_intersection cost. + */ + double cost; + if ((*cur_child)->is_ror) + { + /* Ok, we have index_only cost, now get full rows scan cost */ + cost= param->table->file-> + read_time(param->real_keynr[(*cur_child)->key_idx], 1, + (*cur_child)->records) + + rows2double((*cur_child)->records) / TIME_FOR_COMPARE; + } + else + cost= read_time; + + TABLE_READ_PLAN *prev_plan= *cur_child; + if (!(*cur_roru_plan= get_best_ror_intersect(param, *ptree, cost, + &dummy))) + { + if (prev_plan->is_ror) + *cur_roru_plan= prev_plan; + else + DBUG_RETURN(imerge_trp); + roru_index_costs += (*cur_roru_plan)->read_cost; + } + else + roru_index_costs += + ((TRP_ROR_INTERSECT*)(*cur_roru_plan))->index_scan_costs; + roru_total_records += (*cur_roru_plan)->records; + roru_intersect_part *= (*cur_roru_plan)->records / + param->table->file->records; + } + + /* + rows to retrieve= + SUM(rows_in_scan_i) - table_rows * PROD(rows_in_scan_i / table_rows). + This is valid because index_merge construction guarantees that conditions + in disjunction do not share key parts. + */ + roru_total_records -= (ha_rows)(roru_intersect_part* + param->table->file->records); + /* ok, got a ROR read plan for each of the disjuncts + Calculate cost: + cost(index_union_scan(scan_1, ... 
scan_n)) = + SUM_i(cost_of_index_only_scan(scan_i)) + + queue_use_cost(rowid_len, n) + + cost_of_row_retrieval + See get_merge_buffers_cost function for queue_use_cost formula derivation. + */ + + double roru_total_cost; + roru_total_cost= roru_index_costs + + rows2double(roru_total_records)*log((double)n_child_scans) / + (TIME_FOR_COMPARE_ROWID * M_LN2) + + get_sweep_read_cost(param, roru_total_records); + + DBUG_PRINT("info", ("ROR-union: cost %g, %d members", roru_total_cost, + n_child_scans)); + TRP_ROR_UNION* roru; + if (roru_total_cost < read_time) + { + if ((roru= new (param->mem_root) TRP_ROR_UNION)) + { + roru->first_ror= roru_read_plans; + roru->last_ror= roru_read_plans + n_child_scans; + roru->read_cost= roru_total_cost; + roru->records= roru_total_records; + DBUG_RETURN(roru); + } + } + DBUG_RETURN(imerge_trp); +} + + +/* + Calculate cost of 'index only' scan for given index and number of records. + + SYNOPSIS + get_index_only_read_time() + param parameters structure + records #of records to read + keynr key to read + + NOTES + It is assumed that we will read trough the whole key range and that all + key blocks are half full (normally things are much better). It is also + assumed that each time we read the next key from the index, the handler + performs a random seek, thus the cost is proportional to the number of + blocks read. + + TODO: + Move this to handler->read_time() by adding a flag 'index-only-read' to + this call. 
The reason for doing this is that the current function doesn't + handle the case when the row is stored in the b-tree (like in innodb + clustered index) +*/ + +static double get_index_only_read_time(const PARAM* param, ha_rows records, + int keynr) +{ + double read_time; + uint keys_per_block= (param->table->file->block_size/2/ + (param->table->key_info[keynr].key_length+ + param->table->file->ref_length) + 1); + read_time=((double) (records+keys_per_block-1)/ + (double) keys_per_block); + return read_time; +} + + +typedef struct st_ror_scan_info +{ + uint idx; /* # of used key in param->keys */ + uint keynr; /* # of used key in table */ + ha_rows records; /* estimate of # records this scan will return */ + + /* Set of intervals over key fields that will be used for row retrieval. */ + SEL_ARG *sel_arg; + + /* Fields used in the query and covered by this ROR scan. */ + MY_BITMAP covered_fields; + uint used_fields_covered; /* # of set bits in covered_fields */ + int key_rec_length; /* length of key record (including rowid) */ + + /* + Cost of reading all index records with values in sel_arg intervals set + (assuming there is no need to access full table records) + */ + double index_read_cost; + uint first_uncovered_field; /* first unused bit in covered_fields */ + uint key_components; /* # of parts in the key */ +} ROR_SCAN_INFO; + + +/* + Create ROR_SCAN_INFO* structure with a single ROR scan on index idx using + sel_arg set of intervals. 
+ + SYNOPSIS + make_ror_scan() + param Parameter from test_quick_select function + idx Index of key in param->keys + sel_arg Set of intervals for a given key + + RETURN + NULL - out of memory + ROR scan structure containing a scan for {idx, sel_arg} +*/ + +static +ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg) +{ + ROR_SCAN_INFO *ror_scan; + uchar *bitmap_buf; + uint keynr; + DBUG_ENTER("make_ror_scan"); + + if (!(ror_scan= (ROR_SCAN_INFO*)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO)))) + DBUG_RETURN(NULL); + + ror_scan->idx= idx; + ror_scan->keynr= keynr= param->real_keynr[idx]; + ror_scan->key_rec_length= (param->table->key_info[keynr].key_length + + param->table->file->ref_length); + ror_scan->sel_arg= sel_arg; + ror_scan->records= param->table->quick_rows[keynr]; + + if (!(bitmap_buf= (uchar*)alloc_root(param->mem_root, + param->fields_bitmap_size))) + DBUG_RETURN(NULL); + + if (bitmap_init(&ror_scan->covered_fields, bitmap_buf, + param->fields_bitmap_size*8, FALSE)) + DBUG_RETURN(NULL); + bitmap_clear_all(&ror_scan->covered_fields); + + KEY_PART_INFO *key_part= param->table->key_info[keynr].key_part; + KEY_PART_INFO *key_part_end= key_part + + param->table->key_info[keynr].key_parts; + for (;key_part != key_part_end; ++key_part) + { + if (bitmap_is_set(¶m->needed_fields, key_part->fieldnr)) + bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr); + } + ror_scan->index_read_cost= + get_index_only_read_time(param, param->table->quick_rows[ror_scan->keynr], + ror_scan->keynr); + DBUG_RETURN(ror_scan); +} + + +/* + Compare two ROR_SCAN_INFO** by E(#records_matched) * key_record_length. 
+ SYNOPSIS + cmp_ror_scan_info() + a ptr to first compared value + b ptr to second compared value + + RETURN + -1 a < b + 0 a = b + 1 a > b +*/ + +static int cmp_ror_scan_info(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b) +{ + double val1= rows2double((*a)->records) * (*a)->key_rec_length; + double val2= rows2double((*b)->records) * (*b)->key_rec_length; + return (val1 < val2)? -1: (val1 == val2)? 0 : 1; +} + +/* + Compare two ROR_SCAN_INFO** by + (#covered fields in F desc, + #components asc, + number of first not covered component asc) + + SYNOPSIS + cmp_ror_scan_info_covering() + a ptr to first compared value + b ptr to second compared value + + RETURN + -1 a < b + 0 a = b + 1 a > b +*/ + +static int cmp_ror_scan_info_covering(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b) +{ + if ((*a)->used_fields_covered > (*b)->used_fields_covered) + return -1; + if ((*a)->used_fields_covered < (*b)->used_fields_covered) + return 1; + if ((*a)->key_components < (*b)->key_components) + return -1; + if ((*a)->key_components > (*b)->key_components) + return 1; + if ((*a)->first_uncovered_field < (*b)->first_uncovered_field) + return -1; + if ((*a)->first_uncovered_field > (*b)->first_uncovered_field) + return 1; + return 0; +} + + +/* Auxiliary structure for incremental ROR-intersection creation */ +typedef struct +{ + const PARAM *param; + MY_BITMAP covered_fields; /* union of fields covered by all scans */ + /* + Fraction of table records that satisfies conditions of all scans. + This is the number of full records that will be retrieved if a + non-index_only index intersection will be employed. + */ + double out_rows; + /* TRUE if covered_fields is a superset of needed_fields */ + bool is_covering; + + ha_rows index_records; /* sum(#records to look in indexes) */ + double index_scan_costs; /* SUM(cost of 'index-only' scans) */ + double total_cost; +} ROR_INTERSECT_INFO; + + +/* + Allocate a ROR_INTERSECT_INFO and initialize it to contain zero scans. 

  SYNOPSIS
    ror_intersect_init()
      param         Parameter from test_quick_select

  RETURN
    allocated structure
    NULL on error
*/

static
ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param)
{
  ROR_INTERSECT_INFO *info;
  uchar* buf;
  if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root,
                                              sizeof(ROR_INTERSECT_INFO))))
    return NULL;
  info->param= param;
  if (!(buf= (uchar*)alloc_root(param->mem_root, param->fields_bitmap_size)))
    return NULL;
  /* fields_bitmap_size is in bytes; bitmap_init takes bit count. */
  if (bitmap_init(&info->covered_fields, buf, param->fields_bitmap_size*8,
                  FALSE))
    return NULL;
  info->is_covering= FALSE;
  info->index_scan_costs= 0.0;
  info->index_records= 0;
  /* Start with "all table rows match"; scans added later shrink this. */
  info->out_rows= param->table->file->records;
  bitmap_clear_all(&info->covered_fields);
  return info;
}

/*
  Copy the contents of one ROR_INTERSECT_INFO into another.
  Both src and dst must have been created by ror_intersect_init() with the
  same param, so that the covered_fields bitmaps have equal sizes.
*/
void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src)
{
  dst->param= src->param;
  memcpy(dst->covered_fields.bitmap, src->covered_fields.bitmap,
         src->covered_fields.bitmap_size);
  dst->out_rows= src->out_rows;
  dst->is_covering= src->is_covering;
  dst->index_records= src->index_records;
  dst->index_scan_costs= src->index_scan_costs;
  dst->total_cost= src->total_cost;
}


/*
  Get selectivity of a ROR scan wrt ROR-intersection.

  SYNOPSIS
    ror_scan_selectivity()
      info  ROR-intersection
      scan  ROR scan

  NOTES
    Suppose we have a condition on several keys
    cond=k_11=c_11 AND k_12=c_12 AND ...  // parts of first key
         k_21=c_21 AND k_22=c_22 AND ... // parts of second key
          ...
         k_n1=c_n1 AND k_n3=c_n3 AND ... (1) //parts of the key used by *scan

    where k_ij may be the same as any k_pq (i.e. keys may have common parts).

    A full row is retrieved if entire condition holds.

    The recursive procedure for finding P(cond) is as follows:

    First step:
    Pick 1st part of 1st key and break conjunction (1) into two parts:
      cond= (k_11=c_11 AND R)

    Here R may still contain condition(s) equivalent to k_11=c_11.
    Nevertheless, the following holds:

      P(k_11=c_11 AND R) = P(k_11=c_11) * P(R | k_11=c_11).

    Mark k_11 as fixed field (and satisfied condition) F, save P(F),
    save R to be cond and proceed to recursion step.

    Recursion step:
    We have a set of (fixed fields/satisfied conditions) F, probability P(F),
    and remaining conjunction R
    Pick next key part on current key and its condition "k_ij=c_ij".
    We will add "k_ij=c_ij" into F and update P(F).
    Lets denote k_ij as t,  R = t AND R1, where R1 may still contain t. Then

     P((t AND R1)|F) = P(t|F) * P(R1|t|F) = P(t|F) * P(R1|(t AND F)) (2)

    (where '|' mean conditional probability, not "or")

    Consider the first multiplier in (2). One of the following holds:
    a) F contains condition on field used in t (i.e. t AND F = F).
      Then P(t|F) = 1

    b) F doesn't contain condition on field used in t. Then F and t are
     considered independent.

     P(t|F) = P(t|(fields_before_t_in_key AND other_fields)) =
          = P(t|fields_before_t_in_key).

     P(t|fields_before_t_in_key) = #records(fields_before_t_in_key) /
                                   #records(fields_before_t_in_key, t)

    The second multiplier is calculated by applying this step recursively.

  IMPLEMENTATION
    This function calculates the result of application of the "recursion step"
    described above for all fixed key members of a single key, accumulating set
    of covered fields, selectivity, etc.

    The calculation is conducted as follows:
    Lets denote #records(keypart1, ... keypartK) as n_k. We need to calculate

     n_{k1}      n_{k_2}
    --------- * ---------  * .... (3)
     n_{k1-1}    n_{k2_1}

    where k1,k2,... are key parts which fields were not yet marked as fixed
    ( this is result of application of option b) of the recursion step for
      parts of a single key).
    Since it is reasonable to expect that most of the fields are not marked
    as fixed, we calculate (3) as

                                  n_{i1}      n_{i_2}
    (3) = n_{max_key_part}  / (   --------- * ---------  * ....  )
                                  n_{i1-1}    n_{i2_1}

    where i1,i2, ..
are key parts that were already marked as fixed. + + In order to minimize number of expensive records_in_range calls we group + and reduce adjacent fractions. + + RETURN + Selectivity of given ROR scan. + +*/ + +static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info, + const ROR_SCAN_INFO *scan) +{ + double selectivity_mult= 1.0; + KEY_PART_INFO *key_part= info->param->table->key_info[scan->keynr].key_part; + byte key_val[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; /* key values tuple */ + char *key_ptr= (char*) key_val; + SEL_ARG *sel_arg, *tuple_arg= NULL; + bool cur_covered; + bool prev_covered= test(bitmap_is_set(&info->covered_fields, + key_part->fieldnr)); + key_range min_range; + key_range max_range; + min_range.key= (byte*) key_val; + min_range.flag= HA_READ_KEY_EXACT; + max_range.key= (byte*) key_val; + max_range.flag= HA_READ_AFTER_KEY; + ha_rows prev_records= info->param->table->file->records; + DBUG_ENTER("ror_intersect_selectivity"); + + for (sel_arg= scan->sel_arg; sel_arg; + sel_arg= sel_arg->next_key_part) + { + DBUG_PRINT("info",("sel_arg step")); + cur_covered= test(bitmap_is_set(&info->covered_fields, + key_part[sel_arg->part].fieldnr)); + if (cur_covered != prev_covered) + { + /* create (part1val, ..., part{n-1}val) tuple. 
*/ + ha_rows records; + if (!tuple_arg) + { + tuple_arg= scan->sel_arg; + /* Here we use the length of the first key part */ + tuple_arg->store_min(key_part->store_length, &key_ptr, 0); + } + while (tuple_arg->next_key_part != sel_arg) + { + tuple_arg= tuple_arg->next_key_part; + tuple_arg->store_min(key_part[tuple_arg->part].store_length, &key_ptr, 0); + } + min_range.length= max_range.length= ((char*) key_ptr - (char*) key_val); + records= (info->param->table->file-> + records_in_range(scan->keynr, &min_range, &max_range)); + if (cur_covered) + { + /* uncovered -> covered */ + double tmp= rows2double(records)/rows2double(prev_records); + DBUG_PRINT("info", ("Selectivity multiplier: %g", tmp)); + selectivity_mult *= tmp; + prev_records= HA_POS_ERROR; + } + else + { + /* covered -> uncovered */ + prev_records= records; + } + } + prev_covered= cur_covered; + } + if (!prev_covered) + { + double tmp= rows2double(info->param->table->quick_rows[scan->keynr]) / + rows2double(prev_records); + DBUG_PRINT("info", ("Selectivity multiplier: %g", tmp)); + selectivity_mult *= tmp; + } + DBUG_PRINT("info", ("Returning multiplier: %g", selectivity_mult)); + DBUG_RETURN(selectivity_mult); +} + + +/* + Check if adding a ROR scan to a ROR-intersection reduces its cost of + ROR-intersection and if yes, update parameters of ROR-intersection, + including its cost. + + SYNOPSIS + ror_intersect_add() + param Parameter from test_quick_select + info ROR-intersection structure to add the scan to. + ror_scan ROR scan info to add. + is_cpk_scan If TRUE, add the scan as CPK scan (this can be inferred + from other parameters and is passed separately only to + avoid duplicating the inference code) + + NOTES + Adding a ROR scan to ROR-intersect "makes sense" iff the cost of ROR- + intersection decreases. The cost of ROR-intersection is calculated as + follows: + + cost= SUM_i(key_scan_cost_i) + cost_of_full_rows_retrieval + + When we add a scan the first increases and the second decreases. 

    cost_of_full_rows_retrieval=
      (union of indexes used covers all needed fields) ?
        cost_of_sweep_read(E(rows_to_retrieve), rows_in_table) :
        0

    E(rows_to_retrieve) = #rows_in_table * ror_scan_selectivity(null, scan1) *
                           ror_scan_selectivity({scan1}, scan2) * ... *
                           ror_scan_selectivity({scan1,...}, scanN).
  RETURN
    TRUE   ROR scan added to ROR-intersection, cost updated.
    FALSE  It doesn't make sense to add this ROR scan to this ROR-intersection.
*/

static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
                              ROR_SCAN_INFO* ror_scan, bool is_cpk_scan)
{
  double selectivity_mult= 1.0;

  DBUG_ENTER("ror_intersect_add");
  DBUG_PRINT("info", ("Current out_rows= %g", info->out_rows));
  DBUG_PRINT("info", ("Adding scan on %s",
                      info->param->table->key_info[ror_scan->keynr].name));
  DBUG_PRINT("info", ("is_cpk_scan: %d",is_cpk_scan));

  selectivity_mult = ror_scan_selectivity(info, ror_scan);
  if (selectivity_mult == 1.0)
  {
    /* Don't add this scan if it doesn't improve selectivity. */
    DBUG_PRINT("info", ("The scan doesn't improve selectivity."));
    DBUG_RETURN(FALSE);
  }

  /* Shrink the expected row count before recomputing the sweep cost below. */
  info->out_rows *= selectivity_mult;
  DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));

  if (is_cpk_scan)
  {
    /*
      CPK scan is used to filter out rows. We apply filtering for
      each record of every scan. Assuming 1/TIME_FOR_COMPARE_ROWID
      per check this gives us:
    */
    info->index_scan_costs += rows2double(info->index_records) /
                              TIME_FOR_COMPARE_ROWID;
  }
  else
  {
    info->index_records += info->param->table->quick_rows[ror_scan->keynr];
    info->index_scan_costs += ror_scan->index_read_cost;
    bitmap_union(&info->covered_fields, &ror_scan->covered_fields);
    if (!info->is_covering && bitmap_is_subset(&info->param->needed_fields,
                                               &info->covered_fields))
    {
      DBUG_PRINT("info", ("ROR-intersect is covering now"));
      info->is_covering= TRUE;
    }
  }

  /* Total cost: index scans plus, for a non-covering set, the row sweep. */
  info->total_cost= info->index_scan_costs;
  DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
  if (!info->is_covering)
  {
    info->total_cost +=
      get_sweep_read_cost(info->param, double2rows(info->out_rows));
    DBUG_PRINT("info", ("info->total_cost= %g", info->total_cost));
  }
  DBUG_PRINT("info", ("New out_rows= %g", info->out_rows));
  DBUG_PRINT("info", ("New cost= %g, %scovering", info->total_cost,
                      info->is_covering?"" : "non-"));
  DBUG_RETURN(TRUE);
}


/*
  Get best ROR-intersection plan using non-covering ROR-intersection search
  algorithm. The returned plan may be covering.

  SYNOPSIS
    get_best_ror_intersect()
      param            Parameter from test_quick_select function.
      tree             Transformed restriction condition to be used to look
                       for ROR scans.
      read_time        Do not return read plans with cost > read_time.
      are_all_covering [out] set to TRUE if union of all scans covers all
                       fields needed by the query (and it is possible to build
                       a covering ROR-intersection)

  NOTES
    get_key_scans_params must be called before this function can be called.

    When this function is called by ROR-union construction algorithm it
    assumes it is building an uncovered ROR-intersection (and thus # of full
    records to be retrieved is wrong here). This is a hack.
+ + IMPLEMENTATION + The approximate best non-covering plan search algorithm is as follows: + + find_min_ror_intersection_scan() + { + R= select all ROR scans; + order R by (E(#records_matched) * key_record_length). + + S= first(R); -- set of scans that will be used for ROR-intersection + R= R-first(S); + min_cost= cost(S); + min_scan= make_scan(S); + while (R is not empty) + { + firstR= R - first(R); + if (!selectivity(S + firstR < selectivity(S))) + continue; + + S= S + first(R); + if (cost(S) < min_cost) + { + min_cost= cost(S); + min_scan= make_scan(S); + } + } + return min_scan; + } + + See ror_intersect_add function for ROR intersection costs. + + Special handling for Clustered PK scans + Clustered PK contains all table fields, so using it as a regular scan in + index intersection doesn't make sense: a range scan on CPK will be less + expensive in this case. + Clustered PK scan has special handling in ROR-intersection: it is not used + to retrieve rows, instead its condition is used to filter row references + we get from scans on other keys. + + RETURN + ROR-intersection table read plan + NULL if out of memory or no suitable plan found. +*/ + +static +TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree, + double read_time, + bool *are_all_covering) +{ + uint idx; + double min_cost= DBL_MAX; + DBUG_ENTER("get_best_ror_intersect"); + + if ((tree->n_ror_scans < 2) || !param->table->file->records) + DBUG_RETURN(NULL); + + /* + Step1: Collect ROR-able SEL_ARGs and create ROR_SCAN_INFO for each of + them. Also find and save clustered PK scan if there is one. + */ + ROR_SCAN_INFO **cur_ror_scan; + ROR_SCAN_INFO *cpk_scan= NULL; + uint cpk_no; + bool cpk_scan_used= FALSE; + + if (!(tree->ror_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)* + param->keys))) + return NULL; + cpk_no= ((param->table->file->primary_key_is_clustered()) ? 
+ param->table->s->primary_key : MAX_KEY); + + for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++) + { + ROR_SCAN_INFO *scan; + if (!tree->ror_scans_map.is_set(idx)) + continue; + if (!(scan= make_ror_scan(param, idx, tree->keys[idx]))) + return NULL; + if (param->real_keynr[idx] == cpk_no) + { + cpk_scan= scan; + tree->n_ror_scans--; + } + else + *(cur_ror_scan++)= scan; + } + + tree->ror_scans_end= cur_ror_scan; + DBUG_EXECUTE("info",print_ror_scans_arr(param->table, "original", + tree->ror_scans, + tree->ror_scans_end);); + /* + Ok, [ror_scans, ror_scans_end) is array of ptrs to initialized + ROR_SCAN_INFO's. + Step 2: Get best ROR-intersection using an approximate algorithm. + */ + qsort(tree->ror_scans, tree->n_ror_scans, sizeof(ROR_SCAN_INFO*), + (qsort_cmp)cmp_ror_scan_info); + DBUG_EXECUTE("info",print_ror_scans_arr(param->table, "ordered", + tree->ror_scans, + tree->ror_scans_end);); + + ROR_SCAN_INFO **intersect_scans; /* ROR scans used in index intersection */ + ROR_SCAN_INFO **intersect_scans_end; + if (!(intersect_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)* + tree->n_ror_scans))) + return NULL; + intersect_scans_end= intersect_scans; + + /* Create and incrementally update ROR intersection. 
*/ + ROR_INTERSECT_INFO *intersect, *intersect_best; + if (!(intersect= ror_intersect_init(param)) || + !(intersect_best= ror_intersect_init(param))) + return NULL; + + /* [intersect_scans,intersect_scans_best) will hold the best intersection */ + ROR_SCAN_INFO **intersect_scans_best; + cur_ror_scan= tree->ror_scans; + intersect_scans_best= intersect_scans; + while (cur_ror_scan != tree->ror_scans_end && !intersect->is_covering) + { + /* S= S + first(R); R= R - first(R); */ + if (!ror_intersect_add(intersect, *cur_ror_scan, FALSE)) + { + cur_ror_scan++; + continue; + } + + *(intersect_scans_end++)= *(cur_ror_scan++); + + if (intersect->total_cost < min_cost) + { + /* Local minimum found, save it */ + ror_intersect_cpy(intersect_best, intersect); + intersect_scans_best= intersect_scans_end; + min_cost = intersect->total_cost; + } + } + + if (intersect_scans_best == intersect_scans) + { + DBUG_PRINT("info", ("None of scans increase selectivity")); + DBUG_RETURN(NULL); + } + + DBUG_EXECUTE("info",print_ror_scans_arr(param->table, + "best ROR-intersection", + intersect_scans, + intersect_scans_best);); + + *are_all_covering= intersect->is_covering; + uint best_num= intersect_scans_best - intersect_scans; + ror_intersect_cpy(intersect, intersect_best); + + /* + Ok, found the best ROR-intersection of non-CPK key scans. + Check if we should add a CPK scan. If the obtained ROR-intersection is + covering, it doesn't make sense to add CPK scan. 
+ */ + if (cpk_scan && !intersect->is_covering) + { + if (ror_intersect_add(intersect, cpk_scan, TRUE) && + (intersect->total_cost < min_cost)) + { + cpk_scan_used= TRUE; + intersect_best= intersect; //just set pointer here + } + } + + /* Ok, return ROR-intersect plan if we have found one */ + TRP_ROR_INTERSECT *trp= NULL; + if (min_cost < read_time && (cpk_scan_used || best_num > 1)) + { + if (!(trp= new (param->mem_root) TRP_ROR_INTERSECT)) + DBUG_RETURN(trp); + if (!(trp->first_scan= + (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)*best_num))) + DBUG_RETURN(NULL); + memcpy(trp->first_scan, intersect_scans, best_num*sizeof(ROR_SCAN_INFO*)); + trp->last_scan= trp->first_scan + best_num; + trp->is_covering= intersect_best->is_covering; + trp->read_cost= intersect_best->total_cost; + /* Prevent divisons by zero */ + ha_rows best_rows = double2rows(intersect_best->out_rows); + if (!best_rows) + best_rows= 1; + trp->records= best_rows; + trp->index_scan_costs= intersect_best->index_scan_costs; + trp->cpk_scan= cpk_scan_used? cpk_scan: NULL; + DBUG_PRINT("info", ("Returning non-covering ROR-intersect plan:" + "cost %g, records %lu", + trp->read_cost, (ulong) trp->records)); + } + DBUG_RETURN(trp); +} + + +/* + Get best covering ROR-intersection. + SYNOPSIS + get_best_covering_ror_intersect() + param Parameter from test_quick_select function. + tree SEL_TREE with sets of intervals for different keys. + read_time Don't return table read plans with cost > read_time. + + RETURN + Best covering ROR-intersection plan + NULL if no plan found. + + NOTES + get_best_ror_intersect must be called for a tree before calling this + function for it. + This function invalidates tree->ror_scans member values. 

    The following approximate algorithm is used:
    I=set of all covering indexes
    F=set of all fields to cover
    S={}

    do {
      Order I by (#covered fields in F desc,
                  #components asc,
                  number of first not covered component asc);
      F=F-covered by first(I);
      S=S+first(I);
      I=I-first(I);
    } while F is not empty.
*/

static
TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
                                                   SEL_TREE *tree,
                                                   double read_time)
{
  ROR_SCAN_INFO **ror_scan_mark;
  ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end;
  DBUG_ENTER("get_best_covering_ror_intersect");
  uint nbits= param->fields_bitmap_size*8;

  for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
    (*scan)->key_components=
      param->table->key_info[(*scan)->keynr].key_parts;

  /*
    Run covering-ROR-search algorithm.
    Assume set I is [ror_scan .. ror_scans_end)
  */

  /*I=set of all covering indexes */
  ror_scan_mark= tree->ror_scans;

  /* tmp_covered_fields' buffer is allocated lazily and reused across calls. */
  MY_BITMAP *covered_fields= &param->tmp_covered_fields;
  if (!covered_fields->bitmap)
    covered_fields->bitmap= (uchar*)alloc_root(param->mem_root,
                                               param->fields_bitmap_size);
  if (!covered_fields->bitmap ||
      bitmap_init(covered_fields, covered_fields->bitmap, nbits, FALSE))
    DBUG_RETURN(0);
  bitmap_clear_all(covered_fields);

  double total_cost= 0.0f;
  ha_rows records=0;
  bool all_covered;

  DBUG_PRINT("info", ("Building covering ROR-intersection"));
  DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
                                           "building covering ROR-I",
                                           ror_scan_mark, ror_scans_end););
  do {
    /*
      Update changed sorting info:
        #covered fields,
        number of first not covered component
      Calculate and save these values for each of remaining scans.
    */
    for (ROR_SCAN_INFO **scan= ror_scan_mark; scan != ror_scans_end; ++scan)
    {
      /* Note: destructive — this is why tree->ror_scans is invalidated. */
      bitmap_subtract(&(*scan)->covered_fields, covered_fields);
      (*scan)->used_fields_covered=
        bitmap_bits_set(&(*scan)->covered_fields);
      (*scan)->first_uncovered_field=
        bitmap_get_first(&(*scan)->covered_fields);
    }

    qsort(ror_scan_mark, ror_scans_end-ror_scan_mark, sizeof(ROR_SCAN_INFO*),
          (qsort_cmp)cmp_ror_scan_info_covering);

    DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
                                             "remaining scans",
                                             ror_scan_mark, ror_scans_end););

    /* I=I-first(I) */
    total_cost += (*ror_scan_mark)->index_read_cost;
    records += (*ror_scan_mark)->records;
    DBUG_PRINT("info", ("Adding scan on %s",
                        param->table->key_info[(*ror_scan_mark)->keynr].name));
    if (total_cost > read_time)
      DBUG_RETURN(NULL);
    /* F=F-covered by first(I) */
    bitmap_union(covered_fields, &(*ror_scan_mark)->covered_fields);
    all_covered= bitmap_is_subset(&param->needed_fields, covered_fields);
  } while ((++ror_scan_mark < ror_scans_end) && !all_covered);

  if (!all_covered || (ror_scan_mark - tree->ror_scans) == 1)
    DBUG_RETURN(NULL);

  /*
    Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with
    cost total_cost.
  */
  DBUG_PRINT("info", ("Covering ROR-intersect scans cost: %g", total_cost));
  DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
                                           "creating covering ROR-intersect",
                                           tree->ror_scans, ror_scan_mark););

  /* Add priority queue use cost. */
  total_cost += rows2double(records)*
                log((double)(ror_scan_mark - tree->ror_scans)) /
                (TIME_FOR_COMPARE_ROWID * M_LN2);
  DBUG_PRINT("info", ("Covering ROR-intersect full cost: %g", total_cost));

  if (total_cost > read_time)
    DBUG_RETURN(NULL);

  TRP_ROR_INTERSECT *trp;
  if (!(trp= new (param->mem_root) TRP_ROR_INTERSECT))
    DBUG_RETURN(trp);
  uint best_num= (ror_scan_mark - tree->ror_scans);
  if (!(trp->first_scan= (ROR_SCAN_INFO**)alloc_root(param->mem_root,
                                                     sizeof(ROR_SCAN_INFO*)*
                                                     best_num)))
    DBUG_RETURN(NULL);
  memcpy(trp->first_scan, tree->ror_scans, best_num*sizeof(ROR_SCAN_INFO*));
  trp->last_scan=  trp->first_scan + best_num;
  trp->is_covering= TRUE;
  trp->read_cost= total_cost;
  trp->records= records;
  trp->cpk_scan= NULL;

  DBUG_PRINT("info",
             ("Returning covering ROR-intersect plan: cost %g, records %lu",
              trp->read_cost, (ulong) trp->records));
  DBUG_RETURN(trp);
}


/*
  Get best "range" table read plan for given SEL_TREE.
  Also update PARAM members and store ROR scans info in the SEL_TREE.
  SYNOPSIS
    get_key_scans_params
      param        parameters from test_quick_select
      tree         make range select for this SEL_TREE
      index_read_must_be_used if TRUE, assume 'index only' option will be set
                             (except for clustered PK indexes)
      read_time    don't create read plans with cost > read_time.
  RETURN
    Best range read plan
    NULL if no plan found or error occurred
*/

static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree,
                                       bool index_read_must_be_used,
                                       double read_time)
{
  int idx;
  SEL_ARG **key,**end, **key_to_read= NULL;
  ha_rows best_records;
  TRP_RANGE* read_plan= NULL;
  bool pk_is_clustered= param->table->file->primary_key_is_clustered();
  DBUG_ENTER("get_key_scans_params");
  LINT_INIT(best_records); /* protected by key_to_read */
  /*
    Note that there may be trees that have type SEL_TREE::KEY but contain no
    key reads at all, e.g. tree for expression "key1 is not null" where key1
    is defined as "not null".
  */
  DBUG_EXECUTE("info", print_sel_tree(param, tree, &tree->keys_map,
                                      "tree scans"););
  tree->ror_scans_map.clear_all();
  tree->n_ror_scans= 0;
  for (idx= 0,key=tree->keys, end=key+param->keys;
       key != end ;
       key++,idx++)
  {
    ha_rows found_records;
    double found_read_time;
    if (*key)
    {
      uint keynr= param->real_keynr[idx];
      if ((*key)->type == SEL_ARG::MAYBE_KEY ||
          (*key)->maybe_flag)
        param->needed_reg->set_bit(keynr);

      bool read_index_only= index_read_must_be_used ? TRUE :
                            (bool) param->table->used_keys.is_set(keynr);

      /* Side effect: check_quick_select also sets param->is_ror_scan. */
      found_records= check_quick_select(param, idx, *key);
      if (param->is_ror_scan)
      {
        tree->n_ror_scans++;
        tree->ror_scans_map.set_bit(idx);
      }
      double cpu_cost= (double) found_records / TIME_FOR_COMPARE;
      if (found_records != HA_POS_ERROR && found_records > 2 &&
          read_index_only &&
          (param->table->file->index_flags(keynr, param->max_key_part,1) &
           HA_KEYREAD_ONLY) &&
          !(pk_is_clustered && keynr == param->table->s->primary_key))
      {
        /*
          We can resolve this by only reading through this key.
          0.01 is added to avoid races between range and 'index' scan.
        */
        found_read_time= get_index_only_read_time(param,found_records,keynr) +
                         cpu_cost + 0.01;
      }
      else
      {
        /*
          cost(read_through_index) = cost(disk_io) + cost(row_in_range_checks)
          The row_in_range check is in QUICK_RANGE_SELECT::cmp_next function.
        */
        found_read_time= param->table->file->read_time(keynr,
                                                       param->range_count,
                                                       found_records) +
                         cpu_cost + 0.01;
      }
      DBUG_PRINT("info",("key %s: found_read_time: %g (cur. read_time: %g)",
                         param->table->key_info[keynr].name, found_read_time,
                         read_time));

      if (read_time > found_read_time && found_records != HA_POS_ERROR
          /*|| read_time == DBL_MAX*/ )
      {
        read_time=    found_read_time;
        best_records= found_records;
        key_to_read=  key;
      }

    }
  }

  DBUG_EXECUTE("info", print_sel_tree(param, tree, &tree->ror_scans_map,
                                      "ROR scans"););
  if (key_to_read)
  {
    idx= key_to_read - tree->keys;
    if ((read_plan= new (param->mem_root) TRP_RANGE(*key_to_read, idx)))
    {
      read_plan->records= best_records;
      read_plan->is_ror= tree->ror_scans_map.is_set(idx);
      read_plan->read_cost= read_time;
      DBUG_PRINT("info",
                 ("Returning range plan for key %s, cost %g, records %lu",
                  param->table->key_info[param->real_keynr[idx]].name,
                  read_plan->read_cost, (ulong) read_plan->records));
    }
  }
  else
    DBUG_PRINT("info", ("No 'range' table read plan found"));

  DBUG_RETURN(read_plan);
}


QUICK_SELECT_I *TRP_INDEX_MERGE::make_quick(PARAM *param,
                                            bool retrieve_full_rows,
                                            MEM_ROOT *parent_alloc)
{
  QUICK_INDEX_MERGE_SELECT *quick_imerge;
  QUICK_RANGE_SELECT *quick;
  /* index_merge always retrieves full rows, ignore retrieve_full_rows */
  if (!(quick_imerge= new QUICK_INDEX_MERGE_SELECT(param->thd, param->table)))
    return NULL;

  quick_imerge->records= records;
  quick_imerge->read_time= read_cost;
  for (TRP_RANGE **range_scan= range_scans; range_scan != range_scans_end;
       range_scan++)
  {
    if (!(quick= (QUICK_RANGE_SELECT*)
          ((*range_scan)->make_quick(param, FALSE, &quick_imerge->alloc)))||
        quick_imerge->push_quick_back(quick))
    {
      /*
        On failure delete the un-pushed quick (harmless if NULL) and the
        merge select, which owns the quicks pushed so far.
      */
      delete quick;
      delete quick_imerge;
      return NULL;
    }
  }
  return quick_imerge;
}

QUICK_SELECT_I *TRP_ROR_INTERSECT::make_quick(PARAM *param,
                                              bool retrieve_full_rows,
                                              MEM_ROOT *parent_alloc)
{
  QUICK_ROR_INTERSECT_SELECT *quick_intrsect;
  QUICK_RANGE_SELECT *quick;
  DBUG_ENTER("TRP_ROR_INTERSECT::make_quick");
  MEM_ROOT *alloc;

  if ((quick_intrsect=
         new
 QUICK_ROR_INTERSECT_SELECT(param->thd, param->table,
                                    retrieve_full_rows? (!is_covering):FALSE,
                                    parent_alloc)))
  {
    DBUG_EXECUTE("info", print_ror_scans_arr(param->table,
                                             "creating ROR-intersect",
                                             first_scan, last_scan););
    alloc= parent_alloc? parent_alloc: &quick_intrsect->alloc;
    for (; first_scan != last_scan;++first_scan)
    {
      if (!(quick= get_quick_select(param, (*first_scan)->idx,
                                    (*first_scan)->sel_arg, alloc)) ||
          quick_intrsect->push_quick_back(quick))
      {
        delete quick_intrsect;
        DBUG_RETURN(NULL);
      }
    }
    if (cpk_scan)
    {
      if (!(quick= get_quick_select(param, cpk_scan->idx,
                                    cpk_scan->sel_arg, alloc)))
      {
        delete quick_intrsect;
        DBUG_RETURN(NULL);
      }
      /* CPK quick is only used as a filter, it never reads rows itself. */
      quick->file= NULL;
      quick_intrsect->cpk_quick= quick;
    }
    quick_intrsect->records= records;
    quick_intrsect->read_time= read_cost;
  }
  DBUG_RETURN(quick_intrsect);
}


QUICK_SELECT_I *TRP_ROR_UNION::make_quick(PARAM *param,
                                          bool retrieve_full_rows,
                                          MEM_ROOT *parent_alloc)
{
  QUICK_ROR_UNION_SELECT *quick_roru;
  TABLE_READ_PLAN **scan;
  QUICK_SELECT_I *quick;
  DBUG_ENTER("TRP_ROR_UNION::make_quick");
  /*
    It is impossible to construct a ROR-union that will not retrieve full
    rows, ignore retrieve_full_rows parameter.
  */
  if ((quick_roru= new QUICK_ROR_UNION_SELECT(param->thd, param->table)))
  {
    for (scan= first_ror; scan != last_ror; scan++)
    {
      /*
        NOTE(review): unlike TRP_INDEX_MERGE::make_quick, the failure path
        here returns without deleting quick_roru — looks like a leak of the
        union select (and possibly the last quick); confirm ownership.
      */
      if (!(quick= (*scan)->make_quick(param, FALSE, &quick_roru->alloc)) ||
          quick_roru->push_quick_back(quick))
        DBUG_RETURN(NULL);
    }
    quick_roru->records= records;
    quick_roru->read_time= read_cost;
  }
  DBUG_RETURN(quick_roru);
}


/*
  Build a SEL_TREE for <> or NOT BETWEEN predicate

  SYNOPSIS
    get_ne_mm_tree()
      param       PARAM from SQL_SELECT::test_quick_select
      cond_func   item for the predicate
      field       field in the predicate
      lt_value    constant that field should be smaller
      gt_value    constant that field should be greater
      cmp_type    compare type for the field

  RETURN
    #  Pointer to tree built tree
    0  on error
*/

static SEL_TREE *get_ne_mm_tree(PARAM *param, Item_func *cond_func,
                                Field *field,
                                Item *lt_value, Item *gt_value,
                                Item_result cmp_type)
{
  SEL_TREE *tree;
  /* f <> v is represented as (f < v) OR (f > v). */
  tree= get_mm_parts(param, cond_func, field, Item_func::LT_FUNC,
                     lt_value, cmp_type);
  if (tree)
  {
    tree= tree_or(param, tree, get_mm_parts(param, cond_func, field,
                                            Item_func::GT_FUNC,
                                            gt_value, cmp_type));
  }
  return tree;
}


/*
  Build a SEL_TREE for a simple predicate

  SYNOPSIS
    get_func_mm_tree()
      param       PARAM from SQL_SELECT::test_quick_select
      cond_func   item for the predicate
      field       field in the predicate
      value       constant in the predicate
      cmp_type    compare type for the field
      inv         TRUE <> NOT cond_func is considered
                  (makes sense only when cond_func is BETWEEN or IN)

  RETURN
    Pointer to the tree built tree
*/

static SEL_TREE *get_func_mm_tree(PARAM *param, Item_func *cond_func,
                                  Field *field, Item *value,
                                  Item_result cmp_type, bool inv)
{
  SEL_TREE *tree= 0;
  DBUG_ENTER("get_func_mm_tree");

  switch (cond_func->functype()) {

  case Item_func::NE_FUNC:
    tree= get_ne_mm_tree(param, cond_func, field, value, value, cmp_type);
    break;

  case Item_func::BETWEEN:
  {
    if (!value)
    {
      if
 (inv)
      {
        /* NOT BETWEEN: (f < c1) OR (f > c2) */
        tree= get_ne_mm_tree(param, cond_func, field, cond_func->arguments()[1],
                             cond_func->arguments()[2], cmp_type);
      }
      else
      {
        /* BETWEEN: (f >= c1) AND (f <= c2) */
        tree= get_mm_parts(param, cond_func, field, Item_func::GE_FUNC,
                           cond_func->arguments()[1],cmp_type);
        if (tree)
        {
          tree= tree_and(param, tree, get_mm_parts(param, cond_func, field,
                                                   Item_func::LE_FUNC,
                                                   cond_func->arguments()[2],
                                                   cmp_type));
        }
      }
    }
    else
      tree= get_mm_parts(param, cond_func, field,
                         (inv ?
                          (value == (Item*)1 ? Item_func::GT_FUNC :
                                               Item_func::LT_FUNC):
                          (value == (Item*)1 ? Item_func::LE_FUNC :
                                               Item_func::GE_FUNC)),
                         cond_func->arguments()[0], cmp_type);
    break;
  }
  case Item_func::IN_FUNC:
  {
    Item_func_in *func=(Item_func_in*) cond_func;

    if (inv)
    {
      if (func->array && func->cmp_type != ROW_RESULT)
      {
        /*
          We get here for conditions in form "t.key NOT IN (c1, c2, ...)",
          where c{i} are constants. Our goal is to produce a SEL_TREE that
          represents intervals:

          ($MIN<t.key<c1) OR (c1<t.key<c2) OR (c2<t.key<c3) OR ... (*)

          where $MIN is either "-inf" or NULL.

          The most straightforward way to produce it is to convert NOT IN
          into "(t.key != c1) AND (t.key != c2) AND ... " and let the range
          analyzer to build SEL_TREE from that. The problem is that the
          range analyzer will use O(N^2) memory (which is probably a bug),
          and people do use big NOT IN lists (e.g. see BUG#15872, BUG#21282),
          will run out of memory.

          Another problem with big lists like (*) is that a big list is
          unlikely to produce a good "range" access, while considering that
          range access will require expensive CPU calculations (and for
          MyISAM even index accesses). In short, big NOT IN lists are rarely
          worth analyzing.

          Considering the above, we'll handle NOT IN as follows:
          * if the number of entries in the NOT IN list is less than
            NOT_IN_IGNORE_THRESHOLD, construct the SEL_TREE (*) manually.
          * Otherwise, don't produce a SEL_TREE.
        */
#define NOT_IN_IGNORE_THRESHOLD 1000
        MEM_ROOT *tmp_root= param->mem_root;
        param->thd->mem_root= param->old_root;
        /*
          Create one Item_type constant object. We'll need it as
          get_mm_parts only accepts constant values wrapped in Item_Type
          objects.
          We create the Item on param->mem_root which points to
          per-statement mem_root (while thd->mem_root is currently pointing
          to mem_root local to range optimizer).
        */
        Item *value_item= func->array->create_item();
        param->thd->mem_root= tmp_root;

        if (func->array->count > NOT_IN_IGNORE_THRESHOLD || !value_item)
          break;

        /* Get a SEL_TREE for "(-inf|NULL) < X < c_0" interval.  */
        uint i=0;
        do
        {
          func->array->value_to_item(i, value_item);
          tree= get_mm_parts(param, cond_func, field, Item_func::LT_FUNC,
                             value_item, cmp_type);
          if (!tree)
            break;
          i++;
        } while (i < func->array->count && tree->type == SEL_TREE::IMPOSSIBLE);

        if (!tree || tree->type == SEL_TREE::IMPOSSIBLE)
        {
          /* We get here in cases like "t.unsigned NOT IN (-1,-2,-3) */
          tree= NULL;
          break;
        }
        SEL_TREE *tree2;
        for (; i < func->array->count; i++)
        {
          /* Skip duplicate list elements (array is sorted). */
          if (func->array->compare_elems(i, i-1))
          {
            /* Get a SEL_TREE for "-inf < X < c_i" interval */
            func->array->value_to_item(i, value_item);
            tree2= get_mm_parts(param, cond_func, field, Item_func::LT_FUNC,
                                value_item, cmp_type);
            if (!tree2)
            {
              tree= NULL;
              break;
            }

            /* Change all intervals to be "c_{i-1} < X < c_i" */
            for (uint idx= 0; idx < param->keys; idx++)
            {
              SEL_ARG *new_interval, *last_val;
              if (((new_interval= tree2->keys[idx])) &&
                  (tree->keys[idx]) &&
                  ((last_val= tree->keys[idx]->last())))
              {
                new_interval->min_value= last_val->max_value;
                new_interval->min_flag= NEAR_MIN;
              }
            }
            /*
              The following doesn't try to allocate memory so no need to
              check for NULL.
            */
            tree= tree_or(param, tree, tree2);
          }
        }

        if (tree && tree->type != SEL_TREE::IMPOSSIBLE)
        {
          /*
            Get the SEL_TREE for the last "c_last < X < +inf" interval
            (value_item contains c_last already)
          */
          tree2= get_mm_parts(param, cond_func, field, Item_func::GT_FUNC,
                              value_item, cmp_type);
          tree= tree_or(param, tree, tree2);
        }
      }
      else
      {
        /* Fallback: expand NOT IN into (f != c1) AND (f != c2) AND ... */
        tree= get_ne_mm_tree(param, cond_func, field,
                             func->arguments()[1], func->arguments()[1],
                             cmp_type);
        if (tree)
        {
          Item **arg, **end;
          for (arg= func->arguments()+2, end= arg+func->argument_count()-2;
               arg < end ; arg++)
          {
            tree=  tree_and(param, tree, get_ne_mm_tree(param, cond_func, field,
                                                        *arg, *arg, cmp_type));
          }
        }
      }
    }
    else
    {
      /* IN: union of (f = c_i) for every list element. */
      tree= get_mm_parts(param, cond_func, field, Item_func::EQ_FUNC,
                         func->arguments()[1], cmp_type);
      if (tree)
      {
        Item **arg, **end;
        for (arg= func->arguments()+2, end= arg+func->argument_count()-2;
             arg < end ; arg++)
        {
          tree= tree_or(param, tree, get_mm_parts(param, cond_func, field,
                                                  Item_func::EQ_FUNC,
                                                  *arg, cmp_type));
        }
      }
    }
    break;
  }
  default:
  {
    /*
       Here the function for the following predicates are processed:
       <, <=, =, >=, >, LIKE, IS NULL, IS NOT NULL.
       If the predicate is of the form (value op field) it is handled
       as the equivalent predicate (field rev_op value), e.g.
       2 <= a is handled as a >= 2.
    */
    Item_func::Functype func_type=
      (value != cond_func->arguments()[0]) ? cond_func->functype() :
        ((Item_bool_func2*) cond_func)->rev_functype();
    tree= get_mm_parts(param, cond_func, field, func_type, value, cmp_type);
  }
  }

  DBUG_RETURN(tree);
}


/*
  Build conjunction of all SEL_TREEs for a simple predicate applying equalities

  SYNOPSIS
    get_full_func_mm_tree()
      param       PARAM from SQL_SELECT::test_quick_select
      cond_func   item for the predicate
      field_item  field in the predicate
      value       constant in the predicate
                  (for BETWEEN it contains the number of the field argument,
                   for IN it's always 0)
      inv         TRUE <> NOT cond_func is considered
                  (makes sense only when cond_func is BETWEEN or IN)

  DESCRIPTION
    For a simple SARGable predicate of the form (f op c), where f is a field and
    c is a constant, the function builds a conjunction of all SEL_TREES that can
    be obtained by the substitution of f for all different fields equal to f.

  NOTES
    If the WHERE condition contains a predicate (fi op c),
    then not only SEL_TREE for this predicate is built, but
    the trees for the results of substitution of fi for
    each fj belonging to the same multiple equality as fi
    are built as well.
    E.g. for WHERE t1.a=t2.a AND t2.a > 10
    a SEL_TREE for t2.a > 10 will be built for quick select from t2
    and
    a SEL_TREE for t1.a > 10 will be built for quick select from t1.

    A BETWEEN predicate of the form (fi [NOT] BETWEEN c1 AND c2) is treated
    in a similar way: we build a conjunction of trees for the results
    of all substitutions of fi for equal fj.
    Yet a predicate of the form (c BETWEEN f1i AND f2i) is processed
    differently. It is considered as a conjunction of two SARGable
    predicates (f1i <= c) and (f2i <=c) and the function get_full_func_mm_tree
    is called for each of them separately producing trees for
       AND j (f1j <=c ) and AND j (f2j <= c)
    After this these two trees are united in one conjunctive tree.
+ It's easy to see that the same tree is obtained for + AND j,k (f1j <=c AND f2k<=c) + which is equivalent to + AND j,k (c BETWEEN f1j AND f2k). + The validity of the processing of the predicate (c NOT BETWEEN f1i AND f2i) + which equivalent to (f1i > c OR f2i < c) is not so obvious. Here the + function get_full_func_mm_tree is called for (f1i > c) and (f2i < c) + producing trees for AND j (f1j > c) and AND j (f2j < c). Then this two + trees are united in one OR-tree. The expression + (AND j (f1j > c) OR AND j (f2j < c) + is equivalent to the expression + AND j,k (f1j > c OR f2k < c) + which is just a translation of + AND j,k (c NOT BETWEEN f1j AND f2k) + + In the cases when one of the items f1, f2 is a constant c1 we do not create + a tree for it at all. It works for BETWEEN predicates but does not + work for NOT BETWEEN predicates as we have to evaluate the expression + with it. If it is TRUE then the other tree can be completely ignored. + We do not do it now and no trees are built in these cases for + NOT BETWEEN predicates. + + As to IN predicates only ones of the form (f IN (c1,...,cn)), + where f1 is a field and c1,...,cn are constant, are considered as + SARGable. We never try to narrow the index scan using predicates of + the form (c IN (c1,...,f,...,cn)). 
+ + RETURN + Pointer to the tree representing the built conjunction of SEL_TREEs +*/ + +static SEL_TREE *get_full_func_mm_tree(PARAM *param, Item_func *cond_func, + Item_field *field_item, Item *value, + bool inv) +{ + SEL_TREE *tree= 0; + SEL_TREE *ftree= 0; + table_map ref_tables= 0; + table_map param_comp= ~(param->prev_tables | param->read_tables | + param->current_table); + DBUG_ENTER("get_full_func_mm_tree"); + + for (uint i= 0; i < cond_func->arg_count; i++) + { + Item *arg= cond_func->arguments()[i]->real_item(); + if (arg != field_item) + ref_tables|= arg->used_tables(); + } + Field *field= field_item->field; + Item_result cmp_type= field->cmp_type(); + if (!((ref_tables | field->table->map) & param_comp)) + ftree= get_func_mm_tree(param, cond_func, field, value, cmp_type, inv); + Item_equal *item_equal= field_item->item_equal; + if (item_equal) + { + Item_equal_iterator it(*item_equal); + Item_field *item; + while ((item= it++)) + { + Field *f= item->field; + if (field->eq(f)) + continue; + if (!((ref_tables | f->table->map) & param_comp)) + { + tree= get_func_mm_tree(param, cond_func, f, value, cmp_type, inv); + ftree= !ftree ? tree : tree_and(param, ftree, tree); + } + } + } + DBUG_RETURN(ftree); +} + /* make a select tree of all keys in condition */ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) { SEL_TREE *tree=0; + SEL_TREE *ftree= 0; + Item_field *field_item= 0; + bool inv= FALSE; + Item *value= 0; DBUG_ENTER("get_mm_tree"); if (cond->type() == Item::COND_ITEM) @@ -1118,14 +4053,26 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) /* Here when simple cond */ if (cond->const_item()) { - if (cond->val_int()) - DBUG_RETURN(new SEL_TREE(SEL_TREE::ALWAYS)); - DBUG_RETURN(new SEL_TREE(SEL_TREE::IMPOSSIBLE)); + /* + During the cond->val_int() evaluation we can come across a subselect + item which may allocate memory on the thd->mem_root and assumes + all the memory allocated has the same life span as the subselect + item itself. 
So we have to restore the thread's mem_root here. + */ + MEM_ROOT *tmp_root= param->mem_root; + param->thd->mem_root= param->old_root; + tree= cond->val_int() ? new(tmp_root) SEL_TREE(SEL_TREE::ALWAYS) : + new(tmp_root) SEL_TREE(SEL_TREE::IMPOSSIBLE); + param->thd->mem_root= tmp_root; + DBUG_RETURN(tree); } - table_map ref_tables=cond->used_tables(); + table_map ref_tables= 0; + table_map param_comp= ~(param->prev_tables | param->read_tables | + param->current_table); if (cond->type() != Item::FUNC_ITEM) { // Should be a field + ref_tables= cond->used_tables(); if ((ref_tables & param->current_table) || (ref_tables & ~(param->prev_tables | param->read_tables))) DBUG_RETURN(0); @@ -1133,103 +4080,109 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) } Item_func *cond_func= (Item_func*) cond; - if (cond_func->select_optimize() == Item_func::OPTIMIZE_NONE) - DBUG_RETURN(0); // Can't be calculated + if (cond_func->functype() == Item_func::BETWEEN || + cond_func->functype() == Item_func::IN_FUNC) + inv= ((Item_func_opt_neg *) cond_func)->negated; + else if (cond_func->select_optimize() == Item_func::OPTIMIZE_NONE) + DBUG_RETURN(0); param->cond= cond; - if (cond_func->functype() == Item_func::BETWEEN) + switch (cond_func->functype()) { + case Item_func::BETWEEN: + if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM) + { + field_item= (Item_field*) (cond_func->arguments()[0]->real_item()); + ftree= get_full_func_mm_tree(param, cond_func, field_item, NULL, inv); + } + + /* + Concerning the code below see the NOTES section in + the comments for the function get_full_func_mm_tree() + */ + for (uint i= 1 ; i < cond_func->arg_count ; i++) + { + + if (cond_func->arguments()[i]->real_item()->type() == Item::FIELD_ITEM) + { + field_item= (Item_field*) (cond_func->arguments()[i]->real_item()); + SEL_TREE *tmp= get_full_func_mm_tree(param, cond_func, + field_item, (Item*) i, inv); + if (inv) + tree= !tree ? 
tmp : tree_or(param, tree, tmp); + else + tree= tree_and(param, tree, tmp); + } + else if (inv) + { + tree= 0; + break; + } + } + + ftree = tree_and(param, ftree, tree); + break; + case Item_func::IN_FUNC: + { + Item_func_in *func=(Item_func_in*) cond_func; + if (func->key_item()->real_item()->type() != Item::FIELD_ITEM) + DBUG_RETURN(0); + field_item= (Item_field*) (func->key_item()->real_item()); + ftree= get_full_func_mm_tree(param, cond_func, field_item, NULL, inv); + break; + } + case Item_func::MULT_EQUAL_FUNC: { - if (!((Item_func_between *)(cond_func))->negated && - cond_func->arguments()[0]->type() == Item::FIELD_ITEM) + Item_equal *item_equal= (Item_equal *) cond; + if (!(value= item_equal->get_const())) + DBUG_RETURN(0); + Item_equal_iterator it(*item_equal); + ref_tables= value->used_tables(); + while ((field_item= it++)) { - Field *field=((Item_field*) (cond_func->arguments()[0]))->field; - Item_result cmp_type=field->cmp_type(); - DBUG_RETURN(tree_and(param, - get_mm_parts(param, cond_func, field, - Item_func::GE_FUNC, - cond_func->arguments()[1], cmp_type), - get_mm_parts(param, cond_func, field, - Item_func::LE_FUNC, - cond_func->arguments()[2], cmp_type))); + Field *field= field_item->field; + Item_result cmp_type= field->cmp_type(); + if (!((ref_tables | field->table->map) & param_comp)) + { + tree= get_mm_parts(param, cond, field, Item_func::EQ_FUNC, + value,cmp_type); + ftree= !ftree ? 
tree : tree_and(param, ftree, tree); + } } - DBUG_RETURN(0); + + DBUG_RETURN(ftree); } - if (cond_func->functype() == Item_func::IN_FUNC) - { // COND OR - Item_func_in *func=(Item_func_in*) cond_func; - if (!func->negated && func->key_item()->type() == Item::FIELD_ITEM) - { - Field *field=((Item_field*) (func->key_item()))->field; - Item_result cmp_type=field->cmp_type(); - tree= get_mm_parts(param,cond_func,field,Item_func::EQ_FUNC, - func->arguments()[1],cmp_type); - if (!tree) - DBUG_RETURN(tree); // Not key field - for (uint i=2 ; i < func->argument_count(); i++) - { - SEL_TREE *new_tree=get_mm_parts(param,cond_func,field, - Item_func::EQ_FUNC, - func->arguments()[i],cmp_type); - tree=tree_or(param,tree,new_tree); - } - DBUG_RETURN(tree); - } - DBUG_RETURN(0); // Can't optimize this IN - } - - if (ref_tables & ~(param->prev_tables | param->read_tables | - param->current_table)) - DBUG_RETURN(0); // Can't be calculated yet - if (!(ref_tables & param->current_table)) - DBUG_RETURN(new SEL_TREE(SEL_TREE::MAYBE)); // This may be false or true - - /* check field op const */ - /* btw, ft_func's arguments()[0] isn't FIELD_ITEM. SerG*/ - if (cond_func->arguments()[0]->type() == Item::FIELD_ITEM) - { - tree= get_mm_parts(param, cond_func, - ((Item_field*) (cond_func->arguments()[0]))->field, - cond_func->functype(), - cond_func->arg_count > 1 ? 
cond_func->arguments()[1] : - 0, - ((Item_field*) (cond_func->arguments()[0]))->field-> - cmp_type()); - } - /* check const op field */ - if (!tree && - cond_func->have_rev_func() && - cond_func->arguments()[1]->type() == Item::FIELD_ITEM) - { - DBUG_RETURN(get_mm_parts(param, cond_func, - ((Item_field*) - (cond_func->arguments()[1]))->field, - ((Item_bool_func2*) cond_func)->rev_functype(), - cond_func->arguments()[0], - ((Item_field*) - (cond_func->arguments()[1]))->field->cmp_type() - )); + default: + if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM) + { + field_item= (Item_field*) (cond_func->arguments()[0]->real_item()); + value= cond_func->arg_count > 1 ? cond_func->arguments()[1] : 0; + } + else if (cond_func->have_rev_func() && + cond_func->arguments()[1]->real_item()->type() == + Item::FIELD_ITEM) + { + field_item= (Item_field*) (cond_func->arguments()[1]->real_item()); + value= cond_func->arguments()[0]; + } + else + DBUG_RETURN(0); + ftree= get_full_func_mm_tree(param, cond_func, field_item, value, inv); } - DBUG_RETURN(tree); + + DBUG_RETURN(ftree); } static SEL_TREE * get_mm_parts(PARAM *param, COND *cond_func, Field *field, - Item_func::Functype type, + Item_func::Functype type, Item *value, Item_result cmp_type) { - bool ne_func= FALSE; DBUG_ENTER("get_mm_parts"); if (field->table != param->table) DBUG_RETURN(0); - if (type == Item_func::NE_FUNC) - { - ne_func= TRUE; - type= Item_func::LT_FUNC; - } - KEY_PART *key_part = param->key_parts; KEY_PART *end = param->key_parts_end; SEL_TREE *tree=0; @@ -1252,30 +4205,21 @@ get_mm_parts(PARAM *param, COND *cond_func, Field *field, if (sel_arg->type == SEL_ARG::IMPOSSIBLE) { tree->type=SEL_TREE::IMPOSSIBLE; - /* If this is an NE_FUNC, we still need to check GT_FUNC. 
*/ - if (!ne_func) - DBUG_RETURN(tree); + DBUG_RETURN(tree); } } else { // This key may be used later - if (!(sel_arg= new SEL_ARG(SEL_ARG::MAYBE_KEY))) + if (!(sel_arg= new SEL_ARG(SEL_ARG::MAYBE_KEY))) DBUG_RETURN(0); // OOM } sel_arg->part=(uchar) key_part->part; tree->keys[key_part->key]=sel_add(tree->keys[key_part->key],sel_arg); + tree->keys_map.set_bit(key_part->key); } } - if (ne_func) - { - SEL_TREE *tree2= get_mm_parts(param, cond_func, - field, Item_func::GT_FUNC, - value, cmp_type); - /* tree_or() will return 0 if tree2 is 0 */ - tree= tree_or(param,tree,tree2); - } DBUG_RETURN(tree); } @@ -1284,26 +4228,42 @@ static SEL_ARG * get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, Item_func::Functype type,Item *value) { - uint maybe_null=(uint) field->real_maybe_null(), copies; - uint field_length=field->pack_length()+maybe_null; - SEL_ARG *tree; - char *str, *str2; + uint maybe_null=(uint) field->real_maybe_null(); + bool optimize_range; + SEL_ARG *tree= 0; + MEM_ROOT *alloc= param->mem_root; + char *str; + ulong orig_sql_mode; + int err; DBUG_ENTER("get_mm_leaf"); + /* + We need to restore the runtime mem_root of the thread in this + function because it evaluates the value of its argument, while + the argument can be any, e.g. a subselect. The subselect + items, in turn, assume that all the memory allocated during + the evaluation has the same life span as the item itself. + TODO: opt_range.cc should not reset thd->mem_root at all. + */ + param->thd->mem_root= param->old_root; if (!value) // IS NULL or IS NOT NULL { - if (field->table->outer_join) // Can't use a key on this - DBUG_RETURN(0); + if (field->table->maybe_null) // Can't use a key on this + goto end; if (!maybe_null) // Not null field - DBUG_RETURN(type == Item_func::ISNULL_FUNC ? 
&null_element : 0); - if (!(tree=new SEL_ARG(field,is_null_string,is_null_string))) - DBUG_RETURN(0); // out of memory + { + if (type == Item_func::ISNULL_FUNC) + tree= &null_element; + goto end; + } + if (!(tree= new (alloc) SEL_ARG(field,is_null_string,is_null_string))) + goto end; // out of memory if (type == Item_func::ISNOTNULL_FUNC) { tree->min_flag=NEAR_MIN; /* IS NOT NULL -> X > NULL */ tree->max_flag=NO_MAX_RANGE; } - DBUG_RETURN(tree); + goto end; } /* @@ -1323,7 +4283,10 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, key_part->image_type == Field::itRAW && ((Field_str*)field)->charset() != conf_func->compare_collation() && !(conf_func->compare_collation()->state & MY_CS_BINSORT)) - DBUG_RETURN(0); + goto end; + + optimize_range= field->optimize_range(param->real_keynr[key_part->key], + key_part->part); if (type == Item_func::LIKE_FUNC) { @@ -1331,12 +4294,15 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, char buff1[MAX_FIELD_WIDTH],*min_str,*max_str; String tmp(buff1,sizeof(buff1),value->collation.collation),*res; uint length,offset,min_length,max_length; + uint field_length= field->pack_length()+maybe_null; - if (!field->optimize_range(param->real_keynr[key_part->key], - key_part->part)) - DBUG_RETURN(0); // Can't optimize this + if (!optimize_range) + goto end; if (!(res= value->val_str(&tmp))) - DBUG_RETURN(&null_element); + { + tree= &null_element; + goto end; + } /* TODO: @@ -1349,7 +4315,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, res= &tmp; } if (field->cmp_type() != STRING_RESULT) - DBUG_RETURN(0); // Can only optimize strings + goto end; // Can only optimize strings offset=maybe_null; length=key_part->store_length; @@ -1374,35 +4340,37 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, field_length= length; } length+=offset; - if (!(min_str= (char*) alloc_root(param->mem_root, length*2))) - DBUG_RETURN(0); + if 
(!(min_str= (char*) alloc_root(alloc, length*2))) + goto end; + max_str=min_str+length; if (maybe_null) max_str[0]= min_str[0]=0; + field_length-= maybe_null; like_error= my_like_range(field->charset(), res->ptr(), res->length(), ((Item_func_like*)(param->cond))->escape, wild_one, wild_many, - field_length-maybe_null, + field_length, min_str+offset, max_str+offset, &min_length, &max_length); if (like_error) // Can't optimize with LIKE - DBUG_RETURN(0); + goto end; - if (offset != maybe_null) // Blob + if (offset != maybe_null) // BLOB or VARCHAR { int2store(min_str+maybe_null,min_length); int2store(max_str+maybe_null,max_length); } - DBUG_RETURN(new SEL_ARG(field,min_str,max_str)); + tree= new (alloc) SEL_ARG(field, min_str, max_str); + goto end; } - if (!field->optimize_range(param->real_keynr[key_part->key], - key_part->part) && + if (!optimize_range && type != Item_func::EQ_FUNC && type != Item_func::EQUAL_FUNC) - DBUG_RETURN(0); // Can't optimize this + goto end; // Can't optimize this /* We can't always use indexes when comparing a string index to a number @@ -1411,47 +4379,50 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, if (field->result_type() == STRING_RESULT && value->result_type() != STRING_RESULT && field->cmp_type() != value->result_type()) - DBUG_RETURN(0); - - if (value->save_in_field(field, 1) < 0) + goto end; + /* For comparison purposes allow invalid dates like 2000-01-32 */ + orig_sql_mode= field->table->in_use->variables.sql_mode; + if (value->real_item()->type() == Item::STRING_ITEM && + (field->type() == FIELD_TYPE_DATE || + field->type() == FIELD_TYPE_DATETIME)) + field->table->in_use->variables.sql_mode|= MODE_INVALID_DATES; + err= value->save_in_field_no_warnings(field, 1); + if (err > 0 && field->cmp_type() != value->result_type()) { + if ((type == Item_func::EQ_FUNC || type == Item_func::EQUAL_FUNC) && + value->result_type() == item_cmp_type(field->result_type(), + value->result_type())) + + { + tree= new 
(alloc) SEL_ARG(field, 0, 0); + tree->type= SEL_ARG::IMPOSSIBLE; + } + else + { + /* + TODO: We should return trees of the type SEL_ARG::IMPOSSIBLE + for the cases like int_field > 999999999999999999999999 as well. + */ + tree= 0; + } + goto end; + } + if (err < 0) + { + field->table->in_use->variables.sql_mode= orig_sql_mode; /* This happens when we try to insert a NULL field in a not null column */ - DBUG_RETURN(&null_element); // cmp with NULL is never true - } - /* Get local copy of key */ - copies= 1; - if (field->key_type() == HA_KEYTYPE_VARTEXT) - copies= 2; - str= str2= (char*) alloc_root(param->mem_root, - (key_part->store_length)*copies+1); + tree= &null_element; // cmp with NULL is never TRUE + goto end; + } + field->table->in_use->variables.sql_mode= orig_sql_mode; + str= (char*) alloc_root(alloc, key_part->store_length+1); if (!str) - DBUG_RETURN(0); + goto end; if (maybe_null) *str= (char) field->is_real_null(); // Set to 1 if null - field->get_key_image(str+maybe_null, key_part->length, - field->charset(), key_part->image_type); - if (copies == 2) - { - /* - The key is stored as 2 byte length + key - key doesn't match end space. In other words, a key 'X ' should match - all rows between 'X' and 'X ...' 
- */ - uint length= uint2korr(str+maybe_null); - str2= str+ key_part->store_length; - /* remove end space */ - while (length > 0 && str[length+HA_KEY_BLOB_LENGTH+maybe_null-1] == ' ') - length--; - int2store(str+maybe_null, length); - /* Create key that is space filled */ - memcpy(str2, str, length + HA_KEY_BLOB_LENGTH + maybe_null); - my_fill_8bit(field->charset(), - str2+ length+ HA_KEY_BLOB_LENGTH +maybe_null, - key_part->length-length, ' '); - int2store(str2+maybe_null, key_part->length); - } - if (!(tree=new SEL_ARG(field,str,str2))) - DBUG_RETURN(0); // out of memory + field->get_key_image(str+maybe_null, key_part->length, key_part->image_type); + if (!(tree= new (alloc) SEL_ARG(field, str, str))) + goto end; // out of memory /* Check if we are comparing an UNSIGNED integer with a negative constant. @@ -1464,9 +4435,8 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, negative integers (which otherwise fails because at query execution time negative integers are cast to unsigned if compared with unsigned). 
*/ - Item_result field_result_type= field->result_type(); - Item_result value_result_type= value->result_type(); - if (field_result_type == INT_RESULT && value_result_type == INT_RESULT && + if (field->result_type() == INT_RESULT && + value->result_type() == INT_RESULT && ((Field_num*)field)->unsigned_flag && !((Item_int*)value)->unsigned_flag) { longlong item_val= value->val_int(); @@ -1475,10 +4445,13 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, if (type == Item_func::LT_FUNC || type == Item_func::LE_FUNC) { tree->type= SEL_ARG::IMPOSSIBLE; - DBUG_RETURN(tree); + goto end; } if (type == Item_func::GT_FUNC || type == Item_func::GE_FUNC) - DBUG_RETURN(0); + { + tree= 0; + goto end; + } } } @@ -1543,6 +4516,9 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, default: break; } + +end: + param->thd->mem_root= alloc; DBUG_RETURN(tree); } @@ -1552,8 +4528,8 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, ** If tree is 0 it means that the condition can't be tested. It refers ** to a non existent table or to a field in current table with isn't a key. 
** The different tree flags: -** IMPOSSIBLE: Condition is never true -** ALWAYS: Condition is always true +** IMPOSSIBLE: Condition is never TRUE +** ALWAYS: Condition is always TRUE ** MAYBE: Condition may exists when tables are read ** MAYBE_KEY: Condition refers to a key that may be used in join loop ** KEY_RANGE: Condition uses a key @@ -1622,6 +4598,8 @@ tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) tree1->type=SEL_TREE::KEY_SMALLER; DBUG_RETURN(tree1); } + key_map result_keys; + result_keys.clear_all(); /* Join the trees key per key */ SEL_ARG **key1,**key2,**end; @@ -1639,18 +4617,60 @@ tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) if (*key1 && (*key1)->type == SEL_ARG::IMPOSSIBLE) { tree1->type= SEL_TREE::IMPOSSIBLE; + DBUG_RETURN(tree1); + } + result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG - if (param->alloced_sel_args < SEL_ARG::MAX_SEL_ARGS) + if (*key1 && param->alloced_sel_args < SEL_ARG::MAX_SEL_ARGS) (*key1)->test_use_count(*key1); #endif - break; - } } } + tree1->keys_map= result_keys; + /* dispose index_merge if there is a "range" option */ + if (!result_keys.is_clear_all()) + { + tree1->merges.empty(); + DBUG_RETURN(tree1); + } + + /* ok, both trees are index_merge trees */ + imerge_list_and_list(&tree1->merges, &tree2->merges); DBUG_RETURN(tree1); } +/* + Check if two SEL_TREES can be combined into one (i.e. a single key range + read can be constructed for "cond_of_tree1 OR cond_of_tree2" ) without + using index_merge. 
+*/ + +bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param) +{ + key_map common_keys= tree1->keys_map; + DBUG_ENTER("sel_trees_can_be_ored"); + common_keys.intersect(tree2->keys_map); + + if (common_keys.is_clear_all()) + DBUG_RETURN(FALSE); + + /* trees have a common key, check if they refer to same key part */ + SEL_ARG **key1,**key2; + for (uint key_no=0; key_no < param->keys; key_no++) + { + if (common_keys.is_set(key_no)) + { + key1= tree1->keys + key_no; + key2= tree2->keys + key_no; + if ((*key1)->part == (*key2)->part) + { + DBUG_RETURN(TRUE); + } + } + } + DBUG_RETURN(FALSE); +} static SEL_TREE * tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) @@ -1667,20 +4687,63 @@ tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) if (tree2->type == SEL_TREE::MAYBE) DBUG_RETURN(tree2); - /* Join the trees key per key */ - SEL_ARG **key1,**key2,**end; - SEL_TREE *result=0; - for (key1= tree1->keys,key2= tree2->keys,end=key1+param->keys ; - key1 != end ; key1++,key2++) + SEL_TREE *result= 0; + key_map result_keys; + result_keys.clear_all(); + if (sel_trees_can_be_ored(tree1, tree2, param)) { - *key1= key_or(param, *key1, *key2); - if (*key1) + /* Join the trees key per key */ + SEL_ARG **key1,**key2,**end; + for (key1= tree1->keys,key2= tree2->keys,end= key1+param->keys ; + key1 != end ; key1++,key2++) { - result=tree1; // Added to tree1 + *key1=key_or(param, *key1, *key2); + if (*key1) + { + result=tree1; // Added to tree1 + result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG - if (param->alloced_sel_args < SEL_ARG::MAX_SEL_ARGS) - (*key1)->test_use_count(*key1); + if (param->alloced_sel_args < SEL_ARG::MAX_SEL_ARGS) + (*key1)->test_use_count(*key1); #endif + } + } + if (result) + result->keys_map= result_keys; + } + else + { + /* ok, two trees have KEY type but cannot be used without index merge */ + if (tree1->merges.is_empty() && tree2->merges.is_empty()) + { + SEL_IMERGE *merge; + /* both trees are "range" trees, produce new 
index merge structure */ + if (!(result= new SEL_TREE()) || !(merge= new SEL_IMERGE()) || + (result->merges.push_back(merge)) || + (merge->or_sel_tree(param, tree1)) || + (merge->or_sel_tree(param, tree2))) + result= NULL; + else + result->type= tree1->type; + } + else if (!tree1->merges.is_empty() && !tree2->merges.is_empty()) + { + if (imerge_list_or_list(param, &tree1->merges, &tree2->merges)) + result= new SEL_TREE(SEL_TREE::ALWAYS); + else + result= tree1; + } + else + { + /* one tree is index merge tree and another is range tree */ + if (tree1->merges.is_empty()) + swap_variables(SEL_TREE*, tree1, tree2); + + /* add tree2 to tree1->merges, checking if it collapses to ALWAYS */ + if (imerge_list_or_tree(param, &tree1->merges, tree2)) + result= new SEL_TREE(SEL_TREE::ALWAYS); + else + result= tree1; } } DBUG_RETURN(result); @@ -1731,7 +4794,6 @@ and_all_keys(PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) } - /* Produce a SEL_ARG graph that represents "key1 AND key2" @@ -1776,7 +4838,7 @@ key_and(PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) clone_flag=swap_clone_flag(clone_flag); } - // If one of the key is MAYBE_KEY then the found region may be smaller + /* If one of the key is MAYBE_KEY then the found region may be smaller */ if (key2->type == SEL_ARG::MAYBE_KEY) { if (key1->use_count > 1) @@ -1815,6 +4877,13 @@ key_and(PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag) return 0; // Can't optimize this } + if ((key1->min_flag | key2->min_flag) & GEOM_FLAG) + { + key1->free_tree(); + key2->free_tree(); + return 0; // Can't optimize this + } + key1->use_count--; key2->use_count--; SEL_ARG *e1=key1->first(), *e2=key2->first(), *new_tree=0; @@ -2240,7 +5309,7 @@ SEL_ARG::find_range(SEL_ARG *key) SYNOPSIS tree_delete() key Key that is to be deleted from tree (this) - + NOTE This also frees all sub trees that is used by the element @@ -2487,7 +5556,7 @@ SEL_ARG *rb_delete_fixup(SEL_ARG *root,SEL_ARG *key,SEL_ARG *par) } - /* 
Test that the proporties for a red-black tree holds */ + /* Test that the properties for a red-black tree hold */ #ifdef EXTRA_DEBUG int test_rb_tree(SEL_ARG *element,SEL_ARG *parent) @@ -2619,7 +5688,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root) ulong count=count_key_part_usage(root,pos->next_key_part); if (count > pos->next_key_part->use_count) { - sql_print_information("Use_count: Wrong count for key at %lx, %lu " + sql_print_information("Use_count: Wrong count for key at 0x%lx, %lu " "should be %lu", (long unsigned int)pos, pos->next_key_part->use_count, count); return; @@ -2628,68 +5697,175 @@ void SEL_ARG::test_use_count(SEL_ARG *root) } } if (e_count != elements) - sql_print_warning("Wrong use count: %u (should be %u) for tree at %lx", + sql_print_warning("Wrong use count: %u (should be %u) for tree at 0x%lx", e_count, elements, (long unsigned int) this); } #endif +/* + Calculate estimate of number records that will be retrieved by a range + scan on given index using given SEL_ARG intervals tree. + SYNOPSIS + check_quick_select + param Parameter from test_quick_select + idx Number of index to use in PARAM::key SEL_TREE::key + tree Transformed selection condition, tree->key[idx] holds intervals + tree to be used for scanning. + NOTES + param->is_ror_scan is set to reflect if the key scan is a ROR (see + is_key_scan_ror function for more info) + param->table->quick_*, param->range_count (and maybe others) are + updated with data of given key scan, see check_quick_keys for details. -/***************************************************************************** -** Check how many records we will find by using the found tree -*****************************************************************************/ + RETURN + Estimate # of records to be retrieved. + HA_POS_ERROR if estimate calculation failed due to table handler problems. 
+ +*/ static ha_rows check_quick_select(PARAM *param,uint idx,SEL_ARG *tree) { ha_rows records; + bool cpk_scan; + uint key; DBUG_ENTER("check_quick_select"); + param->is_ror_scan= FALSE; + param->first_null_comp= 0; + if (!tree) DBUG_RETURN(HA_POS_ERROR); // Can't use it param->max_key_part=0; param->range_count=0; + key= param->real_keynr[idx]; + if (tree->type == SEL_ARG::IMPOSSIBLE) DBUG_RETURN(0L); // Impossible select. return if (tree->type != SEL_ARG::KEY_RANGE || tree->part != 0) DBUG_RETURN(HA_POS_ERROR); // Don't use tree + + enum ha_key_alg key_alg= param->table->key_info[key].algorithm; + if ((key_alg != HA_KEY_ALG_BTREE) && (key_alg!= HA_KEY_ALG_UNDEF)) + { + /* Records are not ordered by rowid for other types of indexes. */ + cpk_scan= FALSE; + } + else + { + /* + Clustered PK scan is a special case, check_quick_keys doesn't recognize + CPK scans as ROR scans (while actually any CPK scan is a ROR scan). + */ + cpk_scan= ((param->table->s->primary_key == param->real_keynr[idx]) && + param->table->file->primary_key_is_clustered()); + param->is_ror_scan= !cpk_scan; + } + param->n_ranges= 0; + records=check_quick_keys(param,idx,tree,param->min_key,0,param->max_key,0); if (records != HA_POS_ERROR) { - uint key=param->real_keynr[idx]; param->table->quick_keys.set_bit(key); param->table->quick_rows[key]=records; param->table->quick_key_parts[key]=param->max_key_part+1; + param->table->quick_n_ranges[key]= param->n_ranges; + if (cpk_scan) + param->is_ror_scan= TRUE; } + if (param->table->file->index_flags(key, 0, TRUE) & HA_KEY_SCAN_NOT_ROR) + param->is_ror_scan= FALSE; DBUG_PRINT("exit", ("Records: %lu", (ulong) records)); DBUG_RETURN(records); } +/* + Recursively calculate estimate of # rows that will be retrieved by + key scan on key idx. + SYNOPSIS + check_quick_keys() + param Parameter from test_quick select function. 
+ idx Number of key to use in PARAM::keys in list of used keys + (param->real_keynr[idx] holds the key number in table) + key_tree SEL_ARG tree being examined. + min_key Buffer with partial min key value tuple + min_key_flag + max_key Buffer with partial max key value tuple + max_key_flag + + NOTES + The function does the recursive descent on the tree via SEL_ARG::left, + SEL_ARG::right, and SEL_ARG::next_key_part edges. The #rows estimates + are calculated using records_in_range calls at the leaf nodes and then + summed. + + param->min_key and param->max_key are used to hold prefixes of key value + tuples. + + The side effects are: + + param->max_key_part is updated to hold the maximum number of key parts used + in scan minus 1. + + param->range_count is incremented if the function finds a range that + wasn't counted by the caller. + + param->is_ror_scan is cleared if the function detects that the key scan is + not a Rowid-Ordered Retrieval scan ( see comments for is_key_scan_ror + function for description of which key scans are ROR scans) +*/ + static ha_rows check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, char *min_key,uint min_key_flag, char *max_key, uint max_key_flag) { - ha_rows records=0,tmp; + ha_rows records=0, tmp; + uint tmp_min_flag, tmp_max_flag, keynr, min_key_length, max_key_length; + char *tmp_min_key, *tmp_max_key; + uint8 save_first_null_comp= param->first_null_comp; param->max_key_part=max(param->max_key_part,key_tree->part); if (key_tree->left != &null_element) { + /* + There are at least two intervals for current key part, i.e. condition + was converted to something like + (keyXpartY less/equals c1) OR (keyXpartY more/equals c2). + This is not a ROR scan if the key is not Clustered Primary Key. 
+ */ + param->is_ror_scan= FALSE; records=check_quick_keys(param,idx,key_tree->left,min_key,min_key_flag, max_key,max_key_flag); if (records == HA_POS_ERROR) // Impossible return records; } - uint tmp_min_flag,tmp_max_flag,keynr; - char *tmp_min_key=min_key,*tmp_max_key=max_key; - + tmp_min_key= min_key; + tmp_max_key= max_key; key_tree->store(param->key[idx][key_tree->part].store_length, &tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag); - uint min_key_length= (uint) (tmp_min_key- param->min_key); - uint max_key_length= (uint) (tmp_max_key- param->max_key); + min_key_length= (uint) (tmp_min_key- param->min_key); + max_key_length= (uint) (tmp_max_key- param->max_key); + + if (param->is_ror_scan) + { + /* + If the index doesn't cover entire key, mark the scan as non-ROR scan. + Actually we're cutting off some ROR scans here. + */ + uint16 fieldnr= param->table->key_info[param->real_keynr[idx]]. + key_part[key_tree->part].fieldnr - 1; + if (param->table->field[fieldnr]->key_length() != + param->key[idx][key_tree->part].length) + param->is_ror_scan= FALSE; + } + + if (!param->first_null_comp && key_tree->is_null_interval()) + param->first_null_comp= key_tree->part+1; if (key_tree->next_key_part && key_tree->next_key_part->part == key_tree->part+1 && @@ -2704,6 +5880,12 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, tmp_max_key, max_key_flag | key_tree->max_flag); goto end; // Ugly, but efficient } + else + { + /* The interval for current key part is not c1 <= keyXpartY <= c1 */ + param->is_ror_scan= FALSE; + } + tmp_min_flag=key_tree->min_flag; tmp_max_flag=key_tree->max_flag; if (!tmp_min_flag) @@ -2728,10 +5910,33 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, (param->table->key_info[keynr].flags & (HA_NOSAME | HA_END_SPACE_KEY)) == HA_NOSAME && min_key_length == max_key_length && - !memcmp(param->min_key,param->max_key,min_key_length)) + !memcmp(param->min_key,param->max_key,min_key_length) && + !param->first_null_comp) + { tmp=1; // 
Max one record + param->n_ranges++; + } else { + if (param->is_ror_scan) + { + /* + If we get here, the condition on the key was converted to form + "(keyXpart1 = c1) AND ... AND (keyXpart{key_tree->part - 1} = cN) AND + somecond(keyXpart{key_tree->part})" + Check if + somecond is "keyXpart{key_tree->part} = const" and + uncovered "tail" of KeyX parts is either empty or is identical to + first members of clustered primary key. + */ + if (!(min_key_length == max_key_length && + !memcmp(min_key,max_key, (uint) (tmp_max_key - max_key)) && + !key_tree->min_flag && !key_tree->max_flag && + is_key_scan_ror(param, keynr, key_tree->part + 1))) + param->is_ror_scan= FALSE; + } + param->n_ranges++; + if (tmp_min_flag & GEOM_FLAG) { key_range min_range; @@ -2768,32 +5973,128 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, records+=tmp; if (key_tree->right != &null_element) { + /* + There are at least two intervals for current key part, i.e. condition + was converted to something like + (keyXpartY less/equals c1) OR (keyXpartY more/equals c2). + This is not a ROR scan if the key is not Clustered Primary Key. + */ + param->is_ror_scan= FALSE; tmp=check_quick_keys(param,idx,key_tree->right,min_key,min_key_flag, max_key,max_key_flag); if (tmp == HA_POS_ERROR) return tmp; records+=tmp; } + param->first_null_comp= save_first_null_comp; return records; } -/**************************************************************************** -** change a tree to a structure to be used by quick_select -** This uses it's own malloc tree -****************************************************************************/ +/* + Check if key scan on given index with equality conditions on first n key + parts is a ROR scan. + + SYNOPSIS + is_key_scan_ror() + param Parameter from test_quick_select + keynr Number of key in the table. The key must not be a clustered + primary key. + nparts Number of first key parts for which equality conditions + are present. 
+ + NOTES + ROR (Rowid Ordered Retrieval) key scan is a key scan that produces + ordered sequence of rowids (ha_xxx::cmp_ref is the comparison function) + + An index scan is a ROR scan if it is done using a condition in form + + "key1_1=c_1 AND ... AND key1_n=c_n" (1) + + where the index is defined on (key1_1, ..., key1_N [,a_1, ..., a_n]) + + and the table has a clustered Primary Key + + PRIMARY KEY(a_1, ..., a_n, b1, ..., b_k) with first key parts being + identical to uncovered parts ot the key being scanned (2) + + Scans on HASH indexes are not ROR scans, + any range scan on clustered primary key is ROR scan (3) + + Check (1) is made in check_quick_keys() + Check (3) is made check_quick_select() + Check (2) is made by this function. + + RETURN + TRUE If the scan is ROR-scan + FALSE otherwise +*/ + +static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts) +{ + KEY *table_key= param->table->key_info + keynr; + KEY_PART_INFO *key_part= table_key->key_part + nparts; + KEY_PART_INFO *key_part_end= (table_key->key_part + + table_key->key_parts); + uint pk_number; + + if (key_part == key_part_end) + return TRUE; + pk_number= param->table->s->primary_key; + if (!param->table->file->primary_key_is_clustered() || pk_number == MAX_KEY) + return FALSE; + + KEY_PART_INFO *pk_part= param->table->key_info[pk_number].key_part; + KEY_PART_INFO *pk_part_end= pk_part + + param->table->key_info[pk_number].key_parts; + for (;(key_part!=key_part_end) && (pk_part != pk_part_end); + ++key_part, ++pk_part) + { + if ((key_part->field != pk_part->field) || + (key_part->length != pk_part->length)) + return FALSE; + } + return (key_part == key_part_end); +} + + +/* + Create a QUICK_RANGE_SELECT from given key and SEL_ARG tree for that key. -static QUICK_SELECT * -get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree) + SYNOPSIS + get_quick_select() + param + idx Index of used key in param->key. 
+ key_tree SEL_ARG tree for the used key + parent_alloc If not NULL, use it to allocate memory for + quick select data. Otherwise use quick->alloc. + NOTES + The caller must call QUICK_SELECT::init for returned quick select + + CAUTION! This function may change thd->mem_root to a MEM_ROOT which will be + deallocated when the returned quick select is deleted. + + RETURN + NULL on error + otherwise created quick select +*/ + +QUICK_RANGE_SELECT * +get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree, + MEM_ROOT *parent_alloc) { - QUICK_SELECT *quick; + QUICK_RANGE_SELECT *quick; DBUG_ENTER("get_quick_select"); if (param->table->key_info[param->real_keynr[idx]].flags & HA_SPATIAL) - quick=new QUICK_SELECT_GEOM(param->thd, param->table, param->real_keynr[idx], - 0); + quick=new QUICK_RANGE_SELECT_GEOM(param->thd, param->table, + param->real_keynr[idx], + test(parent_alloc), + parent_alloc); else - quick=new QUICK_SELECT(param->thd, param->table, param->real_keynr[idx]); + quick=new QUICK_RANGE_SELECT(param->thd, param->table, + param->real_keynr[idx], + test(parent_alloc)); if (quick) { @@ -2807,9 +6108,10 @@ get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree) else { quick->key_parts=(KEY_PART*) - memdup_root(&quick->alloc,(char*) param->key[idx], - sizeof(KEY_PART)* - param->table->key_info[param->real_keynr[idx]].key_parts); + memdup_root(parent_alloc? 
parent_alloc : &quick->alloc, + (char*) param->key[idx], + sizeof(KEY_PART)* + param->table->key_info[param->real_keynr[idx]].key_parts); } } DBUG_RETURN(quick); @@ -2819,9 +6121,8 @@ get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree) /* ** Fix this to get all possible sub_ranges */ - -static bool -get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, +bool +get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, SEL_ARG *key_tree,char *min_key,uint min_key_flag, char *max_key, uint max_key_flag) { @@ -2917,7 +6218,8 @@ get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, set_if_bigger(quick->max_used_key_length,range->min_length); set_if_bigger(quick->max_used_key_length,range->max_length); set_if_bigger(quick->used_key_parts, (uint) key_tree->part+1); - quick->ranges.push_back(range); + if (insert_dynamic(&quick->ranges, (gptr)&range)) + return 1; end: if (key_tree->right != &null_element) @@ -2931,12 +6233,12 @@ get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, Return 1 if there is only one range and this uses the whole primary key */ -bool QUICK_SELECT::unique_key_range() +bool QUICK_RANGE_SELECT::unique_key_range() { if (ranges.elements == 1) { - QUICK_RANGE *tmp; - if (((tmp=ranges.head())->flag & (EQ_RANGE | NULL_RANGE)) == EQ_RANGE) + QUICK_RANGE *tmp= *((QUICK_RANGE**)ranges.buffer); + if ((tmp->flag & (EQ_RANGE | NULL_RANGE)) == EQ_RANGE) { KEY *key=head->key_info+index; return ((key->flags & (HA_NOSAME | HA_END_SPACE_KEY)) == HA_NOSAME && @@ -2947,11 +6249,11 @@ bool QUICK_SELECT::unique_key_range() } -/* Returns true if any part of the key is NULL */ +/* Returns TRUE if any part of the key is NULL */ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length) { - for (const char *end=key+length ; + for (const char *end=key+length ; key < end; key+= key_part++->store_length) { @@ -2962,31 +6264,97 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length) } 
-/**************************************************************************** - Create a QUICK RANGE based on a key -****************************************************************************/ +bool QUICK_SELECT_I::is_keys_used(List<Item> *fields) +{ + return is_key_used(head, index, *fields); +} -QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) +bool QUICK_INDEX_MERGE_SELECT::is_keys_used(List<Item> *fields) { - MEM_ROOT *old_root= thd->mem_root; - /* The following call may change thd->mem_root */ - QUICK_SELECT *quick= new QUICK_SELECT(thd, table, ref->key); + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + if (is_key_used(head, quick->index, *fields)) + return 1; + } + return 0; +} + +bool QUICK_ROR_INTERSECT_SELECT::is_keys_used(List<Item> *fields) +{ + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + if (is_key_used(head, quick->index, *fields)) + return 1; + } + return 0; +} + +bool QUICK_ROR_UNION_SELECT::is_keys_used(List<Item> *fields) +{ + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (quick->is_keys_used(fields)) + return 1; + } + return 0; +} + + +/* + Create quick select from ref/ref_or_null scan. + + SYNOPSIS + get_quick_select_for_ref() + thd Thread handle + table Table to access + ref ref[_or_null] scan parameters + records Estimate of number of records (needed only to construct + quick select) + NOTES + This allocates things in a new memory root, as this may be called many + times during a query. + + RETURN + Quick select that retrieves the same rows as passed ref scan + NULL on error. 
+*/ + +QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, + TABLE_REF *ref, ha_rows records) +{ + MEM_ROOT *old_root, *alloc; + QUICK_RANGE_SELECT *quick; KEY *key_info = &table->key_info[ref->key]; KEY_PART *key_part; QUICK_RANGE *range; uint part; + old_root= thd->mem_root; + /* The following call may change thd->mem_root */ + quick= new QUICK_RANGE_SELECT(thd, table, ref->key, 0); + /* save mem_root set by QUICK_RANGE_SELECT constructor */ + alloc= thd->mem_root; + /* + return back default mem_root (thd->mem_root) changed by + QUICK_RANGE_SELECT constructor + */ + thd->mem_root= old_root; + if (!quick) return 0; /* no ranges found */ - if (cp_buffer_from_ref(thd, ref)) - { - if (thd->is_fatal_error) - goto err; // out of memory - goto ok; // empty range - } + if (quick->init()) + goto err; + quick->records= records; - if (!(range= new QUICK_RANGE())) - goto err; // out of memory + if (cp_buffer_from_ref(thd,ref) && thd->is_fatal_error || + !(range= new(alloc) QUICK_RANGE())) + goto err; // out of memory range->min_key=range->max_key=(char*) ref->key_buff; range->min_length=range->max_length=ref->key_length; @@ -3005,12 +6373,12 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) key_part->length= key_info->key_part[part].length; key_part->store_length= key_info->key_part[part].store_length; key_part->null_bit= key_info->key_part[part].null_bit; - key_part->flag= key_info->key_part[part].key_part_flag; + key_part->flag= (uint8) key_info->key_part[part].key_part_flag; } - if (quick->ranges.push_back(range)) + if (insert_dynamic(&quick->ranges,(gptr)&range)) goto err; - /* + /* Add a NULL range if REF_OR_NULL optimization is used. 
For example: if we have "WHERE A=2 OR A IS NULL" we created the (A=2) range above @@ -3021,115 +6389,638 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) QUICK_RANGE *null_range; *ref->null_ref_key= 1; // Set null byte then create a range - if (!(null_range= new QUICK_RANGE((char*)ref->key_buff, ref->key_length, - (char*)ref->key_buff, ref->key_length, - EQ_RANGE))) + if (!(null_range= new (alloc) QUICK_RANGE((char*)ref->key_buff, + ref->key_length, + (char*)ref->key_buff, + ref->key_length, + EQ_RANGE))) goto err; *ref->null_ref_key= 0; // Clear null byte - if (quick->ranges.push_back(null_range)) + if (insert_dynamic(&quick->ranges,(gptr)&null_range)) goto err; } -ok: - thd->mem_root= old_root; return quick; err: - thd->mem_root= old_root; delete quick; return 0; } - /* get next possible record using quick-struct */ -int QUICK_SELECT::get_next() +/* + Perform key scans for all used indexes (except CPK), get rowids and merge + them into an ordered non-recurrent sequence of rowids. + + The merge/duplicate removal is performed using Unique class. We put all + rowids into Unique, get the sorted sequence and destroy the Unique. + + If table has a clustered primary key that covers all rows (TRUE for bdb + and innodb currently) and one of the index_merge scans is a scan on PK, + then + rows that will be retrieved by PK scan are not put into Unique and + primary key scan is not performed here, it is performed later separately. + + RETURN + 0 OK + other error +*/ + +int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge() { - DBUG_ENTER("get_next"); + List_iterator_fast<QUICK_RANGE_SELECT> cur_quick_it(quick_selects); + QUICK_RANGE_SELECT* cur_quick; + int result; + Unique *unique; + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::prepare_unique"); + /* We're going to just read rowids. 
*/ + if (head->file->extra(HA_EXTRA_KEYREAD)) + DBUG_RETURN(1); + + /* + Make innodb retrieve all PK member fields, so + * ha_innobase::position (which uses them) call works. + * We can filter out rows that will be retrieved by clustered PK. + (This also creates a deficiency - it is possible that we will retrieve + parts of key that are not used by current query at all.) + */ + if (head->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY)) + DBUG_RETURN(1); + + cur_quick_it.rewind(); + cur_quick= cur_quick_it++; + DBUG_ASSERT(cur_quick != 0); + + /* + We reuse the same instance of handler so we need to call both init and + reset here. + */ + if (cur_quick->init() || cur_quick->reset()) + DBUG_RETURN(1); + + unique= new Unique(refpos_order_cmp, (void *)head->file, + head->file->ref_length, + thd->variables.sortbuff_size); + if (!unique) + DBUG_RETURN(1); for (;;) { - int result; - key_range start_key, end_key; - if (range) + while ((result= cur_quick->get_next()) == HA_ERR_END_OF_FILE) + { + cur_quick->range_end(); + cur_quick= cur_quick_it++; + if (!cur_quick) + break; + + if (cur_quick->file->inited != handler::NONE) + cur_quick->file->ha_index_end(); + if (cur_quick->init() || cur_quick->reset()) + DBUG_RETURN(1); + } + + if (result) { - // Already read through key - result= file->read_range_next(); if (result != HA_ERR_END_OF_FILE) + { + cur_quick->range_end(); + DBUG_RETURN(result); + } + break; + } + + if (thd->killed) + DBUG_RETURN(1); + + /* skip row if it will be retrieved by clustered PK scan */ + if (pk_quick_select && pk_quick_select->row_in_ranges()) + continue; + + cur_quick->file->position(cur_quick->record); + result= unique->unique_add((char*)cur_quick->file->ref); + if (result) + DBUG_RETURN(1); + + } + + /* ok, all row ids are in Unique */ + result= unique->get(head); + delete unique; + doing_pk_scan= FALSE; + /* start table scan */ + init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1); + /* index_merge currently doesn't support "using index" 
at all */ + head->file->extra(HA_EXTRA_NO_KEYREAD); + + DBUG_RETURN(result); +} + + +/* + Get next row for index_merge. + NOTES + The rows are read from + 1. rowids stored in Unique. + 2. QUICK_RANGE_SELECT with clustered primary key (if any). + The sets of rows retrieved in 1) and 2) are guaranteed to be disjoint. +*/ + +int QUICK_INDEX_MERGE_SELECT::get_next() +{ + int result; + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::get_next"); + + if (doing_pk_scan) + DBUG_RETURN(pk_quick_select->get_next()); + + result= read_record.read_record(&read_record); + + if (result == -1) + { + result= HA_ERR_END_OF_FILE; + end_read_record(&read_record); + /* All rows from Unique have been retrieved, do a clustered PK scan */ + if (pk_quick_select) + { + doing_pk_scan= TRUE; + if ((result= pk_quick_select->init()) || (result= pk_quick_select->reset())) + DBUG_RETURN(result); + DBUG_RETURN(pk_quick_select->get_next()); + } + } + + DBUG_RETURN(result); +} + + +/* + Retrieve next record. + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::get_next() + + NOTES + Invariant on enter/exit: all intersected selects have retrieved all index + records with rowid <= some_rowid_val and no intersected select has + retrieved any index records with rowid > some_rowid_val. + We start fresh and loop until we have retrieved the same rowid in each of + the key scans or we got an error. + + If a Clustered PK scan is present, it is used only to check if row + satisfies its condition (and never used for row retrieval). + + RETURN + 0 - Ok + other - Error code if any error occurred. 
+*/ + +int QUICK_ROR_INTERSECT_SELECT::get_next() +{ + List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects); + QUICK_RANGE_SELECT* quick; + int error, cmp; + uint last_rowid_count=0; + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::get_next"); + + /* Get a rowid for first quick and save it as a 'candidate' */ + quick= quick_it++; + if (cpk_quick) + { + do { + error= quick->get_next(); + }while (!error && !cpk_quick->row_in_ranges()); + } + else + error= quick->get_next(); + + if (error) + DBUG_RETURN(error); + + quick->file->position(quick->record); + memcpy(last_rowid, quick->file->ref, head->file->ref_length); + last_rowid_count= 1; + + while (last_rowid_count < quick_selects.elements) + { + if (!(quick= quick_it++)) + { + quick_it.rewind(); + quick= quick_it++; + } + + do { + if ((error= quick->get_next())) + DBUG_RETURN(error); + quick->file->position(quick->record); + cmp= head->file->cmp_ref(quick->file->ref, last_rowid); + } while (cmp < 0); + + /* Ok, current select 'caught up' and returned ref >= cur_ref */ + if (cmp > 0) + { + /* Found a row with ref > cur_ref. Make it a new 'candidate' */ + if (cpk_quick) + { + while (!cpk_quick->row_in_ranges()) + { + if ((error= quick->get_next())) + DBUG_RETURN(error); + } + } + memcpy(last_rowid, quick->file->ref, head->file->ref_length); + last_rowid_count= 1; + } + else + { + /* current 'candidate' row confirmed by this select */ + last_rowid_count++; + } + } + + /* We get here iff we got the same row ref in all scans. */ + if (need_to_fetch_row) + error= head->file->rnd_pos(head->record[0], last_rowid); + DBUG_RETURN(error); +} + + +/* + Retrieve next record. + SYNOPSIS + QUICK_ROR_UNION_SELECT::get_next() + + NOTES + Enter/exit invariant: + For each quick select in the queue a {key,rowid} tuple has been + retrieved but the corresponding row hasn't been passed to output. + + RETURN + 0 - Ok + other - Error code if any error occurred. 
+*/ + +int QUICK_ROR_UNION_SELECT::get_next() +{ + int error, dup_row; + QUICK_SELECT_I *quick; + byte *tmp; + DBUG_ENTER("QUICK_ROR_UNION_SELECT::get_next"); + + do + { + if (!queue.elements) + DBUG_RETURN(HA_ERR_END_OF_FILE); + /* Ok, we have a queue with >= 1 scans */ + + quick= (QUICK_SELECT_I*)queue_top(&queue); + memcpy(cur_rowid, quick->last_rowid, rowid_length); + + /* put into queue rowid from the same stream as top element */ + if ((error= quick->get_next())) + { + if (error != HA_ERR_END_OF_FILE) + DBUG_RETURN(error); + queue_remove(&queue, 0); + } + else + { + quick->save_last_pos(); + queue_replaced(&queue); + } + + if (!have_prev_rowid) + { + /* No rows have been returned yet */ + dup_row= FALSE; + have_prev_rowid= TRUE; + } + else + dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid); + }while (dup_row); + + tmp= cur_rowid; + cur_rowid= prev_rowid; + prev_rowid= tmp; + + error= head->file->rnd_pos(quick->record, prev_rowid); + DBUG_RETURN(error); +} + +int QUICK_RANGE_SELECT::reset() +{ + uint mrange_bufsiz; + byte *mrange_buff; + DBUG_ENTER("QUICK_RANGE_SELECT::reset"); + next=0; + last_range= NULL; + in_range= FALSE; + cur_range= (QUICK_RANGE**) ranges.buffer; + + if (file->inited == handler::NONE && (error= file->ha_index_init(index))) + DBUG_RETURN(error); + + /* Do not allocate the buffers twice. */ + if (multi_range_length) + { + DBUG_ASSERT(multi_range_length == min(multi_range_count, ranges.elements)); + DBUG_RETURN(0); + } + + /* Allocate the ranges array. */ + DBUG_ASSERT(ranges.elements); + multi_range_length= min(multi_range_count, ranges.elements); + DBUG_ASSERT(multi_range_length > 0); + while (multi_range_length && ! (multi_range= (KEY_MULTI_RANGE*) + my_malloc(multi_range_length * + sizeof(KEY_MULTI_RANGE), + MYF(MY_WME)))) + { + /* Try to shrink the buffers until it is 0. */ + multi_range_length/= 2; + } + if (! 
multi_range) + { + multi_range_length= 0; + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + } + + /* Allocate the handler buffer if necessary. */ + if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER) + { + mrange_bufsiz= min(multi_range_bufsiz, + (QUICK_SELECT_I::records + 1)* head->s->reclength); + + while (mrange_bufsiz && + ! my_multi_malloc(MYF(MY_WME), + &multi_range_buff, sizeof(*multi_range_buff), + &mrange_buff, mrange_bufsiz, + NullS)) + { + /* Try to shrink the buffers until both are 0. */ + mrange_bufsiz/= 2; + } + if (! multi_range_buff) + { + my_free((char*) multi_range, MYF(0)); + multi_range= NULL; + multi_range_length= 0; + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + } + + /* Initialize the handler buffer. */ + multi_range_buff->buffer= mrange_buff; + multi_range_buff->buffer_end= mrange_buff + mrange_bufsiz; + multi_range_buff->end_of_used_area= mrange_buff; + } + DBUG_RETURN(0); +} + + +/* + Get next possible record using quick-struct. + + SYNOPSIS + QUICK_RANGE_SELECT::get_next() + + NOTES + Record is read into table->record[0] + + RETURN + 0 Found row + HA_ERR_END_OF_FILE No (more) rows in range + # Error code +*/ + +int QUICK_RANGE_SELECT::get_next() +{ + int result; + KEY_MULTI_RANGE *mrange; + key_range *start_key; + key_range *end_key; + DBUG_ENTER("QUICK_RANGE_SELECT::get_next"); + DBUG_ASSERT(multi_range_length && multi_range && + (cur_range >= (QUICK_RANGE**) ranges.buffer) && + (cur_range <= (QUICK_RANGE**) ranges.buffer + ranges.elements)); + + for (;;) + { + if (in_range) + { + /* We did already start to read this key. */ + result= file->read_multi_range_next(&mrange); + if (result != HA_ERR_END_OF_FILE) + { + in_range= ! result; DBUG_RETURN(result); + } } - if (!(range= it++)) - DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used + uint count= min(multi_range_length, ranges.elements - + (cur_range - (QUICK_RANGE**) ranges.buffer)); + if (count == 0) + { + /* Ranges have already been used up before. None is left for read. 
*/ + in_range= FALSE; + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + KEY_MULTI_RANGE *mrange_slot, *mrange_end; + for (mrange_slot= multi_range, mrange_end= mrange_slot+count; + mrange_slot < mrange_end; + mrange_slot++) + { + start_key= &mrange_slot->start_key; + end_key= &mrange_slot->end_key; + last_range= *(cur_range++); + + start_key->key= (const byte*) last_range->min_key; + start_key->length= last_range->min_length; + start_key->flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : + (last_range->flag & EQ_RANGE) ? + HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); + end_key->key= (const byte*) last_range->max_key; + end_key->length= last_range->max_length; + /* + We use HA_READ_AFTER_KEY here because if we are reading on a key + prefix. We want to find all keys with this prefix. + */ + end_key->flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : + HA_READ_AFTER_KEY); - start_key.key= (const byte*) range->min_key; - start_key.length= range->min_length; - start_key.flag= ((range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : - (range->flag & EQ_RANGE) ? + mrange_slot->range_flag= last_range->flag; + } + + result= file->read_multi_range_first(&mrange, multi_range, count, + sorted, multi_range_buff); + if (result != HA_ERR_END_OF_FILE) + { + in_range= ! result; + DBUG_RETURN(result); + } + in_range= FALSE; /* No matching rows; go to next set of ranges. */ + } +} + +/* + Get the next record with a different prefix. + + SYNOPSIS + QUICK_RANGE_SELECT::get_next_prefix() + prefix_length length of cur_prefix + cur_prefix prefix of a key to be searched for + + DESCRIPTION + Each subsequent call to the method retrieves the first record that has a + prefix with length prefix_length different from cur_prefix, such that the + record with the new prefix is within the ranges described by + this->ranges. The record found is stored into the buffer pointed by + this->record. 
+ The method is useful for GROUP-BY queries with range conditions to + discover the prefix of the next group that satisfies the range conditions. + + TODO + This method is a modified copy of QUICK_RANGE_SELECT::get_next(), so both + methods should be unified into a more general one to reduce code + duplication. + + RETURN + 0 on success + HA_ERR_END_OF_FILE if returned all keys + other if some error occurred +*/ + +int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) +{ + DBUG_ENTER("QUICK_RANGE_SELECT::get_next_prefix"); + + for (;;) + { + int result; + key_range start_key, end_key; + if (last_range) + { + /* Read the next record in the same range with prefix after cur_prefix. */ + DBUG_ASSERT(cur_prefix != 0); + result= file->index_read(record, cur_prefix, prefix_length, + HA_READ_AFTER_KEY); + if (result || (file->compare_key(file->end_range) <= 0)) + DBUG_RETURN(result); + } + + uint count= ranges.elements - (cur_range - (QUICK_RANGE**) ranges.buffer); + if (count == 0) + { + /* Ranges have already been used up before. None is left for read. */ + last_range= 0; + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + last_range= *(cur_range++); + + start_key.key= (const byte*) last_range->min_key; + start_key.length= min(last_range->min_length, prefix_length); + start_key.flag= ((last_range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : + (last_range->flag & EQ_RANGE) ? HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); - end_key.key= (const byte*) range->max_key; - end_key.length= range->max_length; + end_key.key= (const byte*) last_range->max_key; + end_key.length= min(last_range->max_length, prefix_length); /* We use READ_AFTER_KEY here because if we are reading on a key prefix we want to find all keys with this prefix */ - end_key.flag= (range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : + end_key.flag= (last_range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY); - result= file->read_range_first(range->min_length ? &start_key : 0, - range->max_length ? 
&end_key : 0, - test(range->flag & EQ_RANGE), + result= file->read_range_first(last_range->min_length ? &start_key : 0, + last_range->max_length ? &end_key : 0, + test(last_range->flag & EQ_RANGE), sorted); - if (range->flag == (UNIQUE_RANGE | EQ_RANGE)) - range=0; // Stop searching + if (last_range->flag == (UNIQUE_RANGE | EQ_RANGE)) + last_range= 0; // Stop searching if (result != HA_ERR_END_OF_FILE) DBUG_RETURN(result); - range=0; // No matching rows; go to next range + last_range= 0; // No matching rows; go to next range } } -void QUICK_SELECT::reset(void) -{ - next= 0; - it.rewind(); - range= 0; - if (file->inited == handler::NONE) - file->ha_index_init(index); -} /* Get next for geometrical indexes */ -int QUICK_SELECT_GEOM::get_next() +int QUICK_RANGE_SELECT_GEOM::get_next() { - DBUG_ENTER(" QUICK_SELECT_GEOM::get_next"); + DBUG_ENTER("QUICK_RANGE_SELECT_GEOM::get_next"); for (;;) { int result; - if (range) + if (last_range) { // Already read through key - result= file->index_next_same(record, (byte*) range->min_key, - range->min_length); + result= file->index_next_same(record, (byte*) last_range->min_key, + last_range->min_length); if (result != HA_ERR_END_OF_FILE) DBUG_RETURN(result); } - if (!(range= it++)) - DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used + uint count= ranges.elements - (cur_range - (QUICK_RANGE**) ranges.buffer); + if (count == 0) + { + /* Ranges have already been used up before. None is left for read. 
*/ + last_range= 0; + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + last_range= *(cur_range++); result= file->index_read(record, - (byte*) range->min_key, - range->min_length, - (ha_rkey_function)(range->flag ^ GEOM_FLAG)); + (byte*) last_range->min_key, + last_range->min_length, + (ha_rkey_function)(last_range->flag ^ GEOM_FLAG)); if (result != HA_ERR_KEY_NOT_FOUND) DBUG_RETURN(result); - range=0; // Not found, to next range + last_range= 0; // Not found, to next range } } /* + Check if current row will be retrieved by this QUICK_RANGE_SELECT + + NOTES + It is assumed that currently a scan is being done on another index + which reads all necessary parts of the index that is scanned by this + quick select. + The implementation does a binary search on sorted array of disjoint + ranges, without taking size of range into account. + + This function is used to filter out clustered PK scan rows in + index_merge quick select. + + RETURN + TRUE if current row will be retrieved by this quick select + FALSE if not +*/ + +bool QUICK_RANGE_SELECT::row_in_ranges() +{ + QUICK_RANGE *res; + uint min= 0; + uint max= ranges.elements - 1; + uint mid= (max + min)/2; + + while (min != max) + { + if (cmp_next(*(QUICK_RANGE**)dynamic_array_ptr(&ranges, mid))) + { + /* current row value > mid->max */ + min= mid + 1; + } + else + max= mid; + mid= (min + max) / 2; + } + res= *(QUICK_RANGE**)dynamic_array_ptr(&ranges, mid); + return (!cmp_next(res) && !cmp_prev(res)); +} + +/* This is a hack: we inherit from QUICK_SELECT so that we can use the get_next() interface, but we have to hold a pointer to the original QUICK_SELECT because its data are used all over the place. What @@ -3139,16 +7030,17 @@ int QUICK_SELECT_GEOM::get_next() for now, this seems to work right at least. 
*/ -QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts) - : QUICK_SELECT(*q), rev_it(rev_ranges) +QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q, + uint used_key_parts_arg) + : QUICK_RANGE_SELECT(*q), rev_it(rev_ranges) { QUICK_RANGE *r; - it.rewind(); - for (r = it++; r; r = it++) - { - rev_ranges.push_front(r); - } + QUICK_RANGE **pr= (QUICK_RANGE**)ranges.buffer; + QUICK_RANGE **end_range= pr + ranges.elements; + for (; pr!=end_range; pr++) + rev_ranges.push_front(*pr); + /* Remove EQ_RANGE flag for keys that are not using the full key */ for (r = rev_it++; r; r = rev_it++) { @@ -3180,11 +7072,11 @@ int QUICK_SELECT_DESC::get_next() for (;;) { int result; - if (range) + if (last_range) { // Already read through key - result = ((range->flag & EQ_RANGE) - ? file->index_next_same(record, (byte*) range->min_key, - range->min_length) : + result = ((last_range->flag & EQ_RANGE) + ? file->index_next_same(record, (byte*) last_range->min_key, + last_range->min_length) : file->index_prev(record)); if (!result) { @@ -3195,56 +7087,99 @@ int QUICK_SELECT_DESC::get_next() DBUG_RETURN(result); } - if (!(range=rev_it++)) + if (!(last_range= rev_it++)) DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used - if (range->flag & NO_MAX_RANGE) // Read last record + if (last_range->flag & NO_MAX_RANGE) // Read last record { int local_error; if ((local_error=file->index_last(record))) DBUG_RETURN(local_error); // Empty table - if (cmp_prev(range) == 0) + if (cmp_prev(last_range) == 0) DBUG_RETURN(0); - range=0; // No matching records; go to next range + last_range= 0; // No match; go to next range continue; } - if (range->flag & EQ_RANGE) + if (last_range->flag & EQ_RANGE) { - result = file->index_read(record, (byte*) range->max_key, - range->max_length, HA_READ_KEY_EXACT); + result= file->index_read(record, (byte*) last_range->max_key, + last_range->max_length, HA_READ_KEY_EXACT); } else { - DBUG_ASSERT(range->flag & NEAR_MAX || 
range_reads_after_key(range)); - result=file->index_read(record, (byte*) range->max_key, - range->max_length, - ((range->flag & NEAR_MAX) ? - HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV)); + DBUG_ASSERT(last_range->flag & NEAR_MAX || + range_reads_after_key(last_range)); + result=file->index_read(record, (byte*) last_range->max_key, + last_range->max_length, + ((last_range->flag & NEAR_MAX) ? + HA_READ_BEFORE_KEY : + HA_READ_PREFIX_LAST_OR_PREV)); } if (result) { if (result != HA_ERR_KEY_NOT_FOUND) DBUG_RETURN(result); - range=0; // Not found, to next range + last_range= 0; // Not found, to next range continue; } - if (cmp_prev(range) == 0) + if (cmp_prev(last_range) == 0) { - if (range->flag == (UNIQUE_RANGE | EQ_RANGE)) - range = 0; // Stop searching + if (last_range->flag == (UNIQUE_RANGE | EQ_RANGE)) + last_range= 0; // Stop searching DBUG_RETURN(0); // Found key is in range } - range = 0; // To next range + last_range= 0; // To next range } } /* + Compare if found key is over max-value + Returns 0 if key <= range->max_key +*/ + +int QUICK_RANGE_SELECT::cmp_next(QUICK_RANGE *range_arg) +{ + if (range_arg->flag & NO_MAX_RANGE) + return 0; /* key can't be to large */ + + KEY_PART *key_part=key_parts; + uint store_length; + + for (char *key=range_arg->max_key, *end=key+range_arg->max_length; + key < end; + key+= store_length, key_part++) + { + int cmp; + store_length= key_part->store_length; + if (key_part->null_bit) + { + if (*key) + { + if (!key_part->field->is_null()) + return 1; + continue; + } + else if (key_part->field->is_null()) + return 0; + key++; // Skip null byte + store_length--; + } + if ((cmp=key_part->field->key_cmp((byte*) key, key_part->length)) < 0) + return 0; + if (cmp > 0) + return 1; + } + return (range_arg->flag & NEAR_MAX) ? 1 : 0; // Exact match +} + + +/* Returns 0 if found key is inside range (found key >= range->min_key). 
*/ -int QUICK_SELECT_DESC::cmp_prev(QUICK_RANGE *range_arg) +int QUICK_RANGE_SELECT::cmp_prev(QUICK_RANGE *range_arg) { int cmp; if (range_arg->flag & NO_MIN_RANGE) @@ -3259,7 +7194,7 @@ int QUICK_SELECT_DESC::cmp_prev(QUICK_RANGE *range_arg) /* - * True if this range will require using HA_READ_AFTER_KEY + * TRUE if this range will require using HA_READ_AFTER_KEY See comment in get_next() about this */ @@ -3271,7 +7206,7 @@ bool QUICK_SELECT_DESC::range_reads_after_key(QUICK_RANGE *range_arg) } -/* True if we are reading over a key that may have a NULL value */ +/* TRUE if we are reading over a key that may have a NULL value */ #ifdef NOT_USED bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range_arg, @@ -3317,10 +7252,2309 @@ bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range_arg, return 0; } #endif -void QUICK_SELECT_DESC::reset(void) -{ - rev_it.rewind(); - QUICK_SELECT::reset(); + + +void QUICK_RANGE_SELECT::add_info_string(String *str) +{ + KEY *key_info= head->key_info + index; + str->append(key_info->name); +} + +void QUICK_INDEX_MERGE_SELECT::add_info_string(String *str) +{ + QUICK_RANGE_SELECT *quick; + bool first= TRUE; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + str->append(STRING_WITH_LEN("sort_union(")); + while ((quick= it++)) + { + if (!first) + str->append(','); + else + first= FALSE; + quick->add_info_string(str); + } + if (pk_quick_select) + { + str->append(','); + pk_quick_select->add_info_string(str); + } + str->append(')'); +} + +void QUICK_ROR_INTERSECT_SELECT::add_info_string(String *str) +{ + bool first= TRUE; + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + str->append(STRING_WITH_LEN("intersect(")); + while ((quick= it++)) + { + KEY *key_info= head->key_info + quick->index; + if (!first) + str->append(','); + else + first= FALSE; + str->append(key_info->name); + } + if (cpk_quick) + { + KEY *key_info= head->key_info + cpk_quick->index; + str->append(','); + 
str->append(key_info->name); + } + str->append(')'); +} + +void QUICK_ROR_UNION_SELECT::add_info_string(String *str) +{ + bool first= TRUE; + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + str->append(STRING_WITH_LEN("union(")); + while ((quick= it++)) + { + if (!first) + str->append(','); + else + first= FALSE; + quick->add_info_string(str); + } + str->append(')'); +} + + +void QUICK_RANGE_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + KEY *key_info= head->key_info + index; + key_names->append(key_info->name); + length= longlong2str(max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); +} + +void QUICK_INDEX_MERGE_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + bool first= TRUE; + QUICK_RANGE_SELECT *quick; + + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + if (first) + first= FALSE; + else + { + key_names->append(','); + used_lengths->append(','); + } + + KEY *key_info= head->key_info + quick->index; + key_names->append(key_info->name); + length= longlong2str(quick->max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); + } + if (pk_quick_select) + { + KEY *key_info= head->key_info + pk_quick_select->index; + key_names->append(','); + key_names->append(key_info->name); + length= longlong2str(pk_quick_select->max_used_key_length, buf, 10) - buf; + used_lengths->append(','); + used_lengths->append(buf, length); + } +} + +void QUICK_ROR_INTERSECT_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + bool first= TRUE; + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + KEY *key_info= head->key_info + quick->index; + if (first) + first= FALSE; + else + { + key_names->append(','); + used_lengths->append(','); + } + 
key_names->append(key_info->name); + length= longlong2str(quick->max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); + } + + if (cpk_quick) + { + KEY *key_info= head->key_info + cpk_quick->index; + key_names->append(','); + key_names->append(key_info->name); + length= longlong2str(cpk_quick->max_used_key_length, buf, 10) - buf; + used_lengths->append(','); + used_lengths->append(buf, length); + } +} + +void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + bool first= TRUE; + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (first) + first= FALSE; + else + { + used_lengths->append(','); + key_names->append(','); + } + quick->add_keys_and_lengths(key_names, used_lengths); + } +} + + +/******************************************************************************* +* Implementation of QUICK_GROUP_MIN_MAX_SELECT +*******************************************************************************/ + +static inline uint get_field_keypart(KEY *index, Field *field); +static inline SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, + PARAM *param, uint *param_idx); +static bool +get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, + KEY_PART_INFO *first_non_group_part, + KEY_PART_INFO *min_max_arg_part, + KEY_PART_INFO *last_part, THD *thd, + byte *key_infix, uint *key_infix_len, + KEY_PART_INFO **first_non_infix_part); +static bool +check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, + Field::imagetype image_type); + +static void +cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, + uint group_key_parts, SEL_TREE *range_tree, + SEL_ARG *index_tree, ha_rows quick_prefix_records, + bool have_min, bool have_max, + double *read_cost, ha_rows *records); + + +/* + Test if this access method is applicable to a GROUP query with MIN/MAX + functions, and if so, construct a new TRP object. 
+ + SYNOPSIS + get_best_group_min_max() + param Parameter from test_quick_select + sel_tree Range tree generated by get_mm_tree + + DESCRIPTION + Test whether a query can be computed via a QUICK_GROUP_MIN_MAX_SELECT. + Queries computable via a QUICK_GROUP_MIN_MAX_SELECT must satisfy the + following conditions: + A) Table T has at least one compound index I of the form: + I = <A_1, ...,A_k, [B_1,..., B_m], C, [D_1,...,D_n]> + B) Query conditions: + B0. Q is over a single table T. + B1. The attributes referenced by Q are a subset of the attributes of I. + B2. All attributes QA in Q can be divided into 3 overlapping groups: + - SA = {S_1, ..., S_l, [C]} - from the SELECT clause, where C is + referenced by any number of MIN and/or MAX functions if present. + - WA = {W_1, ..., W_p} - from the WHERE clause + - GA = <G_1, ..., G_k> - from the GROUP BY clause (if any) + = SA - if Q is a DISTINCT query (based on the + equivalence of DISTINCT and GROUP queries. + - NGA = QA - (GA union C) = {NG_1, ..., NG_m} - the ones not in + GROUP BY and not referenced by MIN/MAX functions. + with the following properties specified below. + B3. If Q has a GROUP BY WITH ROLLUP clause the access method is not + applicable. + + SA1. There is at most one attribute in SA referenced by any number of + MIN and/or MAX functions which, which if present, is denoted as C. + SA2. The position of the C attribute in the index is after the last A_k. + SA3. The attribute C can be referenced in the WHERE clause only in + predicates of the forms: + - (C {< | <= | > | >= | =} const) + - (const {< | <= | > | >= | =} C) + - (C between const_i and const_j) + - C IS NULL + - C IS NOT NULL + - C != const + SA4. If Q has a GROUP BY clause, there are no other aggregate functions + except MIN and MAX. For queries with DISTINCT, aggregate functions + are allowed. + SA5. The select list in DISTINCT queries should not contain expressions. + GA1. If Q has a GROUP BY clause, then GA is a prefix of I. 
That is, if + G_i = A_j => i = j. + GA2. If Q has a DISTINCT clause, then there is a permutation of SA that + forms a prefix of I. This permutation is used as the GROUP clause + when the DISTINCT query is converted to a GROUP query. + GA3. The attributes in GA may participate in arbitrary predicates, divided + into two groups: + - RNG(G_1,...,G_q ; where q <= k) is a range condition over the + attributes of a prefix of GA + - PA(G_i1,...G_iq) is an arbitrary predicate over an arbitrary subset + of GA. Since P is applied to only GROUP attributes it filters some + groups, and thus can be applied after the grouping. + GA4. There are no expressions among G_i, just direct column references. + NGA1.If in the index I there is a gap between the last GROUP attribute G_k, + and the MIN/MAX attribute C, then NGA must consist of exactly the index + attributes that constitute the gap. As a result there is a permutation + of NGA that coincides with the gap in the index <B_1, ..., B_m>. + NGA2.If BA <> {}, then the WHERE clause must contain a conjunction EQ of + equality conditions for all NG_i of the form (NG_i = const) or + (const = NG_i), such that each NG_i is referenced in exactly one + conjunct. Informally, the predicates provide constants to fill the + gap in the index. + WA1. There are no other attributes in the WHERE clause except the ones + referenced in predicates RNG, PA, PC, EQ defined above. Therefore + WA is subset of (GA union NGA union C) for GA,NGA,C that pass the above + tests. By transitivity then it also follows that each WA_i participates + in the index I (if this was already tested for GA, NGA and C). + + C) Overall query form: + SELECT EXPR([A_1,...,A_k], [B_1,...,B_m], [MIN(C)], [MAX(C)]) + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND EQ(B_1,...,B_m)] + [AND PC(C)] + [AND PA(A_i1,...,A_iq)] + GROUP BY A_1,...,A_k + [HAVING PH(A_1, ..., B_1,..., C)] + where EXPR(...) 
is an arbitrary expression over some or all SELECT fields, + or: + SELECT DISTINCT A_i1,...,A_ik + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND PA(A_i1,...,A_iq)]; + + NOTES + If the current query satisfies the conditions above, and if + (mem_root! = NULL), then the function constructs and returns a new TRP + object, that is later used to construct a new QUICK_GROUP_MIN_MAX_SELECT. + If (mem_root == NULL), then the function only tests whether the current + query satisfies the conditions above, and, if so, sets + is_applicable = TRUE. + + Queries with DISTINCT for which index access can be used are transformed + into equivalent group-by queries of the form: + + SELECT A_1,...,A_k FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND PA(A_i1,...,A_iq)] + GROUP BY A_1,...,A_k; + + The group-by list is a permutation of the select attributes, according + to their order in the index. + + TODO + - What happens if the query groups by the MIN/MAX field, and there is no + other field as in: "select min(a) from t1 group by a" ? + - We assume that the general correctness of the GROUP-BY query was checked + before this point. Is this correct, or do we have to check it completely? + - Lift the limitation in condition (B3), that is, make this access method + applicable to ROLLUP queries. + + RETURN + If mem_root != NULL + - valid TRP_GROUP_MIN_MAX object if this QUICK class can be used for + the query + - NULL o/w. + If mem_root == NULL + - NULL +*/ + +static TRP_GROUP_MIN_MAX * +get_best_group_min_max(PARAM *param, SEL_TREE *tree) +{ + THD *thd= param->thd; + JOIN *join= thd->lex->current_select->join; + TABLE *table= param->table; + bool have_min= FALSE; /* TRUE if there is a MIN function. */ + bool have_max= FALSE; /* TRUE if there is a MAX function. */ + Item_field *min_max_arg_item= NULL;/* The argument of all MIN/MAX functions.*/ + KEY_PART_INFO *min_max_arg_part= NULL; /* The corresponding keypart. 
*/ + uint group_prefix_len= 0; /* Length (in bytes) of the key prefix. */ + KEY *index_info= NULL; /* The index chosen for data access. */ + uint index= 0; /* The id of the chosen index. */ + uint group_key_parts= 0; /* Number of index key parts in the group prefix. */ + uint used_key_parts= 0; /* Number of index key parts used for access. */ + byte key_infix[MAX_KEY_LENGTH]; /* Constants from equality predicates.*/ + uint key_infix_len= 0; /* Length of key_infix. */ + TRP_GROUP_MIN_MAX *read_plan= NULL; /* The eventually constructed TRP. */ + uint key_part_nr; + ORDER *tmp_group; + Item *item; + Item_field *item_field; + DBUG_ENTER("get_best_group_min_max"); + + /* Perform few 'cheap' tests whether this access method is applicable. */ + if (!join) + DBUG_RETURN(NULL); /* This is not a select statement. */ + if ((join->tables != 1) || /* The query must reference one table. */ + ((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. */ + (!join->select_distinct)) || + (join->select_lex->olap == ROLLUP_TYPE)) /* Check (B3) for ROLLUP */ + DBUG_RETURN(NULL); + if (table->s->keys == 0) /* There are no indexes to use. */ + DBUG_RETURN(NULL); + + /* Analyze the query in more detail. */ + List_iterator<Item> select_items_it(join->fields_list); + + /* Check (SA1,SA4) and store the only MIN/MAX argument - the C attribute.*/ + if (join->make_sum_func_list(join->all_fields, join->fields_list, 1)) + DBUG_RETURN(NULL); + if (join->sum_funcs[0]) + { + Item_sum *min_max_item; + Item_sum **func_ptr= join->sum_funcs; + while ((min_max_item= *(func_ptr++))) + { + if (min_max_item->sum_func() == Item_sum::MIN_FUNC) + have_min= TRUE; + else if (min_max_item->sum_func() == Item_sum::MAX_FUNC) + have_max= TRUE; + else + DBUG_RETURN(NULL); + + /* The argument of MIN/MAX. */ + Item *expr= min_max_item->args[0]->real_item(); + if (expr->type() == Item::FIELD_ITEM) /* Is it an attribute? */ + { + if (! min_max_arg_item) + min_max_arg_item= (Item_field*) expr; + else if (! 
min_max_arg_item->eq(expr, 1)) + DBUG_RETURN(NULL); + } + else + DBUG_RETURN(NULL); + } + } + + /* Check (SA5). */ + if (join->select_distinct) + { + while ((item= select_items_it++)) + { + if (item->type() != Item::FIELD_ITEM) + DBUG_RETURN(NULL); + } + } + + /* Check (GA4) - that there are no expressions among the group attributes. */ + for (tmp_group= join->group_list; tmp_group; tmp_group= tmp_group->next) + { + if ((*tmp_group->item)->type() != Item::FIELD_ITEM) + DBUG_RETURN(NULL); + } + + /* + Check that table has at least one compound index such that the conditions + (GA1,GA2) are all TRUE. If there is more than one such index, select the + first one. Here we set the variables: group_prefix_len and index_info. + */ + KEY *cur_index_info= table->key_info; + KEY *cur_index_info_end= cur_index_info + table->s->keys; + KEY_PART_INFO *cur_part= NULL; + KEY_PART_INFO *end_part; /* Last part for loops. */ + /* Last index part. */ + KEY_PART_INFO *last_part= NULL; + KEY_PART_INFO *first_non_group_part= NULL; + KEY_PART_INFO *first_non_infix_part= NULL; + uint key_infix_parts= 0; + uint cur_group_key_parts= 0; + uint cur_group_prefix_len= 0; + /* Cost-related variables for the best index so far. */ + double best_read_cost= DBL_MAX; + ha_rows best_records= 0; + SEL_ARG *best_index_tree= NULL; + ha_rows best_quick_prefix_records= 0; + uint best_param_idx= 0; + double cur_read_cost= DBL_MAX; + ha_rows cur_records; + SEL_ARG *cur_index_tree= NULL; + ha_rows cur_quick_prefix_records= 0; + uint cur_param_idx=MAX_KEY; + key_map cur_used_key_parts; + uint pk= param->table->s->primary_key; + + for (uint cur_index= 0 ; cur_index_info != cur_index_info_end ; + cur_index_info++, cur_index++) + { + /* Check (B1) - if current index is covering. */ + if (!table->used_keys.is_set(cur_index)) + goto next_index; + + /* + If the current storage manager is such that it appends the primary key to + each index, then the above condition is insufficient to check if the + index is covering. 
In such cases it may happen that some fields are + covered by the PK index, but not by the current index. Since we can't + use the concatenation of both indexes for index lookup, such an index + does not qualify as covering in our case. If this is the case, below + we check that all query fields are indeed covered by 'cur_index'. + */ + if (pk < MAX_KEY && cur_index != pk && + (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX)) + { + /* For each table field */ + for (uint i= 0; i < table->s->fields; i++) + { + Field *cur_field= table->field[i]; + /* + If the field is used in the current query, check that the + field is covered by some keypart of the current index. + */ + if (thd->query_id == cur_field->query_id) + { + KEY_PART_INFO *key_part= cur_index_info->key_part; + KEY_PART_INFO *key_part_end= key_part + cur_index_info->key_parts; + for (;;) + { + if (key_part->field == cur_field) + break; + if (++key_part == key_part_end) + goto next_index; // Field was not part of key + } + } + } + } + + /* + Check (GA1) for GROUP BY queries. + */ + if (join->group_list) + { + cur_part= cur_index_info->key_part; + end_part= cur_part + cur_index_info->key_parts; + /* Iterate in parallel over the GROUP list and the index parts. */ + for (tmp_group= join->group_list; tmp_group && (cur_part != end_part); + tmp_group= tmp_group->next, cur_part++) + { + /* + TODO: + tmp_group::item is an array of Item, is it OK to consider only the + first Item? If so, then why? What is the array for? + */ + /* Above we already checked that all group items are fields. */ + DBUG_ASSERT((*tmp_group->item)->type() == Item::FIELD_ITEM); + Item_field *group_field= (Item_field *) (*tmp_group->item); + if (group_field->field->eq(cur_part->field)) + { + cur_group_prefix_len+= cur_part->store_length; + ++cur_group_key_parts; + } + else + goto next_index; + } + } + /* + Check (GA2) if this is a DISTINCT query. 
+ If GA2, then Store a new ORDER object in group_fields_array at the + position of the key part of item_field->field. Thus we get the ORDER + objects for each field ordered as the corresponding key parts. + Later group_fields_array of ORDER objects is used to convert the query + to a GROUP query. + */ + else if (join->select_distinct) + { + select_items_it.rewind(); + cur_used_key_parts.clear_all(); + uint max_key_part= 0; + while ((item= select_items_it++)) + { + item_field= (Item_field*) item; /* (SA5) already checked above. */ + /* Find the order of the key part in the index. */ + key_part_nr= get_field_keypart(cur_index_info, item_field->field); + /* + Check if this attribute was already present in the select list. + If it was present, then its corresponding key part was alredy used. + */ + if (cur_used_key_parts.is_set(key_part_nr)) + continue; + if (key_part_nr < 1 || key_part_nr > join->fields_list.elements) + goto next_index; + cur_part= cur_index_info->key_part + key_part_nr - 1; + cur_group_prefix_len+= cur_part->store_length; + cur_used_key_parts.set_bit(key_part_nr); + ++cur_group_key_parts; + max_key_part= max(max_key_part,key_part_nr); + } + /* + Check that used key parts forms a prefix of the index. + To check this we compare bits in all_parts and cur_parts. + all_parts have all bits set from 0 to (max_key_part-1). + cur_parts have bits set for only used keyparts. + */ + ulonglong all_parts, cur_parts; + all_parts= (1<<max_key_part) - 1; + cur_parts= cur_used_key_parts.to_ulonglong() >> 1; + if (all_parts != cur_parts) + goto next_index; + } + else + DBUG_ASSERT(FALSE); + + /* Check (SA2). */ + if (min_max_arg_item) + { + key_part_nr= get_field_keypart(cur_index_info, min_max_arg_item->field); + if (key_part_nr <= cur_group_key_parts) + goto next_index; + min_max_arg_part= cur_index_info->key_part + key_part_nr - 1; + } + + /* + Check (NGA1, NGA2) and extract a sequence of constants to be used as part + of all search keys. 
+ */ + + /* + If there is MIN/MAX, each keypart between the last group part and the + MIN/MAX part must participate in one equality with constants, and all + keyparts after the MIN/MAX part must not be referenced in the query. + + If there is no MIN/MAX, the keyparts after the last group part can be + referenced only in equalities with constants, and the referenced keyparts + must form a sequence without any gaps that starts immediately after the + last group keypart. + */ + last_part= cur_index_info->key_part + cur_index_info->key_parts; + first_non_group_part= (cur_group_key_parts < cur_index_info->key_parts) ? + cur_index_info->key_part + cur_group_key_parts : + NULL; + first_non_infix_part= min_max_arg_part ? + (min_max_arg_part < last_part) ? + min_max_arg_part + 1 : + NULL : + NULL; + if (first_non_group_part && + (!min_max_arg_part || (min_max_arg_part - first_non_group_part > 0))) + { + if (tree) + { + uint dummy; + SEL_ARG *index_range_tree= get_index_range_tree(cur_index, tree, param, + &dummy); + if (!get_constant_key_infix(cur_index_info, index_range_tree, + first_non_group_part, min_max_arg_part, + last_part, thd, key_infix, &key_infix_len, + &first_non_infix_part)) + goto next_index; + } + else if (min_max_arg_part && + (min_max_arg_part - first_non_group_part > 0)) + { + /* + There is a gap but no range tree, thus no predicates at all for the + non-group keyparts. + */ + goto next_index; + } + else if (first_non_group_part && join->conds) + { + /* + If there is no MIN/MAX function in the query, but some index + key part is referenced in the WHERE clause, then this index + cannot be used because the WHERE condition over the keypart's + field cannot be 'pushed' to the index (because there is no + range 'tree'), and the WHERE clause must be evaluated before + GROUP BY/DISTINCT. + */ + /* + Store the first and last keyparts that need to be analyzed + into one array that can be passed as parameter. 
+ */ + KEY_PART_INFO *key_part_range[2]; + key_part_range[0]= first_non_group_part; + key_part_range[1]= last_part; + + /* Check if cur_part is referenced in the WHERE clause. */ + if (join->conds->walk(&Item::find_item_in_field_list_processor, + (byte*) key_part_range)) + goto next_index; + } + } + + /* + Test (WA1) partially - that no other keypart after the last infix part is + referenced in the query. + */ + if (first_non_infix_part) + { + for (cur_part= first_non_infix_part; cur_part != last_part; cur_part++) + { + if (cur_part->field->query_id == thd->query_id) + goto next_index; + } + } + + /* If we got to this point, cur_index_info passes the test. */ + key_infix_parts= key_infix_len ? + (first_non_infix_part - first_non_group_part) : 0; + used_key_parts= cur_group_key_parts + key_infix_parts; + + /* Compute the cost of using this index. */ + if (tree) + { + /* Find the SEL_ARG sub-tree that corresponds to the chosen index. */ + cur_index_tree= get_index_range_tree(cur_index, tree, param, + &cur_param_idx); + /* Check if this range tree can be used for prefix retrieval. */ + cur_quick_prefix_records= check_quick_select(param, cur_param_idx, + cur_index_tree); + } + cost_group_min_max(table, cur_index_info, used_key_parts, + cur_group_key_parts, tree, cur_index_tree, + cur_quick_prefix_records, have_min, have_max, + &cur_read_cost, &cur_records); + /* + If cur_read_cost is lower than best_read_cost use cur_index. + Do not compare doubles directly because they may have different + representations (64 vs. 80 bits). 
+ */ + if (cur_read_cost < best_read_cost - (DBL_EPSILON * cur_read_cost)) + { + DBUG_ASSERT(tree != 0 || cur_param_idx == MAX_KEY); + index_info= cur_index_info; + index= cur_index; + best_read_cost= cur_read_cost; + best_records= cur_records; + best_index_tree= cur_index_tree; + best_quick_prefix_records= cur_quick_prefix_records; + best_param_idx= cur_param_idx; + group_key_parts= cur_group_key_parts; + group_prefix_len= cur_group_prefix_len; + } + + next_index: + cur_group_key_parts= 0; + cur_group_prefix_len= 0; + } + if (!index_info) /* No usable index found. */ + DBUG_RETURN(NULL); + + /* Check (SA3) for the where clause. */ + if (join->conds && min_max_arg_item && + !check_group_min_max_predicates(join->conds, min_max_arg_item, + (index_info->flags & HA_SPATIAL) ? + Field::itMBR : Field::itRAW)) + DBUG_RETURN(NULL); + + /* The query passes all tests, so construct a new TRP object. */ + read_plan= new (param->mem_root) + TRP_GROUP_MIN_MAX(have_min, have_max, min_max_arg_part, + group_prefix_len, used_key_parts, + group_key_parts, index_info, index, + key_infix_len, + (key_infix_len > 0) ? key_infix : NULL, + tree, best_index_tree, best_param_idx, + best_quick_prefix_records); + if (read_plan) + { + if (tree && read_plan->quick_prefix_records == 0) + DBUG_RETURN(NULL); + + read_plan->read_cost= best_read_cost; + read_plan->records= best_records; + + DBUG_PRINT("info", + ("Returning group min/max plan: cost: %g, records: %lu", + read_plan->read_cost, (ulong) read_plan->records)); + } + + DBUG_RETURN(read_plan); +} + + +/* + Check that the MIN/MAX attribute participates only in range predicates + with constants. 
+ + SYNOPSIS + check_group_min_max_predicates() + cond tree (or subtree) describing all or part of the WHERE + clause being analyzed + min_max_arg_item the field referenced by the MIN/MAX function(s) + min_max_arg_part the keypart of the MIN/MAX argument if any + + DESCRIPTION + The function walks recursively over the cond tree representing a WHERE + clause, and checks condition (SA3) - if a field is referenced by a MIN/MAX + aggregate function, it is referenced only by one of the following + predicates: {=, !=, <, <=, >, >=, between, is null, is not null}. + + RETURN + TRUE if cond passes the test + FALSE o/w +*/ + +static bool +check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, + Field::imagetype image_type) +{ + DBUG_ENTER("check_group_min_max_predicates"); + DBUG_ASSERT(cond && min_max_arg_item); + + cond= cond->real_item(); + Item::Type cond_type= cond->type(); + if (cond_type == Item::COND_ITEM) /* 'AND' or 'OR' */ + { + DBUG_PRINT("info", ("Analyzing: %s", ((Item_func*) cond)->func_name())); + List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list()); + Item *and_or_arg; + while ((and_or_arg= li++)) + { + if (!check_group_min_max_predicates(and_or_arg, min_max_arg_item, + image_type)) + DBUG_RETURN(FALSE); + } + DBUG_RETURN(TRUE); + } + + /* + TODO: + This is a very crude fix to handle sub-selects in the WHERE clause + (Item_subselect objects). With the test below we rule out from the + optimization all queries with subselects in the WHERE clause. What has to + be done, is that here we should analyze whether the subselect references + the MIN/MAX argument field, and disallow the optimization only if this is + so. + */ + if (cond_type == Item::SUBSELECT_ITEM) + DBUG_RETURN(FALSE); + + /* We presume that at this point there are no other Items than functions. */ + DBUG_ASSERT(cond_type == Item::FUNC_ITEM); + + /* Test if cond references only group-by or non-group fields. 
*/ + Item_func *pred= (Item_func*) cond; + Item **arguments= pred->arguments(); + Item *cur_arg; + DBUG_PRINT("info", ("Analyzing: %s", pred->func_name())); + for (uint arg_idx= 0; arg_idx < pred->argument_count (); arg_idx++) + { + cur_arg= arguments[arg_idx]->real_item(); + DBUG_PRINT("info", ("cur_arg: %s", cur_arg->full_name())); + if (cur_arg->type() == Item::FIELD_ITEM) + { + if (min_max_arg_item->eq(cur_arg, 1)) + { + /* + If pred references the MIN/MAX argument, check whether pred is a range + condition that compares the MIN/MAX argument with a constant. + */ + Item_func::Functype pred_type= pred->functype(); + if (pred_type != Item_func::EQUAL_FUNC && + pred_type != Item_func::LT_FUNC && + pred_type != Item_func::LE_FUNC && + pred_type != Item_func::GT_FUNC && + pred_type != Item_func::GE_FUNC && + pred_type != Item_func::BETWEEN && + pred_type != Item_func::ISNULL_FUNC && + pred_type != Item_func::ISNOTNULL_FUNC && + pred_type != Item_func::EQ_FUNC && + pred_type != Item_func::NE_FUNC) + DBUG_RETURN(FALSE); + + /* Check that pred compares min_max_arg_item with a constant. */ + Item *args[3]; + bzero(args, 3 * sizeof(Item*)); + bool inv; + /* Test if this is a comparison of a field and a constant. */ + if (!simple_pred(pred, args, &inv)) + DBUG_RETURN(FALSE); + + /* Check for compatible string comparisons - similar to get_mm_leaf. */ + if (args[0] && args[1] && !args[2] && // this is a binary function + min_max_arg_item->result_type() == STRING_RESULT && + /* + Don't use an index when comparing strings of different collations. + */ + ((args[1]->result_type() == STRING_RESULT && + image_type == Field::itRAW && + ((Field_str*) min_max_arg_item->field)->charset() != + pred->compare_collation()) + || + /* + We can't always use indexes when comparing a string index to a + number. 
+ */ + (args[1]->result_type() != STRING_RESULT && + min_max_arg_item->field->cmp_type() != args[1]->result_type()))) + DBUG_RETURN(FALSE); + } + } + else if (cur_arg->type() == Item::FUNC_ITEM) + { + if (!check_group_min_max_predicates(cur_arg, min_max_arg_item, + image_type)) + DBUG_RETURN(FALSE); + } + else if (cur_arg->const_item()) + { + DBUG_RETURN(TRUE); + } + else + DBUG_RETURN(FALSE); + } + + DBUG_RETURN(TRUE); +} + + +/* + Extract a sequence of constants from a conjunction of equality predicates. + + SYNOPSIS + get_constant_key_infix() + index_info [in] Descriptor of the chosen index. + index_range_tree [in] Range tree for the chosen index + first_non_group_part [in] First index part after group attribute parts + min_max_arg_part [in] The keypart of the MIN/MAX argument if any + last_part [in] Last keypart of the index + thd [in] Current thread + key_infix [out] Infix of constants to be used for index lookup + key_infix_len [out] Lenghth of the infix + first_non_infix_part [out] The first keypart after the infix (if any) + + DESCRIPTION + Test conditions (NGA1, NGA2) from get_best_group_min_max(). Namely, + for each keypart field NGF_i not in GROUP-BY, check that there is a + constant equality predicate among conds with the form (NGF_i = const_ci) or + (const_ci = NGF_i). + Thus all the NGF_i attributes must fill the 'gap' between the last group-by + attribute and the MIN/MAX attribute in the index (if present). If these + conditions hold, copy each constant from its corresponding predicate into + key_infix, in the order its NG_i attribute appears in the index, and update + key_infix_len with the total length of the key parts in key_infix. 

  RETURN
    TRUE if the index passes the test
    FALSE o/w
*/

static bool
get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree,
                       KEY_PART_INFO *first_non_group_part,
                       KEY_PART_INFO *min_max_arg_part,
                       KEY_PART_INFO *last_part, THD *thd,
                       byte *key_infix, uint *key_infix_len,
                       KEY_PART_INFO **first_non_infix_part)
{
  SEL_ARG *cur_range;
  KEY_PART_INFO *cur_part;
  /* End part for the first loop below. */
  KEY_PART_INFO *end_part= min_max_arg_part ? min_max_arg_part : last_part;

  *key_infix_len= 0;
  byte *key_ptr= key_infix;
  for (cur_part= first_non_group_part; cur_part != end_part; cur_part++)
  {
    /*
      Find the range tree for the current keypart. We assume that
      index_range_tree points to the leftmost keypart in the index.
    */
    for (cur_range= index_range_tree; cur_range;
         cur_range= cur_range->next_key_part)
    {
      if (cur_range->field->eq(cur_part->field))
        break;
    }
    if (!cur_range)
    {
      if (min_max_arg_part)
        return FALSE; /* The current keypart has no range predicates at all. */
      else
      {
        /*
          No range for this keypart and no MIN/MAX argument: the infix ends
          here, and everything from cur_part on is a non-infix part.
        */
        *first_non_infix_part= cur_part;
        return TRUE;
      }
    }

    /* Check that the current range tree is a single point interval. */
    if (cur_range->prev || cur_range->next)
      return FALSE; /* This is not the only range predicate for the field. */
    if ((cur_range->min_flag & NO_MIN_RANGE) ||
        (cur_range->max_flag & NO_MAX_RANGE) ||
        (cur_range->min_flag & NEAR_MIN) || (cur_range->max_flag & NEAR_MAX))
      return FALSE; /* Open or half-open interval - not a single point. */

    uint field_length= cur_part->store_length;
    if ((cur_range->maybe_null &&
         cur_range->min_value[0] && cur_range->max_value[0])
        ||
        (memcmp(cur_range->min_value, cur_range->max_value, field_length) == 0))
    { /* cur_range specifies 'IS NULL' or an equality condition. */
      /* Append the constant (or NULL marker) to the infix buffer. */
      memcpy(key_ptr, cur_range->min_value, field_length);
      key_ptr+= field_length;
      *key_infix_len+= field_length;
    }
    else
      return FALSE;
  }

  if (!min_max_arg_part && (cur_part == last_part))
    *first_non_infix_part= last_part;

  return TRUE;
}


/*
  Find the key part referenced by a field.

  SYNOPSIS
    get_field_keypart()
    index  descriptor of an index
    field  field that possibly references some key part in index

  NOTES
    The return value can be used to get a KEY_PART_INFO pointer by
    part= index->key_part + get_field_keypart(...) - 1;

  RETURN
    Positive number which is the consecutive number of the key part, or
    0 if field does not reference any index field.
*/

static inline uint
get_field_keypart(KEY *index, Field *field)
{
  KEY_PART_INFO *part, *end;

  for (part= index->key_part, end= part + index->key_parts; part < end; part++)
  {
    if (field->eq(part->field))
      return part - index->key_part + 1;
  }
  return 0;
}


/*
  Find the SEL_ARG sub-tree that corresponds to the chosen index.

  SYNOPSIS
    get_index_range_tree()
    index     [in]  The ID of the index being looked for
    range_tree[in]  Tree of ranges being searched
    param     [in]  PARAM from SQL_SELECT::test_quick_select
    param_idx [out] Index in the array PARAM::key that corresponds to 'index'

  DESCRIPTION
    A SEL_TREE contains range trees for all usable indexes. This procedure
    finds the SEL_ARG sub-tree for 'index'. The members of a SEL_TREE are
    ordered in the same way as the members of PARAM::key, thus we first find
    the corresponding index in the array PARAM::key. This index is returned
    through the variable param_idx, to be used later as argument of
    check_quick_select().

  RETURN
    Pointer to the SEL_ARG subtree that corresponds to index.
+*/ + +SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, PARAM *param, + uint *param_idx) +{ + uint idx= 0; /* Index nr in param->key_parts */ + while (idx < param->keys) + { + if (index == param->real_keynr[idx]) + break; + idx++; + } + *param_idx= idx; + return(range_tree->keys[idx]); +} + + +/* + Compute the cost of a quick_group_min_max_select for a particular index. + + SYNOPSIS + cost_group_min_max() + table [in] The table being accessed + index_info [in] The index used to access the table + used_key_parts [in] Number of key parts used to access the index + group_key_parts [in] Number of index key parts in the group prefix + range_tree [in] Tree of ranges for all indexes + index_tree [in] The range tree for the current index + quick_prefix_records [in] Number of records retrieved by the internally + used quick range select if any + have_min [in] True if there is a MIN function + have_max [in] True if there is a MAX function + read_cost [out] The cost to retrieve rows via this quick select + records [out] The number of rows retrieved + + DESCRIPTION + This method computes the access cost of a TRP_GROUP_MIN_MAX instance and + the number of rows returned. It updates this->read_cost and this->records. + + NOTES + The cost computation distinguishes several cases: + 1) No equality predicates over non-group attributes (thus no key_infix). + If groups are bigger than blocks on the average, then we assume that it + is very unlikely that block ends are aligned with group ends, thus even + if we look for both MIN and MAX keys, all pairs of neighbor MIN/MAX + keys, except for the first MIN and the last MAX keys, will be in the + same block. If groups are smaller than blocks, then we are going to + read all blocks. + 2) There are equality predicates over non-group attributes. + In this case the group prefix is extended by additional constants, and + as a result the min/max values are inside sub-groups of the original + groups. 
       The number of blocks that will be read depends on whether the
       ends of these sub-groups will be contained in the same or in different
       blocks. We compute the probability for the two ends of a subgroup to be
       in two different blocks as the ratio of:
       - the number of positions of the left-end of a subgroup inside a group,
         such that the right end of the subgroup is past the end of the buffer
         containing the left-end, and
       - the total number of possible positions for the left-end of the
         subgroup, which is the number of keys in the containing group.
       We assume it is very unlikely that two ends of subsequent subgroups are
       in the same block.
    3) There are range predicates over the group attributes.
       Then some groups may be filtered by the range predicates. We use the
       selectivity of the range predicates to decide how many groups will be
       filtered.

  TODO
     - Take into account the optional range predicates over the MIN/MAX
       argument.
     - Check if we have a PK index and we use all cols - then each key is a
       group, and it will be better to use an index scan.

  RETURN
    None
*/

void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
                        uint group_key_parts, SEL_TREE *range_tree,
                        SEL_ARG *index_tree, ha_rows quick_prefix_records,
                        bool have_min, bool have_max,
                        double *read_cost, ha_rows *records)
{
  uint table_records;
  uint num_groups;
  uint num_blocks;
  uint keys_per_block;
  uint keys_per_group;
  uint keys_per_subgroup; /* Average number of keys in sub-groups */
                          /* formed by a key infix. */
  double p_overlap; /* Probability that a sub-group overlaps two blocks. */
  double quick_prefix_selectivity;
  double io_cost;
  double cpu_cost= 0; /* TODO: CPU cost of index_read calls? */
  DBUG_ENTER("cost_group_min_max");

  table_records= table->file->records;
  /* Estimate how many (key, rowid) pairs fit in a half-full index block. */
  keys_per_block= (table->file->block_size / 2 /
                   (index_info->key_length + table->file->ref_length)
                  + 1);
  num_blocks= (table_records / keys_per_block) + 1;

  /* Compute the number of keys in a group. */
  keys_per_group= index_info->rec_per_key[group_key_parts - 1];
  if (keys_per_group == 0) /* If there is no statistics try to guess */
    /* each group contains 10% of all records */
    keys_per_group= (table_records / 10) + 1;
  num_groups= (table_records / keys_per_group) + 1;

  /* Apply the selectivity of the quick select for group prefixes. */
  if (range_tree && (quick_prefix_records != HA_POS_ERROR))
  {
    quick_prefix_selectivity= (double) quick_prefix_records /
                              (double) table_records;
    num_groups= (uint) rint(num_groups * quick_prefix_selectivity);
    set_if_bigger(num_groups, 1);
  }

  if (used_key_parts > group_key_parts)
  { /*
      Compute the probability that two ends of a subgroup are inside
      different blocks.
    */
    keys_per_subgroup= index_info->rec_per_key[used_key_parts - 1];
    if (keys_per_subgroup >= keys_per_block) /* If a subgroup is bigger than */
      p_overlap= 1.0;       /* a block, it will overlap at least two blocks. */
    else
    {
      double blocks_per_group= (double) num_blocks / (double) num_groups;
      p_overlap= (blocks_per_group * (keys_per_subgroup - 1)) / keys_per_group;
      p_overlap= min(p_overlap, 1.0);
    }
    io_cost= (double) min(num_groups * (1 + p_overlap), num_blocks);
  }
  else
    /*
      No key infix: one lookup per group (plus one if both MIN and MAX are
      retrieved), unless groups are smaller than blocks, in which case all
      blocks are read anyway.
    */
    io_cost= (keys_per_group > keys_per_block) ?
             (have_min && have_max) ? (double) (num_groups + 1) :
                                      (double) num_groups :
             (double) num_blocks;

  /*
    TODO: If there is no WHERE clause and no other expressions, there should be
    no CPU cost. We leave it here to make this cost comparable to that of index
    scan as computed in SQL_SELECT::test_quick_select().
  */
  cpu_cost= (double) num_groups / TIME_FOR_COMPARE;

  *read_cost= io_cost + cpu_cost;
  *records= num_groups;

  DBUG_PRINT("info",
             ("table rows: %u  keys/block: %u  keys/group: %u  result rows: %lu  blocks: %u",
              table_records, keys_per_block, keys_per_group, (ulong) *records,
              num_blocks));
  DBUG_VOID_RETURN;
}


/*
  Construct a new quick select object for queries with group by with min/max.

  SYNOPSIS
    TRP_GROUP_MIN_MAX::make_quick()
    param              Parameter from test_quick_select
    retrieve_full_rows ignored
    parent_alloc       Memory pool to use, if any.

  NOTES
    Make_quick ignores the retrieve_full_rows parameter because
    QUICK_GROUP_MIN_MAX_SELECT always performs 'index only' scans.
    The other parameters are ignored as well because all necessary
    data to create the QUICK object is computed at this TRP creation
    time.

  RETURN
    New QUICK_GROUP_MIN_MAX_SELECT object if successfully created,
    NULL o/w.
*/

QUICK_SELECT_I *
TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows,
                              MEM_ROOT *parent_alloc)
{
  QUICK_GROUP_MIN_MAX_SELECT *quick;
  DBUG_ENTER("TRP_GROUP_MIN_MAX::make_quick");

  quick= new QUICK_GROUP_MIN_MAX_SELECT(param->table,
                                        param->thd->lex->current_select->join,
                                        have_min, have_max, min_max_arg_part,
                                        group_prefix_len, used_key_parts,
                                        index_info, index, read_cost, records,
                                        key_infix_len, key_infix,
                                        parent_alloc);
  if (!quick)
    DBUG_RETURN(NULL);

  if (quick->init())
  {
    delete quick;
    DBUG_RETURN(NULL);
  }

  if (range_tree)
  {
    DBUG_ASSERT(quick_prefix_records > 0);
    if (quick_prefix_records == HA_POS_ERROR)
      quick->quick_prefix_select= NULL; /* Can't construct a quick select. */
    else
      /* Make a QUICK_RANGE_SELECT to be used for group prefix retrieval. */
      quick->quick_prefix_select= get_quick_select(param, param_idx,
                                                   index_tree,
                                                   &quick->alloc);

    /*
      Extract the SEL_ARG subtree that contains only ranges for the MIN/MAX
      attribute, and create an array of QUICK_RANGES to be used by the
      new quick select.
    */
    if (min_max_arg_part)
    {
      SEL_ARG *min_max_range= index_tree;
      while (min_max_range) /* Find the tree for the MIN/MAX key part. */
      {
        if (min_max_range->field->eq(min_max_arg_part->field))
          break;
        min_max_range= min_max_range->next_key_part;
      }
      /* Scroll to the leftmost interval for the MIN/MAX argument. */
      while (min_max_range && min_max_range->prev)
        min_max_range= min_max_range->prev;
      /* Create an array of QUICK_RANGEs for the MIN/MAX argument. */
      while (min_max_range)
      {
        if (quick->add_range(min_max_range))
        {
          delete quick;
          quick= NULL;
          DBUG_RETURN(NULL);
        }
        min_max_range= min_max_range->next;
      }
    }
  }
  else
    quick->quick_prefix_select= NULL;

  quick->update_key_stat();
  quick->adjust_prefix_ranges();

  DBUG_RETURN(quick);
}


/*
  Construct new quick select for group queries with min/max.

  SYNOPSIS
    QUICK_GROUP_MIN_MAX_SELECT::QUICK_GROUP_MIN_MAX_SELECT()
    table             The table being accessed
    join              Descriptor of the current query
    have_min          TRUE if the query selects a MIN function
    have_max          TRUE if the query selects a MAX function
    min_max_arg_part  The only argument field of all MIN/MAX functions
    group_prefix_len  Length of all key parts in the group prefix
    prefix_key_parts  All key parts in the group prefix
    index_info        The index chosen for data access
    use_index         The id of index_info
    read_cost         Cost of this access method
    records           Number of records returned
    key_infix_len     Length of the key infix appended to the group prefix
    key_infix         Infix of constants from equality predicates
    parent_alloc      Memory pool for this and quick_prefix_select data

  RETURN
    None
*/

QUICK_GROUP_MIN_MAX_SELECT::
QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg,
                           bool have_max_arg,
                           KEY_PART_INFO *min_max_arg_part_arg,
                           uint group_prefix_len_arg,
                           uint used_key_parts_arg, KEY *index_info_arg,
                           uint use_index, double read_cost_arg,
                           ha_rows records_arg, uint key_infix_len_arg,
                           byte *key_infix_arg, MEM_ROOT *parent_alloc)
  :join(join_arg), index_info(index_info_arg),
   group_prefix_len(group_prefix_len_arg), have_min(have_min_arg),
   have_max(have_max_arg), seen_first_key(FALSE),
   min_max_arg_part(min_max_arg_part_arg), key_infix(key_infix_arg),
   key_infix_len(key_infix_len_arg), min_functions_it(NULL),
   max_functions_it(NULL)
{
  head= table;
  file= head->file;
  index= use_index;
  record= head->record[0];
  tmp_record= head->record[1];
  read_time= read_cost_arg;
  records= records_arg;
  used_key_parts= used_key_parts_arg;
  /* Total lookup-prefix length: group-by key parts plus the constant infix. */
  real_prefix_len= group_prefix_len + key_infix_len;
  group_prefix= NULL;
  min_max_arg_len= min_max_arg_part ? min_max_arg_part->store_length : 0;

  /*
    We can't have parent_alloc set as the init function can't handle this case
    yet.
  */
  DBUG_ASSERT(!parent_alloc);
  if (!parent_alloc)
  {
    init_sql_alloc(&alloc, join->thd->variables.range_alloc_block_size, 0);
    join->thd->mem_root= &alloc;
  }
  else
    bzero(&alloc, sizeof(MEM_ROOT));  // ensure that it's not used
}


/*
  Do post-constructor initialization.

  SYNOPSIS
    QUICK_GROUP_MIN_MAX_SELECT::init()

  DESCRIPTION
    The method performs initialization that cannot be done in the constructor
    such as memory allocations that may fail. It allocates memory for the
    group prefix and infix buffers, and for the lists of MIN/MAX items to be
    updated during execution.

  RETURN
    0      OK
    other  Error code
*/

int QUICK_GROUP_MIN_MAX_SELECT::init()
{
  if (group_prefix) /* Already initialized. */
    return 0;

  if (!(last_prefix= (byte*) alloc_root(&alloc, group_prefix_len)))
      return 1;
  /*
    We may use group_prefix to store keys with all select fields, so allocate
    enough space for it.
  */
  if (!(group_prefix= (byte*) alloc_root(&alloc,
                                         real_prefix_len + min_max_arg_len)))
    return 1;

  if (key_infix_len > 0)
  {
    /*
      The memory location pointed to by key_infix will be deleted soon, so
      allocate a new buffer and copy the key_infix into it.
    */
    byte *tmp_key_infix= (byte*) alloc_root(&alloc, key_infix_len);
    if (!tmp_key_infix)
      return 1;
    memcpy(tmp_key_infix, this->key_infix, key_infix_len);
    this->key_infix= tmp_key_infix;
  }

  if (min_max_arg_part)
  {
    /* Array of QUICK_RANGE* for the ranges over the MIN/MAX argument. */
    if (my_init_dynamic_array(&min_max_ranges, sizeof(QUICK_RANGE*), 16, 16))
      return 1;

    if (have_min)
    {
      if (!(min_functions= new List<Item_sum>))
        return 1;
    }
    else
      min_functions= NULL;
    if (have_max)
    {
      if (!(max_functions= new List<Item_sum>))
        return 1;
    }
    else
      max_functions= NULL;

    /*
      Collect the MIN()/MAX() aggregates of the join into the lists that
      get_next() will update for each group.
    */
    Item_sum *min_max_item;
    Item_sum **func_ptr= join->sum_funcs;
    while ((min_max_item= *(func_ptr++)))
    {
      if (have_min && (min_max_item->sum_func() == Item_sum::MIN_FUNC))
        min_functions->push_back(min_max_item);
      else if (have_max && (min_max_item->sum_func() == Item_sum::MAX_FUNC))
        max_functions->push_back(min_max_item);
    }

    if (have_min)
    {
      if (!(min_functions_it= new List_iterator<Item_sum>(*min_functions)))
        return 1;
    }

    if (have_max)
    {
      if (!(max_functions_it= new List_iterator<Item_sum>(*max_functions)))
        return 1;
    }
  }
  else
    min_max_ranges.elements= 0;

  return 0;
}


QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT()
{
  DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT");
  if (file->inited != handler::NONE)
    file->ha_index_end();
  if (min_max_arg_part)
    delete_dynamic(&min_max_ranges);
  free_root(&alloc,MYF(0));
  delete min_functions_it;
  delete max_functions_it;
  delete quick_prefix_select;
  DBUG_VOID_RETURN;
}


/*
  Eventually create and add a new quick range object.

  SYNOPSIS
    QUICK_GROUP_MIN_MAX_SELECT::add_range()
    sel_range  Range object from which a new QUICK_RANGE is constructed

  NOTES
    Construct a new QUICK_RANGE object from a SEL_ARG object, and
    add it to the array min_max_ranges. If sel_arg is an infinite
    range, e.g. (x < 5 or x > 4), then skip it and do not construct
    a quick range.

  RETURN
    FALSE on success
    TRUE  otherwise
*/

bool QUICK_GROUP_MIN_MAX_SELECT::add_range(SEL_ARG *sel_range)
{
  QUICK_RANGE *range;
  uint range_flag= sel_range->min_flag | sel_range->max_flag;

  /* Skip (-inf,+inf) ranges, e.g. (x < 5 or x > 4). */
  if ((range_flag & NO_MIN_RANGE) && (range_flag & NO_MAX_RANGE))
    return FALSE;

  if (!(sel_range->min_flag & NO_MIN_RANGE) &&
      !(sel_range->max_flag & NO_MAX_RANGE))
  {
    if (sel_range->maybe_null &&
        sel_range->min_value[0] && sel_range->max_value[0])
      range_flag|= NULL_RANGE; /* IS NULL condition */
    else if (memcmp(sel_range->min_value, sel_range->max_value,
                    min_max_arg_len) == 0)
      range_flag|= EQ_RANGE;  /* equality condition */
  }
  range= new QUICK_RANGE(sel_range->min_value, min_max_arg_len,
                         sel_range->max_value, min_max_arg_len,
                         range_flag);
  if (!range)
    return TRUE;
  if (insert_dynamic(&min_max_ranges, (gptr)&range))
    return TRUE;
  return FALSE;
}


/*
  Opens the ranges if there are more conditions in quick_prefix_select than
  the ones used for jumping through the prefixes.

  SYNOPSIS
    QUICK_GROUP_MIN_MAX_SELECT::adjust_prefix_ranges()

  NOTES
    quick_prefix_select is made over the conditions on the whole key.
    It defines a number of ranges of length x.
    However when jumping through the prefixes we use only the first
    few most significant keyparts in the range key. However if there
    are more keyparts to follow the ones we are using we must make the
    condition on the key inclusive (because x < "ab" means
    x[0] < 'a' OR (x[0] == 'a' AND x[1] < 'b').
    To achieve the above we must turn off the NEAR_MIN/NEAR_MAX flags.
*/
void QUICK_GROUP_MIN_MAX_SELECT::adjust_prefix_ranges ()
{
  if (quick_prefix_select &&
      group_prefix_len < quick_prefix_select->max_used_key_length)
  {
    DYNAMIC_ARRAY *arr;
    uint inx;

    /* Make every prefix range inclusive on both ends. */
    for (inx= 0, arr= &quick_prefix_select->ranges; inx < arr->elements; inx++)
    {
      QUICK_RANGE *range;

      get_dynamic(arr, (gptr)&range, inx);
      range->flag &= ~(NEAR_MIN | NEAR_MAX);
    }
  }
}


/*
  Determine the total number and length of the keys that will be used for
  index lookup.

  SYNOPSIS
    QUICK_GROUP_MIN_MAX_SELECT::update_key_stat()

  DESCRIPTION
    The total length of the keys used for index lookup depends on whether
    there are any predicates referencing the min/max argument, and/or if
    the min/max argument field can be NULL.
    This function does an optimistic analysis whether the search key might
    be extended by a constant for the min/max keypart. It is 'optimistic'
    because during actual execution it may happen that a particular range
    is skipped, and then a shorter key will be used. However this is data
    dependent and can't be easily estimated here.

  RETURN
    None
*/

void QUICK_GROUP_MIN_MAX_SELECT::update_key_stat()
{
  max_used_key_length= real_prefix_len;
  if (min_max_ranges.elements > 0)
  {
    QUICK_RANGE *cur_range;
    if (have_min)
    { /* Check if the right-most range has a lower boundary. */
      get_dynamic(&min_max_ranges, (gptr)&cur_range,
                  min_max_ranges.elements - 1);
      if (!(cur_range->flag & NO_MIN_RANGE))
      {
        max_used_key_length+= min_max_arg_len;
        used_key_parts++;
        return;
      }
    }
    if (have_max)
    { /* Check if the left-most range has an upper boundary. */
      get_dynamic(&min_max_ranges, (gptr)&cur_range, 0);
      if (!(cur_range->flag & NO_MAX_RANGE))
      {
        max_used_key_length+= min_max_arg_len;
        used_key_parts++;
        return;
      }
    }
  }
  else if (have_min && min_max_arg_part &&
           min_max_arg_part->field->real_maybe_null())
  {
    /*
      If a MIN/MAX argument value is NULL, we can quickly determine
      that we're in the beginning of the next group, because NULLs
      are always < any other value. This allows us to quickly
      determine the end of the current group and jump to the next
      group (see next_min()) and thus effectively increases the
      usable key length.
    */
    max_used_key_length+= min_max_arg_len;
    used_key_parts++;
  }
}


/*
  Initialize a quick group min/max select for key retrieval.

  SYNOPSIS
    QUICK_GROUP_MIN_MAX_SELECT::reset()

  DESCRIPTION
    Initialize the index chosen for access and find and store the prefix
    of the last group. The method is expensive since it performs disk access.

  RETURN
    0      OK
    other  Error code
*/

int QUICK_GROUP_MIN_MAX_SELECT::reset(void)
{
  int result;
  DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset");

  file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */
  if ((result= file->ha_index_init(index)))
    DBUG_RETURN(result);
  if (quick_prefix_select && quick_prefix_select->reset())
    DBUG_RETURN(1);
  result= file->index_last(record);
  if (result == HA_ERR_END_OF_FILE)
    DBUG_RETURN(0);
  /* Save the prefix of the last group. */
  key_copy(last_prefix, record, index_info, group_prefix_len);

  DBUG_RETURN(0);
}



/*
  Get the next key containing the MIN and/or MAX key for the next group.

  SYNOPSIS
    QUICK_GROUP_MIN_MAX_SELECT::get_next()

  DESCRIPTION
    The method finds the next subsequent group of records that satisfies the
    query conditions and finds the keys that contain the MIN/MAX values for
    the key part referenced by the MIN/MAX function(s).
    Once a group and its
    MIN/MAX values are found, store these values in the Item_sum objects for
    the MIN/MAX functions. The rest of the values in the result row are stored
    in the Item_field::result_field of each select field. If the query does
    not contain MIN and/or MAX functions, then the function only finds the
    group prefix, which is a query answer itself.

  NOTES
    If both MIN and MAX are computed, then we use the fact that if there is
    no MIN key, there can't be a MAX key as well, so we can skip looking
    for a MAX key in this case.

  RETURN
    0                  on success
    HA_ERR_END_OF_FILE if returned all keys
    other              if some error occurred
*/

int QUICK_GROUP_MIN_MAX_SELECT::get_next()
{
  int min_res= 0;
  int max_res= 0;
#ifdef HPUX11
  /*
    volatile is required by a bug in the HP compiler due to which the
    last test of result fails.
  */
  volatile int result;
#else
  int result;
#endif
  int is_last_prefix= 0;

  DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::get_next");

  /*
    Loop until a group is found that satisfies all query conditions or the last
    group is reached.
  */
  do
  {
    result= next_prefix();
    /*
      Check if this is the last group prefix. Notice that at this point
      this->record contains the current prefix in record format.
    */
    if (!result)
    {
      is_last_prefix= key_cmp(index_info->key_part, last_prefix,
                              group_prefix_len);
      DBUG_ASSERT(is_last_prefix <= 0);
    }
    else
    {
      /* A missing prefix key means "try the next group"; other errors stop. */
      if (result == HA_ERR_KEY_NOT_FOUND)
        continue;
      break;
    }

    if (have_min)
    {
      min_res= next_min();
      if (min_res == 0)
        update_min_result();
    }
    /* If there is no MIN in the group, there is no MAX either. */
    if ((have_max && !have_min) ||
        (have_max && have_min && (min_res == 0)))
    {
      max_res= next_max();
      if (max_res == 0)
        update_max_result();
      /* If a MIN was found, a MAX must have been found as well. */
      DBUG_ASSERT((have_max && !have_min) ||
                  (have_max && have_min && (max_res == 0)));
    }
    /*
      If this is just a GROUP BY or DISTINCT without MIN or MAX and there
      are equality predicates for the key parts after the group, find the
      first sub-group with the extended prefix.
    */
    if (!have_min && !have_max && key_infix_len > 0)
      result= file->index_read(record, group_prefix, real_prefix_len,
                               HA_READ_KEY_EXACT);

    /* The group's status is that of whichever lookup is authoritative. */
    result= have_min ? min_res : have_max ? max_res : result;
  }
  while (result == HA_ERR_KEY_NOT_FOUND && is_last_prefix != 0);

  if (result == 0)
    /*
      Partially mimic the behavior of end_select_send. Copy the
      field data from Item_field::field into Item_field::result_field
      of each non-aggregated field (the group fields, and optionally
      other fields in non-ANSI SQL mode).
    */
    copy_fields(&join->tmp_table_param);
  else if (result == HA_ERR_KEY_NOT_FOUND)
    result= HA_ERR_END_OF_FILE;

  DBUG_RETURN(result);
}


/*
  Retrieve the minimal key in the next group.

  SYNOPSIS
    QUICK_GROUP_MIN_MAX_SELECT::next_min()

  DESCRIPTION
    Find the minimal key within this group such that the key satisfies the query
    conditions and NULL semantics. The found key is loaded into this->record.

  IMPLEMENTATION
    Depending on the values of min_max_ranges.elements, key_infix_len, and
    whether there is a NULL in the MIN field, this function may directly
    return without any data access. In this case we use the key loaded into
    this->record by the call to this->next_prefix() just before this call.

  RETURN
    0                    on success
    HA_ERR_KEY_NOT_FOUND if no MIN key was found that fulfills all conditions.
    other                if some error occurred
*/

int QUICK_GROUP_MIN_MAX_SELECT::next_min()
{
  int result= 0;
  DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::next_min");

  /* Find the MIN key using the eventually extended group prefix.
  */
  if (min_max_ranges.elements > 0)
  {
    if ((result= next_min_in_range()))
      DBUG_RETURN(result);
  }
  else
  {
    /* Apply the constant equality conditions to the non-group select fields */
    if (key_infix_len > 0)
    {
      if ((result= file->index_read(record, group_prefix, real_prefix_len,
                                    HA_READ_KEY_EXACT)))
        DBUG_RETURN(result);
    }

    /*
      If the min/max argument field is NULL, skip subsequent rows in the same
      group with NULL in it. Notice that:
      - if the first row in a group doesn't have a NULL in the field, no row
      in the same group has (because NULL < any other value),
      - min_max_arg_part->field->ptr points to some place in 'record'.
    */
    if (min_max_arg_part && min_max_arg_part->field->is_null())
    {
      /* Find the first subsequent record without NULL in the MIN/MAX field. */
      key_copy(tmp_record, record, index_info, 0);
      result= file->index_read(record, tmp_record,
                               real_prefix_len + min_max_arg_len,
                               HA_READ_AFTER_KEY);
      /*
        Check if the new record belongs to the current group by comparing its
        prefix with the group's prefix. If it is from the next group, then the
        whole group has NULLs in the MIN/MAX field, so use the first record in
        the group as a result.
        TODO:
        It is possible to reuse this new record as the result candidate for the
        next call to next_min(), and to save one lookup in the next call. For
        this add a new member 'this->next_group_prefix'.
      */
      if (!result)
      {
        if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
          key_restore(record, tmp_record, index_info, 0);
      }
      else if (result == HA_ERR_KEY_NOT_FOUND)
        result= 0; /* There is a result in any case. */
    }
  }

  /*
    If the MIN attribute is non-nullable, this->record already contains the
    MIN key in the group, so just return.
  */
  DBUG_RETURN(result);
}


/*
  Retrieve the maximal key in the next group.

  SYNOPSIS
    QUICK_GROUP_MIN_MAX_SELECT::next_max()

  DESCRIPTION
    Lookup the maximal key of the group, and store it into this->record.

  RETURN
    0                    on success
    HA_ERR_KEY_NOT_FOUND if no MAX key was found that fulfills all conditions.
    other                if some error occurred
*/

int QUICK_GROUP_MIN_MAX_SELECT::next_max()
{
  int result;

  DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::next_max");

  /* Get the last key in the (possibly extended) group. */
  if (min_max_ranges.elements > 0)
    result= next_max_in_range();
  else
    result= file->index_read(record, group_prefix, real_prefix_len,
                             HA_READ_PREFIX_LAST);
  DBUG_RETURN(result);
}


/*
  Determine the prefix of the next group.

  SYNOPSIS
    QUICK_GROUP_MIN_MAX_SELECT::next_prefix()

  DESCRIPTION
    Determine the prefix of the next group that satisfies the query conditions.
    If there is a range condition referencing the group attributes, use a
    QUICK_RANGE_SELECT object to retrieve the *first* key that satisfies the
    condition. If there is a key infix of constants, append this infix
    immediately after the group attributes. The possibly extended prefix is
    stored in this->group_prefix. The first key of the found group is stored in
    this->record, on which relies this->next_min().

  RETURN
    0                    on success
    HA_ERR_KEY_NOT_FOUND if there is no key with the formed prefix
    HA_ERR_END_OF_FILE   if there are no more keys
    other                if some error occurred
*/
int QUICK_GROUP_MIN_MAX_SELECT::next_prefix()
{
  int result;
  DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::next_prefix");

  if (quick_prefix_select)
  {
    /* On the first call start from the beginning (no previous prefix). */
    byte *cur_prefix= seen_first_key ? group_prefix : NULL;
    if ((result= quick_prefix_select->get_next_prefix(group_prefix_len,
                                                      cur_prefix)))
      DBUG_RETURN(result);
    seen_first_key= TRUE;
  }
  else
  {
    if (!seen_first_key)
    {
      result= file->index_first(record);
      if (result)
        DBUG_RETURN(result);
      seen_first_key= TRUE;
    }
    else
    {
      /* Load the first key in this group into record.
*/ + result= file->index_read(record, group_prefix, group_prefix_len, + HA_READ_AFTER_KEY); + if (result) + DBUG_RETURN(result); + } + } + + /* Save the prefix of this group for subsequent calls. */ + key_copy(group_prefix, record, index_info, group_prefix_len); + /* Append key_infix to group_prefix. */ + if (key_infix_len > 0) + memcpy(group_prefix + group_prefix_len, + key_infix, key_infix_len); + + DBUG_RETURN(0); +} + + +/* + Find the minimal key in a group that satisfies some range conditions for the + min/max argument field. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() + + DESCRIPTION + Given the sequence of ranges min_max_ranges, find the minimal key that is + in the left-most possible range. If there is no such key, then the current + group does not have a MIN key that satisfies the WHERE clause. If a key is + found, its value is stored in this->record. + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of + the ranges + other if some error +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() +{ + ha_rkey_function find_flag; + uint search_prefix_len; + QUICK_RANGE *cur_range; + bool found_null= FALSE; + int result= HA_ERR_KEY_NOT_FOUND; + + DBUG_ASSERT(min_max_ranges.elements > 0); + + for (uint range_idx= 0; range_idx < min_max_ranges.elements; range_idx++) + { /* Search from the left-most range to the right. */ + get_dynamic(&min_max_ranges, (gptr)&cur_range, range_idx); + + /* + If the current value for the min/max argument is bigger than the right + boundary of cur_range, there is no need to check this range. + */ + if (range_idx != 0 && !(cur_range->flag & NO_MAX_RANGE) && + (key_cmp(min_max_arg_part, (const byte*) cur_range->max_key, + min_max_arg_len) == 1)) + continue; + + if (cur_range->flag & NO_MIN_RANGE) + { + find_flag= HA_READ_KEY_EXACT; + search_prefix_len= real_prefix_len; + } + else + { + /* Extend the search key with the lower boundary for this range. 
*/ + memcpy(group_prefix + real_prefix_len, cur_range->min_key, + cur_range->min_length); + search_prefix_len= real_prefix_len + min_max_arg_len; + find_flag= (cur_range->flag & (EQ_RANGE | NULL_RANGE)) ? + HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MIN) ? + HA_READ_AFTER_KEY : HA_READ_KEY_OR_NEXT; + } + + result= file->index_read(record, group_prefix, search_prefix_len, + find_flag); + if ((result == HA_ERR_KEY_NOT_FOUND) && + (cur_range->flag & (EQ_RANGE | NULL_RANGE))) + continue; /* Check the next range. */ + else if (result) + { + /* + In all other cases (HA_ERR_*, HA_READ_KEY_EXACT with NO_MIN_RANGE, + HA_READ_AFTER_KEY, HA_READ_KEY_OR_NEXT) if the lookup failed for this + range, it can't succeed for any other subsequent range. + */ + break; + } + + /* A key was found. */ + if (cur_range->flag & EQ_RANGE) + break; /* No need to perform the checks below for equal keys. */ + + if (cur_range->flag & NULL_RANGE) + { + /* + Remember this key, and continue looking for a non-NULL key that + satisfies some other condition. + */ + memcpy(tmp_record, record, head->s->rec_buff_length); + found_null= TRUE; + continue; + } + + /* Check if record belongs to the current group. */ + if (key_cmp(index_info->key_part, group_prefix, real_prefix_len)) + { + result = HA_ERR_KEY_NOT_FOUND; + continue; + } + + /* If there is an upper limit, check if the found key is in the range. */ + if ( !(cur_range->flag & NO_MAX_RANGE) ) + { + /* Compose the MAX key for the range. */ + byte *max_key= (byte*) my_alloca(real_prefix_len + min_max_arg_len); + memcpy(max_key, group_prefix, real_prefix_len); + memcpy(max_key + real_prefix_len, cur_range->max_key, + cur_range->max_length); + /* Compare the found key with max_key. 
*/ + int cmp_res= key_cmp(index_info->key_part, max_key, + real_prefix_len + min_max_arg_len); + if (!((cur_range->flag & NEAR_MAX) && (cmp_res == -1) || + (cmp_res <= 0))) + { + result = HA_ERR_KEY_NOT_FOUND; + continue; + } + } + /* If we got to this point, the current key qualifies as MIN. */ + DBUG_ASSERT(result == 0); + break; + } + /* + If there was a key with NULL in the MIN/MAX field, and there was no other + key without NULL from the same group that satisfies some other condition, + then use the key with the NULL. + */ + if (found_null && result) + { + memcpy(record, tmp_record, head->s->rec_buff_length); + result= 0; + } + return result; +} + + +/* + Find the maximal key in a group that satisfies some range conditions for the + min/max argument field. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() + + DESCRIPTION + Given the sequence of ranges min_max_ranges, find the maximal key that is + in the right-most possible range. If there is no such key, then the current + group does not have a MAX key that satisfies the WHERE clause. If a key is + found, its value is stored in this->record. + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of + the ranges + other if some error +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() +{ + ha_rkey_function find_flag; + uint search_prefix_len; + QUICK_RANGE *cur_range; + int result; + + DBUG_ASSERT(min_max_ranges.elements > 0); + + for (uint range_idx= min_max_ranges.elements; range_idx > 0; range_idx--) + { /* Search from the right-most range to the left. */ + get_dynamic(&min_max_ranges, (gptr)&cur_range, range_idx - 1); + + /* + If the current value for the min/max argument is smaller than the left + boundary of cur_range, there is no need to check this range. 
+ */ + if (range_idx != min_max_ranges.elements && + !(cur_range->flag & NO_MIN_RANGE) && + (key_cmp(min_max_arg_part, (const byte*) cur_range->min_key, + min_max_arg_len) == -1)) + continue; + + if (cur_range->flag & NO_MAX_RANGE) + { + find_flag= HA_READ_PREFIX_LAST; + search_prefix_len= real_prefix_len; + } + else + { + /* Extend the search key with the upper boundary for this range. */ + memcpy(group_prefix + real_prefix_len, cur_range->max_key, + cur_range->max_length); + search_prefix_len= real_prefix_len + min_max_arg_len; + find_flag= (cur_range->flag & EQ_RANGE) ? + HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MAX) ? + HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV; + } + + result= file->index_read(record, group_prefix, search_prefix_len, + find_flag); + + if ((result == HA_ERR_KEY_NOT_FOUND) && (cur_range->flag & EQ_RANGE)) + continue; /* Check the next range. */ + if (result) + { + /* + In no key was found with this upper bound, there certainly are no keys + in the ranges to the left. + */ + return result; + } + /* A key was found. */ + if (cur_range->flag & EQ_RANGE) + return 0; /* No need to perform the checks below for equal keys. */ + + /* Check if record belongs to the current group. */ + if (key_cmp(index_info->key_part, group_prefix, real_prefix_len)) + continue; // Row not found + + /* If there is a lower limit, check if the found key is in the range. */ + if ( !(cur_range->flag & NO_MIN_RANGE) ) + { + /* Compose the MIN key for the range. */ + byte *min_key= (byte*) my_alloca(real_prefix_len + min_max_arg_len); + memcpy(min_key, group_prefix, real_prefix_len); + memcpy(min_key + real_prefix_len, cur_range->min_key, + cur_range->min_length); + /* Compare the found key with min_key. */ + int cmp_res= key_cmp(index_info->key_part, min_key, + real_prefix_len + min_max_arg_len); + if (!((cur_range->flag & NEAR_MIN) && (cmp_res == 1) || + (cmp_res >= 0))) + continue; + } + /* If we got to this point, the current key qualifies as MAX. 
*/ + return result; + } + return HA_ERR_KEY_NOT_FOUND; +} + + +/* + Update all MIN function results with the newly found value. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::update_min_result() + + DESCRIPTION + The method iterates through all MIN functions and updates the result value + of each function by calling Item_sum::reset(), which in turn picks the new + result value from this->head->record[0], previously updated by + next_min(). The updated value is stored in a member variable of each of the + Item_sum objects, depending on the value type. + + IMPLEMENTATION + The update must be done separately for MIN and MAX, immediately after + next_min() was called and before next_max() is called, because both MIN and + MAX take their result value from the same buffer this->head->record[0] + (i.e. this->record). + + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::update_min_result() +{ + Item_sum *min_func; + + min_functions_it->rewind(); + while ((min_func= (*min_functions_it)++)) + min_func->reset(); +} + + +/* + Update all MAX function results with the newly found value. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::update_max_result() + + DESCRIPTION + The method iterates through all MAX functions and updates the result value + of each function by calling Item_sum::reset(), which in turn picks the new + result value from this->head->record[0], previously updated by + next_max(). The updated value is stored in a member variable of each of the + Item_sum objects, depending on the value type. + + IMPLEMENTATION + The update must be done separately for MIN and MAX, immediately after + next_max() was called, because both MIN and MAX take their result value + from the same buffer this->head->record[0] (i.e. this->record). 
+ + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::update_max_result() +{ + Item_sum *max_func; + + max_functions_it->rewind(); + while ((max_func= (*max_functions_it)++)) + max_func->reset(); +} + + +/* + Append comma-separated list of keys this quick select uses to key_names; + append comma-separated list of corresponding used lengths to used_lengths. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::add_keys_and_lengths() + key_names [out] Names of used indexes + used_lengths [out] Corresponding lengths of the index names + + DESCRIPTION + This method is used by select_describe to extract the names of the + indexes used by a quick select. + +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + key_names->append(index_info->name); + length= longlong2str(max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); +} + + +#ifndef DBUG_OFF + +static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map, + const char *msg) +{ + SEL_ARG **key,**end; + int idx; + char buff[1024]; + DBUG_ENTER("print_sel_tree"); + if (! _db_on_) + DBUG_VOID_RETURN; + + String tmp(buff,sizeof(buff),&my_charset_bin); + tmp.length(0); + for (idx= 0,key=tree->keys, end=key+param->keys ; + key != end ; + key++,idx++) + { + if (tree_map->is_set(idx)) + { + uint keynr= param->real_keynr[idx]; + if (tmp.length()) + tmp.append(','); + tmp.append(param->table->key_info[keynr].name); + } + } + if (!tmp.length()) + tmp.append(STRING_WITH_LEN("(empty)")); + + DBUG_PRINT("info", ("SEL_TREE %p (%s) scans:%s", tree, msg, tmp.ptr())); + + DBUG_VOID_RETURN; +} + + +static void print_ror_scans_arr(TABLE *table, const char *msg, + struct st_ror_scan_info **start, + struct st_ror_scan_info **end) +{ + DBUG_ENTER("print_ror_scans"); + if (! 
_db_on_) + DBUG_VOID_RETURN; + + char buff[1024]; + String tmp(buff,sizeof(buff),&my_charset_bin); + tmp.length(0); + for (;start != end; start++) + { + if (tmp.length()) + tmp.append(','); + tmp.append(table->key_info[(*start)->keynr].name); + } + if (!tmp.length()) + tmp.append(STRING_WITH_LEN("(empty)")); + DBUG_PRINT("info", ("ROR key scans (%s): %s", msg, tmp.ptr())); + DBUG_VOID_RETURN; } /***************************************************************************** @@ -3330,8 +9564,6 @@ void QUICK_SELECT_DESC::reset(void) ** of locking the DEBUG stream ! *****************************************************************************/ -#ifndef DBUG_OFF - static void print_key(KEY_PART *key_part,const char *key,uint used_length) { @@ -3355,8 +9587,11 @@ print_key(KEY_PART *key_part,const char *key,uint used_length) key++; // Skip null byte store_length--; } - field->set_key_image((char*) key, key_part->length, field->charset()); - field->val_str(&tmp); + field->set_key_image((char*) key, key_part->length); + if (field->type() == MYSQL_TYPE_BIT) + (void) field->val_int_as_str(&tmp, 1); + else + field->val_str(&tmp); fwrite(tmp.ptr(),sizeof(char),tmp.length(),DBUG_FILE); if (key+store_length < key_end) fputc('/',DBUG_FILE); @@ -3364,51 +9599,156 @@ print_key(KEY_PART *key_part,const char *key,uint used_length) } -static void print_quick(QUICK_SELECT *quick,const key_map* needed_reg) +static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg) { - QUICK_RANGE *range; char buf[MAX_KEY/8+1]; - DBUG_ENTER("print_param"); + DBUG_ENTER("print_quick"); if (! 
_db_on_ || !quick) DBUG_VOID_RETURN; - - List_iterator<QUICK_RANGE> li(quick->ranges); DBUG_LOCK_FILE; - fprintf(DBUG_FILE,"Used quick_range on key: %d (other_keys: 0x%s):\n", - quick->index, needed_reg->print(buf)); - while ((range=li++)) + + quick->dbug_dump(0, TRUE); + fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf)); + + DBUG_UNLOCK_FILE; + DBUG_VOID_RETURN; +} + + +void QUICK_RANGE_SELECT::dbug_dump(int indent, bool verbose) +{ + /* purecov: begin inspected */ + fprintf(DBUG_FILE, "%*squick range select, key %s, length: %d\n", + indent, "", head->key_info[index].name, max_used_key_length); + + if (verbose) { - if (!(range->flag & NO_MIN_RANGE)) + QUICK_RANGE *range; + QUICK_RANGE **pr= (QUICK_RANGE**)ranges.buffer; + QUICK_RANGE **end_range= pr + ranges.elements; + for (; pr != end_range; ++pr) { - print_key(quick->key_parts,range->min_key,range->min_length); - if (range->flag & NEAR_MIN) - fputs(" < ",DBUG_FILE); - else - fputs(" <= ",DBUG_FILE); - } - fputs("X",DBUG_FILE); + fprintf(DBUG_FILE, "%*s", indent + 2, ""); + range= *pr; + if (!(range->flag & NO_MIN_RANGE)) + { + print_key(key_parts,range->min_key,range->min_length); + if (range->flag & NEAR_MIN) + fputs(" < ",DBUG_FILE); + else + fputs(" <= ",DBUG_FILE); + } + fputs("X",DBUG_FILE); - if (!(range->flag & NO_MAX_RANGE)) - { - if (range->flag & NEAR_MAX) - fputs(" < ",DBUG_FILE); - else - fputs(" <= ",DBUG_FILE); - print_key(quick->key_parts,range->max_key,range->max_length); + if (!(range->flag & NO_MAX_RANGE)) + { + if (range->flag & NEAR_MAX) + fputs(" < ",DBUG_FILE); + else + fputs(" <= ",DBUG_FILE); + print_key(key_parts,range->max_key,range->max_length); + } + fputs("\n",DBUG_FILE); } - fputs("\n",DBUG_FILE); } - DBUG_UNLOCK_FILE; - DBUG_VOID_RETURN; + /* purecov: end */ } -#endif +void QUICK_INDEX_MERGE_SELECT::dbug_dump(int indent, bool verbose) +{ + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + QUICK_RANGE_SELECT *quick; + fprintf(DBUG_FILE, "%*squick 
index_merge select\n", indent, ""); + fprintf(DBUG_FILE, "%*smerged scans {\n", indent, ""); + while ((quick= it++)) + quick->dbug_dump(indent+2, verbose); + if (pk_quick_select) + { + fprintf(DBUG_FILE, "%*sclustered PK quick:\n", indent, ""); + pk_quick_select->dbug_dump(indent+2, verbose); + } + fprintf(DBUG_FILE, "%*s}\n", indent, ""); +} + +void QUICK_ROR_INTERSECT_SELECT::dbug_dump(int indent, bool verbose) +{ + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + QUICK_RANGE_SELECT *quick; + fprintf(DBUG_FILE, "%*squick ROR-intersect select, %scovering\n", + indent, "", need_to_fetch_row? "":"non-"); + fprintf(DBUG_FILE, "%*smerged scans {\n", indent, ""); + while ((quick= it++)) + quick->dbug_dump(indent+2, verbose); + if (cpk_quick) + { + fprintf(DBUG_FILE, "%*sclustered PK quick:\n", indent, ""); + cpk_quick->dbug_dump(indent+2, verbose); + } + fprintf(DBUG_FILE, "%*s}\n", indent, ""); +} + +void QUICK_ROR_UNION_SELECT::dbug_dump(int indent, bool verbose) +{ + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + QUICK_SELECT_I *quick; + fprintf(DBUG_FILE, "%*squick ROR-union select\n", indent, ""); + fprintf(DBUG_FILE, "%*smerged scans {\n", indent, ""); + while ((quick= it++)) + quick->dbug_dump(indent+2, verbose); + fprintf(DBUG_FILE, "%*s}\n", indent, ""); +} + + +/* + Print quick select information to DBUG_FILE. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::dbug_dump() + indent Indentation offset + verbose If TRUE show more detailed output. + + DESCRIPTION + Print the contents of this quick select to DBUG_FILE. The method also + calls dbug_dump() for the used quick select if any. + + IMPLEMENTATION + Caller is responsible for locking DBUG_FILE before this call and unlocking + it afterwards. 
+ + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose) +{ + fprintf(DBUG_FILE, + "%*squick_group_min_max_select: index %s (%d), length: %d\n", + indent, "", index_info->name, index, max_used_key_length); + if (key_infix_len > 0) + { + fprintf(DBUG_FILE, "%*susing key_infix with length %d:\n", + indent, "", key_infix_len); + } + if (quick_prefix_select) + { + fprintf(DBUG_FILE, "%*susing quick_range_select:\n", indent, ""); + quick_prefix_select->dbug_dump(indent + 2, verbose); + } + if (min_max_ranges.elements > 0) + { + fprintf(DBUG_FILE, "%*susing %d quick_ranges for MIN/MAX:\n", + indent, "", min_max_ranges.elements); + } +} + + +#endif /* NOT_USED */ /***************************************************************************** -** Instansiate templates +** Instantiate templates *****************************************************************************/ -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class List<QUICK_RANGE>; template class List_iterator<QUICK_RANGE>; #endif diff --git a/sql/opt_range.h b/sql/opt_range.h index 4b20d1fe0fe..3a737323eb7 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -24,16 +23,6 @@ #pragma interface /* gcc class implementation */ #endif -#define NO_MIN_RANGE 1 -#define NO_MAX_RANGE 2 -#define NEAR_MIN 4 -#define NEAR_MAX 8 -#define UNIQUE_RANGE 16 -#define EQ_RANGE 32 -#define NULL_RANGE 64 -#define GEOM_FLAG 128 - - typedef struct st_key_part { uint16 key,part, store_length, length; uint8 null_bit, flag; @@ -66,63 +55,622 @@ class QUICK_RANGE :public Sql_alloc { }; -class QUICK_SELECT { +/* + Quick select interface. + This class is a parent for all QUICK_*_SELECT and FT_SELECT classes. + + The usage scenario is as follows: + 1. Create quick select + quick= new QUICK_XXX_SELECT(...); + + 2. Perform lightweight initialization. This can be done in 2 ways: + 2.a: Regular initialization + if (quick->init()) + { + //the only valid action after failed init() call is delete + delete quick; + } + 2.b: Special initialization for quick selects merged by QUICK_ROR_*_SELECT + if (quick->init_ror_merged_scan()) + delete quick; + + 3. Perform zero, one, or more scans. + while (...) + { + // initialize quick select for scan. This may allocate + // buffers and/or prefetch rows. + if (quick->reset()) + { + //the only valid action after failed reset() call is delete + delete quick; + //abort query + } + + // perform the scan + do + { + res= quick->get_next(); + } while (res && ...) + } + + 4. Delete the select: + delete quick; + +*/ + +class QUICK_SELECT_I +{ +public: + bool sorted; + ha_rows records; /* estimate of # of records to be retrieved */ + double read_time; /* time to perform this retrieval */ + TABLE *head; + /* + Index this quick select uses, or MAX_KEY for quick selects + that use several indexes + */ + uint index; + + /* + Total length of first used_key_parts parts of the key. + Applicable if index!= MAX_KEY. + */ + uint max_used_key_length; + + /* + Max. 
number of (first) key parts this quick select uses for retrieval. + eg. for "(key1p1=c1 AND key1p2=c2) OR key1p1=c2" used_key_parts == 2. + Applicable if index!= MAX_KEY. + */ + uint used_key_parts; + + QUICK_SELECT_I(); + virtual ~QUICK_SELECT_I(){}; + + /* + Do post-constructor initialization. + SYNOPSIS + init() + + init() performs initializations that should have been in constructor if + it was possible to return errors from constructors. The join optimizer may + create and then delete quick selects without retrieving any rows so init() + must not contain any IO or CPU intensive code. + + If init() call fails the only valid action is to delete this quick select, + reset() and get_next() must not be called. + + RETURN + 0 OK + other Error code + */ + virtual int init() = 0; + + /* + Initialize quick select for row retrieval. + SYNOPSIS + reset() + + reset() should be called when it is certain that row retrieval will be + necessary. This call may do heavyweight initialization like buffering first + N records etc. If reset() call fails get_next() must not be called. + Note that reset() may be called several times if + * the quick select is executed in a subselect + * a JOIN buffer is used + + RETURN + 0 OK + other Error code + */ + virtual int reset(void) = 0; + + virtual int get_next() = 0; /* get next record to retrieve */ + + /* Range end should be called when we have looped over the whole index */ + virtual void range_end() {} + + virtual bool reverse_sorted() = 0; + virtual bool unique_key_range() { return false; } + + enum { + QS_TYPE_RANGE = 0, + QS_TYPE_INDEX_MERGE = 1, + QS_TYPE_RANGE_DESC = 2, + QS_TYPE_FULLTEXT = 3, + QS_TYPE_ROR_INTERSECT = 4, + QS_TYPE_ROR_UNION = 5, + QS_TYPE_GROUP_MIN_MAX = 6 + }; + + /* Get type of this quick select - one of the QS_TYPE_* values */ + virtual int get_type() = 0; + + /* + Initialize this quick select as a merged scan inside a ROR-union or a ROR- + intersection scan. 
The caller must not additionally call init() if this + function is called. + SYNOPSIS + init_ror_merged_scan() + reuse_handler If true, the quick select may use table->handler, otherwise + it must create and use a separate handler object. + RETURN + 0 Ok + other Error + */ + virtual int init_ror_merged_scan(bool reuse_handler) + { DBUG_ASSERT(0); return 1; } + + /* + Save ROWID of last retrieved row in file->ref. This used in ROR-merging. + */ + virtual void save_last_pos(){}; + + /* + Append comma-separated list of keys this quick select uses to key_names; + append comma-separated list of corresponding used lengths to used_lengths. + This is used by select_describe. + */ + virtual void add_keys_and_lengths(String *key_names, + String *used_lengths)=0; + + /* + Append text representation of quick select structure (what and how is + merged) to str. The result is added to "Extra" field in EXPLAIN output. + This function is implemented only by quick selects that merge other quick + selects output and/or can produce output suitable for merging. + */ + virtual void add_info_string(String *str) {}; + /* + Return 1 if any index used by this quick select + a) uses field that is listed in passed field list or + b) is automatically updated (like a timestamp) + c) can be updated by one of before update triggers defined on table + */ + virtual bool is_keys_used(List<Item> *fields); + + /* + rowid of last row retrieved by this quick select. This is used only when + doing ROR-index_merge selects + */ + byte *last_rowid; + + /* + Table record buffer used by this quick select. + */ + byte *record; +#ifndef DBUG_OFF + /* + Print quick select information to DBUG_FILE. Caller is responsible + for locking DBUG_FILE before this call and unlocking it afterwards. + */ + virtual void dbug_dump(int indent, bool verbose)= 0; +#endif +}; + + +struct st_qsel_param; +class SEL_ARG; + +/* + Quick select that does a range scan on a single key. The records are + returned in key order. 
+*/ +class QUICK_RANGE_SELECT : public QUICK_SELECT_I +{ +protected: + bool next,dont_free; public: - bool next,dont_free,sorted; int error; - uint index, max_used_key_length, used_key_parts; - TABLE *head; +protected: handler *file; - byte *record; - List<QUICK_RANGE> ranges; - List_iterator<QUICK_RANGE> it; - QUICK_RANGE *range; - MEM_ROOT alloc; + /* + If true, this quick select has its "own" handler object which should be + closed no later then this quick select is deleted. + */ + bool free_file; + bool in_range; + uint multi_range_count; /* copy from thd->variables.multi_range_count */ + uint multi_range_length; /* the allocated length for the array */ + uint multi_range_bufsiz; /* copy from thd->variables.read_rnd_buff_size */ + KEY_MULTI_RANGE *multi_range; /* the multi-range array (allocated and + freed by QUICK_RANGE_SELECT) */ + HANDLER_BUFFER *multi_range_buff; /* the handler buffer (allocated and + freed by QUICK_RANGE_SELECT) */ +protected: + friend class TRP_ROR_INTERSECT; + friend + QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, + struct st_table_ref *ref, + ha_rows records); + friend bool get_quick_keys(struct st_qsel_param *param, + QUICK_RANGE_SELECT *quick,KEY_PART *key, + SEL_ARG *key_tree, + char *min_key, uint min_key_flag, + char *max_key, uint max_key_flag); + friend QUICK_RANGE_SELECT *get_quick_select(struct st_qsel_param*,uint idx, + SEL_ARG *key_tree, + MEM_ROOT *alloc); + friend class QUICK_SELECT_DESC; + friend class QUICK_INDEX_MERGE_SELECT; + friend class QUICK_ROR_INTERSECT_SELECT; + friend class QUICK_GROUP_MIN_MAX_SELECT; + + DYNAMIC_ARRAY ranges; /* ordered array of range ptrs */ + QUICK_RANGE **cur_range; /* current element in ranges */ + + QUICK_RANGE *last_range; KEY_PART *key_parts; KEY_PART_INFO *key_part_info; - ha_rows records; - double read_time; + int cmp_next(QUICK_RANGE *range); + int cmp_prev(QUICK_RANGE *range); + bool row_in_ranges(); +public: + MEM_ROOT alloc; + + QUICK_RANGE_SELECT(THD *thd, 
TABLE *table,uint index_arg,bool no_alloc=0, + MEM_ROOT *parent_alloc=NULL); + ~QUICK_RANGE_SELECT(); - QUICK_SELECT(THD *thd, TABLE *table,uint index_arg,bool no_alloc=0); - virtual ~QUICK_SELECT(); - virtual void reset(void); - int init() + int init(); + int reset(void); + int get_next(); + void range_end(); + int get_next_prefix(uint prefix_length, byte *cur_prefix); + bool reverse_sorted() { return 0; } + bool unique_key_range(); + int init_ror_merged_scan(bool reuse_handler); + void save_last_pos() + { file->position(record); } + int get_type() { return QS_TYPE_RANGE; } + void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_info_string(String *str); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif +private: + /* Used only by QUICK_SELECT_DESC */ + QUICK_RANGE_SELECT(const QUICK_RANGE_SELECT& org) : QUICK_SELECT_I() { - key_part_info= head->key_info[index].key_part; - if (file->inited != handler::NONE) - file->ha_index_or_rnd_end(); - return error=file->ha_index_init(index); + bcopy(&org, this, sizeof(*this)); + multi_range_length= 0; + multi_range= NULL; + multi_range_buff= NULL; } - virtual int get_next(); - virtual bool reverse_sorted() { return 0; } - bool unique_key_range(); }; -class QUICK_SELECT_GEOM: public QUICK_SELECT +class QUICK_RANGE_SELECT_GEOM: public QUICK_RANGE_SELECT { public: - QUICK_SELECT_GEOM(THD *thd, TABLE *table, uint index_arg, bool no_alloc) - :QUICK_SELECT(thd, table, index_arg, no_alloc) + QUICK_RANGE_SELECT_GEOM(THD *thd, TABLE *table, uint index_arg, + bool no_alloc, MEM_ROOT *parent_alloc) + :QUICK_RANGE_SELECT(thd, table, index_arg, no_alloc, parent_alloc) {}; virtual int get_next(); }; -class QUICK_SELECT_DESC: public QUICK_SELECT +/* + QUICK_INDEX_MERGE_SELECT - index_merge access method quick select. 
+ + QUICK_INDEX_MERGE_SELECT uses + * QUICK_RANGE_SELECTs to get rows + * Unique class to remove duplicate rows + + INDEX MERGE OPTIMIZER + Current implementation doesn't detect all cases where index_merge could + be used, in particular: + * index_merge will never be used if range scan is possible (even if + range scan is more expensive) + + * index_merge+'using index' is not supported (this the consequence of + the above restriction) + + * If WHERE part contains complex nested AND and OR conditions, some ways + to retrieve rows using index_merge will not be considered. The choice + of read plan may depend on the order of conjuncts/disjuncts in WHERE + part of the query, see comments near imerge_list_or_list and + SEL_IMERGE::or_sel_tree_with_checks functions for details. + + * There is no "index_merge_ref" method (but index_merge on non-first + table in join is possible with 'range checked for each record'). + + See comments around SEL_IMERGE class and test_quick_select for more + details. + + ROW RETRIEVAL ALGORITHM + + index_merge uses Unique class for duplicates removal. index_merge takes + advantage of Clustered Primary Key (CPK) if the table has one. 
+ The index_merge algorithm consists of two phases: + + Phase 1 (implemented in QUICK_INDEX_MERGE_SELECT::prepare_unique): + prepare() + { + activate 'index only'; + while(retrieve next row for non-CPK scan) + { + if (there is a CPK scan and row will be retrieved by it) + skip this row; + else + put its rowid into Unique; + } + deactivate 'index only'; + } + + Phase 2 (implemented as sequence of QUICK_INDEX_MERGE_SELECT::get_next + calls): + + fetch() + { + retrieve all rows from row pointers stored in Unique; + free Unique; + retrieve all rows for CPK scan; + } +*/ + +class QUICK_INDEX_MERGE_SELECT : public QUICK_SELECT_I { public: - QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts); + QUICK_INDEX_MERGE_SELECT(THD *thd, TABLE *table); + ~QUICK_INDEX_MERGE_SELECT(); + + int init(); + int reset(void); + int get_next(); + bool reverse_sorted() { return false; } + bool unique_key_range() { return false; } + int get_type() { return QS_TYPE_INDEX_MERGE; } + void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_info_string(String *str); + bool is_keys_used(List<Item> *fields); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif + + bool push_quick_back(QUICK_RANGE_SELECT *quick_sel_range); + + /* range quick selects this index_merge read consists of */ + List<QUICK_RANGE_SELECT> quick_selects; + + /* quick select that uses clustered primary key (NULL if none) */ + QUICK_RANGE_SELECT* pk_quick_select; + + /* true if this select is currently doing a clustered PK scan */ + bool doing_pk_scan; + + MEM_ROOT alloc; + THD *thd; + int read_keys_and_merge(); + + /* used to get rows collected in Unique */ + READ_RECORD read_record; +}; + + +/* + Rowid-Ordered Retrieval (ROR) index intersection quick select. + This quick select produces intersection of row sequences returned + by several QUICK_RANGE_SELECTs it "merges". + + All merged QUICK_RANGE_SELECTs must return rowids in rowid order. 
+ QUICK_ROR_INTERSECT_SELECT will return rows in rowid order, too. + + All merged quick selects retrieve {rowid, covered_fields} tuples (not full + table records). + QUICK_ROR_INTERSECT_SELECT retrieves full records if it is not being used + by QUICK_ROR_INTERSECT_SELECT and all merged quick selects together don't + cover needed all fields. + + If one of the merged quick selects is a Clustered PK range scan, it is + used only to filter rowid sequence produced by other merged quick selects. +*/ + +class QUICK_ROR_INTERSECT_SELECT : public QUICK_SELECT_I +{ +public: + QUICK_ROR_INTERSECT_SELECT(THD *thd, TABLE *table, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + ~QUICK_ROR_INTERSECT_SELECT(); + + int init(); + int reset(void); + int get_next(); + bool reverse_sorted() { return false; } + bool unique_key_range() { return false; } + int get_type() { return QS_TYPE_ROR_INTERSECT; } + void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_info_string(String *str); + bool is_keys_used(List<Item> *fields); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif + int init_ror_merged_scan(bool reuse_handler); + bool push_quick_back(QUICK_RANGE_SELECT *quick_sel_range); + + /* + Range quick selects this intersection consists of, not including + cpk_quick. + */ + List<QUICK_RANGE_SELECT> quick_selects; + + /* + Merged quick select that uses Clustered PK, if there is one. This quick + select is not used for row retrieval, it is used for row retrieval. + */ + QUICK_RANGE_SELECT *cpk_quick; + + MEM_ROOT alloc; /* Memory pool for this and merged quick selects data. */ + THD *thd; /* current thread */ + bool need_to_fetch_row; /* if true, do retrieve full table records. */ + /* in top-level quick select, true if merged scans where initialized */ + bool scans_inited; +}; + + +/* + Rowid-Ordered Retrieval index union select. + This quick select produces union of row sequences returned by several + quick select it "merges". 
+ + All merged quick selects must return rowids in rowid order. + QUICK_ROR_UNION_SELECT will return rows in rowid order, too. + + All merged quick selects are set not to retrieve full table records. + ROR-union quick select always retrieves full records. + +*/ + +class QUICK_ROR_UNION_SELECT : public QUICK_SELECT_I +{ +public: + QUICK_ROR_UNION_SELECT(THD *thd, TABLE *table); + ~QUICK_ROR_UNION_SELECT(); + + int init(); + int reset(void); + int get_next(); + bool reverse_sorted() { return false; } + bool unique_key_range() { return false; } + int get_type() { return QS_TYPE_ROR_UNION; } + void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_info_string(String *str); + bool is_keys_used(List<Item> *fields); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif + + bool push_quick_back(QUICK_SELECT_I *quick_sel_range); + + List<QUICK_SELECT_I> quick_selects; /* Merged quick selects */ + + QUEUE queue; /* Priority queue for merge operation */ + MEM_ROOT alloc; /* Memory pool for this and merged quick selects data. */ + + THD *thd; /* current thread */ + byte *cur_rowid; /* buffer used in get_next() */ + byte *prev_rowid; /* rowid of last row returned by get_next() */ + bool have_prev_rowid; /* true if prev_rowid has valid data */ + uint rowid_length; /* table rowid length */ +private: + static int queue_cmp(void *arg, byte *val1, byte *val2); + bool scans_inited; +}; + + +/* + Index scan for GROUP-BY queries with MIN/MAX aggregate functions. + + This class provides a specialized index access method for GROUP-BY queries + of the forms: + + SELECT A_1,...,A_k, [B_1,...,B_m], [MIN(C)], [MAX(C)] + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND EQ(B_1,...,B_m)] + [AND PC(C)] + [AND PA(A_i1,...,A_iq)] + GROUP BY A_1,...,A_k; + + or + + SELECT DISTINCT A_i1,...,A_ik + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND PA(A_i1,...,A_iq)]; + + where all selected fields are parts of the same index. 
+ The class of queries that can be processed by this quick select is fully + specified in the description of get_best_trp_group_min_max() in opt_range.cc. + + The get_next() method directly produces result tuples, thus obviating the + need to call end_send_group() because all grouping is already done inside + get_next(). + + Since one of the requirements is that all select fields are part of the same + index, this class produces only index keys, and not complete records. +*/ + +class QUICK_GROUP_MIN_MAX_SELECT : public QUICK_SELECT_I +{ +private: + handler *file; /* The handler used to get data. */ + JOIN *join; /* Descriptor of the current query */ + KEY *index_info; /* The index chosen for data access */ + byte *record; /* Buffer where the next record is returned. */ + byte *tmp_record; /* Temporary storage for next_min(), next_max(). */ + byte *group_prefix; /* Key prefix consisting of the GROUP fields. */ + uint group_prefix_len; /* Length of the group prefix. */ + byte *last_prefix; /* Prefix of the last group for detecting EOF. */ + bool have_min; /* Specify whether we are computing */ + bool have_max; /* a MIN, a MAX, or both. */ + bool seen_first_key; /* Denotes whether the first key was retrieved.*/ + KEY_PART_INFO *min_max_arg_part; /* The keypart of the only argument field */ + /* of all MIN/MAX functions. */ + uint min_max_arg_len; /* The length of the MIN/MAX argument field */ + byte *key_infix; /* Infix of constants from equality predicates. */ + uint key_infix_len; + DYNAMIC_ARRAY min_max_ranges; /* Array of range ptrs for the MIN/MAX field. */ + uint real_prefix_len; /* Length of key prefix extended with key_infix. 
*/ + List<Item_sum> *min_functions; + List<Item_sum> *max_functions; + List_iterator<Item_sum> *min_functions_it; + List_iterator<Item_sum> *max_functions_it; +public: + /* + The following two members are public to allow easy access from + TRP_GROUP_MIN_MAX::make_quick() + */ + MEM_ROOT alloc; /* Memory pool for this and quick_prefix_select data. */ + QUICK_RANGE_SELECT *quick_prefix_select;/* For retrieval of group prefixes. */ +private: + int next_prefix(); + int next_min_in_range(); + int next_max_in_range(); + int next_min(); + int next_max(); + void update_min_result(); + void update_max_result(); +public: + QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join, bool have_min, + bool have_max, KEY_PART_INFO *min_max_arg_part, + uint group_prefix_len, uint used_key_parts, + KEY *index_info, uint use_index, double read_cost, + ha_rows records, uint key_infix_len, + byte *key_infix, MEM_ROOT *parent_alloc); + ~QUICK_GROUP_MIN_MAX_SELECT(); + bool add_range(SEL_ARG *sel_range); + void update_key_stat(); + void adjust_prefix_ranges(); + bool alloc_buffers(); + int init(); + int reset(); + int get_next(); + bool reverse_sorted() { return false; } + bool unique_key_range() { return false; } + int get_type() { return QS_TYPE_GROUP_MIN_MAX; } + void add_keys_and_lengths(String *key_names, String *used_lengths); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif +}; + + +class QUICK_SELECT_DESC: public QUICK_RANGE_SELECT +{ +public: + QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q, uint used_key_parts); int get_next(); bool reverse_sorted() { return 1; } + int get_type() { return QS_TYPE_RANGE_DESC; } private: - int cmp_prev(QUICK_RANGE *range); bool range_reads_after_key(QUICK_RANGE *range); #ifdef NOT_USED bool test_if_null_range(QUICK_RANGE *range, uint used_key_parts); #endif - void reset(void); + int reset(void) { rev_it.rewind(); return QUICK_RANGE_SELECT::reset(); } List<QUICK_RANGE> rev_ranges; List_iterator<QUICK_RANGE> rev_it; }; @@ -130,7 +678,7 @@ 
private: class SQL_SELECT :public Sql_alloc { public: - QUICK_SELECT *quick; // If quick-select used + QUICK_SELECT_I *quick; // If quick-select used COND *cond; // where condition TABLE *head; IO_CACHE file; // Positions to used records @@ -156,19 +704,20 @@ class SQL_SELECT :public Sql_alloc { }; -class FT_SELECT: public QUICK_SELECT { +class FT_SELECT: public QUICK_RANGE_SELECT { public: - FT_SELECT(THD *thd, TABLE *table, uint key): - QUICK_SELECT (thd, table, key, 1) { init(); } + FT_SELECT(THD *thd, TABLE *table, uint key) : + QUICK_RANGE_SELECT (thd, table, key, 1) { VOID(init()); } ~FT_SELECT() { file->ft_end(); } - int init() { return error= file->ft_init(); } - int get_next() { return error= file->ft_read(record); } + int init() { return error=file->ft_init(); } + int reset() { return 0; } + int get_next() { return error=file->ft_read(record); } + int get_type() { return QS_TYPE_FULLTEXT; } }; - -QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, - struct st_table_ref *ref); - +QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, + struct st_table_ref *ref, + ha_rows records); uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit); #endif diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index f912d67fe06..9222e15ff91 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -59,9 +58,9 @@ static int maxmin_in_range(bool max_fl, Field* field, COND *cond); SYNOPSIS opt_sum_query() - tables Tables in query - all_fields All fields to be returned - conds WHERE clause + tables list of leaves of join table tree + all_fields All fields to be returned + conds WHERE clause NOTE: This function is only called for queries with sum functions and no @@ -94,10 +93,16 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) Analyze outer join dependencies, and, if possible, compute the number of returned rows. */ - for (TABLE_LIST *tl=tables; tl ; tl= tl->next) + for (TABLE_LIST *tl= tables; tl; tl= tl->next_leaf) { + TABLE_LIST *embedded; + for (embedded= tl ; embedded; embedded= embedded->embedding) + { + if (embedded->on_expr) + break; + } + if (embedded) /* Don't replace expression on a table that is part of an outer join */ - if (tl->on_expr) { outer_tables|= tl->table->map; @@ -117,8 +122,11 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) If the storage manager of 'tl' gives exact row count, compute the total number of rows. If there are no outer table dependencies, this count may be used as the real count. + Schema tables are filled after this function is invoked, so we can't + get row count */ - if (tl->table->file->table_flags() & HA_NOT_EXACT_COUNT) + if ((tl->table->file->table_flags() & HA_NOT_EXACT_COUNT) || + tl->schema_table) { is_exact_count= FALSE; count= 1; // ensure count != 0 @@ -148,7 +156,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) switch (item_sum->sum_func()) { case Item_sum::COUNT_FUNC: /* - If the expr in count(expr) can never be null we can change this + If the expr in COUNT(expr) can never be null we can change this to the number of rows in the tables if this number is exact and there are no outer joins. 
*/ @@ -169,14 +177,14 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) indexes to find the key. */ Item *expr=item_sum->args[0]; - if (expr->type() == Item::FIELD_ITEM) + if (expr->real_item()->type() == Item::FIELD_ITEM) { byte key_buff[MAX_KEY_LENGTH]; TABLE_REF ref; uint range_fl, prefix_len; ref.key_buff= key_buff; - Item_field *item_field= ((Item_field*) expr); + Item_field *item_field= (Item_field*) (expr->real_item()); TABLE *table= item_field->field->table; /* @@ -256,14 +264,14 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) indexes to find the key. */ Item *expr=item_sum->args[0]; - if (expr->type() == Item::FIELD_ITEM) + if (expr->real_item()->type() == Item::FIELD_ITEM) { byte key_buff[MAX_KEY_LENGTH]; TABLE_REF ref; - uint range_fl, prefix_len; + uint range_fl, prefix_len; ref.key_buff= key_buff; - Item_field *item_field= ((Item_field*) expr); + Item_field *item_field= (Item_field*) (expr->real_item()); TABLE *table= item_field->field->table; /* @@ -356,7 +364,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) removed_tables is != 0 if we have used MIN() or MAX(). 
*/ if (removed_tables && used_tables != removed_tables) - const_result= 0; // We didn't remove all tables + const_result= 0; // We didn't remove all tables return const_result; } @@ -366,20 +374,34 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) SYNOPSIS simple_pred() - func_item in: Predicate item + func_item Predicate item args out: Here we store the field followed by constants - inv_order out: Is set to 1 if the predicate is of the form 'const op field' + inv_order out: Is set to 1 if the predicate is of the form + 'const op field' RETURN - 0 func_item is a simple predicate: a field is compared with constants + 0 func_item is a simple predicate: a field is compared with + constants 1 Otherwise */ -static bool simple_pred(Item_func *func_item, Item **args, bool *inv_order) +bool simple_pred(Item_func *func_item, Item **args, bool *inv_order) { Item *item; *inv_order= 0; switch (func_item->argument_count()) { + case 0: + /* MULT_EQUAL_FUNC */ + { + Item_equal *item_equal= (Item_equal *) func_item; + Item_equal_iterator it(*item_equal); + args[0]= it++; + if (it++) + return 0; + if (!(args[1]= item_equal->get_const())) + return 0; + } + break; case 1: /* field IS NULL */ item= func_item->arguments()[0]; @@ -520,6 +542,9 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, case Item_func::BETWEEN: between= 1; break; + case Item_func::MULT_EQUAL_FUNC: + eq_type= 1; + break; default: return 0; // Can't optimize function } @@ -591,8 +616,7 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, CHECK_FIELD_IGNORE); if (part->null_bit) *key_ptr++= (byte) test(part->field->is_null()); - part->field->get_key_image((char*) key_ptr, part->length, - part->field->charset(), Field::itRAW); + part->field->get_key_image((char*) key_ptr, part->length, Field::itRAW); } if (is_field_part) { @@ -675,7 +699,7 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, uint idx= 0; KEY *keyinfo,*keyinfo_end; - for 
(keyinfo= table->key_info, keyinfo_end= keyinfo+table->keys ; + for (keyinfo= table->key_info, keyinfo_end= keyinfo+table->s->keys ; keyinfo != keyinfo_end; keyinfo++,idx++) { @@ -696,8 +720,10 @@ static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, if (!(table->file->index_flags(idx, jdx, 0) & HA_READ_ORDER)) return 0; - /* Check whether the index component is partial */ - if (part->length < table->field[part->fieldnr-1]->pack_length()) + /* Check whether the index component is partial */ + Field *part_field= table->field[part->fieldnr-1]; + if ((part_field->flags & BLOB_FLAG) || + part->length < part_field->key_length()) break; if (field->eq(part->field)) diff --git a/sql/parse_file.cc b/sql/parse_file.cc new file mode 100644 index 00000000000..1351cf66161 --- /dev/null +++ b/sql/parse_file.cc @@ -0,0 +1,947 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +// Text .frm files management routines + +#include "mysql_priv.h" +#include <errno.h> +#include <m_ctype.h> +#include <my_sys.h> +#include <my_dir.h> + + +/* + write string with escaping + + SYNOPSIS + write_escaped_string() + file - IO_CACHE for record + val_s - string for writing + + RETURN + FALSE - OK + TRUE - error +*/ + +static my_bool +write_escaped_string(IO_CACHE *file, LEX_STRING *val_s) +{ + char *eos= val_s->str + val_s->length; + char *ptr= val_s->str; + + for (; ptr < eos; ptr++) + { + /* + Should be in sync with read_escaped_string() and + parse_quoted_escaped_string() + */ + switch(*ptr) { + case '\\': // escape character + if (my_b_append(file, (const byte *)STRING_WITH_LEN("\\\\"))) + return TRUE; + break; + case '\n': // parameter value delimiter + if (my_b_append(file, (const byte *)STRING_WITH_LEN("\\n"))) + return TRUE; + break; + case '\0': // problem for some string processing utilities + if (my_b_append(file, (const byte *)STRING_WITH_LEN("\\0"))) + return TRUE; + break; + case 26: // problem for windows utilities (Ctrl-Z) + if (my_b_append(file, (const byte *)STRING_WITH_LEN("\\z"))) + return TRUE; + break; + case '\'': // list of string delimiter + if (my_b_append(file, (const byte *)STRING_WITH_LEN("\\\'"))) + return TRUE; + break; + default: + if (my_b_append(file, (const byte *)ptr, 1)) + return TRUE; + } + } + return FALSE; +} + + +/* + write parameter value to IO_CACHE + + SYNOPSIS + write_parameter() + file pointer to IO_CACHE structure for writing + base pointer to data structure + parameter pointer to parameter descriptor + old_version for returning back old version number value + + RETURN + FALSE - OK + TRUE - error +*/ + +static my_bool +write_parameter(IO_CACHE *file, gptr base, File_option *parameter, + ulonglong 
*old_version) +{ + char num_buf[20]; // buffer for numeric operations + // string for numeric operations + String num(num_buf, sizeof(num_buf), &my_charset_bin); + DBUG_ENTER("write_parameter"); + + switch (parameter->type) { + case FILE_OPTIONS_STRING: + { + LEX_STRING *val_s= (LEX_STRING *)(base + parameter->offset); + if (my_b_append(file, (const byte *)val_s->str, val_s->length)) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_ESTRING: + { + if (write_escaped_string(file, (LEX_STRING *)(base + parameter->offset))) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_ULONGLONG: + { + num.set(*((ulonglong *)(base + parameter->offset)), &my_charset_bin); + if (my_b_append(file, (const byte *)num.ptr(), num.length())) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_REV: + { + ulonglong *val_i= (ulonglong *)(base + parameter->offset); + *old_version= (*val_i)++; + num.set(*val_i, &my_charset_bin); + if (my_b_append(file, (const byte *)num.ptr(), num.length())) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_TIMESTAMP: + { + /* string have to be allocated already */ + LEX_STRING *val_s= (LEX_STRING *)(base + parameter->offset); + time_t tm= time(NULL); + + get_date(val_s->str, GETDATE_DATE_TIME|GETDATE_GMT|GETDATE_FIXEDLENGTH, + tm); + val_s->length= PARSE_FILE_TIMESTAMPLENGTH; + if (my_b_append(file, (const byte *)val_s->str, + PARSE_FILE_TIMESTAMPLENGTH)) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_STRLIST: + { + List_iterator_fast<LEX_STRING> it(*((List<LEX_STRING>*) + (base + parameter->offset))); + bool first= 1; + LEX_STRING *str; + while ((str= it++)) + { + // We need ' ' after string to detect list continuation + if ((!first && my_b_append(file, (const byte *)STRING_WITH_LEN(" "))) || + my_b_append(file, (const byte *)STRING_WITH_LEN("\'")) || + write_escaped_string(file, str) || + my_b_append(file, (const byte *)STRING_WITH_LEN("\'"))) + { + DBUG_RETURN(TRUE); + } + first= 0; + } + break; + } + case FILE_OPTIONS_ULLLIST: + { + 
List_iterator_fast<ulonglong> it(*((List<ulonglong>*) + (base + parameter->offset))); + bool first= 1; + ulonglong *val; + while ((val= it++)) + { + num.set(*val, &my_charset_bin); + // We need ' ' after string to detect list continuation + if ((!first && my_b_append(file, (const byte *)STRING_WITH_LEN(" "))) || + my_b_append(file, (const byte *)num.ptr(), num.length())) + { + DBUG_RETURN(TRUE); + } + first= 0; + } + break; + } + default: + DBUG_ASSERT(0); // never should happened + } + DBUG_RETURN(FALSE); +} + + +/* + write new .frm + + SYNOPSIS + sql_create_definition_file() + dir directory where put .frm + file .frm file name + type .frm type string (VIEW, TABLE) + base base address for parameter reading (structure like + TABLE) + parameters parameters description + max_versions number of versions to save + + RETURN + FALSE - OK + TRUE - error +*/ + +my_bool +sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name, + const LEX_STRING *type, + gptr base, File_option *parameters, + uint max_versions) +{ + File handler; + IO_CACHE file; + char path[FN_REFLEN+1]; // +1 to put temporary file name for sure + ulonglong old_version= ULONGLONG_MAX; + int path_end; + File_option *param; + DBUG_ENTER("sql_create_definition_file"); + DBUG_PRINT("enter", ("Dir: %s, file: %s, base 0x%lx", + dir->str, file_name->str, (ulong) base)); + + fn_format(path, file_name->str, dir->str, 0, MY_UNPACK_FILENAME); + path_end= strlen(path); + + // temporary file name + path[path_end]='~'; + path[path_end+1]= '\0'; + if ((handler= my_create(path, CREATE_MODE, O_RDWR | O_TRUNC, + MYF(MY_WME))) <= 0) + { + DBUG_RETURN(TRUE); + } + + if (init_io_cache(&file, handler, 0, SEQ_READ_APPEND, 0L, 0, MYF(MY_WME))) + goto err_w_file; + + // write header (file signature) + if (my_b_append(&file, (const byte *)STRING_WITH_LEN("TYPE=")) || + my_b_append(&file, (const byte *)type->str, type->length) || + my_b_append(&file, (const byte *)STRING_WITH_LEN("\n"))) + goto err_w_file; + + 
// write parameters to temporary file + for (param= parameters; param->name.str; param++) + { + if (my_b_append(&file, (const byte *)param->name.str, + param->name.length) || + my_b_append(&file, (const byte *)STRING_WITH_LEN("=")) || + write_parameter(&file, base, param, &old_version) || + my_b_append(&file, (const byte *)STRING_WITH_LEN("\n"))) + goto err_w_cache; + } + + if (end_io_cache(&file)) + goto err_w_file; + + if (my_close(handler, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + + // archive copies management + path[path_end]='\0'; + if (!access(path, F_OK)) + { + if (old_version != ULONGLONG_MAX && max_versions != 0) + { + // save backup + char path_arc[FN_REFLEN]; + // backup old version + char path_to[FN_REFLEN]; + + // check archive directory existence + fn_format(path_arc, "arc", dir->str, "", MY_UNPACK_FILENAME); + if (access(path_arc, F_OK)) + { + if (my_mkdir(path_arc, 0777, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + } + + my_snprintf(path_to, FN_REFLEN, "%s/%s-%04lu", + path_arc, file_name->str, (ulong) old_version); + if (my_rename(path, path_to, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + + // remove very old version + if (old_version > max_versions) + { + my_snprintf(path_to, FN_REFLEN, "%s/%s-%04lu", + path_arc, file_name->str, + (ulong)(old_version - max_versions)); + if (!access(path_arc, F_OK) && my_delete(path_to, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + } + } + else + { + if (my_delete(path, MYF(MY_WME))) // no backups + { + DBUG_RETURN(TRUE); + } + } + } + + { + // rename temporary file + char path_to[FN_REFLEN]; + memcpy(path_to, path, path_end+1); + path[path_end]='~'; + if (my_rename(path, path_to, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + } + DBUG_RETURN(FALSE); +err_w_cache: + end_io_cache(&file); +err_w_file: + my_close(handler, MYF(MY_WME)); + DBUG_RETURN(TRUE); +} + +/* + Renames a frm file (including backups) in same schema + + SYNOPSIS + rename_in_schema_file + schema name of given schema + old_name original file name 
+ new_name new file name + revision revision number + num_view_backups number of backups + + RETURN + 0 - OK + 1 - Error (only if renaming of frm failed) + +*/ +my_bool rename_in_schema_file(const char *schema, const char *old_name, + const char *new_name, ulonglong revision, + uint num_view_backups) +{ + char old_path[FN_REFLEN], new_path[FN_REFLEN], arc_path[FN_REFLEN]; + + strxnmov(old_path, FN_REFLEN, mysql_data_home, "/", schema, "/", + old_name, reg_ext, NullS); + (void) unpack_filename(old_path, old_path); + + strxnmov(new_path, FN_REFLEN, mysql_data_home, "/", schema, "/", + new_name, reg_ext, NullS); + (void) unpack_filename(new_path, new_path); + + if (my_rename(old_path, new_path, MYF(MY_WME))) + return 1; + + /* check if arc_dir exists */ + strxnmov(arc_path, FN_REFLEN, mysql_data_home, "/", schema, "/arc", NullS); + (void) unpack_filename(arc_path, arc_path); + + if (revision > 0 && !access(arc_path, F_OK)) + { + ulonglong limit= ((revision > num_view_backups) ? + revision - num_view_backups : 0); + for (; revision > limit ; revision--) + { + my_snprintf(old_path, FN_REFLEN, "%s/%s%s-%04lu", + arc_path, old_name, reg_ext, (ulong)revision); + (void) unpack_filename(old_path, old_path); + my_snprintf(new_path, FN_REFLEN, "%s/%s%s-%04lu", + arc_path, new_name, reg_ext, (ulong)revision); + (void) unpack_filename(new_path, new_path); + my_rename(old_path, new_path, MYF(0)); + } + } + return 0; +} + +/* + Prepare frm to parse (read to memory) + + SYNOPSIS + sql_parse_prepare() + file_name - path & filename to .frm file + mem_root - MEM_ROOT for buffer allocation + bad_format_errors - send errors on bad content + + RETURN + 0 - error + parser object + + NOTE + returned pointer + 1 will be type of .frm +*/ + +File_parser * +sql_parse_prepare(const LEX_STRING *file_name, MEM_ROOT *mem_root, + bool bad_format_errors) +{ + MY_STAT stat_info; + uint len; + char *end, *sign; + File_parser *parser; + File file; + DBUG_ENTER("sql__parse_prepare"); + + if 
(!my_stat(file_name->str, &stat_info, MYF(MY_WME))) + { + DBUG_RETURN(0); + } + + if (stat_info.st_size > INT_MAX-1) + { + my_error(ER_FPARSER_TOO_BIG_FILE, MYF(0), file_name->str); + DBUG_RETURN(0); + } + + if (!(parser= new(mem_root) File_parser)) + { + DBUG_RETURN(0); + } + + if (!(parser->buff= alloc_root(mem_root, stat_info.st_size+1))) + { + DBUG_RETURN(0); + } + + if ((file= my_open(file_name->str, O_RDONLY | O_SHARE, MYF(MY_WME))) < 0) + { + DBUG_RETURN(0); + } + + if ((len= my_read(file, (byte *)parser->buff, + stat_info.st_size, MYF(MY_WME))) == + MY_FILE_ERROR) + { + my_close(file, MYF(MY_WME)); + DBUG_RETURN(0); + } + + if (my_close(file, MYF(MY_WME))) + { + DBUG_RETURN(0); + } + + end= parser->end= parser->buff + len; + *end= '\0'; // barrier for more simple parsing + + // 7 = 5 (TYPE=) + 1 (letter at least of type name) + 1 ('\n') + if (len < 7 || + parser->buff[0] != 'T' || + parser->buff[1] != 'Y' || + parser->buff[2] != 'P' || + parser->buff[3] != 'E' || + parser->buff[4] != '=') + goto frm_error; + + // skip signature; + parser->file_type.str= sign= parser->buff + 5; + while (*sign >= 'A' && *sign <= 'Z' && sign < end) + sign++; + if (*sign != '\n') + goto frm_error; + parser->file_type.length= sign - parser->file_type.str; + // EOS for file signature just for safety + *sign= '\0'; + + parser->start= sign + 1; + parser->content_ok= 1; + + DBUG_RETURN(parser); + +frm_error: + if (bad_format_errors) + { + my_error(ER_FPARSER_BAD_HEADER, MYF(0), file_name->str); + DBUG_RETURN(0); + } + else + DBUG_RETURN(parser); // upper level have to check parser->ok() +} + + +/* + parse LEX_STRING + + SYNOPSIS + parse_string() + ptr - pointer on string beginning + end - pointer on symbol after parsed string end (still owned + by buffer and can be accessed + mem_root - MEM_ROOT for parameter allocation + str - pointer on string, where results should be stored + + RETURN + 0 - error + # - pointer on symbol after string +*/ + +static char * +parse_string(char *ptr, 
char *end, MEM_ROOT *mem_root, LEX_STRING *str) +{ + // get string length + char *eol= strchr(ptr, '\n'); + + if (eol >= end) + return 0; + + str->length= eol - ptr; + + if (!(str->str= alloc_root(mem_root, str->length+1))) + return 0; + + memcpy(str->str, ptr, str->length); + str->str[str->length]= '\0'; // just for safety + return eol+1; +} + + +/* + read escaped string from ptr to eol in already allocated str + + SYNOPSIS + read_escaped_string() + ptr - pointer on string beginning + eol - pointer on character after end of string + str - target string + + RETURN + FALSE - OK + TRUE - error +*/ + +my_bool +read_escaped_string(char *ptr, char *eol, LEX_STRING *str) +{ + char *write_pos= str->str; + + for (; ptr < eol; ptr++, write_pos++) + { + char c= *ptr; + if (c == '\\') + { + ptr++; + if (ptr >= eol) + return TRUE; + /* + Should be in sync with write_escaped_string() and + parse_quoted_escaped_string() + */ + switch(*ptr) { + case '\\': + *write_pos= '\\'; + break; + case 'n': + *write_pos= '\n'; + break; + case '0': + *write_pos= '\0'; + break; + case 'z': + *write_pos= 26; + break; + case '\'': + *write_pos= '\''; + break; + default: + return TRUE; + } + } + else + *write_pos= c; + } + str->str[str->length= write_pos-str->str]= '\0'; // just for safety + return FALSE; +} + + +/* + parse \n delimited escaped string + + SYNOPSIS + parse_escaped_string() + ptr - pointer on string beginning + end - pointer on symbol after parsed string end (still owned + by buffer and can be accessed + mem_root - MEM_ROOT for parameter allocation + str - pointer on string, where results should be stored + + RETURN + 0 - error + # - pointer on symbol after string +*/ + +char * +parse_escaped_string(char *ptr, char *end, MEM_ROOT *mem_root, LEX_STRING *str) +{ + char *eol= strchr(ptr, '\n'); + + if (eol == 0 || eol >= end || + !(str->str= alloc_root(mem_root, (eol - ptr) + 1)) || + read_escaped_string(ptr, eol, str)) + return 0; + + return eol+1; +} + + +/* + parse '' delimited 
escaped string + + SYNOPSIS + parse_quoted_escaped_string() + ptr - pointer on string beginning + end - pointer on symbol after parsed string end (still owned + by buffer and can be accessed + mem_root - MEM_ROOT for parameter allocation + str - pointer on string, where results should be stored + + RETURN + 0 - error + # - pointer on symbol after string +*/ + +static char * +parse_quoted_escaped_string(char *ptr, char *end, + MEM_ROOT *mem_root, LEX_STRING *str) +{ + char *eol; + uint result_len= 0; + bool escaped= 0; + + // starting ' + if (*(ptr++) != '\'') + return 0; + + // find ending ' + for (eol= ptr; (*eol != '\'' || escaped) && eol < end; eol++) + { + if (!(escaped= (*eol == '\\' && !escaped))) + result_len++; + } + + // process string + if (eol >= end || + !(str->str= alloc_root(mem_root, result_len + 1)) || + read_escaped_string(ptr, eol, str)) + return 0; + + return eol+1; +} + + +/* + Parser for FILE_OPTIONS_ULLLIST type value. + + SYNOPSIS + get_file_options_ulllist() + ptr [in/out] pointer to parameter + end [in] end of the configuration + line [in] pointer to the line begining + base [in] base address for parameter writing (structure + like TABLE) + parameter [in] description + mem_root [in] MEM_ROOT for parameters allocation +*/ + +bool get_file_options_ulllist(char *&ptr, char *end, char *line, + gptr base, File_option *parameter, + MEM_ROOT *mem_root) +{ + List<ulonglong> *nlist= (List<ulonglong>*)(base + parameter->offset); + ulonglong *num; + nlist->empty(); + // list parsing + while (ptr < end) + { + int not_used; + char *num_end= end; + if (!(num= (ulonglong*)alloc_root(mem_root, sizeof(ulonglong))) || + nlist->push_back(num, mem_root)) + goto nlist_err; + *num= my_strtoll10(ptr, &num_end, ¬_used); + ptr= num_end; + switch (*ptr) { + case '\n': + goto end_of_nlist; + case ' ': + // we cant go over buffer bounds, because we have \0 at the end + ptr++; + break; + default: + goto nlist_err_w_message; + } + } + +end_of_nlist: + if (*(ptr++) != 
'\n') + goto nlist_err; + return FALSE; + +nlist_err_w_message: + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), parameter->name.str, line); +nlist_err: + return TRUE; +} + + +/* + parse parameters + + SYNOPSIS + File_parser::parse() + base base address for parameter writing (structure like + TABLE) + mem_root MEM_ROOT for parameters allocation + parameters parameters description + required number of required parameters in above list + hook hook called for unknown keys + hook_data some data specific for the hook + + RETURN + FALSE - OK + TRUE - error +*/ + +my_bool +File_parser::parse(gptr base, MEM_ROOT *mem_root, + struct File_option *parameters, uint required, + Unknown_key_hook *hook) +{ + uint first_param= 0, found= 0; + char *ptr= start; + char *eol; + LEX_STRING *str; + List<LEX_STRING> *list; + DBUG_ENTER("File_parser::parse"); + + while (ptr < end && found < required) + { + char *line= ptr; + if (*ptr == '#') + { + // it is comment + if (!(ptr= strchr(ptr, '\n'))) + { + my_error(ER_FPARSER_EOF_IN_COMMENT, MYF(0), line); + DBUG_RETURN(TRUE); + } + ptr++; + } + else + { + File_option *parameter= parameters+first_param, + *parameters_end= parameters+required; + int len= 0; + for (; parameter < parameters_end; parameter++) + { + len= parameter->name.length; + // check length + if (len < (end-ptr) && ptr[len] != '=') + continue; + // check keyword + if (memcmp(parameter->name.str, ptr, len) == 0) + break; + } + + if (parameter < parameters_end) + { + found++; + /* + if we found first parameter, start search from next parameter + next time. 
+ (this small optimisation should work, because they should be + written in same order) + */ + if (parameter == parameters+first_param) + first_param++; + + // get value + ptr+= (len+1); + switch (parameter->type) { + case FILE_OPTIONS_STRING: + { + if (!(ptr= parse_string(ptr, end, mem_root, + (LEX_STRING *)(base + + parameter->offset)))) + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + break; + } + case FILE_OPTIONS_ESTRING: + { + if (!(ptr= parse_escaped_string(ptr, end, mem_root, + (LEX_STRING *) + (base + parameter->offset)))) + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + break; + } + case FILE_OPTIONS_ULONGLONG: + case FILE_OPTIONS_REV: + if (!(eol= strchr(ptr, '\n'))) + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + { + int not_used; + *((ulonglong*)(base + parameter->offset))= + my_strtoll10(ptr, 0, ¬_used); + } + ptr= eol+1; + break; + case FILE_OPTIONS_TIMESTAMP: + { + /* string have to be allocated already */ + LEX_STRING *val= (LEX_STRING *)(base + parameter->offset); + /* yyyy-mm-dd HH:MM:SS = 19(PARSE_FILE_TIMESTAMPLENGTH) characters */ + if (ptr[PARSE_FILE_TIMESTAMPLENGTH] != '\n') + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + memcpy(val->str, ptr, PARSE_FILE_TIMESTAMPLENGTH); + val->str[val->length= PARSE_FILE_TIMESTAMPLENGTH]= '\0'; + ptr+= (PARSE_FILE_TIMESTAMPLENGTH+1); + break; + } + case FILE_OPTIONS_STRLIST: + { + list= (List<LEX_STRING>*)(base + parameter->offset); + + list->empty(); + // list parsing + while (ptr < end) + { + if (!(str= (LEX_STRING*)alloc_root(mem_root, + sizeof(LEX_STRING))) || + list->push_back(str, mem_root)) + goto list_err; + if (!(ptr= parse_quoted_escaped_string(ptr, end, mem_root, str))) + goto list_err_w_message; + switch (*ptr) { + case '\n': + goto end_of_list; + 
case ' ': + // we cant go over buffer bounds, because we have \0 at the end + ptr++; + break; + default: + goto list_err_w_message; + } + } + +end_of_list: + if (*(ptr++) != '\n') + goto list_err; + break; + +list_err_w_message: + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); +list_err: + DBUG_RETURN(TRUE); + } + case FILE_OPTIONS_ULLLIST: + if (get_file_options_ulllist(ptr, end, line, base, + parameter, mem_root)) + DBUG_RETURN(TRUE); + break; + default: + DBUG_ASSERT(0); // never should happened + } + } + else + { + ptr= line; + if (hook->process_unknown_string(ptr, base, mem_root, end)) + { + DBUG_RETURN(TRUE); + } + // skip unknown parameter + if (!(ptr= strchr(ptr, '\n'))) + { + my_error(ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER, MYF(0), line); + DBUG_RETURN(TRUE); + } + ptr++; + } + } + } + DBUG_RETURN(FALSE); +} + + +/* + Dummy unknown key hook + + SYNOPSIS + File_parser_dummy_hook::process_unknown_string() + unknown_key [in/out] reference on the line with unknown + parameter and the parsing point + base [in] base address for parameter writing (structure like + TABLE) + mem_root [in] MEM_ROOT for parameters allocation + end [in] the end of the configuration + + NOTE + This hook used to catch no longer supported keys and process them for + backward compatibility, but it will not slow down processing of modern + format files. + This hook does nothing except debug output. 
+ + RETURN + FALSE OK + TRUE Error +*/ + +bool +File_parser_dummy_hook::process_unknown_string(char *&unknown_key, + gptr base, MEM_ROOT *mem_root, + char *end) +{ + DBUG_ENTER("file_parser_dummy_hook::process_unknown_string"); + DBUG_PRINT("info", ("Unknown key: '%60s'", unknown_key)); + DBUG_RETURN(FALSE); +} diff --git a/sql/parse_file.h b/sql/parse_file.h new file mode 100644 index 00000000000..ab8b34561fe --- /dev/null +++ b/sql/parse_file.h @@ -0,0 +1,125 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _PARSE_FILE_H_ +#define _PARSE_FILE_H_ + +#define PARSE_FILE_TIMESTAMPLENGTH 19 + +enum file_opt_type { + FILE_OPTIONS_STRING, /* String (LEX_STRING) */ + FILE_OPTIONS_ESTRING, /* Escaped string (LEX_STRING) */ + FILE_OPTIONS_ULONGLONG, /* ulonglong parameter (ulonglong) */ + FILE_OPTIONS_REV, /* Revision version number (ulonglong) */ + FILE_OPTIONS_TIMESTAMP, /* timestamp (LEX_STRING have to be + allocated with length 20 (19+1) */ + FILE_OPTIONS_STRLIST, /* list of escaped strings + (List<LEX_STRING>) */ + FILE_OPTIONS_ULLLIST /* list of ulonglong values + (List<ulonglong>) */ +}; + +struct File_option +{ + LEX_STRING name; /* Name of the option */ + int offset; /* offset to base address of value */ + file_opt_type type; /* Option type */ +}; + + +/* + This hook used to catch no longer supported keys 
and process them for + backward compatibility. +*/ + +class Unknown_key_hook +{ +public: + Unknown_key_hook() {} /* Remove gcc warning */ + virtual ~Unknown_key_hook() {} /* Remove gcc warning */ + virtual bool process_unknown_string(char *&unknown_key, gptr base, + MEM_ROOT *mem_root, char *end)= 0; +}; + + +/* Dummy hook for parsers which do not need hook for unknown keys */ + +class File_parser_dummy_hook: public Unknown_key_hook +{ +public: + File_parser_dummy_hook() {} /* Remove gcc warning */ + virtual bool process_unknown_string(char *&unknown_key, gptr base, + MEM_ROOT *mem_root, char *end); +}; + +extern File_parser_dummy_hook file_parser_dummy_hook; + +bool get_file_options_ulllist(char *&ptr, char *end, char *line, + gptr base, File_option *parameter, + MEM_ROOT *mem_root); + +char * +parse_escaped_string(char *ptr, char *end, MEM_ROOT *mem_root, LEX_STRING *str); + +class File_parser; +File_parser *sql_parse_prepare(const LEX_STRING *file_name, + MEM_ROOT *mem_root, bool bad_format_errors); + +my_bool +sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name, + const LEX_STRING *type, + gptr base, File_option *parameters, uint versions); +my_bool rename_in_schema_file(const char *schema, const char *old_name, + const char *new_name, ulonglong revision, + uint num_view_backups); + +class File_parser: public Sql_alloc +{ + char *buff, *start, *end; + LEX_STRING file_type; + my_bool content_ok; +public: + File_parser() :buff(0), start(0), end(0), content_ok(0) + { file_type.str= 0; file_type.length= 0; } + + my_bool ok() { return content_ok; } + LEX_STRING *type() { return &file_type; } + my_bool parse(gptr base, MEM_ROOT *mem_root, + struct File_option *parameters, uint required, + Unknown_key_hook *hook); + + friend File_parser *sql_parse_prepare(const LEX_STRING *file_name, + MEM_ROOT *mem_root, + bool bad_format_errors); +}; + + +/* + Custom version of standard offsetof() macro which can be used to get + offsets of members in class 
for non-POD types (according to the current + version of C++ standard offsetof() macro can't be used in such cases and + attempt to do so causes warnings to be emitted, OTOH in many cases it is + still OK to assume that all instances of the class has the same offsets + for the same members). + + This is temporary solution which should be removed once File_parser class + and related routines are refactored. +*/ + +#define my_offsetof(TYPE, MEMBER) \ + ((size_t)((char *)&(((TYPE *)0x10)->MEMBER) - (char*)0x10)) + +#endif /* _PARSE_FILE_H_ */ diff --git a/sql/password.c b/sql/password.c index 94b9dc440be..57ed3e6ab0f 100644 --- a/sql/password.c +++ b/sql/password.c @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -65,7 +64,7 @@ #include <sha1.h> #include "mysql.h" -/************ MySQL 3.23-4.0 authentification routines: untouched ***********/ +/************ MySQL 3.23-4.0 authentication routines: untouched ***********/ /* New (MySQL 3.21+) random generation structure initialization @@ -146,7 +145,7 @@ void hash_password(ulong *result, const char *password, uint password_len) void make_scrambled_password_323(char *to, const char *password) { ulong hash_res[2]; - hash_password(hash_res, password, strlen(password)); + hash_password(hash_res, password, (uint) strlen(password)); sprintf(to, "%08lx%08lx", hash_res[0], hash_res[1]); } @@ -172,7 +171,7 @@ void scramble_323(char *to, const char *message, const char *password) { char extra, *to_start=to; const char *message_end= message + SCRAMBLE_LENGTH_323; - hash_password(hash_pass,password, strlen(password)); + hash_password(hash_pass,password, (uint) strlen(password)); hash_password(hash_message, message, SCRAMBLE_LENGTH_323); randominit(&rand_st,hash_pass[0] ^ hash_message[0], hash_pass[1] ^ hash_message[1]); @@ -281,7 +280,7 @@ void make_password_from_salt_323(char *to, const ulong *salt) /* - **************** MySQL 4.1.1 authentification routines ************* + **************** MySQL 4.1.1 authentication routines ************* */ /* @@ -316,18 +315,21 @@ void create_random_string(char *to, uint length, struct rand_struct *rand_st) octet2hex() buf OUT output buffer. 
Must be at least 2*len+1 bytes str, len IN the beginning and the length of the input string + + RETURN + buf+len*2 */ -static void -octet2hex(char *to, const uint8 *str, uint len) +char *octet2hex(char *to, const char *str, uint len) { - const uint8 *str_end= str + len; + const byte *str_end= str + len; for (; str != str_end; ++str) { - *to++= _dig_vec_upper[(*str & 0xF0) >> 4]; - *to++= _dig_vec_upper[*str & 0x0F]; + *to++= _dig_vec_upper[((uchar) *str) >> 4]; + *to++= _dig_vec_upper[((uchar) *str) & 0x0F]; } *to= '\0'; + return to; } @@ -394,7 +396,7 @@ make_scrambled_password(char *to, const char *password) mysql_sha1_reset(&sha1_context); /* stage 1: hash password */ - mysql_sha1_input(&sha1_context, (uint8 *) password, strlen(password)); + mysql_sha1_input(&sha1_context, (uint8 *) password, (uint) strlen(password)); mysql_sha1_result(&sha1_context, (uint8 *) to); /* stage 2: hash stage1 output */ mysql_sha1_reset(&sha1_context); @@ -403,7 +405,7 @@ make_scrambled_password(char *to, const char *password) mysql_sha1_result(&sha1_context, hash_stage2); /* convert hash_stage2 to hex string */ *to++= PVERSION41_CHAR; - octet2hex(to, hash_stage2, SHA1_HASH_SIZE); + octet2hex(to, (char*) hash_stage2, SHA1_HASH_SIZE); } @@ -433,7 +435,7 @@ scramble(char *to, const char *message, const char *password) mysql_sha1_reset(&sha1_context); /* stage 1: hash password */ - mysql_sha1_input(&sha1_context, (uint8 *) password, strlen(password)); + mysql_sha1_input(&sha1_context, (uint8 *) password, (uint) strlen(password)); mysql_sha1_result(&sha1_context, hash_stage1); /* stage 2: hash stage 1; note that hash_stage2 is stored in the database */ mysql_sha1_reset(&sha1_context); @@ -470,7 +472,7 @@ scramble(char *to, const char *message, const char *password) */ my_bool -check_scramble(const char *scramble, const char *message, +check_scramble(const char *scramble_arg, const char *message, const uint8 *hash_stage2) { SHA1_CONTEXT sha1_context; @@ -483,7 +485,7 @@ 
check_scramble(const char *scramble, const char *message, mysql_sha1_input(&sha1_context, hash_stage2, SHA1_HASH_SIZE); mysql_sha1_result(&sha1_context, buf); /* encrypt scramble */ - my_crypt((char *) buf, buf, (const uchar *) scramble, SCRAMBLE_LENGTH); + my_crypt((char *) buf, buf, (const uchar *) scramble_arg, SCRAMBLE_LENGTH); /* now buf supposedly contains hash_stage1: so we can get hash_stage2 */ mysql_sha1_reset(&sha1_context); mysql_sha1_input(&sha1_context, buf, SHA1_HASH_SIZE); @@ -493,7 +495,8 @@ check_scramble(const char *scramble, const char *message, /* - Convert scrambled password from asciiz hex string to binary form. + Convert scrambled password from asciiz hex string to binary form. + SYNOPSIS get_salt_from_password() res OUT buf to hold password. Must be at least SHA1_HASH_SIZE @@ -517,5 +520,5 @@ void get_salt_from_password(uint8 *hash_stage2, const char *password) void make_password_from_salt(char *to, const uint8 *hash_stage2) { *to++= PVERSION41_CHAR; - octet2hex(to, hash_stage2, SHA1_HASH_SIZE); + octet2hex(to, (char*) hash_stage2, SHA1_HASH_SIZE); } diff --git a/sql/procedure.cc b/sql/procedure.cc index a0042dd879e..bbfabc46608 100644 --- a/sql/procedure.cc +++ b/sql/procedure.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2002, 2004-2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -41,6 +40,34 @@ static struct st_procedure_def { { "analyse",proc_analyse_init } // Analyse a result }; + +my_decimal *Item_proc_string::val_decimal(my_decimal *decimal_value) +{ + if (null_value) + return 0; + string2my_decimal(E_DEC_FATAL_ERROR, &str_value, decimal_value); + return (decimal_value); +} + + +my_decimal *Item_proc_int::val_decimal(my_decimal *decimal_value) +{ + if (null_value) + return 0; + int2my_decimal(E_DEC_FATAL_ERROR, value, unsigned_flag, decimal_value); + return (decimal_value); +} + + +my_decimal *Item_proc_real::val_decimal(my_decimal *decimal_value) +{ + if (null_value) + return 0; + double2my_decimal(E_DEC_FATAL_ERROR, value, decimal_value); + return (decimal_value); +} + + /***************************************************************************** ** Setup handling of procedure ** Return 0 if everything is ok @@ -65,8 +92,7 @@ setup_procedure(THD *thd,ORDER *param,select_result *result, DBUG_RETURN(proc); } } - my_printf_error(ER_UNKNOWN_PROCEDURE,ER(ER_UNKNOWN_PROCEDURE),MYF(0), - (*param->item)->name); + my_error(ER_UNKNOWN_PROCEDURE, MYF(0), (*param->item)->name); *error=1; DBUG_RETURN(0); } diff --git a/sql/procedure.h b/sql/procedure.h index 0a1e9ddfa2f..850d5c74db4 100644 --- a/sql/procedure.h +++ b/sql/procedure.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -60,13 +59,18 @@ public: void set(longlong nr) { value=(double) nr; } void set(const char *str,uint length,CHARSET_INFO *cs) { - int err; + int err_not_used; char *end_not_used; - value= my_strntod(cs, (char*) str, length, &end_not_used, &err); + value= my_strntod(cs,(char*) str,length, &end_not_used, &err_not_used); } - double val() { return value; } + double val_real() { return value; } longlong val_int() { return (longlong) value; } - String *val_str(String *s) { s->set(value,decimals,default_charset()); return s; } + String *val_str(String *s) + { + s->set(value,decimals,default_charset()); + return s; + } + my_decimal *val_decimal(my_decimal *); unsigned int size_of() { return sizeof(*this);} }; @@ -82,9 +86,10 @@ public: void set(longlong nr) { value=nr; } void set(const char *str,uint length, CHARSET_INFO *cs) { int err; value=my_strntoll(cs,str,length,10,NULL,&err); } - double val() { return (double) value; } + double val_real() { return (double) value; } longlong val_int() { return value; } String *val_str(String *s) { s->set(value, default_charset()); return s; } + my_decimal *val_decimal(my_decimal *); unsigned int size_of() { return sizeof(*this);} }; @@ -95,18 +100,18 @@ public: Item_proc_string(const char *name_par,uint length) :Item_proc(name_par) { this->max_length=length; } enum Item_result result_type () const { return STRING_RESULT; } - enum_field_types field_type() const { return MYSQL_TYPE_STRING; } + enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } void set(double nr) { str_value.set(nr, 2, default_charset()); } void set(longlong nr) { str_value.set(nr, default_charset()); } void set(const char *str, uint length, CHARSET_INFO *cs) { str_value.copy(str,length,cs); } - double val() + double val_real() { - int err; - CHARSET_INFO *cs= str_value.charset(); + int err_not_used; char *end_not_used; + 
CHARSET_INFO *cs= str_value.charset(); return my_strntod(cs, (char*) str_value.ptr(), str_value.length(), - &end_not_used, &err); + &end_not_used, &err_not_used); } longlong val_int() { @@ -118,6 +123,7 @@ public: { return null_value ? (String*) 0 : (String*) &str_value; } + my_decimal *val_decimal(my_decimal *); unsigned int size_of() { return sizeof(*this);} }; diff --git a/sql/protocol.cc b/sql/protocol.cc index 7c7dfaf7bef..f9ba734a48d 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -24,9 +23,14 @@ #endif #include "mysql_priv.h" +#include "sp_rcontext.h" #include <stdarg.h> static const unsigned int PACKET_BUFFER_EXTRA_ALLOC= 1024; +void net_send_error_packet(THD *thd, uint sql_errno, const char *err); +#ifndef EMBEDDED_LIBRARY +static void write_eof_packet(THD *thd, NET *net); +#endif #ifndef EMBEDDED_LIBRARY bool Protocol::net_store_data(const char *from, uint length) @@ -50,24 +54,36 @@ bool Protocol_prep::net_store_data(const char *from, uint length) } - /* Send a error string to client */ +/* + Send a error string to client + + Design note: -void send_error(THD *thd, uint sql_errno, const char *err) + net_printf_error and net_send_error are low-level functions + that shall be used only when a new connection is being + established or at server startup. + For SIGNAL/RESIGNAL and GET DIAGNOSTICS functionality it's + critical that every error that can be intercepted is issued in one + place only, my_message_sql. 
+*/ +void net_send_error(THD *thd, uint sql_errno, const char *err) { -#ifndef EMBEDDED_LIBRARY - uint length; - char buff[MYSQL_ERRMSG_SIZE+2], *pos; -#endif - const char *orig_err= err; NET *net= &thd->net; - DBUG_ENTER("send_error"); + bool generate_warning= thd->killed != THD::KILL_CONNECTION; + DBUG_ENTER("net_send_error"); DBUG_PRINT("enter",("sql_errno: %d err: %s", sql_errno, err ? err : net->last_error[0] ? net->last_error : "NULL")); -#ifndef EMBEDDED_LIBRARY /* TODO query cache in embedded library*/ - query_cache_abort(net); -#endif + DBUG_ASSERT(!thd->spcont); + + if (net && net->no_send_error) + { + thd->clear_error(); + DBUG_PRINT("info", ("sending error messages prohibited")); + DBUG_VOID_RETURN; + } + thd->query_error= 1; // needed to catch query errors during replication if (!err) { @@ -76,97 +92,51 @@ void send_error(THD *thd, uint sql_errno, const char *err) else { if ((err=net->last_error)[0]) + { sql_errno=net->last_errno; + generate_warning= 0; // This warning has already been given + } else { sql_errno=ER_UNKNOWN_ERROR; err=ER(sql_errno); /* purecov: inspected */ } } - orig_err= err; } -#ifdef EMBEDDED_LIBRARY - net->last_errno= sql_errno; - strmake(net->last_error, err, sizeof(net->last_error)-1); - strmov(net->sqlstate, mysql_errno_to_sqlstate(sql_errno)); -#else - - if (net->vio == 0) + if (generate_warning) { - if (thd->bootstrap) - { - /* In bootstrap it's ok to print on stderr */ - fprintf(stderr,"ERROR: %d %s\n",sql_errno,err); - } - DBUG_VOID_RETURN; + /* Error that we have not got with my_error() */ + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, sql_errno, err); } - if (net->return_errno) - { // new client code; Add errno before message - int2store(buff,sql_errno); - pos= buff+2; - if (thd->client_capabilities & CLIENT_PROTOCOL_41) - { - /* The first # is to make the protocol backward compatible */ - buff[2]= '#'; - pos= strmov(buff+3, mysql_errno_to_sqlstate(sql_errno)); - } - length= (uint) (strmake(pos, err, 
MYSQL_ERRMSG_SIZE-1) - buff); - err=buff; - } - else - { - length=(uint) strlen(err); - set_if_smaller(length,MYSQL_ERRMSG_SIZE-1); - } - VOID(net_write_command(net,(uchar) 255, "", 0, (char*) err,length)); -#endif /* EMBEDDED_LIBRARY*/ - if (!thd->killed) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, sql_errno, - orig_err ? orig_err : ER(sql_errno)); + net_send_error_packet(thd, sql_errno, err); + thd->is_fatal_error=0; // Error message is given thd->net.report_error= 0; /* Abort multi-result sets */ - thd->lex->found_colon= 0; thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; DBUG_VOID_RETURN; } - -/* - Send a warning to the end user - - SYNOPSIS - send_warning() - thd Thread handler - sql_errno Warning number (error message) - err Error string. If not set, use ER(sql_errno) - - DESCRIPTION - Register the warning so that the user can get it with mysql_warnings() - Send an ok (+ warning count) to the end user. -*/ - -void send_warning(THD *thd, uint sql_errno, const char *err) -{ - DBUG_ENTER("send_warning"); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, sql_errno, - err ? err : ER(sql_errno)); - send_ok(thd); - DBUG_VOID_RETURN; -} - - /* Write error package and flush to client It's a little too low level, but I don't want to use another buffer for this + + Design note: + + net_printf_error and net_send_error are low-level functions + that shall be used only when a new connection is being + established or at server startup. + For SIGNAL/RESIGNAL and GET DIAGNOSTICS functionality it's + critical that every error that can be intercepted is issued in one + place only, my_message_sql. */ void -net_printf(THD *thd, uint errcode, ...) +net_printf_error(THD *thd, uint errcode, ...) { va_list args; uint length,offset; @@ -179,17 +149,26 @@ net_printf(THD *thd, uint errcode, ...) 
#endif NET *net= &thd->net; - DBUG_ENTER("net_printf"); + DBUG_ENTER("net_printf_error"); DBUG_PRINT("enter",("message: %u",errcode)); + DBUG_ASSERT(!thd->spcont); + + if (net && net->no_send_error) + { + thd->clear_error(); + DBUG_PRINT("info", ("sending error messages prohibited")); + DBUG_VOID_RETURN; + } + thd->query_error= 1; // needed to catch query errors during replication #ifndef EMBEDDED_LIBRARY query_cache_abort(net); // Safety #endif va_start(args,errcode); /* - The following is needed to make net_printf() work with 0 argument for - errorcode and use the argument after that as the format string. This + The following is needed to make net_printf_error() work with 0 argument + for errorcode and use the argument after that as the format string. This is useful for rare errors that are not worth the hassle to put in errmsg.sys, but at the same time, the message is not fixed text */ @@ -252,7 +231,7 @@ net_printf(THD *thd, uint errcode, ...) strmake(net->last_error, text_pos, length); strmake(net->sqlstate, mysql_errno_to_sqlstate(errcode), SQLSTATE_LENGTH); #endif - if (!thd->killed) + if (thd->killed != THD::KILL_CONNECTION) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, errcode, text_pos ? text_pos : ER(errcode)); thd->is_fatal_error=0; // Error message is given @@ -294,7 +273,12 @@ send_ok(THD *thd, ha_rows affected_rows, ulonglong id, const char *message) DBUG_ENTER("send_ok"); if (net->no_send_ok || !net->vio) // hack for re-parsing queries + { + DBUG_PRINT("info", ("no send ok: %s, vio present: %s", + (net->no_send_ok ? "YES" : "NO"), + (net->vio ? 
"YES" : "NO"))); DBUG_VOID_RETURN; + } buff[0]=0; // No fields pos=net_store_length(buff+1,affected_rows); @@ -326,6 +310,9 @@ send_ok(THD *thd, ha_rows affected_rows, ulonglong id, const char *message) VOID(net_flush(net)); /* We can't anymore send an error to the client */ thd->net.report_error= 0; + thd->net.no_send_error= 1; + DBUG_PRINT("info", ("OK sent, so no more error sending allowed")); + DBUG_VOID_RETURN; } @@ -346,7 +333,7 @@ static char eof_buff[1]= { (char) 254 }; /* Marker for end of fields */ 254 Marker (1 byte) warning_count Stored in 2 bytes; New in 4.1 protocol status_flag Stored in 2 bytes; - For flags like SERVER_STATUS_MORE_RESULTS + For flags like SERVER_MORE_RESULTS_EXISTS Note that the warning count will not be sent if 'no_flush' is set as we don't want to report the warning count until all data is sent to the @@ -354,39 +341,52 @@ static char eof_buff[1]= { (char) 254 }; /* Marker for end of fields */ */ void -send_eof(THD *thd, bool no_flush) +send_eof(THD *thd) { NET *net= &thd->net; DBUG_ENTER("send_eof"); - if (net->vio != 0) + if (net->vio != 0 && !net->no_send_eof) { - if (thd->client_capabilities & CLIENT_PROTOCOL_41) - { - uchar buff[5]; - uint tmp= min(thd->total_warn_count, 65535); - buff[0]=254; - int2store(buff+1, tmp); - /* - The following test should never be true, but it's better to do it - because if 'is_fatal_error' is set the server is not going to execute - other queries (see the if test in dispatch_command / COM_QUERY) - */ - if (thd->is_fatal_error) - thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; - int2store(buff+3, thd->server_status); - VOID(my_net_write(net,(char*) buff,5)); - VOID(net_flush(net)); - } - else - { - VOID(my_net_write(net,eof_buff,1)); - if (!no_flush) - VOID(net_flush(net)); - } + write_eof_packet(thd, net); + VOID(net_flush(net)); + thd->net.no_send_error= 1; + DBUG_PRINT("info", ("EOF sent, so no more error sending allowed")); } DBUG_VOID_RETURN; } + +/* + Format EOF packet according to the 
current protocol and + write it to the network output buffer. +*/ + +static void write_eof_packet(THD *thd, NET *net) +{ + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + { + uchar buff[5]; + /* + Don't send warn count during SP execution, as the warn_list + is cleared between substatements, and mysqltest gets confused + */ + uint tmp= (thd->spcont ? 0 : min(thd->total_warn_count, 65535)); + buff[0]= 254; + int2store(buff+1, tmp); + /* + The following test should never be true, but it's better to do it + because if 'is_fatal_error' is set the server is not going to execute + other queries (see the if test in dispatch_command / COM_QUERY) + */ + if (thd->is_fatal_error) + thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; + int2store(buff+3, thd->server_status); + VOID(my_net_write(net, (char*) buff, 5)); + } + else + VOID(my_net_write(net, eof_buff, 1)); +} + /* Please client to send scrambled_password in old format. SYNOPSYS @@ -404,6 +404,47 @@ bool send_old_password_request(THD *thd) return my_net_write(net, eof_buff, 1) || net_flush(net); } + +void net_send_error_packet(THD *thd, uint sql_errno, const char *err) +{ + NET *net= &thd->net; + uint length; + char buff[MYSQL_ERRMSG_SIZE+2], *pos; + + DBUG_ENTER("send_error_packet"); + + if (net->vio == 0) + { + if (thd->bootstrap) + { + /* In bootstrap it's ok to print on stderr */ + fprintf(stderr,"ERROR: %d %s\n",sql_errno,err); + } + DBUG_VOID_RETURN; + } + + if (net->return_errno) + { // new client code; Add errno before message + int2store(buff,sql_errno); + pos= buff+2; + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + { + /* The first # is to make the protocol backward compatible */ + buff[2]= '#'; + pos= strmov(buff+3, mysql_errno_to_sqlstate(sql_errno)); + } + length= (uint) (strmake(pos, err, MYSQL_ERRMSG_SIZE-1) - buff); + err=buff; + } + else + { + length=(uint) strlen(err); + set_if_smaller(length,MYSQL_ERRMSG_SIZE-1); + } + VOID(net_write_command(net,(uchar) 255, "", 0, (char*) err,length)); + 
DBUG_VOID_RETURN; +} + #endif /* EMBEDDED_LIBRARY */ /* @@ -472,7 +513,7 @@ void Protocol::init(THD *thd_arg) thd=thd_arg; packet= &thd->packet; convert= &thd->convert_buffer; -#ifndef DEBUG_OFF +#ifndef DBUG_OFF field_types= 0; #endif } @@ -497,6 +538,7 @@ bool Protocol::flush() flag Bit mask with the following functions: 1 send number of rows 2 send default values + 4 don't write eof packet DESCRIPTION Sum fields has table name empty and field_name. @@ -507,7 +549,7 @@ bool Protocol::flush() */ #ifndef EMBEDDED_LIBRARY -bool Protocol::send_fields(List<Item> *list, uint flag) +bool Protocol::send_fields(List<Item> *list, uint flags) { List_iterator_fast<Item> it(*list); Item *item; @@ -518,13 +560,13 @@ bool Protocol::send_fields(List<Item> *list, uint flag) CHARSET_INFO *thd_charset= thd->variables.character_set_results; DBUG_ENTER("send_fields"); - if (flag & 1) + if (flags & SEND_NUM_ROWS) { // Packet with number of elements char *pos=net_store_length(buff, list->elements); (void) my_net_write(&thd->net, buff,(uint) (pos-buff)); } -#ifndef DEBUG_OFF +#ifndef DBUG_OFF field_types= (enum_field_types*) thd->alloc(sizeof(field_types) * list->elements); uint count= 0; @@ -536,11 +578,16 @@ bool Protocol::send_fields(List<Item> *list, uint flag) CHARSET_INFO *cs= system_charset_info; Send_field field; item->make_field(&field); + + /* Keep things compatible for old clients */ + if (field.type == MYSQL_TYPE_VARCHAR) + field.type= MYSQL_TYPE_VAR_STRING; + prot.prepare_for_resend(); if (thd->client_capabilities & CLIENT_PROTOCOL_41) { - if (prot.store("def", 3, cs, thd_charset) || + if (prot.store(STRING_WITH_LEN("def"), cs, thd_charset) || prot.store(field.db_name, (uint) strlen(field.db_name), cs, thd_charset) || prot.store(field.table_name, (uint) strlen(field.table_name), @@ -626,34 +673,26 @@ bool Protocol::send_fields(List<Item> *list, uint flag) } } local_packet->length((uint) (pos - local_packet->ptr())); - if (flag & 2) + if (flags & SEND_DEFAULTS) 
item->send(&prot, &tmp); // Send default value if (prot.write()) break; /* purecov: inspected */ -#ifndef DEBUG_OFF +#ifndef DBUG_OFF field_types[count++]= field.type; #endif } - my_net_write(&thd->net, eof_buff, 1); + if (flags & SEND_EOF) + write_eof_packet(thd, &thd->net); DBUG_RETURN(prepare_for_send(list)); err: - send_error(thd,ER_OUT_OF_RESOURCES); /* purecov: inspected */ + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), + MYF(0)); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } -bool Protocol::send_records_num(List<Item> *list, ulonglong records) -{ - char *pos; - char buff[20]; - pos=net_store_length(buff, list->elements); - pos=net_store_length(pos, records); - return my_net_write(&thd->net, buff,(uint) (pos-buff)); -} - - bool Protocol::write() { DBUG_ENTER("Protocol::write"); @@ -722,14 +761,14 @@ bool Protocol::store(I_List<i_string>* str_list) void Protocol_simple::prepare_for_resend() { packet->length(0); -#ifndef DEBUG_OFF +#ifndef DBUG_OFF field_pos= 0; #endif } bool Protocol_simple::store_null() { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF field_pos++; #endif char buff[1]; @@ -763,9 +802,11 @@ bool Protocol::store_string_aux(const char *from, uint length, bool Protocol_simple::store(const char *from, uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DECIMAL || + field_types[field_pos] == MYSQL_TYPE_BIT || + field_types[field_pos] == MYSQL_TYPE_NEWDECIMAL || (field_types[field_pos] >= MYSQL_TYPE_ENUM && field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); field_pos++; @@ -778,9 +819,11 @@ bool Protocol_simple::store(const char *from, uint length, CHARSET_INFO *fromcs) { CHARSET_INFO *tocs= this->thd->variables.character_set_results; -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DECIMAL || + field_types[field_pos] == MYSQL_TYPE_BIT || + field_types[field_pos] 
== MYSQL_TYPE_NEWDECIMAL || (field_types[field_pos] >= MYSQL_TYPE_ENUM && field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); field_pos++; @@ -791,7 +834,7 @@ bool Protocol_simple::store(const char *from, uint length, bool Protocol_simple::store_tiny(longlong from) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_TINY); field_pos++; #endif @@ -803,8 +846,9 @@ bool Protocol_simple::store_tiny(longlong from) bool Protocol_simple::store_short(longlong from) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_YEAR || field_types[field_pos] == MYSQL_TYPE_SHORT); field_pos++; #endif @@ -816,7 +860,7 @@ bool Protocol_simple::store_short(longlong from) bool Protocol_simple::store_long(longlong from) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_INT24 || field_types[field_pos] == MYSQL_TYPE_LONG); @@ -830,7 +874,7 @@ bool Protocol_simple::store_long(longlong from) bool Protocol_simple::store_longlong(longlong from, bool unsigned_flag) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_LONGLONG); field_pos++; @@ -843,9 +887,23 @@ bool Protocol_simple::store_longlong(longlong from, bool unsigned_flag) } +bool Protocol_simple::store_decimal(const my_decimal *d) +{ +#ifndef DBUG_OFF + DBUG_ASSERT(field_types == 0 || + field_types[field_pos] == MYSQL_TYPE_NEWDECIMAL); + field_pos++; +#endif + char buff[DECIMAL_MAX_STR_LENGTH]; + String str(buff, sizeof(buff), &my_charset_bin); + (void) my_decimal2string(E_DEC_FATAL_ERROR, d, 0, 0, 0, &str); + return net_store_data(str.ptr(), str.length()); +} + + bool Protocol_simple::store(float from, uint32 decimals, String *buffer) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_FLOAT); field_pos++; @@ -857,7 +915,7 @@ bool Protocol_simple::store(float 
from, uint32 decimals, String *buffer) bool Protocol_simple::store(double from, uint32 decimals, String *buffer) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DOUBLE); field_pos++; @@ -871,7 +929,7 @@ bool Protocol_simple::store(Field *field) { if (field->is_null()) return store_null(); -#ifndef DEBUG_OFF +#ifndef DBUG_OFF field_pos++; #endif char buff[MAX_FIELD_WIDTH]; @@ -892,7 +950,7 @@ bool Protocol_simple::store(Field *field) bool Protocol_simple::store(TIME *tm) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DATETIME || field_types[field_pos] == MYSQL_TYPE_TIMESTAMP); @@ -915,7 +973,7 @@ bool Protocol_simple::store(TIME *tm) bool Protocol_simple::store_date(TIME *tm) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DATE); field_pos++; @@ -934,7 +992,7 @@ bool Protocol_simple::store_date(TIME *tm) bool Protocol_simple::store_time(TIME *tm) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_TIME); field_pos++; @@ -994,12 +1052,6 @@ void Protocol_prep::prepare_for_resend() bool Protocol_prep::store(const char *from, uint length, CHARSET_INFO *fromcs) { CHARSET_INFO *tocs= thd->variables.character_set_results; -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DECIMAL || - (field_types[field_pos] >= MYSQL_TYPE_ENUM && - field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); -#endif field_pos++; return store_string_aux(from, length, fromcs, tocs); } @@ -1007,12 +1059,6 @@ bool Protocol_prep::store(const char *from, uint length, CHARSET_INFO *fromcs) bool Protocol_prep::store(const char *from,uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DECIMAL || - (field_types[field_pos] >= MYSQL_TYPE_ENUM && 
- field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); -#endif field_pos++; return store_string_aux(from, length, fromcs, tocs); } @@ -1030,10 +1076,6 @@ bool Protocol_prep::store_null() bool Protocol_prep::store_tiny(longlong from) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_TINY); -#endif char buff[1]; field_pos++; buff[0]= (uchar) from; @@ -1043,11 +1085,6 @@ bool Protocol_prep::store_tiny(longlong from) bool Protocol_prep::store_short(longlong from) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_SHORT || - field_types[field_pos] == MYSQL_TYPE_YEAR); -#endif field_pos++; char *to= packet->prep_append(2, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1059,11 +1096,6 @@ bool Protocol_prep::store_short(longlong from) bool Protocol_prep::store_long(longlong from) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_INT24 || - field_types[field_pos] == MYSQL_TYPE_LONG); -#endif field_pos++; char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1075,10 +1107,6 @@ bool Protocol_prep::store_long(longlong from) bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_LONGLONG); -#endif field_pos++; char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1087,13 +1115,21 @@ bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) return 0; } - -bool Protocol_prep::store(float from, uint32 decimals, String *buffer) +bool Protocol_prep::store_decimal(const my_decimal *d) { -#ifndef DEBUG_OFF +#ifndef DBUG_OFF DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_FLOAT); + field_types[field_pos] == MYSQL_TYPE_NEWDECIMAL); + field_pos++; #endif + char buff[DECIMAL_MAX_STR_LENGTH]; + String str(buff, sizeof(buff), &my_charset_bin); + (void) my_decimal2string(E_DEC_FATAL_ERROR, d, 
0, 0, 0, &str); + return store(str.ptr(), str.length(), str.charset()); +} + +bool Protocol_prep::store(float from, uint32 decimals, String *buffer) +{ field_pos++; char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1105,10 +1141,6 @@ bool Protocol_prep::store(float from, uint32 decimals, String *buffer) bool Protocol_prep::store(double from, uint32 decimals, String *buffer) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DOUBLE); -#endif field_pos++; char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1132,12 +1164,6 @@ bool Protocol_prep::store(Field *field) bool Protocol_prep::store(TIME *tm) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DATETIME || - field_types[field_pos] == MYSQL_TYPE_DATE || - field_types[field_pos] == MYSQL_TYPE_TIMESTAMP); -#endif char buff[12],*pos; uint length; field_pos++; @@ -1172,10 +1198,6 @@ bool Protocol_prep::store_date(TIME *tm) bool Protocol_prep::store_time(TIME *tm) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_TIME); -#endif char buff[13], *pos; uint length; field_pos++; @@ -1202,12 +1224,3 @@ bool Protocol_prep::store_time(TIME *tm) buff[0]=(char) length; // Length is stored first return packet->append(buff, length+1, PACKET_BUFFER_EXTRA_ALLOC); } - -#ifdef EMBEDDED_LIBRARY -/* Should be removed when we define the Protocol_cursor's future */ -bool Protocol_cursor::write() -{ - return Protocol_simple::write(); -} -#endif - diff --git a/sql/protocol.h b/sql/protocol.h index ce3adb41df5..0e00a7c21e0 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2002-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either 
version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -31,7 +30,7 @@ protected: String *packet; String *convert; uint field_pos; -#ifndef DEBUG_OFF +#ifndef DBUG_OFF enum enum_field_types *field_types; #endif uint field_count; @@ -50,17 +49,15 @@ public: Protocol(THD *thd_arg) { init(thd_arg); } virtual ~Protocol() {} void init(THD* thd_arg); - bool send_fields(List<Item> *list, uint flag); - bool send_records_num(List<Item> *list, ulonglong records); + + enum { SEND_NUM_ROWS= 1, SEND_DEFAULTS= 2, SEND_EOF= 4 }; + virtual bool send_fields(List<Item> *list, uint flags); + bool store(I_List<i_string> *str_list); bool store(const char *from, CHARSET_INFO *cs); String *storage_packet() { return packet; } inline void free() { packet->free(); } -#ifndef EMBEDDED_LIBRARY - bool write(); -#else virtual bool write(); -#endif inline bool store(uint32 from) { return store_long((longlong) from); } inline bool store(longlong from) @@ -83,6 +80,7 @@ public: virtual bool store_short(longlong from)=0; virtual bool store_long(longlong from)=0; virtual bool store_longlong(longlong from, bool unsigned_flag)=0; + virtual bool store_decimal(const my_decimal *)=0; virtual bool store(const char *from, uint length, CHARSET_INFO *cs)=0; virtual bool store(const char *from, uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs)=0; @@ -92,6 +90,12 @@ public: virtual bool store_date(TIME *time)=0; virtual bool store_time(TIME *time)=0; virtual bool store(Field *field)=0; +#ifdef EMBEDDED_LIBRARY + int begin_dataset(); + virtual void remove_last_row() {} +#else + void remove_last_row() {} +#endif }; @@ -108,6 +112,7 @@ public: virtual bool store_short(longlong from); virtual bool store_long(longlong from); virtual bool store_longlong(longlong from, bool unsigned_flag); + virtual bool 
store_decimal(const my_decimal *); virtual bool store(const char *from, uint length, CHARSET_INFO *cs); virtual bool store(const char *from, uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs); @@ -117,6 +122,9 @@ public: virtual bool store(float nr, uint32 decimals, String *buffer); virtual bool store(double from, uint32 decimals, String *buffer); virtual bool store(Field *field); +#ifdef EMBEDDED_LIBRARY + void remove_last_row(); +#endif }; @@ -138,6 +146,7 @@ public: virtual bool store_short(longlong from); virtual bool store_long(longlong from); virtual bool store_longlong(longlong from, bool unsigned_flag); + virtual bool store_decimal(const my_decimal *); virtual bool store(const char *from,uint length, CHARSET_INFO *cs); virtual bool store(const char *from, uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs); @@ -149,33 +158,12 @@ public: virtual bool store(Field *field); }; -class Protocol_cursor :public Protocol_simple -{ -public: - MEM_ROOT *alloc; - MYSQL_FIELD *fields; - MYSQL_ROWS *data; - MYSQL_ROWS **prev_record; - ulong row_count; - - Protocol_cursor() {} - Protocol_cursor(THD *thd_arg, MEM_ROOT *ini_alloc) :Protocol_simple(thd_arg), alloc(ini_alloc) {} - bool prepare_for_send(List<Item> *item_list) - { - fields= NULL; - data= NULL; - prev_record= &data; - return Protocol_simple::prepare_for_send(item_list); - } - bool send_fields(List<Item> *list, uint flag); - bool write(); -}; - void send_warning(THD *thd, uint sql_errno, const char *err=0); -void net_printf(THD *thd,uint sql_errno, ...); +void net_printf_error(THD *thd, uint sql_errno, ...); +void net_send_error(THD *thd, uint sql_errno=0, const char *err=0); void send_ok(THD *thd, ha_rows affected_rows=0L, ulonglong id=0L, const char *info=0); -void send_eof(THD *thd, bool no_flush=0); +void send_eof(THD *thd); bool send_old_password_request(THD *thd); char *net_store_data(char *to,const char *from, uint length); char *net_store_data(char *to,int32 from); diff --git 
a/sql/protocol_cursor.cc b/sql/protocol_cursor.cc deleted file mode 100644 index b225e06ed32..00000000000 --- a/sql/protocol_cursor.cc +++ /dev/null @@ -1,143 +0,0 @@ -/* Copyright (C) 2000-2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Low level functions for storing data to be send to the MySQL client - The actual communction is handled by the net_xxx functions in net_serv.cc -*/ - -#ifdef USE_PRAGMA_IMPLEMENTATION -#pragma implementation // gcc: Class implementation -#endif - -#include "mysql_priv.h" -#include <mysql.h> - -bool Protocol_cursor::send_fields(List<Item> *list, uint flag) -{ - List_iterator_fast<Item> it(*list); - Item *item; - MYSQL_FIELD *client_field; - - DBUG_ENTER("send_fields"); - if (prepare_for_send(list)) - return FALSE; - - fields= (MYSQL_FIELD *)alloc_root(alloc, sizeof(MYSQL_FIELD) * field_count); - if (!fields) - goto err; - - client_field= fields; - while ((item= it++)) - { - Send_field server_field; - item->make_field(&server_field); - - client_field->db= strdup_root(alloc, server_field.db_name); - client_field->table= strdup_root(alloc, server_field.table_name); - client_field->name= strdup_root(alloc, server_field.col_name); - client_field->org_table= strdup_root(alloc, server_field.org_table_name); - client_field->org_name= strdup_root(alloc, 
server_field.org_col_name); - client_field->length= server_field.length; - client_field->type= server_field.type; - client_field->flags= server_field.flags; - client_field->decimals= server_field.decimals; - client_field->db_length= strlen(client_field->db); - client_field->table_length= strlen(client_field->table); - client_field->name_length= strlen(client_field->name); - client_field->org_name_length= strlen(client_field->org_name); - client_field->org_table_length= strlen(client_field->org_table); - client_field->charsetnr= server_field.charsetnr; - - if (INTERNAL_NUM_FIELD(client_field)) - client_field->flags|= NUM_FLAG; - - if (flag & 2) - { - char buff[80]; - String tmp(buff, sizeof(buff), default_charset_info), *res; - - if (!(res=item->val_str(&tmp))) - client_field->def= (char*) ""; - else - client_field->def= strmake_root(alloc, res->ptr(), res->length()); - } - else - client_field->def=0; - client_field->max_length= 0; - ++client_field; - } - - DBUG_RETURN(FALSE); - err: - send_error(thd, ER_OUT_OF_RESOURCES); /* purecov: inspected */ - DBUG_RETURN(TRUE); /* purecov: inspected */ -} - -/* Get the length of next field. 
Change parameter to point at fieldstart */ -bool Protocol_cursor::write() -{ - byte *cp= (byte *)packet->ptr(); - byte *end_pos= (byte *)packet->ptr() + packet->length(); - ulong len; - MYSQL_FIELD *cur_field= fields; - MYSQL_FIELD *fields_end= fields + field_count; - MYSQL_ROWS *new_record; - byte **data_tmp; - byte *to; - - new_record= (MYSQL_ROWS *)alloc_root(alloc, - sizeof(MYSQL_ROWS) + (field_count + 1)*sizeof(char *) + packet->length()); - if (!new_record) - goto err; - data_tmp= (byte **)(new_record + 1); - new_record->data= (char **)data_tmp; - - to= (byte *)(fields + field_count + 1); - - for (; cur_field < fields_end; ++cur_field, ++data_tmp) - { - if ((len=net_field_length((uchar **)&cp))) - { - *data_tmp= 0; - } - else - { - if ((long)len > (end_pos - cp)) - { -// TODO error signal send_error(thd, CR_MALFORMED_PACKET); - return TRUE; - } - memcpy(to,(char*) cp,len); - to[len]=0; - to+=len+1; - cp+=len; - if (cur_field->max_length < len) - cur_field->max_length=len; - } - } - - *prev_record= new_record; - prev_record= &new_record->next; - new_record->next= NULL; - row_count++; - return FALSE; - err: -// TODO error signal send_error(thd, ER_OUT_OF_RESOURCES); - return TRUE; -} - - diff --git a/sql/records.cc b/sql/records.cc index 7e4a808f0c3..3a833c87b7b 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -20,13 +19,13 @@ #include "mysql_priv.h" static int rr_quick(READ_RECORD *info); -static int rr_sequential(READ_RECORD *info); +int rr_sequential(READ_RECORD *info); static int rr_from_tempfile(READ_RECORD *info); static int rr_unpack_from_tempfile(READ_RECORD *info); static int rr_unpack_from_buffer(READ_RECORD *info); static int rr_from_pointers(READ_RECORD *info); static int rr_from_cache(READ_RECORD *info); -static int init_rr_cache(READ_RECORD *info); +static int init_rr_cache(THD *thd, READ_RECORD *info); static int rr_cmp(uchar *a,uchar *b); static int rr_index_first(READ_RECORD *info); static int rr_index(READ_RECORD *info); @@ -128,14 +127,15 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, !(table->file->table_flags() & HA_FAST_KEY_READ) && (table->db_stat & HA_READ_ONLY || table->reginfo.lock_type <= TL_READ_NO_INSERT) && - (ulonglong) table->reclength*(table->file->records+ - table->file->deleted) > + (ulonglong) table->s->reclength* (table->file->records+ + table->file->deleted) > (ulonglong) MIN_FILE_LENGTH_TO_USE_ROW_CACHE && - info->io_cache->end_of_file/info->ref_length*table->reclength > + info->io_cache->end_of_file/info->ref_length * table->s->reclength > (my_off_t) MIN_ROWS_TO_USE_TABLE_CACHE && - !table->blob_fields) + !table->s->blob_fields && + info->ref_length <= MAX_REFLENGTH) { - if (! init_rr_cache(info)) + if (! 
init_rr_cache(thd, info)) { DBUG_PRINT("info",("using rr_from_cache")); info->read_record=rr_from_cache; @@ -145,9 +145,6 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, else if (select && select->quick) { DBUG_PRINT("info",("using rr_quick")); - - if (!table->file->inited) - table->file->ha_index_init(select->quick->index); info->read_record=rr_quick; } else if (table->sort.record_pointers) @@ -169,16 +166,24 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, if (!table->no_cache && (use_record_cache > 0 || (int) table->reginfo.lock_type <= (int) TL_READ_HIGH_PRIORITY || - !(table->db_options_in_use & HA_OPTION_PACK_RECORD) || + !(table->s->db_options_in_use & HA_OPTION_PACK_RECORD) || (use_record_cache < 0 && !(table->file->table_flags() & HA_NOT_DELETE_WITH_CACHE)))) VOID(table->file->extra_opt(HA_EXTRA_CACHE, thd->variables.read_buff_size)); } + /* Condition pushdown to storage engine */ + if (thd->variables.engine_condition_pushdown && + select && select->cond && + (select->cond->used_tables() & table->map) && + !table->file->pushed_cond) + table->file->cond_push(select->cond); + DBUG_VOID_RETURN; } /* init_read_record */ + void end_read_record(READ_RECORD *info) { /* free cache if used */ if (info->cache) @@ -188,7 +193,7 @@ void end_read_record(READ_RECORD *info) } if (info->table) { - filesort_free_buffers(info->table); + filesort_free_buffers(info->table,0); (void) info->file->extra(HA_EXTRA_NO_CACHE); if (info->read_record != rr_quick) // otherwise quick_range does it (void) info->file->ha_index_or_rnd_end(); @@ -284,14 +289,14 @@ static int rr_index(READ_RECORD *info) } -static int rr_sequential(READ_RECORD *info) +int rr_sequential(READ_RECORD *info) { int tmp; while ((tmp=info->file->rnd_next(info->record))) { if (info->thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + info->thd->send_kill_message(); return 1; } /* @@ -411,23 +416,21 @@ static int rr_unpack_from_buffer(READ_RECORD *info) } /* cacheing of records 
from a database */ -static int init_rr_cache(READ_RECORD *info) +static int init_rr_cache(THD *thd, READ_RECORD *info) { uint rec_cache_size; - THD *thd= current_thd; - DBUG_ENTER("init_rr_cache"); - info->struct_length=3+MAX_REFLENGTH; - info->reclength=ALIGN_SIZE(info->table->reclength+1); + info->struct_length= 3+MAX_REFLENGTH; + info->reclength= ALIGN_SIZE(info->table->s->reclength+1); if (info->reclength < info->struct_length) - info->reclength=ALIGN_SIZE(info->struct_length); + info->reclength= ALIGN_SIZE(info->struct_length); - info->error_offset=info->table->reclength; - info->cache_records= thd->variables.read_rnd_buff_size / - (info->reclength+info->struct_length); - rec_cache_size=info->cache_records*info->reclength; - info->rec_cache_size=info->cache_records*info->ref_length; + info->error_offset= info->table->s->reclength; + info->cache_records= (thd->variables.read_rnd_buff_size / + (info->reclength+info->struct_length)); + rec_cache_size= info->cache_records*info->reclength; + info->rec_cache_size= info->cache_records*info->ref_length; // We have to allocate one more byte to use uint3korr (see comments for it) if (info->cache_records <= 2 || @@ -436,7 +439,8 @@ static int init_rr_cache(READ_RECORD *info) MYF(0)))) DBUG_RETURN(1); #ifdef HAVE_purify - bzero(info->cache,rec_cache_size); // Avoid warnings in qsort + // Avoid warnings in qsort + bzero(info->cache,rec_cache_size+info->cache_records* info->struct_length+1); #endif DBUG_PRINT("info",("Allocated buffert for %d records",info->cache_records)); info->read_positions=info->cache+rec_cache_size; @@ -467,7 +471,8 @@ static int rr_from_cache(READ_RECORD *info) else { error=0; - memcpy(info->record,info->cache_pos,(size_t) info->table->reclength); + memcpy(info->record,info->cache_pos, + (size_t) info->table->s->reclength); } info->cache_pos+=info->reclength; return ((int) error); diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 4c8703226a6..1dc16b6e566 100644 --- a/sql/repl_failsafe.cc 
+++ b/sql/repl_failsafe.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB & Sasha +/* Copyright (C) 2001-2006 MySQL AB & Sasha This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -67,12 +66,10 @@ static int init_failsafe_rpl_thread(THD* thd) this thread has no other error reporting method). */ thd->system_thread = thd->bootstrap = 1; - thd->host_or_ip= ""; + thd->security_ctx->skip_grants(); my_net_init(&thd->net, 0); thd->net.read_timeout = slave_net_timeout; thd->max_client_packet_length=thd->net.max_packet; - thd->master_access= ~(ulong)0; - thd->priv_user = 0; pthread_mutex_lock(&LOCK_thread_count); thd->thread_id = thread_id++; pthread_mutex_unlock(&LOCK_thread_count); @@ -162,7 +159,7 @@ int register_slave(THD* thd, uchar* packet, uint packet_length) SLAVE_INFO *si; uchar *p= packet, *p_end= packet + packet_length; - if (check_access(thd, REPL_SLAVE_ACL, any_db,0,0,0)) + if (check_access(thd, REPL_SLAVE_ACL, any_db,0,0,0,0)) return 1; if (!(si = (SLAVE_INFO*)my_malloc(sizeof(SLAVE_INFO), MYF(MY_WME)))) goto err2; @@ -193,7 +190,6 @@ err: my_message(ER_UNKNOWN_ERROR, "Wrong parameters to function register_slave", MYF(0)); err2: - send_error(thd); return 1; } @@ -437,7 +433,7 @@ static Slave_log_event* find_slave_event(IO_CACHE* log, This function is broken now. See comment for translate_master(). 
*/ -int show_new_master(THD* thd) +bool show_new_master(THD* thd) { Protocol *protocol= thd->protocol; DBUG_ENTER("show_new_master"); @@ -450,23 +446,24 @@ int show_new_master(THD* thd) { if (errmsg[0]) my_error(ER_ERROR_WHEN_EXECUTING_COMMAND, MYF(0), - "SHOW NEW MASTER", errmsg); - DBUG_RETURN(-1); + "SHOW NEW MASTER", errmsg); + DBUG_RETURN(TRUE); } else { field_list.push_back(new Item_empty_string("Log_name", 20)); field_list.push_back(new Item_return_int("Log_pos", 10, MYSQL_TYPE_LONGLONG)); - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); protocol->prepare_for_resend(); protocol->store(lex_mi->log_file_name, &my_charset_bin); protocol->store((ulonglong) lex_mi->pos); if (protocol->write()) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } } @@ -505,7 +502,7 @@ int update_slave_list(MYSQL* mysql, MASTER_INFO* mi) int port_ind; DBUG_ENTER("update_slave_list"); - if (mysql_real_query(mysql,"SHOW SLAVE HOSTS",16) || + if (mysql_real_query(mysql, STRING_WITH_LEN("SHOW SLAVE HOSTS")) || !(res = mysql_store_result(mysql))) { error= mysql_error(mysql); @@ -531,11 +528,11 @@ HOSTS"; while ((row= mysql_fetch_row(res))) { - uint32 server_id; + uint32 log_server_id; SLAVE_INFO* si, *old_si; - server_id = atoi(row[0]); + log_server_id = atoi(row[0]); if ((old_si= (SLAVE_INFO*)hash_search(&slave_list, - (byte*)&server_id,4))) + (byte*)&log_server_id,4))) si = old_si; else { @@ -545,7 +542,7 @@ HOSTS"; pthread_mutex_unlock(&LOCK_slave_list); goto err; } - si->server_id = server_id; + si->server_id = log_server_id; my_hash_insert(&slave_list, (byte*)si); } strmake(si->host, row[1], sizeof(si->host)-1); @@ -582,7 +579,7 @@ int find_recovery_captain(THD* thd, MYSQL* mysql) #endif #if NOT_USED -pthread_handler_decl(handle_failsafe_rpl,arg) +pthread_handler_t handle_failsafe_rpl(void *arg) { 
DBUG_ENTER("handle_failsafe_rpl"); THD *thd = new THD; @@ -631,7 +628,7 @@ err: } #endif -int show_slave_hosts(THD* thd) +bool show_slave_hosts(THD* thd) { List<Item> field_list; Protocol *protocol= thd->protocol; @@ -651,8 +648,9 @@ int show_slave_hosts(THD* thd) field_list.push_back(new Item_return_int("Master_id", 10, MYSQL_TYPE_LONG)); - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); pthread_mutex_lock(&LOCK_slave_list); @@ -673,12 +671,12 @@ int show_slave_hosts(THD* thd) if (protocol->write()) { pthread_mutex_unlock(&LOCK_slave_list); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } pthread_mutex_unlock(&LOCK_slave_list); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -709,6 +707,7 @@ int connect_to_master(THD *thd, MYSQL* mysql, MASTER_INFO* mi) if (!mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0, mi->port, 0, 0)) DBUG_RETURN(1); + mysql->reconnect= 1; DBUG_RETURN(0); } @@ -739,7 +738,7 @@ static int fetch_db_tables(THD *thd, MYSQL *mysql, const char *db, { bzero((char*) &table, sizeof(table)); //just for safe table.db= (char*) db; - table.real_name= (char*) table_name; + table.table_name= (char*) table_name; table.updating= 1; if (!tables_ok(thd, &table)) @@ -759,7 +758,7 @@ static int fetch_db_tables(THD *thd, MYSQL *mysql, const char *db, - No active transaction (flush_relay_log_info would not work in this case) */ -int load_master_data(THD* thd) +bool load_master_data(THD* thd) { MYSQL mysql; MYSQL_RES* master_status_res = 0; @@ -781,16 +780,15 @@ int load_master_data(THD* thd) (error=terminate_slave_threads(active_mi,restart_thread_mask, 1 /*skip lock*/))) { - send_error(thd,error); + my_message(error, ER(error), MYF(0)); unlock_slave_threads(active_mi); pthread_mutex_unlock(&LOCK_active_mi); - return 1; + return TRUE; } if (connect_to_master(thd, &mysql, active_mi)) { - net_printf(thd, error= 
ER_CONNECT_TO_MASTER, - mysql_error(&mysql)); + my_error(error= ER_CONNECT_TO_MASTER, MYF(0), mysql_error(&mysql)); goto err; } @@ -799,11 +797,10 @@ int load_master_data(THD* thd) MYSQL_RES *db_res, **table_res, **table_res_end, **cur_table_res; uint num_dbs; - if (mysql_real_query(&mysql, "SHOW DATABASES", 14) || + if (mysql_real_query(&mysql, STRING_WITH_LEN("SHOW DATABASES")) || !(db_res = mysql_store_result(&mysql))) { - net_printf(thd, error = ER_QUERY_ON_MASTER, - mysql_error(&mysql)); + my_error(error= ER_QUERY_ON_MASTER, MYF(0), mysql_error(&mysql)); goto err; } @@ -816,7 +813,7 @@ int load_master_data(THD* thd) if (!(table_res = (MYSQL_RES**)thd->alloc(num_dbs * sizeof(MYSQL_RES*)))) { - net_printf(thd, error = ER_OUTOFMEMORY); + my_message(error = ER_OUTOFMEMORY, ER(ER_OUTOFMEMORY), MYF(0)); goto err; } @@ -826,12 +823,12 @@ int load_master_data(THD* thd) we wait to issue FLUSH TABLES WITH READ LOCK for as long as we can to minimize the lock time. */ - if (mysql_real_query(&mysql, "FLUSH TABLES WITH READ LOCK", 27) || - mysql_real_query(&mysql, "SHOW MASTER STATUS",18) || + if (mysql_real_query(&mysql, + STRING_WITH_LEN("FLUSH TABLES WITH READ LOCK")) || + mysql_real_query(&mysql, STRING_WITH_LEN("SHOW MASTER STATUS")) || !(master_status_res = mysql_store_result(&mysql))) { - net_printf(thd, error = ER_QUERY_ON_MASTER, - mysql_error(&mysql)); + my_error(error= ER_QUERY_ON_MASTER, MYF(0), mysql_error(&mysql)); goto err; } @@ -865,7 +862,8 @@ int load_master_data(THD* thd) if (!db_ok(db, replicate_do_db, replicate_ignore_db) || !db_ok_with_wild_table(db) || - !strcmp(db,"mysql")) + !strcmp(db,"mysql") || + is_schema_db(db)) { *cur_table_res = 0; continue; @@ -876,17 +874,15 @@ int load_master_data(THD* thd) if (mysql_create_db(thd, db, &create_info, 1)) { - send_error(thd, 0, 0); cleanup_mysql_results(db_res, cur_table_res - 1, table_res); goto err; } if (mysql_select_db(&mysql, db) || - mysql_real_query(&mysql, "SHOW TABLES", 11) || + 
mysql_real_query(&mysql, STRING_WITH_LEN("SHOW TABLES")) || !(*cur_table_res = mysql_store_result(&mysql))) { - net_printf(thd, error = ER_QUERY_ON_MASTER, - mysql_error(&mysql)); + my_error(error= ER_QUERY_ON_MASTER, MYF(0), mysql_error(&mysql)); cleanup_mysql_results(db_res, cur_table_res - 1, table_res); goto err; } @@ -920,14 +916,14 @@ int load_master_data(THD* thd) setting active_mi, because init_master_info() sets active_mi with defaults. */ - int error; + int error_2; if (init_master_info(active_mi, master_info_file, relay_log_info_file, 0, (SLAVE_IO | SLAVE_SQL))) - send_error(thd, ER_MASTER_INFO); + my_message(ER_MASTER_INFO, ER(ER_MASTER_INFO), MYF(0)); strmake(active_mi->master_log_name, row[0], sizeof(active_mi->master_log_name)); - active_mi->master_log_pos= my_strtoll10(row[1], (char**) 0, &error); + active_mi->master_log_pos= my_strtoll10(row[1], (char**) 0, &error_2); /* at least in recent versions, the condition below should be false */ if (active_mi->master_log_pos < BIN_LOG_HEADER_SIZE) active_mi->master_log_pos = BIN_LOG_HEADER_SIZE; @@ -936,15 +932,15 @@ int load_master_data(THD* thd) host was specified; there could have been a problem when replication started, which led to relay log's IO_CACHE to not be inited. 
*/ - flush_master_info(active_mi, 0); + if (flush_master_info(active_mi, 0)) + sql_print_error("Failed to flush master info file"); } mysql_free_result(master_status_res); } - if (mysql_real_query(&mysql, "UNLOCK TABLES", 13)) + if (mysql_real_query(&mysql, STRING_WITH_LEN("UNLOCK TABLES"))) { - net_printf(thd, error = ER_QUERY_ON_MASTER, - mysql_error(&mysql)); + my_error(error= ER_QUERY_ON_MASTER, MYF(0), mysql_error(&mysql)); goto err; } } @@ -953,10 +949,10 @@ int load_master_data(THD* thd) 0 /* not only reset, but also reinit */, &errmsg)) { - send_error(thd, 0, "Failed purging old relay logs"); + my_error(ER_RELAY_LOG_FAIL, MYF(0), errmsg); unlock_slave_threads(active_mi); pthread_mutex_unlock(&LOCK_active_mi); - return 1; + return TRUE; } pthread_mutex_lock(&active_mi->rli.data_lock); active_mi->rli.group_master_log_pos = active_mi->master_log_pos; diff --git a/sql/repl_failsafe.h b/sql/repl_failsafe.h index ad0219bb735..561db00d841 100644 --- a/sql/repl_failsafe.h +++ b/sql/repl_failsafe.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB & Sasha +/* Copyright (C) 2001-2005 MySQL AB & Sasha This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -31,18 +30,18 @@ extern pthread_cond_t COND_rpl_status; extern TYPELIB rpl_role_typelib, rpl_status_typelib; extern const char* rpl_role_type[], *rpl_status_type[]; -pthread_handler_decl(handle_failsafe_rpl,arg); +pthread_handler_t handle_failsafe_rpl(void *arg); void change_rpl_status(RPL_STATUS from_status, RPL_STATUS to_status); int find_recovery_captain(THD* thd, MYSQL* mysql); int update_slave_list(MYSQL* mysql, MASTER_INFO* mi); extern HASH slave_list; -int load_master_data(THD* thd); +bool load_master_data(THD* thd); int connect_to_master(THD *thd, MYSQL* mysql, MASTER_INFO* mi); -int show_new_master(THD* thd); -int show_slave_hosts(THD* thd); +bool show_new_master(THD* thd); +bool show_slave_hosts(THD* thd); int translate_master(THD* thd, LEX_MASTER_INFO* mi, char* errmsg); void init_slave_list(); void end_slave_list(); diff --git a/sql/set_var.cc b/sql/set_var.cc index 57bb93ef4b1..46c2a775d8a 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -28,7 +27,7 @@ - If the variable is thread specific, add it to 'system_variables' struct. If not, add it to mysqld.cc and an declaration in 'mysql_priv.h' - If the variable should be changed from the command line, add a definition - of it in the my_option structure list in mysqld.dcc + of it in the my_option structure list in mysqld.cc - Don't forget to initialize new fields in global_system_variables and max_system_variables! 
- If the variable should show up in 'show variables' add it to the @@ -83,9 +82,6 @@ TYPELIB delay_key_write_typelib= delay_key_write_type_names, NULL }; -static int sys_check_charset(THD *thd, set_var *var); -static bool sys_update_charset(THD *thd, set_var *var); -static void sys_set_default_charset(THD *thd, enum_var_type type); static int sys_check_ftb_syntax(THD *thd, set_var *var); static bool sys_update_ftb_syntax(THD *thd, set_var * var); static void sys_default_ftb_syntax(THD *thd, enum_var_type type); @@ -98,15 +94,17 @@ static bool set_option_autocommit(THD *thd, set_var *var); static int check_log_update(THD *thd, set_var *var); static bool set_log_update(THD *thd, set_var *var); static int check_pseudo_thread_id(THD *thd, set_var *var); +static bool set_log_bin(THD *thd, set_var *var); static void fix_low_priority_updates(THD *thd, enum_var_type type); static void fix_tx_isolation(THD *thd, enum_var_type type); +static int check_completion_type(THD *thd, set_var *var); +static void fix_completion_type(THD *thd, enum_var_type type); static void fix_net_read_timeout(THD *thd, enum_var_type type); static void fix_net_write_timeout(THD *thd, enum_var_type type); static void fix_net_retry_count(THD *thd, enum_var_type type); static void fix_max_join_size(THD *thd, enum_var_type type); static void fix_query_cache_size(THD *thd, enum_var_type type); static void fix_query_cache_min_res_unit(THD *thd, enum_var_type type); -static void fix_myisam_max_extra_sort_file_size(THD *thd, enum_var_type type); static void fix_myisam_max_sort_file_size(THD *thd, enum_var_type type); static void fix_max_binlog_size(THD *thd, enum_var_type type); static void fix_max_relay_log_size(THD *thd, enum_var_type type); @@ -120,6 +118,7 @@ void fix_sql_mode_var(THD *thd, enum_var_type type); static byte *get_error_count(THD *thd); static byte *get_warning_count(THD *thd); static byte *get_have_innodb(THD *thd); +static byte *get_tmpdir(THD *thd); /* Variable definition list @@ 
-128,27 +127,39 @@ static byte *get_have_innodb(THD *thd); alphabetic order */ +sys_var_thd_ulong sys_auto_increment_increment("auto_increment_increment", + &SV::auto_increment_increment); +sys_var_thd_ulong sys_auto_increment_offset("auto_increment_offset", + &SV::auto_increment_offset); + +sys_var_bool_ptr sys_automatic_sp_privileges("automatic_sp_privileges", + &sp_automatic_privileges); + +sys_var_const_str sys_basedir("basedir", mysql_home); sys_var_long_ptr sys_binlog_cache_size("binlog_cache_size", &binlog_cache_size); sys_var_thd_ulong sys_bulk_insert_buff_size("bulk_insert_buffer_size", &SV::bulk_insert_buff_size); sys_var_character_set_server sys_character_set_server("character_set_server"); -sys_var_str sys_charset_system("character_set_system", - sys_check_charset, - sys_update_charset, - sys_set_default_charset, - (char *)my_charset_utf8_general_ci.name); +sys_var_const_str sys_charset_system("character_set_system", + (char *)my_charset_utf8_general_ci.name); sys_var_character_set_database sys_character_set_database("character_set_database"); sys_var_character_set_client sys_character_set_client("character_set_client"); sys_var_character_set_connection sys_character_set_connection("character_set_connection"); sys_var_character_set_results sys_character_set_results("character_set_results"); +sys_var_character_set_filesystem sys_character_set_filesystem("character_set_filesystem"); +sys_var_thd_ulong sys_completion_type("completion_type", + &SV::completion_type, + check_completion_type, + fix_completion_type); sys_var_collation_connection sys_collation_connection("collation_connection"); sys_var_collation_database sys_collation_database("collation_database"); sys_var_collation_server sys_collation_server("collation_server"); -sys_var_bool_ptr sys_concurrent_insert("concurrent_insert", - &myisam_concurrent_insert); +sys_var_long_ptr sys_concurrent_insert("concurrent_insert", + &myisam_concurrent_insert); sys_var_long_ptr 
sys_connect_timeout("connect_timeout", &connect_timeout); +sys_var_const_str sys_datadir("datadir", mysql_real_data_home); sys_var_enum sys_delay_key_write("delay_key_write", &delay_key_write_options, &delay_key_write_typelib, @@ -190,6 +201,15 @@ sys_var_key_cache_long sys_key_cache_age_threshold("key_cache_age_threshold", param_age_threshold)); sys_var_bool_ptr sys_local_infile("local_infile", &opt_local_infile); +sys_var_trust_routine_creators +sys_trust_routine_creators("log_bin_trust_routine_creators", + &trust_function_creators); +sys_var_bool_ptr +sys_trust_function_creators("log_bin_trust_function_creators", + &trust_function_creators); +sys_var_bool_ptr + sys_log_queries_not_using_indexes("log_queries_not_using_indexes", + &opt_log_queries_not_using_indexes); sys_var_thd_ulong sys_log_warnings("log_warnings", &SV::log_warnings); sys_var_thd_ulong sys_long_query_time("long_query_time", &SV::long_query_time); @@ -223,7 +243,7 @@ sys_var_thd_ulong sys_max_delayed_threads("max_delayed_threads", fix_max_connections); sys_var_thd_ulong sys_max_error_count("max_error_count", &SV::max_error_count); -sys_var_thd_ulong sys_max_heap_table_size("max_heap_table_size", +sys_var_thd_ulonglong sys_max_heap_table_size("max_heap_table_size", &SV::max_heap_table_size); sys_var_thd_ulong sys_pseudo_thread_id("pseudo_thread_id", &SV::pseudo_thread_id, @@ -249,15 +269,17 @@ sys_var_long_ptr sys_max_relay_log_size("max_relay_log_size", fix_max_relay_log_size); sys_var_thd_ulong sys_max_sort_length("max_sort_length", &SV::max_sort_length); -sys_var_long_ptr sys_max_user_connections("max_user_connections", - &max_user_connections); +sys_var_thd_ulong sys_max_sp_recursion_depth("max_sp_recursion_depth", + &SV::max_sp_recursion_depth); +sys_var_max_user_conn sys_max_user_connections("max_user_connections"); sys_var_thd_ulong sys_max_tmp_tables("max_tmp_tables", &SV::max_tmp_tables); sys_var_long_ptr sys_max_write_lock_count("max_write_lock_count", &max_write_lock_count); 
+sys_var_thd_ulong sys_multi_range_count("multi_range_count", + &SV::multi_range_count); sys_var_long_ptr sys_myisam_data_pointer_size("myisam_data_pointer_size", &myisam_data_pointer_size); -sys_var_thd_ulonglong sys_myisam_max_extra_sort_file_size("myisam_max_extra_sort_file_size", &SV::myisam_max_extra_sort_file_size, fix_myisam_max_extra_sort_file_size, 1); sys_var_thd_ulonglong sys_myisam_max_sort_file_size("myisam_max_sort_file_size", &SV::myisam_max_sort_file_size, fix_myisam_max_sort_file_size, 1); sys_var_thd_ulong sys_myisam_repair_threads("myisam_repair_threads", &SV::myisam_repair_threads); sys_var_thd_ulong sys_myisam_sort_buffer_size("myisam_sort_buffer_size", &SV::myisam_sort_buff_size); @@ -280,6 +302,10 @@ sys_var_thd_ulong sys_net_retry_count("net_retry_count", 0, fix_net_retry_count); sys_var_thd_bool sys_new_mode("new", &SV::new_mode); sys_var_thd_bool sys_old_passwords("old_passwords", &SV::old_passwords); +sys_var_thd_ulong sys_optimizer_prune_level("optimizer_prune_level", + &SV::optimizer_prune_level); +sys_var_thd_ulong sys_optimizer_search_depth("optimizer_search_depth", + &SV::optimizer_search_depth); sys_var_thd_ulong sys_preload_buff_size("preload_buffer_size", &SV::preload_buff_size); sys_var_thd_ulong sys_read_buff_size("read_buffer_size", @@ -287,6 +313,8 @@ sys_var_thd_ulong sys_read_buff_size("read_buffer_size", sys_var_bool_ptr sys_readonly("read_only", &opt_readonly); sys_var_thd_ulong sys_read_rnd_buff_size("read_rnd_buffer_size", &SV::read_rnd_buff_size); +sys_var_thd_ulong sys_div_precincrement("div_precision_increment", + &SV::div_precincrement); #ifdef HAVE_REPLICATION sys_var_bool_ptr sys_relay_log_purge("relay_log_purge", &relay_log_purge); @@ -305,6 +333,7 @@ sys_var_thd_ulong sys_query_alloc_block_size("query_alloc_block_size", sys_var_thd_ulong sys_query_prealloc_size("query_prealloc_size", &SV::query_prealloc_size, 0, fix_thd_mem_root); +sys_var_readonly sys_tmpdir("tmpdir", OPT_GLOBAL, SHOW_CHAR, get_tmpdir); 
sys_var_thd_ulong sys_trans_alloc_block_size("transaction_alloc_block_size", &SV::trans_alloc_block_size, 0, fix_trans_mem_root); @@ -326,6 +355,8 @@ sys_query_cache_wlock_invalidate("query_cache_wlock_invalidate", &SV::query_cache_wlock_invalidate); #endif /* HAVE_QUERY_CACHE */ sys_var_bool_ptr sys_secure_auth("secure_auth", &opt_secure_auth); +sys_var_const_str_ptr sys_secure_file_priv("secure_file_priv", + &opt_secure_file_priv); sys_var_long_ptr sys_server_id("server_id", &server_id, fix_server_id); sys_var_bool_ptr sys_slave_compressed_protocol("slave_compressed_protocol", &opt_slave_compressed_protocol); @@ -341,49 +372,99 @@ sys_var_thd_ulong sys_sort_buffer("sort_buffer_size", &SV::sortbuff_size); sys_var_thd_sql_mode sys_sql_mode("sql_mode", &SV::sql_mode); +#ifdef HAVE_OPENSSL +extern char *opt_ssl_ca, *opt_ssl_capath, *opt_ssl_cert, *opt_ssl_cipher, + *opt_ssl_key; +sys_var_const_str_ptr sys_ssl_ca("ssl_ca", &opt_ssl_ca); +sys_var_const_str_ptr sys_ssl_capath("ssl_capath", &opt_ssl_capath); +sys_var_const_str_ptr sys_ssl_cert("ssl_cert", &opt_ssl_cert); +sys_var_const_str_ptr sys_ssl_cipher("ssl_cipher", &opt_ssl_cipher); +sys_var_const_str_ptr sys_ssl_key("ssl_key", &opt_ssl_key); +#else +sys_var_const_str sys_ssl_ca("ssl_ca", NULL); +sys_var_const_str sys_ssl_capath("ssl_capath", NULL); +sys_var_const_str sys_ssl_cert("ssl_cert", NULL); +sys_var_const_str sys_ssl_cipher("ssl_cipher", NULL); +sys_var_const_str sys_ssl_key("ssl_key", NULL); +#endif +sys_var_thd_enum +sys_updatable_views_with_limit("updatable_views_with_limit", + &SV::updatable_views_with_limit, + &updatable_views_with_limit_typelib); + sys_var_thd_table_type sys_table_type("table_type", &SV::table_type); sys_var_thd_storage_engine sys_storage_engine("storage_engine", &SV::table_type); #ifdef HAVE_REPLICATION sys_var_sync_binlog_period sys_sync_binlog_period("sync_binlog", &sync_binlog_period); -sys_var_thd_ulong sys_sync_replication("sync_replication", - &SV::sync_replication); 
-sys_var_thd_ulong sys_sync_replication_slave_id( - "sync_replication_slave_id", - &SV::sync_replication_slave_id); -sys_var_thd_ulong sys_sync_replication_timeout( - "sync_replication_timeout", - &SV::sync_replication_timeout); #endif sys_var_bool_ptr sys_sync_frm("sync_frm", &opt_sync_frm); +sys_var_const_str sys_system_time_zone("system_time_zone", + system_time_zone); sys_var_long_ptr sys_table_cache_size("table_cache", &table_cache_size); +sys_var_long_ptr sys_table_lock_wait_timeout("table_lock_wait_timeout", + &table_lock_wait_timeout); sys_var_long_ptr sys_thread_cache_size("thread_cache_size", &thread_cache_size); sys_var_thd_enum sys_tx_isolation("tx_isolation", &SV::tx_isolation, &tx_isolation_typelib, fix_tx_isolation); -sys_var_thd_ulong sys_tmp_table_size("tmp_table_size", +sys_var_thd_ulonglong sys_tmp_table_size("tmp_table_size", &SV::tmp_table_size); +sys_var_bool_ptr sys_timed_mutexes("timed_mutexes", + &timed_mutexes); +sys_var_const_str sys_version("version", server_version); +#ifdef HAVE_BERKELEY_DB +sys_var_const_str sys_version_bdb("version_bdb", DB_VERSION_STRING); +#endif +sys_var_const_str sys_version_comment("version_comment", + MYSQL_COMPILATION_COMMENT); +sys_var_const_str sys_version_compile_machine("version_compile_machine", + MACHINE_TYPE); +sys_var_const_str sys_version_compile_os("version_compile_os", + SYSTEM_TYPE); sys_var_thd_ulong sys_net_wait_timeout("wait_timeout", &SV::net_wait_timeout); #ifdef HAVE_INNOBASE_DB +sys_var_long_ptr sys_innodb_fast_shutdown("innodb_fast_shutdown", + &innobase_fast_shutdown); sys_var_long_ptr sys_innodb_max_dirty_pages_pct("innodb_max_dirty_pages_pct", &srv_max_buf_pool_modified_pct); sys_var_long_ptr sys_innodb_max_purge_lag("innodb_max_purge_lag", &srv_max_purge_lag); sys_var_thd_bool sys_innodb_table_locks("innodb_table_locks", &SV::innodb_table_locks); +sys_var_thd_bool sys_innodb_support_xa("innodb_support_xa", + &SV::innodb_support_xa); sys_var_long_ptr 
sys_innodb_autoextend_increment("innodb_autoextend_increment", &srv_auto_extend_increment); +sys_var_long_ptr sys_innodb_sync_spin_loops("innodb_sync_spin_loops", + &srv_n_spin_wait_rounds); +sys_var_long_ptr sys_innodb_concurrency_tickets("innodb_concurrency_tickets", + &srv_n_free_tickets_to_enter); +sys_var_long_ptr sys_innodb_thread_sleep_delay("innodb_thread_sleep_delay", + &srv_thread_sleep_delay); +sys_var_long_ptr sys_innodb_thread_concurrency("innodb_thread_concurrency", + &srv_thread_concurrency); +sys_var_long_ptr sys_innodb_commit_concurrency("innodb_commit_concurrency", + &srv_commit_concurrency); +sys_var_long_ptr sys_innodb_flush_log_at_trx_commit( + "innodb_flush_log_at_trx_commit", + &srv_flush_log_at_trx_commit); #endif +/* Condition pushdown to storage engine */ +sys_var_thd_bool +sys_engine_condition_pushdown("engine_condition_pushdown", + &SV::engine_condition_pushdown); + #ifdef HAVE_NDBCLUSTER_DB /* ndb thread specific variable settings */ -sys_var_thd_ulong +sys_var_thd_ulong sys_ndb_autoincrement_prefetch_sz("ndb_autoincrement_prefetch_sz", &SV::ndb_autoincrement_prefetch_sz); sys_var_thd_bool @@ -392,6 +473,8 @@ sys_var_thd_bool sys_ndb_use_exact_count("ndb_use_exact_count", &SV::ndb_use_exact_count); sys_var_thd_bool sys_ndb_use_transactions("ndb_use_transactions", &SV::ndb_use_transactions); +sys_var_long_ptr +sys_ndb_cache_check_time("ndb_cache_check_time", &ndb_cache_check_time); #endif /* Time/date/datetime formats */ @@ -408,10 +491,10 @@ sys_var_thd_date_time_format sys_datetime_format("datetime_format", /* Variables that are bits in THD */ -static sys_var_thd_bit sys_autocommit("autocommit", 0, - set_option_autocommit, - OPTION_NOT_AUTOCOMMIT, - 1); +sys_var_thd_bit sys_autocommit("autocommit", 0, + set_option_autocommit, + OPTION_NOT_AUTOCOMMIT, + 1); static sys_var_thd_bit sys_big_tables("big_tables", 0, set_option_bit, OPTION_BIG_TABLES); @@ -433,8 +516,8 @@ static sys_var_thd_bit sys_log_update("sql_log_update", 
OPTION_UPDATE_LOG); static sys_var_thd_bit sys_log_binlog("sql_log_bin", check_log_update, - set_log_update, - OPTION_BIN_LOG); + set_log_bin, + OPTION_BIN_LOG); static sys_var_thd_bit sys_sql_warnings("sql_warnings", 0, set_option_bit, OPTION_WARNINGS); @@ -499,12 +582,15 @@ sys_var_thd_time_zone sys_time_zone("time_zone"); /* Read only variables */ -sys_var_const_str sys_os("version_compile_os", SYSTEM_TYPE); sys_var_readonly sys_have_innodb("have_innodb", OPT_GLOBAL, SHOW_CHAR, get_have_innodb); /* Global read-only variable describing server license */ sys_var_const_str sys_license("license", STRINGIFY_ARG(LICENSE)); +/* Global read-only variable containing hostname */ +sys_var_const_str sys_hostname("hostname", glob_hostname); + + /* List of all variables for initialisation and storage in hash @@ -517,7 +603,11 @@ sys_var_const_str sys_license("license", STRINGIFY_ARG(LICENSE)); sys_var *sys_variables[]= { &sys_auto_is_null, + &sys_auto_increment_increment, + &sys_auto_increment_offset, &sys_autocommit, + &sys_automatic_sp_privileges, + &sys_basedir, &sys_big_tables, &sys_big_selects, &sys_binlog_cache_size, @@ -528,13 +618,18 @@ sys_var *sys_variables[]= &sys_character_set_client, &sys_character_set_connection, &sys_character_set_results, + &sys_character_set_filesystem, + &sys_charset_system, &sys_collation_connection, &sys_collation_database, &sys_collation_server, + &sys_completion_type, &sys_concurrent_insert, &sys_connect_timeout, + &sys_datadir, &sys_date_format, &sys_datetime_format, + &sys_div_precincrement, &sys_default_week_format, &sys_delay_key_write, &sys_delayed_insert_limit, @@ -548,6 +643,7 @@ sys_var *sys_variables[]= &sys_foreign_key_checks, &sys_group_concat_max_len, &sys_have_innodb, + &sys_hostname, &sys_identity, &sys_init_connect, &sys_init_slave, @@ -564,6 +660,7 @@ sys_var *sys_variables[]= &sys_local_infile, &sys_log_binlog, &sys_log_off, + &sys_log_queries_not_using_indexes, &sys_log_update, &sys_log_warnings, &sys_long_query_time, 
@@ -583,11 +680,12 @@ sys_var *sys_variables[]= &sys_max_relay_log_size, &sys_max_seeks_for_key, &sys_max_sort_length, + &sys_max_sp_recursion_depth, &sys_max_tmp_tables, &sys_max_user_connections, &sys_max_write_lock_count, + &sys_multi_range_count, &sys_myisam_data_pointer_size, - &sys_myisam_max_extra_sort_file_size, &sys_myisam_max_sort_file_size, &sys_myisam_repair_threads, &sys_myisam_sort_buffer_size, @@ -599,6 +697,8 @@ sys_var *sys_variables[]= &sys_net_write_timeout, &sys_new_mode, &sys_old_passwords, + &sys_optimizer_prune_level, + &sys_optimizer_search_depth, &sys_preload_buff_size, &sys_pseudo_thread_id, &sys_query_alloc_block_size, @@ -623,6 +723,7 @@ sys_var *sys_variables[]= &sys_rpl_recovery_rank, &sys_safe_updates, &sys_secure_auth, + &sys_secure_file_priv, &sys_select_limit, &sys_server_id, #ifdef HAVE_REPLICATION @@ -639,50 +740,78 @@ sys_var *sys_variables[]= &sys_sql_mode, &sys_sql_warnings, &sys_sql_notes, + &sys_ssl_ca, + &sys_ssl_capath, + &sys_ssl_cert, + &sys_ssl_cipher, + &sys_ssl_key, &sys_storage_engine, #ifdef HAVE_REPLICATION &sys_sync_binlog_period, - &sys_sync_replication, - &sys_sync_replication_slave_id, - &sys_sync_replication_timeout, #endif &sys_sync_frm, + &sys_system_time_zone, &sys_table_cache_size, + &sys_table_lock_wait_timeout, &sys_table_type, &sys_thread_cache_size, &sys_time_format, + &sys_timed_mutexes, &sys_timestamp, &sys_time_zone, + &sys_tmpdir, &sys_tmp_table_size, &sys_trans_alloc_block_size, &sys_trans_prealloc_size, &sys_tx_isolation, - &sys_os, + &sys_version, +#ifdef HAVE_BERKELEY_DB + &sys_version_bdb, +#endif + &sys_version_comment, + &sys_version_compile_machine, + &sys_version_compile_os, #ifdef HAVE_INNOBASE_DB + &sys_innodb_fast_shutdown, &sys_innodb_max_dirty_pages_pct, &sys_innodb_max_purge_lag, &sys_innodb_table_locks, + &sys_innodb_support_xa, &sys_innodb_max_purge_lag, &sys_innodb_autoextend_increment, -#endif + &sys_innodb_sync_spin_loops, + &sys_innodb_concurrency_tickets, + 
&sys_innodb_thread_sleep_delay, + &sys_innodb_thread_concurrency, + &sys_innodb_commit_concurrency, + &sys_innodb_flush_log_at_trx_commit, +#endif + &sys_trust_routine_creators, + &sys_trust_function_creators, + &sys_engine_condition_pushdown, #ifdef HAVE_NDBCLUSTER_DB &sys_ndb_autoincrement_prefetch_sz, + &sys_ndb_cache_check_time, &sys_ndb_force_send, &sys_ndb_use_exact_count, &sys_ndb_use_transactions, #endif &sys_unique_checks, + &sys_updatable_views_with_limit, &sys_warning_count }; /* - Variables shown by SHOW variables in alphabetical order + Variables shown by SHOW VARIABLES in alphabetical order */ struct show_var_st init_vars[]= { + {"auto_increment_increment", (char*) &sys_auto_increment_increment, SHOW_SYS}, + {"auto_increment_offset", (char*) &sys_auto_increment_offset, SHOW_SYS}, + {sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS}, {"back_log", (char*) &back_log, SHOW_LONG}, - {"basedir", mysql_home, SHOW_CHAR}, + {sys_basedir.name, (char*) &sys_basedir, SHOW_SYS}, #ifdef HAVE_BERKELEY_DB {"bdb_cache_size", (char*) &berkeley_cache_size, SHOW_LONG}, {"bdb_home", (char*) &berkeley_home, SHOW_CHAR_PTR}, @@ -697,6 +826,7 @@ struct show_var_st init_vars[]= { {sys_character_set_client.name,(char*) &sys_character_set_client, SHOW_SYS}, {sys_character_set_connection.name,(char*) &sys_character_set_connection,SHOW_SYS}, {sys_character_set_database.name, (char*) &sys_character_set_database,SHOW_SYS}, + {sys_character_set_filesystem.name,(char*) &sys_character_set_filesystem, SHOW_SYS}, {sys_character_set_results.name,(char*) &sys_character_set_results, SHOW_SYS}, {sys_character_set_server.name, (char*) &sys_character_set_server,SHOW_SYS}, {sys_charset_system.name, (char*) &sys_charset_system, SHOW_SYS}, @@ -704,9 +834,10 @@ struct show_var_st init_vars[]= { {sys_collation_connection.name,(char*) &sys_collation_connection, SHOW_SYS}, {sys_collation_database.name,(char*) &sys_collation_database, SHOW_SYS}, 
{sys_collation_server.name,(char*) &sys_collation_server, SHOW_SYS}, + {sys_completion_type.name, (char*) &sys_completion_type, SHOW_SYS}, {sys_concurrent_insert.name,(char*) &sys_concurrent_insert, SHOW_SYS}, {sys_connect_timeout.name, (char*) &sys_connect_timeout, SHOW_SYS}, - {"datadir", mysql_real_data_home, SHOW_CHAR}, + {sys_datadir.name, (char*) &sys_datadir, SHOW_SYS}, {sys_date_format.name, (char*) &sys_date_format, SHOW_SYS}, {sys_datetime_format.name, (char*) &sys_datetime_format, SHOW_SYS}, {sys_default_week_format.name, (char*) &sys_default_week_format, SHOW_SYS}, @@ -714,6 +845,9 @@ struct show_var_st init_vars[]= { {sys_delayed_insert_limit.name, (char*) &sys_delayed_insert_limit,SHOW_SYS}, {sys_delayed_insert_timeout.name, (char*) &sys_delayed_insert_timeout, SHOW_SYS}, {sys_delayed_queue_size.name,(char*) &sys_delayed_queue_size, SHOW_SYS}, + {sys_div_precincrement.name,(char*) &sys_div_precincrement,SHOW_SYS}, + {sys_engine_condition_pushdown.name, + (char*) &sys_engine_condition_pushdown, SHOW_SYS}, {sys_expire_logs_days.name, (char*) &sys_expire_logs_days, SHOW_SYS}, {sys_flush.name, (char*) &sys_flush, SHOW_SYS}, {sys_flush_time.name, (char*) &sys_flush_time, SHOW_SYS}, @@ -729,17 +863,22 @@ struct show_var_st init_vars[]= { {"have_compress", (char*) &have_compress, SHOW_HAVE}, {"have_crypt", (char*) &have_crypt, SHOW_HAVE}, {"have_csv", (char*) &have_csv_db, SHOW_HAVE}, - {"have_example_engine", (char*) &have_example_db, SHOW_HAVE}, + {"have_dynamic_loading", (char*) &have_dlopen, SHOW_HAVE}, + {"have_example_engine", (char*) &have_example_db, SHOW_HAVE}, + {"have_federated_engine", (char*) &have_federated_db, SHOW_HAVE}, {"have_geometry", (char*) &have_geometry, SHOW_HAVE}, {"have_innodb", (char*) &have_innodb, SHOW_HAVE}, {"have_isam", (char*) &have_isam, SHOW_HAVE}, {"have_merge_engine", (char*) &have_merge_db, SHOW_HAVE}, {"have_ndbcluster", (char*) &have_ndbcluster, SHOW_HAVE}, - {"have_openssl", (char*) &have_openssl, SHOW_HAVE}, + /* 
have_openssl is just and alias for have_ssl */ + {"have_openssl", (char*) &have_ssl, SHOW_HAVE}, + {"have_ssl", (char*) &have_ssl, SHOW_HAVE}, {"have_query_cache", (char*) &have_query_cache, SHOW_HAVE}, {"have_raid", (char*) &have_raid, SHOW_HAVE}, {"have_rtree_keys", (char*) &have_rtree_keys, SHOW_HAVE}, {"have_symlink", (char*) &have_symlink, SHOW_HAVE}, + {sys_hostname.name, (char*) &sys_hostname, SHOW_SYS}, {"init_connect", (char*) &sys_init_connect, SHOW_SYS}, {"init_file", (char*) &opt_init_file, SHOW_CHAR_PTR}, {"init_slave", (char*) &sys_init_slave, SHOW_SYS}, @@ -747,13 +886,17 @@ struct show_var_st init_vars[]= { {"innodb_additional_mem_pool_size", (char*) &innobase_additional_mem_pool_size, SHOW_LONG }, {sys_innodb_autoextend_increment.name, (char*) &sys_innodb_autoextend_increment, SHOW_SYS}, {"innodb_buffer_pool_awe_mem_mb", (char*) &innobase_buffer_pool_awe_mem_mb, SHOW_LONG }, - {"innodb_buffer_pool_size", (char*) &innobase_buffer_pool_size, SHOW_LONG }, + {"innodb_buffer_pool_size", (char*) &innobase_buffer_pool_size, SHOW_LONGLONG }, + {"innodb_checksums", (char*) &innobase_use_checksums, SHOW_MY_BOOL}, + {sys_innodb_commit_concurrency.name, (char*) &sys_innodb_commit_concurrency, SHOW_SYS}, + {sys_innodb_concurrency_tickets.name, (char*) &sys_innodb_concurrency_tickets, SHOW_SYS}, {"innodb_data_file_path", (char*) &innobase_data_file_path, SHOW_CHAR_PTR}, {"innodb_data_home_dir", (char*) &innobase_data_home_dir, SHOW_CHAR_PTR}, - {"innodb_fast_shutdown", (char*) &innobase_fast_shutdown, SHOW_MY_BOOL}, + {"innodb_doublewrite", (char*) &innobase_use_doublewrite, SHOW_MY_BOOL}, + {sys_innodb_fast_shutdown.name,(char*) &sys_innodb_fast_shutdown, SHOW_SYS}, {"innodb_file_io_threads", (char*) &innobase_file_io_threads, SHOW_LONG }, {"innodb_file_per_table", (char*) &innobase_file_per_table, SHOW_MY_BOOL}, - {"innodb_flush_log_at_trx_commit", (char*) &innobase_flush_log_at_trx_commit, SHOW_INT}, + {sys_innodb_flush_log_at_trx_commit.name, (char*) 
&sys_innodb_flush_log_at_trx_commit, SHOW_SYS}, {"innodb_flush_method", (char*) &innobase_unix_file_flush_method, SHOW_CHAR_PTR}, {"innodb_force_recovery", (char*) &innobase_force_recovery, SHOW_LONG }, {"innodb_lock_wait_timeout", (char*) &innobase_lock_wait_timeout, SHOW_LONG }, @@ -761,15 +904,19 @@ struct show_var_st init_vars[]= { {"innodb_log_arch_dir", (char*) &innobase_log_arch_dir, SHOW_CHAR_PTR}, {"innodb_log_archive", (char*) &innobase_log_archive, SHOW_MY_BOOL}, {"innodb_log_buffer_size", (char*) &innobase_log_buffer_size, SHOW_LONG }, - {"innodb_log_file_size", (char*) &innobase_log_file_size, SHOW_LONG}, + {"innodb_log_file_size", (char*) &innobase_log_file_size, SHOW_LONGLONG}, {"innodb_log_files_in_group", (char*) &innobase_log_files_in_group, SHOW_LONG}, {"innodb_log_group_home_dir", (char*) &innobase_log_group_home_dir, SHOW_CHAR_PTR}, {sys_innodb_max_dirty_pages_pct.name, (char*) &sys_innodb_max_dirty_pages_pct, SHOW_SYS}, {sys_innodb_max_purge_lag.name, (char*) &sys_innodb_max_purge_lag, SHOW_SYS}, {"innodb_mirrored_log_groups", (char*) &innobase_mirrored_log_groups, SHOW_LONG}, {"innodb_open_files", (char*) &innobase_open_files, SHOW_LONG }, + {"innodb_rollback_on_timeout", (char*) &innobase_rollback_on_timeout, SHOW_MY_BOOL}, + {sys_innodb_support_xa.name, (char*) &sys_innodb_support_xa, SHOW_SYS}, + {sys_innodb_sync_spin_loops.name, (char*) &sys_innodb_sync_spin_loops, SHOW_SYS}, {sys_innodb_table_locks.name, (char*) &sys_innodb_table_locks, SHOW_SYS}, - {"innodb_thread_concurrency", (char*) &innobase_thread_concurrency, SHOW_LONG }, + {sys_innodb_thread_concurrency.name, (char*) &sys_innodb_thread_concurrency, SHOW_SYS}, + {sys_innodb_thread_sleep_delay.name, (char*) &sys_innodb_thread_sleep_delay, SHOW_SYS}, #endif {sys_interactive_timeout.name,(char*) &sys_interactive_timeout, SHOW_SYS}, {sys_join_buffer_size.name, (char*) &sys_join_buffer_size, SHOW_SYS}, @@ -782,6 +929,8 @@ struct show_var_st init_vars[]= { SHOW_SYS}, {"language", 
language, SHOW_CHAR}, {"large_files_support", (char*) &opt_large_files, SHOW_BOOL}, + {"large_page_size", (char*) &opt_large_page_size, SHOW_INT}, + {"large_pages", (char*) &opt_large_pages, SHOW_MY_BOOL}, {sys_lc_time_names.name, (char*) &sys_lc_time_names, SHOW_SYS}, {sys_license.name, (char*) &sys_license, SHOW_SYS}, {sys_local_infile.name, (char*) &sys_local_infile, SHOW_SYS}, @@ -790,12 +939,14 @@ struct show_var_st init_vars[]= { #endif {"log", (char*) &opt_log, SHOW_BOOL}, {"log_bin", (char*) &opt_bin_log, SHOW_BOOL}, + {sys_trust_function_creators.name,(char*) &sys_trust_function_creators, SHOW_SYS}, {"log_error", (char*) log_error_file, SHOW_CHAR}, + {sys_log_queries_not_using_indexes.name, + (char*) &sys_log_queries_not_using_indexes, SHOW_SYS}, #ifdef HAVE_REPLICATION {"log_slave_updates", (char*) &opt_log_slave_updates, SHOW_MY_BOOL}, #endif {"log_slow_queries", (char*) &opt_slow_log, SHOW_BOOL}, - {"log_update", (char*) &opt_update_log, SHOW_BOOL}, {sys_log_warnings.name, (char*) &sys_log_warnings, SHOW_SYS}, {sys_long_query_time.name, (char*) &sys_long_query_time, SHOW_SYS}, {sys_low_priority_updates.name, (char*) &sys_low_priority_updates, SHOW_SYS}, @@ -819,22 +970,22 @@ struct show_var_st init_vars[]= { {sys_max_relay_log_size.name, (char*) &sys_max_relay_log_size, SHOW_SYS}, {sys_max_seeks_for_key.name, (char*) &sys_max_seeks_for_key, SHOW_SYS}, {sys_max_sort_length.name, (char*) &sys_max_sort_length, SHOW_SYS}, + {sys_max_sp_recursion_depth.name, + (char*) &sys_max_sp_recursion_depth, SHOW_SYS}, {sys_max_tmp_tables.name, (char*) &sys_max_tmp_tables, SHOW_SYS}, {sys_max_user_connections.name,(char*) &sys_max_user_connections, SHOW_SYS}, {sys_max_write_lock_count.name, (char*) &sys_max_write_lock_count,SHOW_SYS}, + {sys_multi_range_count.name, (char*) &sys_multi_range_count, SHOW_SYS}, {sys_myisam_data_pointer_size.name, (char*) &sys_myisam_data_pointer_size, SHOW_SYS}, - {sys_myisam_max_extra_sort_file_size.name, - (char*) 
&sys_myisam_max_extra_sort_file_size, - SHOW_SYS}, {sys_myisam_max_sort_file_size.name, (char*) &sys_myisam_max_sort_file_size, SHOW_SYS}, {"myisam_recover_options", (char*) &myisam_recover_options_str, SHOW_CHAR_PTR}, {sys_myisam_repair_threads.name, (char*) &sys_myisam_repair_threads, SHOW_SYS}, {sys_myisam_sort_buffer_size.name, (char*) &sys_myisam_sort_buffer_size, SHOW_SYS}, - + {sys_myisam_stats_method.name, (char*) &sys_myisam_stats_method, SHOW_SYS}, - + #ifdef __NT__ {"named_pipe", (char*) &opt_enable_named_pipe, SHOW_MY_BOOL}, #endif @@ -844,6 +995,7 @@ struct show_var_st init_vars[]= { {sys_ndb_force_send.name, (char*) &sys_ndb_force_send, SHOW_SYS}, {sys_ndb_use_exact_count.name,(char*) &sys_ndb_use_exact_count, SHOW_SYS}, {sys_ndb_use_transactions.name,(char*) &sys_ndb_use_transactions, SHOW_SYS}, + {sys_ndb_cache_check_time.name,(char*) &sys_ndb_cache_check_time, SHOW_SYS}, #endif {sys_net_buffer_length.name,(char*) &sys_net_buffer_length, SHOW_SYS}, {sys_net_read_timeout.name, (char*) &sys_net_read_timeout, SHOW_SYS}, @@ -852,6 +1004,10 @@ struct show_var_st init_vars[]= { {sys_new_mode.name, (char*) &sys_new_mode, SHOW_SYS}, {sys_old_passwords.name, (char*) &sys_old_passwords, SHOW_SYS}, {"open_files_limit", (char*) &open_files_limit, SHOW_LONG}, + {sys_optimizer_prune_level.name, (char*) &sys_optimizer_prune_level, + SHOW_SYS}, + {sys_optimizer_search_depth.name,(char*) &sys_optimizer_search_depth, + SHOW_SYS}, {"pid_file", (char*) pidfile_name, SHOW_CHAR}, {"port", (char*) &mysqld_port, SHOW_INT}, {sys_preload_buff_size.name, (char*) &sys_preload_buff_size, SHOW_SYS}, @@ -879,6 +1035,7 @@ struct show_var_st init_vars[]= { #endif {sys_rpl_recovery_rank.name,(char*) &sys_rpl_recovery_rank, SHOW_SYS}, {"secure_auth", (char*) &sys_secure_auth, SHOW_SYS}, + {"secure_file_priv", (char*) &sys_secure_file_priv, SHOW_SYS}, #ifdef HAVE_SMEM {"shared_memory", (char*) &opt_enable_shared_memory, SHOW_MY_BOOL}, {"shared_memory_base_name", (char*) 
&shared_memory_base_name, SHOW_CHAR_PTR}, @@ -888,31 +1045,37 @@ struct show_var_st init_vars[]= { {"skip_networking", (char*) &opt_disable_networking, SHOW_BOOL}, {"skip_show_database", (char*) &opt_skip_show_db, SHOW_BOOL}, #ifdef HAVE_REPLICATION + {sys_slave_compressed_protocol.name, + (char*) &sys_slave_compressed_protocol, SHOW_SYS}, + {"slave_load_tmpdir", (char*) &slave_load_tmpdir, SHOW_CHAR_PTR}, {sys_slave_net_timeout.name,(char*) &sys_slave_net_timeout, SHOW_SYS}, + {"slave_skip_errors", (char*) &slave_error_mask, SHOW_SLAVE_SKIP_ERRORS}, {sys_slave_trans_retries.name,(char*) &sys_slave_trans_retries, SHOW_SYS}, #endif {sys_slow_launch_time.name, (char*) &sys_slow_launch_time, SHOW_SYS}, #ifdef HAVE_SYS_UN_H {"socket", (char*) &mysqld_unix_port, SHOW_CHAR_PTR}, #endif - {sys_sort_buffer.name, (char*) &sys_sort_buffer, SHOW_SYS}, + {sys_sort_buffer.name, (char*) &sys_sort_buffer, SHOW_SYS}, + {sys_big_selects.name, (char*) &sys_big_selects, SHOW_SYS}, {sys_sql_mode.name, (char*) &sys_sql_mode, SHOW_SYS}, - {"sql_notes", (char*) &sys_sql_notes, SHOW_BOOL}, - {"sql_warnings", (char*) &sys_sql_warnings, SHOW_BOOL}, + {"sql_notes", (char*) &sys_sql_notes, SHOW_SYS}, + {"sql_warnings", (char*) &sys_sql_warnings, SHOW_SYS}, + {sys_ssl_ca.name, (char*) &sys_ssl_ca, SHOW_SYS}, + {sys_ssl_capath.name, (char*) &sys_ssl_capath, SHOW_SYS}, + {sys_ssl_cert.name, (char*) &sys_ssl_cert, SHOW_SYS}, + {sys_ssl_cipher.name, (char*) &sys_ssl_cipher, SHOW_SYS}, + {sys_ssl_key.name, (char*) &sys_ssl_key, SHOW_SYS}, {sys_storage_engine.name, (char*) &sys_storage_engine, SHOW_SYS}, #ifdef HAVE_REPLICATION {sys_sync_binlog_period.name,(char*) &sys_sync_binlog_period, SHOW_SYS}, #endif {sys_sync_frm.name, (char*) &sys_sync_frm, SHOW_SYS}, -#ifdef HAVE_REPLICATION - {sys_sync_replication.name, (char*) &sys_sync_replication, SHOW_SYS}, - {sys_sync_replication_slave_id.name, (char*) &sys_sync_replication_slave_id,SHOW_SYS}, - {sys_sync_replication_timeout.name, (char*) 
&sys_sync_replication_timeout,SHOW_SYS}, -#endif #ifdef HAVE_TZNAME {"system_time_zone", system_time_zone, SHOW_CHAR}, #endif {"table_cache", (char*) &table_cache_size, SHOW_LONG}, + {"table_lock_wait_timeout", (char*) &table_lock_wait_timeout, SHOW_LONG }, {sys_table_type.name, (char*) &sys_table_type, SHOW_SYS}, {sys_thread_cache_size.name,(char*) &sys_thread_cache_size, SHOW_SYS}, #ifdef HAVE_THR_SETCONCURRENCY @@ -921,19 +1084,23 @@ struct show_var_st init_vars[]= { {"thread_stack", (char*) &thread_stack, SHOW_LONG}, {sys_time_format.name, (char*) &sys_time_format, SHOW_SYS}, {"time_zone", (char*) &sys_time_zone, SHOW_SYS}, + {sys_timed_mutexes.name, (char*) &sys_timed_mutexes, SHOW_SYS}, {sys_tmp_table_size.name, (char*) &sys_tmp_table_size, SHOW_SYS}, - {"tmpdir", (char*) &opt_mysql_tmpdir, SHOW_CHAR_PTR}, + {sys_tmpdir.name, (char*) &sys_tmpdir, SHOW_SYS}, {sys_trans_alloc_block_size.name, (char*) &sys_trans_alloc_block_size, SHOW_SYS}, {sys_trans_prealloc_size.name, (char*) &sys_trans_prealloc_size, SHOW_SYS}, {sys_tx_isolation.name, (char*) &sys_tx_isolation, SHOW_SYS}, - {"version", server_version, SHOW_CHAR}, + {sys_updatable_views_with_limit.name, + (char*) &sys_updatable_views_with_limit,SHOW_SYS}, + {sys_version.name, (char*) &sys_version, SHOW_SYS}, #ifdef HAVE_BERKELEY_DB - {"version_bdb", (char*) DB_VERSION_STRING, SHOW_CHAR}, + {sys_version_bdb.name, (char*) &sys_version_bdb, SHOW_SYS}, #endif - {"version_comment", (char*) MYSQL_COMPILATION_COMMENT, SHOW_CHAR}, - {"version_compile_machine", (char*) MACHINE_TYPE, SHOW_CHAR}, - {sys_os.name, (char*) &sys_os, SHOW_SYS}, + {sys_version_comment.name, (char*) &sys_version_comment, SHOW_SYS}, + {sys_version_compile_machine.name, (char*) &sys_version_compile_machine, + SHOW_SYS}, + {sys_version_compile_os.name, (char*) &sys_version_compile_os, SHOW_SYS}, {sys_net_wait_timeout.name, (char*) &sys_net_wait_timeout, SHOW_SYS}, {NullS, NullS, SHOW_LONG} }; @@ -952,8 +1119,8 @@ bool sys_var_str::check(THD *thd, 
set_var *var) return 0; if ((res=(*check_func)(thd, var)) < 0) - my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, - var->value->str_value.ptr()); + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), + name, var->value->str_value.ptr()); return res; } @@ -977,7 +1144,7 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex, uint new_length= (var ? var->value->str_value.length() : 0); if (!old_value) old_value= (char*) ""; - if (!(res= my_strdup_with_length((byte*)old_value, new_length, MYF(0)))) + if (!(res= my_strdup_with_length(old_value, new_length, MYF(0)))) return 1; /* Replace the old value in such a way that the any thread using @@ -1018,9 +1185,10 @@ static void sys_default_init_slave(THD* thd, enum_var_type type) static int sys_check_ftb_syntax(THD *thd, set_var *var) { - if (thd->master_access & SUPER_ACL) - return ft_boolean_check_syntax_string((byte*) var->value->str_value.c_ptr()) ? - -1 : 0; + if (thd->security_ctx->master_access & SUPER_ACL) + return (ft_boolean_check_syntax_string((byte*) + var->value->str_value.c_ptr()) ? 
+ -1 : 0); else { my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); @@ -1041,27 +1209,6 @@ static void sys_default_ftb_syntax(THD *thd, enum_var_type type) sizeof(ft_boolean_syntax)-1); } -/* - The following 3 functions need to be changed in 4.1 when we allow - one to change character sets -*/ - -static int sys_check_charset(THD *thd, set_var *var) -{ - return 0; -} - - -static bool sys_update_charset(THD *thd, set_var *var) -{ - return 0; -} - - -static void sys_set_default_charset(THD *thd, enum_var_type type) -{ -} - /* If one sets the LOW_PRIORIY UPDATES flag, we also must change the @@ -1077,14 +1224,6 @@ static void fix_low_priority_updates(THD *thd, enum_var_type type) static void -fix_myisam_max_extra_sort_file_size(THD *thd, enum_var_type type) -{ - myisam_max_extra_temp_length= - (my_off_t) global_system_variables.myisam_max_extra_sort_file_size; -} - - -static void fix_myisam_max_sort_file_size(THD *thd, enum_var_type type) { myisam_max_temp_length= @@ -1119,6 +1258,21 @@ static void fix_tx_isolation(THD *thd, enum_var_type type) thd->variables.tx_isolation); } +static void fix_completion_type(THD *thd __attribute__((unused)), + enum_var_type type __attribute__((unused))) {} + +static int check_completion_type(THD *thd, set_var *var) +{ + longlong val= var->value->val_int(); + if (val < 0 || val > 2) + { + char buf[64]; + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), var->var->name, llstr(val, buf)); + return 1; + } + return 0; +} + /* If we are changing the thread variable, we have to copy it to NET too @@ -1172,7 +1326,7 @@ static void fix_query_cache_size(THD *thd, enum_var_type type) #ifdef HAVE_QUERY_CACHE static void fix_query_cache_min_res_unit(THD *thd, enum_var_type type) { - query_cache_min_res_unit= + query_cache_min_res_unit= query_cache.set_min_res_unit(query_cache_min_res_unit); } #endif @@ -1233,11 +1387,10 @@ static int check_max_delayed_threads(THD *thd, set_var *var) return 0; } - static void fix_max_connections(THD *thd, 
enum_var_type type) { #ifndef EMBEDDED_LIBRARY - resize_thr_alarm(max_connections + + resize_thr_alarm(max_connections + global_system_variables.max_insert_delayed_threads + 10); #endif } @@ -1254,10 +1407,12 @@ static void fix_thd_mem_root(THD *thd, enum_var_type type) static void fix_trans_mem_root(THD *thd, enum_var_type type) { +#ifdef USING_TRANSACTIONS if (type != OPT_GLOBAL) reset_root_defaults(&thd->transaction.mem_root, thd->variables.trans_alloc_block_size, thd->variables.trans_prealloc_size); +#endif } @@ -1268,9 +1423,9 @@ static void fix_server_id(THD *thd, enum_var_type type) sys_var_long_ptr:: -sys_var_long_ptr(const char *name_arg, ulong *value_ptr, +sys_var_long_ptr(const char *name_arg, ulong *value_ptr_arg, sys_after_update_func after_update_arg) - :sys_var_long_ptr_global(name_arg, value_ptr, + :sys_var_long_ptr_global(name_arg, value_ptr_arg, &LOCK_global_system_variables, after_update_arg) {} @@ -1363,6 +1518,12 @@ bool sys_var_thd_ulong::update(THD *thd, set_var *var) if ((ulong) tmp > max_system_variables.*offset) tmp= max_system_variables.*offset; +#if SIZEOF_LONG == 4 + /* Avoid overflows on 32 bit systems */ + if (tmp > (ulonglong) ~(ulong) 0) + tmp= ((ulonglong) ~(ulong) 0); +#endif + if (option_limits) tmp= (ulong) getopt_ull_limit_value(tmp, option_limits); if (var->type == OPT_GLOBAL) @@ -1407,7 +1568,7 @@ bool sys_var_thd_ha_rows::update(THD *thd, set_var *var) if (var->type == OPT_GLOBAL) { /* Lock is needed to make things safe on 32 bit systems */ - pthread_mutex_lock(&LOCK_global_system_variables); + pthread_mutex_lock(&LOCK_global_system_variables); global_system_variables.*offset= (ha_rows) tmp; pthread_mutex_unlock(&LOCK_global_system_variables); } @@ -1513,7 +1674,7 @@ byte *sys_var_thd_bool::value_ptr(THD *thd, enum_var_type type, bool sys_var::check_enum(THD *thd, set_var *var, TYPELIB *enum_names) { - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; const char *value; String str(buff, sizeof(buff), 
system_charset_info), *res; @@ -1550,7 +1711,7 @@ err: bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names) { bool not_used; - char buff[80], *error= 0; + char buff[STRING_BUFFER_USUAL_SIZE], *error= 0; uint error_len= 0; String str(buff, sizeof(buff), system_charset_info), *res; @@ -1576,7 +1737,12 @@ bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names) else { ulonglong tmp= var->value->val_int(); - if (tmp >= enum_names->count) + /* + For when the enum is made to contain 64 elements, as 1ULL<<64 is + undefined, we guard with a "count<64" test. + */ + if (unlikely((tmp >= ((ULL(1)) << enum_names->count)) && + (enum_names->count < 64))) { llstr(tmp, buff); goto err; @@ -1609,14 +1775,22 @@ Item *sys_var::item(THD *thd, enum_var_type var_type, LEX_STRING *base) /* As there was no local variable, return the global value */ var_type= OPT_GLOBAL; } - switch (type()) { + switch (show_type()) { + case SHOW_INT: + { + uint value; + pthread_mutex_lock(&LOCK_global_system_variables); + value= *(uint*) value_ptr(thd, var_type, base); + pthread_mutex_unlock(&LOCK_global_system_variables); + return new Item_uint((ulonglong) value); + } case SHOW_LONG: { ulong value; pthread_mutex_lock(&LOCK_global_system_variables); value= *(ulong*) value_ptr(thd, var_type, base); pthread_mutex_unlock(&LOCK_global_system_variables); - return new Item_uint((int32) value); + return new Item_uint((ulonglong) value); } case SHOW_LONGLONG: { @@ -1755,7 +1929,7 @@ bool sys_var_thd_date_time_format::update(THD *thd, set_var *var) bool sys_var_thd_date_time_format::check(THD *thd, set_var *var) { - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; String str(buff,sizeof(buff), system_charset_info), *res; DATE_TIME_FORMAT *format; @@ -1768,7 +1942,7 @@ bool sys_var_thd_date_time_format::check(THD *thd, set_var *var) my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, res->c_ptr()); return 1; } - + /* We must copy result to thread space to not get a memory leak if update 
is aborted @@ -1825,7 +1999,7 @@ typedef struct old_names_map_st const char *new_name; } my_old_conv; -static my_old_conv old_conv[]= +static my_old_conv old_conv[]= { { "cp1251_koi8" , "cp1251" }, { "cp1250_latin2" , "cp1250" }, @@ -1843,7 +2017,7 @@ static my_old_conv old_conv[]= CHARSET_INFO *get_old_charset_by_name(const char *name) { my_old_conv *conv; - + for (conv= old_conv; conv->old_name; conv++) { if (!my_strcasecmp(&my_charset_latin1, name, conv->old_name)) @@ -1859,7 +2033,7 @@ bool sys_var_collation::check(THD *thd, set_var *var) if (var->value->result_type() == STRING_RESULT) { - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; String str(buff,sizeof(buff), system_charset_info), *res; if (!(res=var->value->val_str(&str))) { @@ -1893,7 +2067,7 @@ bool sys_var_character_set::check(THD *thd, set_var *var) if (var->value->result_type() == STRING_RESULT) { - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; String str(buff,sizeof(buff), system_charset_info), *res; if (!(res=var->value->val_str(&str))) { @@ -1989,6 +2163,32 @@ void sys_var_character_set_client::set_default(THD *thd, enum_var_type type) CHARSET_INFO ** +sys_var_character_set_filesystem::ci_ptr(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + return &global_system_variables.character_set_filesystem; + else + return &thd->variables.character_set_filesystem; +} + + +extern CHARSET_INFO *character_set_filesystem; + +void +sys_var_character_set_filesystem::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.character_set_filesystem= character_set_filesystem; + else + { + thd->variables.character_set_filesystem= (global_system_variables. 
+ character_set_filesystem); + thd->update_charset(); + } +} + + +CHARSET_INFO ** sys_var_character_set_results::ci_ptr(THD *thd, enum_var_type type) { if (type == OPT_GLOBAL) @@ -2032,21 +2232,6 @@ void sys_var_character_set_server::set_default(THD *thd, enum_var_type type) } } -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) -bool sys_var_character_set_server::check(THD *thd, set_var *var) -{ - if ((var->type == OPT_GLOBAL) && - (mysql_bin_log.is_open() || - active_mi->slave_running || active_mi->rli.slave_running)) - { - my_printf_error(0, "Binary logging and replication forbid changing \ -the global server character set or collation", MYF(0)); - return 1; - } - return sys_var_character_set::check(thd,var); -} -#endif - CHARSET_INFO ** sys_var_character_set_database::ci_ptr(THD *thd, enum_var_type type) { @@ -2139,20 +2324,6 @@ void sys_var_collation_database::set_default(THD *thd, enum_var_type type) } } -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) -bool sys_var_collation_server::check(THD *thd, set_var *var) -{ - if ((var->type == OPT_GLOBAL) && - (mysql_bin_log.is_open() || - active_mi->slave_running || active_mi->rli.slave_running)) - { - my_printf_error(0, "Binary logging and replication forbid changing \ -the global server character set or collation", MYF(0)); - return 1; - } - return sys_var_collation::check(thd,var); -} -#endif bool sys_var_collation_server::update(THD *thd, set_var *var) { @@ -2227,7 +2398,7 @@ bool sys_var_key_buffer_size::update(THD *thd, set_var *var) pthread_mutex_lock(&LOCK_global_system_variables); key_cache= get_key_cache(base_name); - + if (!key_cache) { /* Key cache didn't exists */ @@ -2251,7 +2422,12 @@ bool sys_var_key_buffer_size::update(THD *thd, set_var *var) if (!tmp) // Zero size means delete { if (key_cache == dflt_key_cache) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_CANT_DROP_DEFAULT_KEYCACHE, + ER(ER_WARN_CANT_DROP_DEFAULT_KEYCACHE)); goto end; // Ignore 
default key cache + } if (key_cache->key_cache_inited) // If initied { @@ -2259,7 +2435,7 @@ bool sys_var_key_buffer_size::update(THD *thd, set_var *var) Move tables using this key cache to the default key cache and clear the old key cache. */ - NAMED_LIST *list; + NAMED_LIST *list; key_cache= (KEY_CACHE *) find_named(&key_caches, base_name->str, base_name->length, &list); key_cache->in_init= 1; @@ -2288,7 +2464,7 @@ bool sys_var_key_buffer_size::update(THD *thd, set_var *var) error= (bool)(ha_resize_key_cache(key_cache)); pthread_mutex_lock(&LOCK_global_system_variables); - key_cache->in_init= 0; + key_cache->in_init= 0; end: pthread_mutex_unlock(&LOCK_global_system_variables); @@ -2337,7 +2513,7 @@ bool sys_var_key_cache_long::update(THD *thd, set_var *var) error= (bool) (ha_resize_key_cache(key_cache)); pthread_mutex_lock(&LOCK_global_system_variables); - key_cache->in_init= 0; + key_cache->in_init= 0; end: pthread_mutex_unlock(&LOCK_global_system_variables); @@ -2398,11 +2574,17 @@ bool sys_var_last_insert_id::update(THD *thd, set_var *var) byte *sys_var_last_insert_id::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { - /* - As this statement reads @@LAST_INSERT_ID, set - THD::last_insert_id_used. - */ - thd->last_insert_id_used= TRUE; + if (!thd->last_insert_id_used) + { + /* + As this statement reads @@LAST_INSERT_ID, set + THD::last_insert_id_used and remember first generated insert id + of the previous statement in THD::current_insert_id. 
+ */ + thd->last_insert_id_used= TRUE; + thd->last_insert_id_used_bin_log= TRUE; + thd->current_insert_id= thd->last_insert_id; + } return (byte*) &thd->current_insert_id; } @@ -2417,7 +2599,7 @@ bool sys_var_insert_id::update(THD *thd, set_var *var) byte *sys_var_insert_id::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { - return (byte*) &thd->current_insert_id; + return (byte*) &thd->next_insert_id; } @@ -2429,7 +2611,7 @@ bool sys_var_slave_skip_counter::check(THD *thd, set_var *var) pthread_mutex_lock(&active_mi->rli.run_lock); if (active_mi->rli.slave_running) { - my_error(ER_SLAVE_MUST_STOP, MYF(0)); + my_message(ER_SLAVE_MUST_STOP, ER(ER_SLAVE_MUST_STOP), MYF(0)); result=1; } pthread_mutex_unlock(&active_mi->rli.run_lock); @@ -2462,16 +2644,7 @@ bool sys_var_slave_skip_counter::update(THD *thd, set_var *var) bool sys_var_sync_binlog_period::update(THD *thd, set_var *var) { - pthread_mutex_t *lock_log= mysql_bin_log.get_log_lock(); sync_binlog_period= (ulong) var->save_result.ulonglong_value; - /* - Must reset the counter otherwise it may already be beyond the new period - and so the new period will not be taken into account. Need mutex otherwise - might be cancelled by a simultanate ++ in MYSQL_LOG::write(). 
- */ - pthread_mutex_lock(lock_log); - sync_binlog_counter= 0; - pthread_mutex_unlock(lock_log); return 0; } #endif /* HAVE_REPLICATION */ @@ -2491,23 +2664,12 @@ bool sys_var_rand_seed2::update(THD *thd, set_var *var) bool sys_var_thd_time_zone::check(THD *thd, set_var *var) { - char buff[MAX_TIME_ZONE_NAME_LENGTH]; + char buff[MAX_TIME_ZONE_NAME_LENGTH]; String str(buff, sizeof(buff), &my_charset_latin1); String *res= var->value->val_str(&str); -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) - if ((var->type == OPT_GLOBAL) && - (mysql_bin_log.is_open() || - active_mi->slave_running || active_mi->rli.slave_running)) - { - my_printf_error(0, "Binary logging and replication forbid changing " - "of the global server time zone", MYF(0)); - return 1; - } -#endif - if (!(var->save_result.time_zone= - my_tz_find(res, thd->lex->time_zone_tables_used))) + my_tz_find(res, thd->lex->time_zone_tables_used))) { my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), res ? res->c_ptr() : "NULL"); return 1; @@ -2534,14 +2696,25 @@ bool sys_var_thd_time_zone::update(THD *thd, set_var *var) byte *sys_var_thd_time_zone::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { - /* + /* We can use ptr() instead of c_ptr() here because String contaning time zone name is guaranteed to be zero ended. */ if (type == OPT_GLOBAL) return (byte *)(global_system_variables.time_zone->get_name()->ptr()); else + { + /* + This is an ugly fix for replication: we don't replicate properly queries + invoking system variables' values to update tables; but + CONVERT_TZ(,,@@session.time_zone) is so popular that we make it + replicable (i.e. we tell the binlog code to store the session + timezone). If it's the global value which was used we can't replicate + (binlog code stores session value only). 
+ */ + thd->time_zone_used= 1; return (byte *)(thd->variables.time_zone->get_name()->ptr()); + } } @@ -2568,6 +2741,51 @@ void sys_var_thd_time_zone::set_default(THD *thd, enum_var_type type) pthread_mutex_unlock(&LOCK_global_system_variables); } + +bool sys_var_max_user_conn::check(THD *thd, set_var *var) +{ + if (var->type == OPT_GLOBAL) + return sys_var_thd::check(thd, var); + else + { + /* + Per-session values of max_user_connections can't be set directly. + QQ: May be we should have a separate error message for this? + */ + my_error(ER_GLOBAL_VARIABLE, MYF(0), name); + return TRUE; + } +} + +bool sys_var_max_user_conn::update(THD *thd, set_var *var) +{ + DBUG_ASSERT(var->type == OPT_GLOBAL); + pthread_mutex_lock(&LOCK_global_system_variables); + max_user_connections= (uint)var->save_result.ulonglong_value; + pthread_mutex_unlock(&LOCK_global_system_variables); + return 0; +} + + +void sys_var_max_user_conn::set_default(THD *thd, enum_var_type type) +{ + DBUG_ASSERT(type == OPT_GLOBAL); + pthread_mutex_lock(&LOCK_global_system_variables); + max_user_connections= (ulong) option_limits->def_value; + pthread_mutex_unlock(&LOCK_global_system_variables); +} + + +byte *sys_var_max_user_conn::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + if (type != OPT_GLOBAL && + thd->user_connect && thd->user_connect->user_resources.user_conn) + return (byte*) &(thd->user_connect->user_resources.user_conn); + return (byte*) &(max_user_connections); +} + + bool sys_var_thd_lc_time_names::check(THD *thd, set_var *var) { MY_LOCALE *locale_match; @@ -2643,7 +2861,7 @@ static bool set_option_autocommit(THD *thd, set_var *var) { /* The test is negative as the flag we use is NOT autocommit */ - ulong org_options=thd->options; + ulonglong org_options= thd->options; if (var->save_result.ulong_value != 0) thd->options&= ~((sys_var_thd_bit*) var->var)->bit_flag; @@ -2672,7 +2890,7 @@ static bool set_option_autocommit(THD *thd, set_var *var) static int check_log_update(THD 
*thd, set_var *var) { #ifndef NO_EMBEDDED_ACCESS_CHECKS - if (!(thd->master_access & SUPER_ACL)) + if (!(thd->security_ctx->master_access & SUPER_ACL)) { my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); return 1; @@ -2683,6 +2901,30 @@ static int check_log_update(THD *thd, set_var *var) static bool set_log_update(THD *thd, set_var *var) { + /* + The update log is not supported anymore since 5.0. + See sql/mysqld.cc/, comments in function init_server_components() for an + explaination of the different warnings we send below + */ + + if (opt_sql_bin_update) + { + ((sys_var_thd_bit*) var->var)->bit_flag|= (OPTION_BIN_LOG | + OPTION_UPDATE_LOG); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_UPDATE_LOG_DEPRECATED_TRANSLATED, + ER(ER_UPDATE_LOG_DEPRECATED_TRANSLATED)); + } + else + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_UPDATE_LOG_DEPRECATED_IGNORED, + ER(ER_UPDATE_LOG_DEPRECATED_IGNORED)); + set_option_bit(thd, var); + return 0; +} + +static bool set_log_bin(THD *thd, set_var *var) +{ if (opt_sql_bin_update) ((sys_var_thd_bit*) var->var)->bit_flag|= (OPTION_BIN_LOG | OPTION_UPDATE_LOG); @@ -2694,7 +2936,7 @@ static int check_pseudo_thread_id(THD *thd, set_var *var) { var->save_result.ulonglong_value= var->value->val_int(); #ifndef NO_EMBEDDED_ACCESS_CHECKS - if (thd->master_access & SUPER_ACL) + if (thd->security_ctx->master_access & SUPER_ACL) return 0; else { @@ -2710,13 +2952,14 @@ static byte *get_warning_count(THD *thd) { thd->sys_var_tmp.long_value= (thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_NOTE] + + thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR] + thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_WARN]); return (byte*) &thd->sys_var_tmp.long_value; } static byte *get_error_count(THD *thd) { - thd->sys_var_tmp.long_value= + thd->sys_var_tmp.long_value= thd->warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR]; return (byte*) &thd->sys_var_tmp.long_value; } @@ -2728,6 +2971,30 @@ static byte *get_have_innodb(THD *thd) } +/* + 
Get the tmpdir that was specified or chosen by default + + SYNOPSIS + get_tmpdir() + thd thread handle + + DESCRIPTION + This is necessary because if the user does not specify a temporary + directory via the command line, one is chosen based on the environment + or system defaults. But we can't just always use mysql_tmpdir, because + that is actually a call to my_tmpdir() which cycles among possible + temporary directories. + + RETURN VALUES + ptr pointer to NUL-terminated string + */ +static byte *get_tmpdir(THD *thd) +{ + if (opt_mysql_tmpdir) + return (byte *)opt_mysql_tmpdir; + return (byte*)mysql_tmpdir; +} + /**************************************************************************** Main handling of variables: - Initialisation @@ -2748,7 +3015,7 @@ static byte *get_have_innodb(THD *thd) ptr pointer to option structure */ -static struct my_option *find_option(struct my_option *opt, const char *name) +static struct my_option *find_option(struct my_option *opt, const char *name) { uint length=strlen(name); for (; opt->name; opt++) @@ -2823,9 +3090,6 @@ void set_var_free() length Length of variable. zero means that we should use strlen() on the variable - NOTE - We have to use net_printf() as this is called during the parsing stage - RETURN VALUES pointer pointer to variable definitions 0 Unknown variable (error message is given) @@ -2838,7 +3102,7 @@ sys_var *find_sys_var(const char *str, uint length) length ? length : strlen(str)); if (!var) - net_printf(current_thd, ER_UNKNOWN_SYSTEM_VARIABLE, (char*) str); + my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (char*) str); return var; } @@ -2926,11 +3190,15 @@ bool not_all_support_one_shot(List<set_var_base> *var_list) int set_var::check(THD *thd) { + if (var->is_readonly()) + { + my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), var->name, "read only"); + return -1; + } if (var->check_type(type)) { - my_error(type == OPT_GLOBAL ? 
ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE, - MYF(0), - var->name); + int err= type == OPT_GLOBAL ? ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE; + my_error(err, MYF(0), var->name); return -1; } if ((type == OPT_GLOBAL && check_global_access(thd, SUPER_ACL))) @@ -2946,8 +3214,8 @@ int set_var::check(THD *thd) return 0; } - if ((!value->fixed && - value->fix_fields(thd, 0, &value)) || value->check_cols(1)) + if ((!value->fixed && + value->fix_fields(thd, &value)) || value->check_cols(1)) return -1; if (var->check_update_type(value->result_type())) { @@ -2974,15 +3242,14 @@ int set_var::light_check(THD *thd) { if (var->check_type(type)) { - my_error(type == OPT_GLOBAL ? ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE, - MYF(0), - var->name); + int err= type == OPT_GLOBAL ? ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE; + my_error(err, MYF(0), var->name); return -1; } if (type == OPT_GLOBAL && check_global_access(thd, SUPER_ACL)) return 1; - if (value && ((!value->fixed && value->fix_fields(thd, 0, &value)) || + if (value && ((!value->fixed && value->fix_fields(thd, &value)) || value->check_cols(1))) return -1; return 0; @@ -3011,8 +3278,8 @@ int set_var_user::check(THD *thd) Item_func_set_user_var can't substitute something else on its place => 0 can be passed as last argument (reference on item) */ - return (user_var_item->fix_fields(thd, 0, (Item**) 0) || - user_var_item->check()) ? -1 : 0; + return (user_var_item->fix_fields(thd, (Item**) 0) || + user_var_item->check(0)) ? 
-1 : 0; } @@ -3034,7 +3301,7 @@ int set_var_user::light_check(THD *thd) Item_func_set_user_var can't substitute something else on its place => 0 can be passed as last argument (reference on item) */ - return (user_var_item->fix_fields(thd, 0, (Item**) 0)); + return (user_var_item->fix_fields(thd, (Item**) 0)); } @@ -3043,7 +3310,7 @@ int set_var_user::update(THD *thd) if (user_var_item->update()) { /* Give an error if it's not given already */ - my_error(ER_SET_CONSTANTS_ONLY, MYF(0)); + my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY), MYF(0)); return -1; } return 0; @@ -3059,10 +3326,10 @@ int set_var_password::check(THD *thd) #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!user->host.str) { - if (thd->priv_host != 0) + if (*thd->security_ctx->priv_host != 0) { - user->host.str= (char *) thd->priv_host; - user->host.length= strlen(thd->priv_host); + user->host.str= (char *) thd->security_ctx->priv_host; + user->host.length= strlen(thd->security_ctx->priv_host); } else { @@ -3097,7 +3364,7 @@ int set_var_password::update(THD *thd) bool sys_var_thd_storage_engine::check(THD *thd, set_var *var) { - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; const char *value; String str(buff, sizeof(buff), &my_charset_latin1), *res; @@ -3107,7 +3374,7 @@ bool sys_var_thd_storage_engine::check(THD *thd, set_var *var) if (!(res=var->value->val_str(&str)) || !(var->save_result.ulong_value= (ulong) (db_type= ha_resolve_by_name(res->ptr(), res->length()))) || - ha_checktype(db_type) != db_type) + ha_checktype(thd, db_type, 1, 0) != db_type) { value= res ? 
res->c_ptr() : "NULL"; goto err; @@ -3118,7 +3385,7 @@ bool sys_var_thd_storage_engine::check(THD *thd, set_var *var) err: my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), value); - return 1; + return 1; } @@ -3156,7 +3423,7 @@ void sys_var_thd_table_type::warn_deprecated(THD *thd) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX), "table_type", - "storage_engine"); + "storage_engine"); } void sys_var_thd_table_type::set_default(THD *thd, enum_var_type type) @@ -3176,27 +3443,50 @@ bool sys_var_thd_table_type::update(THD *thd, set_var *var) Functions to handle sql_mode ****************************************************************************/ -byte *sys_var_thd_sql_mode::value_ptr(THD *thd, enum_var_type type, - LEX_STRING *base) +/* + Make string representation of mode + + SYNOPSIS + thd in thread handler + val in sql_mode value + len out pointer on length of string + + RETURN + pointer to string with sql_mode representation +*/ + +byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd, ulong val, + ulong *len) { - ulong val; char buff[256]; String tmp(buff, sizeof(buff), &my_charset_latin1); + ulong length; tmp.length(0); - val= ((type == OPT_GLOBAL) ? global_system_variables.*offset : - thd->variables.*offset); for (uint i= 0; val; val>>= 1, i++) { if (val & 1) { - tmp.append(enum_names->type_names[i]); + tmp.append(sql_mode_typelib.type_names[i], + sql_mode_typelib.type_lengths[i]); tmp.append(','); } } - if (tmp.length()) - tmp.length(tmp.length() - 1); - return (byte*) thd->strmake(tmp.ptr(), tmp.length()); + + if ((length= tmp.length())) + length--; + *len= length; + return (byte*) thd->strmake(tmp.ptr(), length); +} + + +byte *sys_var_thd_sql_mode::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + ulong val= ((type == OPT_GLOBAL) ? 
global_system_variables.*offset : + thd->variables.*offset); + ulong length_unused; + return symbolic_mode_representation(thd, val, &length_unused); } @@ -3208,13 +3498,23 @@ void sys_var_thd_sql_mode::set_default(THD *thd, enum_var_type type) thd->variables.*offset= global_system_variables.*offset; } + void fix_sql_mode_var(THD *thd, enum_var_type type) { if (type == OPT_GLOBAL) global_system_variables.sql_mode= fix_sql_mode(global_system_variables.sql_mode); else + { thd->variables.sql_mode= fix_sql_mode(thd->variables.sql_mode); + /* + Update thd->server_status + */ + if (thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) + thd->server_status|= SERVER_STATUS_NO_BACKSLASH_ESCAPES; + else + thd->server_status&= ~SERVER_STATUS_NO_BACKSLASH_ESCAPES; + } } /* Map database specific bits to function bits */ @@ -3222,7 +3522,7 @@ void fix_sql_mode_var(THD *thd, enum_var_type type) ulong fix_sql_mode(ulong sql_mode) { /* - Note that we dont set + Note that we dont set MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | MODE_NO_FIELD_OPTIONS to allow one to get full use of MySQL in this mode. */ @@ -3231,7 +3531,7 @@ ulong fix_sql_mode(ulong sql_mode) { sql_mode|= (MODE_REAL_AS_FLOAT | MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE); - /* + /* MODE_ONLY_FULL_GROUP_BY removed from ANSI mode because it is currently overly restrictive (see BUG#8510). 
*/ @@ -3240,7 +3540,7 @@ ulong fix_sql_mode(ulong sql_mode) sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | - MODE_NO_FIELD_OPTIONS); + MODE_NO_FIELD_OPTIONS | MODE_NO_AUTO_CREATE_USER); if (sql_mode & MODE_MSSQL) sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | @@ -3260,7 +3560,15 @@ ulong fix_sql_mode(ulong sql_mode) sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | - MODE_NO_FIELD_OPTIONS); + MODE_NO_FIELD_OPTIONS | MODE_NO_AUTO_CREATE_USER); + if (sql_mode & MODE_MYSQL40) + sql_mode|= MODE_HIGH_NOT_PRECEDENCE; + if (sql_mode & MODE_MYSQL323) + sql_mode|= MODE_HIGH_NOT_PRECEDENCE; + if (sql_mode & MODE_TRADITIONAL) + sql_mode|= (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES | + MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_ERROR_FOR_DIVISION_BY_ZERO | MODE_NO_AUTO_CREATE_USER); return sql_mode; } @@ -3308,7 +3616,7 @@ static KEY_CACHE *create_key_cache(const char *name, uint length) KEY_CACHE *key_cache; DBUG_ENTER("create_key_cache"); DBUG_PRINT("enter",("name: %.*s", length, name)); - + if ((key_cache= (KEY_CACHE*) my_malloc(sizeof(KEY_CACHE), MYF(MY_ZEROFILL | MY_WME)))) { @@ -3370,11 +3678,31 @@ bool process_key_caches(int (* func) (const char *name, KEY_CACHE *)) } +void sys_var_trust_routine_creators::warn_deprecated(THD *thd) +{ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DEPRECATED_SYNTAX, + ER(ER_WARN_DEPRECATED_SYNTAX), "log_bin_trust_routine_creators", + "log_bin_trust_function_creators"); +} + +void sys_var_trust_routine_creators::set_default(THD *thd, enum_var_type type) +{ + warn_deprecated(thd); + sys_var_bool_ptr::set_default(thd, type); +} + +bool sys_var_trust_routine_creators::update(THD *thd, set_var *var) +{ + warn_deprecated(thd); + return sys_var_bool_ptr::update(thd, var); +} + 
/**************************************************************************** Used templates ****************************************************************************/ -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class List<set_var_base>; template class List_iterator_fast<set_var_base>; template class I_List_iterator<NAMED_LIST>; diff --git a/sql/set_var.h b/sql/set_var.h index 78b34963e9d..6000e155db9 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2002-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -46,22 +45,18 @@ public: const char *name; sys_after_update_func after_update; -#if MYSQL_VERSION_ID < 50000 bool no_support_one_shot; -#endif sys_var(const char *name_arg, sys_after_update_func func= NULL) :name(name_arg), after_update(func) -#if MYSQL_VERSION_ID < 50000 , no_support_one_shot(1) -#endif {} virtual ~sys_var() {} virtual bool check(THD *thd, set_var *var); bool check_enum(THD *thd, set_var *var, TYPELIB *enum_names); bool check_set(THD *thd, set_var *var, TYPELIB *enum_names); virtual bool update(THD *thd, set_var *var)=0; - virtual void set_default(THD *thd, enum_var_type type) {} - virtual SHOW_TYPE type() { return SHOW_UNDEF; } + virtual void set_default(THD *thd_arg, enum_var_type type) {} + virtual SHOW_TYPE show_type() { return SHOW_UNDEF; } virtual byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { return 0; } virtual bool check_type(enum_var_type type) @@ -72,6 +67,7 @@ public: { return option_limits == 0; } 
Item *item(THD *thd, enum_var_type type, LEX_STRING *base); virtual bool is_struct() { return 0; } + virtual bool is_readonly() const { return 0; } }; @@ -100,14 +96,16 @@ class sys_var_long_ptr_global: public sys_var_global { public: ulong *value; - sys_var_long_ptr_global(const char *name_arg, ulong *value_ptr, + sys_var_long_ptr_global(const char *name_arg, ulong *value_ptr_arg, pthread_mutex_t *guard_arg, sys_after_update_func after_update_arg= NULL) - :sys_var_global(name_arg, after_update_arg, guard_arg), value(value_ptr) {} + :sys_var_global(name_arg, after_update_arg, guard_arg), + value(value_ptr_arg) + {} bool check(THD *thd, set_var *var); bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); - SHOW_TYPE type() { return SHOW_LONG; } + SHOW_TYPE show_type() { return SHOW_LONG; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { return (byte*) value; } }; @@ -129,14 +127,14 @@ class sys_var_ulonglong_ptr :public sys_var { public: ulonglong *value; - sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr) - :sys_var(name_arg),value(value_ptr) {} - sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr, + sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr_arg) + :sys_var(name_arg),value(value_ptr_arg) {} + sys_var_ulonglong_ptr(const char *name_arg, ulonglong *value_ptr_arg, sys_after_update_func func) - :sys_var(name_arg,func), value(value_ptr) {} + :sys_var(name_arg,func), value(value_ptr_arg) {} bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); - SHOW_TYPE type() { return SHOW_LONGLONG; } + SHOW_TYPE show_type() { return SHOW_LONGLONG; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { return (byte*) value; } }; @@ -155,7 +153,7 @@ public: } bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); - SHOW_TYPE type() { return SHOW_MY_BOOL; } + SHOW_TYPE show_type() { return SHOW_MY_BOOL; } byte 
*value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { return (byte*) value; } bool check_update_type(Item_result type) { return 0; } @@ -187,7 +185,7 @@ public: { (*set_default_func)(thd, type); } - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { return (byte*) value; } bool check_update_type(Item_result type) @@ -213,7 +211,7 @@ public: { return 1; } - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { return (byte*) value; @@ -223,6 +221,36 @@ public: return 1; } bool check_default(enum_var_type type) { return 1; } + bool is_readonly() const { return 1; } +}; + + +class sys_var_const_str_ptr :public sys_var +{ +public: + char **value; // Pointer to const value + sys_var_const_str_ptr(const char *name_arg, char **value_arg) + :sys_var(name_arg),value(value_arg) + {} + bool check(THD *thd, set_var *var) + { + return 1; + } + bool update(THD *thd, set_var *var) + { + return 1; + } + SHOW_TYPE show_type() { return SHOW_CHAR; } + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) + { + return (byte*) *value; + } + bool check_update_type(Item_result type) + { + return 1; + } + bool check_default(enum_var_type type) { return 1; } + bool is_readonly() const { return 1; } }; @@ -240,7 +268,7 @@ public: return check_enum(thd, var, enum_names); } bool update(THD *thd, set_var *var); - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); bool check_update_type(Item_result type) { return 0; } }; @@ -275,7 +303,7 @@ public: bool check(THD *thd, set_var *var); bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); - SHOW_TYPE type() { return SHOW_LONG; } + SHOW_TYPE show_type() { return SHOW_LONG; } byte *value_ptr(THD *thd, 
enum_var_type type, LEX_STRING *base); }; @@ -293,7 +321,7 @@ public: {} bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); - SHOW_TYPE type() { return SHOW_HA_ROWS; } + SHOW_TYPE show_type() { return SHOW_HA_ROWS; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; @@ -313,7 +341,7 @@ public: {} bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); - SHOW_TYPE type() { return SHOW_LONGLONG; } + SHOW_TYPE show_type() { return SHOW_LONGLONG; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); bool check_default(enum_var_type type) { @@ -339,7 +367,7 @@ public: {} bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); - SHOW_TYPE type() { return SHOW_MY_BOOL; } + SHOW_TYPE show_type() { return SHOW_MY_BOOL; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); bool check(THD *thd, set_var *var) { @@ -370,7 +398,7 @@ public: } bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); bool check_update_type(Item_result type) { return 0; } }; @@ -391,6 +419,8 @@ public: } void set_default(THD *thd, enum_var_type type); byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); + static byte *symbolic_mode_representation(THD *thd, ulong sql_mode, + ulong *length); }; @@ -403,7 +433,7 @@ public: :sys_var_thd(name_arg), offset(offset_arg) {} bool check(THD *thd, set_var *var); -SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } bool check_update_type(Item_result type) { return type != STRING_RESULT; /* Only accept strings */ @@ -429,11 +459,11 @@ class sys_var_thd_bit :public sys_var_thd sys_check_func check_func; sys_update_func update_func; public: - ulong bit_flag; + ulonglong bit_flag; bool reverse; 
sys_var_thd_bit(const char *name_arg, sys_check_func c_func, sys_update_func u_func, - ulong bit, bool reverse_arg=0) + ulonglong bit, bool reverse_arg=0) :sys_var_thd(name_arg), check_func(c_func), update_func(u_func), bit_flag(bit), reverse(reverse_arg) {} @@ -441,7 +471,7 @@ public: bool update(THD *thd, set_var *var); bool check_update_type(Item_result type) { return 0; } bool check_type(enum_var_type type) { return type == OPT_GLOBAL; } - SHOW_TYPE type() { return SHOW_MY_BOOL; } + SHOW_TYPE show_type() { return SHOW_MY_BOOL; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; @@ -456,7 +486,7 @@ public: void set_default(THD *thd, enum_var_type type); bool check_type(enum_var_type type) { return type == OPT_GLOBAL; } bool check_default(enum_var_type type) { return 0; } - SHOW_TYPE type() { return SHOW_LONG; } + SHOW_TYPE show_type() { return SHOW_LONG; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; @@ -467,7 +497,7 @@ public: sys_var_last_insert_id(const char *name_arg) :sys_var(name_arg) {} bool update(THD *thd, set_var *var); bool check_type(enum_var_type type) { return type == OPT_GLOBAL; } - SHOW_TYPE type() { return SHOW_LONGLONG; } + SHOW_TYPE show_type() { return SHOW_LONGLONG; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; @@ -478,7 +508,7 @@ public: sys_var_insert_id(const char *name_arg) :sys_var(name_arg) {} bool update(THD *thd, set_var *var); bool check_type(enum_var_type type) { return type == OPT_GLOBAL; } - SHOW_TYPE type() { return SHOW_LONGLONG; } + SHOW_TYPE show_type() { return SHOW_LONGLONG; } byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); }; @@ -500,8 +530,8 @@ public: class sys_var_sync_binlog_period :public sys_var_long_ptr { public: - sys_var_sync_binlog_period(const char *name_arg, ulong *value_ptr) - :sys_var_long_ptr(name_arg,value_ptr) {} + sys_var_sync_binlog_period(const char *name_arg, ulong *value_ptr_arg) + 
:sys_var_long_ptr(name_arg,value_ptr_arg) {} bool update(THD *thd, set_var *var); }; #endif @@ -528,12 +558,10 @@ class sys_var_collation :public sys_var_thd public: sys_var_collation(const char *name_arg) :sys_var_thd(name_arg) { -#if MYSQL_VERSION_ID < 50000 no_support_one_shot= 0; -#endif } bool check(THD *thd, set_var *var); -SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } bool check_update_type(Item_result type) { return ((type != STRING_RESULT) && (type != INT_RESULT)); @@ -550,16 +578,14 @@ public: sys_var_thd(name_arg) { nullable= 0; -#if MYSQL_VERSION_ID < 50000 /* In fact only almost all variables derived from sys_var_character_set support ONE_SHOT; character_set_results doesn't. But that's good enough. */ no_support_one_shot= 0; -#endif } bool check(THD *thd, set_var *var); - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } bool check_update_type(Item_result type) { return ((type != STRING_RESULT) && (type != INT_RESULT)); @@ -571,6 +597,15 @@ public: virtual CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type)= 0; }; +class sys_var_character_set_filesystem :public sys_var_character_set +{ +public: + sys_var_character_set_filesystem(const char *name_arg) : + sys_var_character_set(name_arg) {} + void set_default(THD *thd, enum_var_type type); + CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type); +}; + class sys_var_character_set_client :public sys_var_character_set { public: @@ -595,9 +630,6 @@ class sys_var_character_set_server :public sys_var_character_set public: sys_var_character_set_server(const char *name_arg) : sys_var_character_set(name_arg) {} -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) - bool check(THD *thd, set_var *var); -#endif void set_default(THD *thd, enum_var_type type); CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type); }; @@ -633,9 +665,6 @@ class sys_var_collation_server :public sys_var_collation { public: sys_var_collation_server(const char 
*name_arg) :sys_var_collation(name_arg) {} -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) - bool check(THD *thd, set_var *var); -#endif bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); @@ -672,7 +701,7 @@ public: :sys_var_key_cache_param(name_arg, offsetof(KEY_CACHE, param_buff_size)) {} bool update(THD *thd, set_var *var); - SHOW_TYPE type() { return SHOW_LONGLONG; } + SHOW_TYPE show_type() { return SHOW_LONGLONG; } }; @@ -683,7 +712,7 @@ public: :sys_var_key_cache_param(name_arg, offset_arg) {} bool update(THD *thd, set_var *var); - SHOW_TYPE type() { return SHOW_LONG; } + SHOW_TYPE show_type() { return SHOW_LONG; } }; @@ -698,7 +727,7 @@ public: :sys_var_thd(name_arg), offset(offset_arg), date_time_type(date_time_type_arg) {} - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } bool check_update_type(Item_result type) { return type != STRING_RESULT; /* Only accept strings */ @@ -718,13 +747,13 @@ class sys_var_readonly: public sys_var { public: enum_var_type var_type; - SHOW_TYPE show_type; + SHOW_TYPE show_type_value; sys_value_ptr_func value_ptr_func; sys_var_readonly(const char *name_arg, enum_var_type type, SHOW_TYPE show_type_arg, sys_value_ptr_func value_ptr_func_arg) :sys_var(name_arg), var_type(type), - show_type(show_type_arg), value_ptr_func(value_ptr_func_arg) + show_type_value(show_type_arg), value_ptr_func(value_ptr_func_arg) {} bool update(THD *thd, set_var *var) { return 1; } bool check_default(enum_var_type type) { return 1; } @@ -734,7 +763,8 @@ public: { return (*value_ptr_func)(thd); } - SHOW_TYPE type() { return show_type; } + SHOW_TYPE show_type() { return show_type_value; } + bool is_readonly() const { return 1; } }; class sys_var_thd_time_zone :public sys_var_thd @@ -743,12 +773,10 @@ public: sys_var_thd_time_zone(const char *name_arg): sys_var_thd(name_arg) { -#if MYSQL_VERSION_ID < 50000 
no_support_one_shot= 0; -#endif } bool check(THD *thd, set_var *var); - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } bool check_update_type(Item_result type) { return type != STRING_RESULT; /* Only accept strings */ @@ -759,6 +787,35 @@ public: virtual void set_default(THD *thd, enum_var_type type); }; + +class sys_var_max_user_conn : public sys_var_thd +{ +public: + sys_var_max_user_conn(const char *name_arg): + sys_var_thd(name_arg) {} + bool check(THD *thd, set_var *var); + bool update(THD *thd, set_var *var); + bool check_default(enum_var_type type) + { + return type != OPT_GLOBAL || !option_limits; + } + void set_default(THD *thd, enum_var_type type); + SHOW_TYPE show_type() { return SHOW_INT; } + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); +}; + +class sys_var_trust_routine_creators :public sys_var_bool_ptr +{ + /* We need a derived class only to have a warn_deprecated() */ +public: + sys_var_trust_routine_creators(const char *name_arg, my_bool *value_arg) : + sys_var_bool_ptr(name_arg, value_arg) {}; + void warn_deprecated(THD *thd); + void set_default(THD *thd, enum_var_type type); + bool update(THD *thd, set_var *var); +}; + + class sys_var_thd_lc_time_names :public sys_var_thd { public: @@ -770,7 +827,7 @@ public: #endif } bool check(THD *thd, set_var *var); - SHOW_TYPE type() { return SHOW_CHAR; } + SHOW_TYPE show_type() { return SHOW_CHAR; } bool check_update_type(Item_result type) { return ((type != STRING_RESULT) && (type != INT_RESULT)); @@ -794,9 +851,7 @@ public: virtual int update(THD *thd)=0; /* To set the value */ /* light check for PS */ virtual int light_check(THD *thd) { return check(thd); } -#if MYSQL_VERSION_ID < 50000 virtual bool no_support_one_shot() { return 1; } -#endif }; @@ -819,8 +874,8 @@ public: } save_result; LEX_STRING base; /* for structs */ - set_var(enum_var_type type_arg, sys_var *var_arg, LEX_STRING *base_name_arg, - Item *value_arg) + set_var(enum_var_type 
type_arg, sys_var *var_arg, + const LEX_STRING *base_name_arg, Item *value_arg) :var(var_arg), type(type_arg), base(*base_name_arg) { /* @@ -830,7 +885,8 @@ public: if (value_arg && value_arg->type() == Item::FIELD_ITEM) { Item_field *item= (Item_field*) value_arg; - if (!(value=new Item_string(item->field_name, strlen(item->field_name), + if (!(value=new Item_string(item->field_name, + (uint) strlen(item->field_name), item->collation.collation))) value=value_arg; /* Give error message later */ } @@ -840,9 +896,7 @@ public: int check(THD *thd); int update(THD *thd); int light_check(THD *thd); -#if MYSQL_VERSION_ID < 50000 bool no_support_one_shot() { return var->no_support_one_shot; } -#endif }; @@ -908,7 +962,7 @@ public: uint name_length_arg, gptr data_arg) :name_length(name_length_arg), data(data_arg) { - name= my_strdup_with_length((byte*) name_arg, name_length, MYF(MY_WME)); + name= my_strdup_with_length(name_arg, name_length, MYF(MY_WME)); links->push_back(this); } inline bool cmp(const char *name_cmp, uint length) @@ -948,10 +1002,11 @@ int sql_set_variables(THD *thd, List<set_var_base> *var_list); bool not_all_support_one_shot(List<set_var_base> *var_list); void fix_delay_key_write(THD *thd, enum_var_type type); ulong fix_sql_mode(ulong sql_mode); -extern sys_var_str sys_charset_system; +extern sys_var_const_str sys_charset_system; extern sys_var_str sys_init_connect; extern sys_var_str sys_init_slave; extern sys_var_thd_time_zone sys_time_zone; +extern sys_var_thd_bit sys_autocommit; CHARSET_INFO *get_old_charset_by_name(const char *old_name); gptr find_named(I_List<NAMED_LIST> *list, const char *name, uint length, NAMED_LIST **found); diff --git a/sql/share/Makefile.am b/sql/share/Makefile.am index 3b13d73e8da..68b393e619f 100644 --- a/sql/share/Makefile.am +++ b/sql/share/Makefile.am @@ -2,8 +2,7 @@ # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by -# the Free 
Software Foundation; either version 2 of the License, or -# (at your option) any later version. +# the Free Software Foundation; version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -16,6 +15,8 @@ ## Process this file with automake to create Makefile.in +EXTRA_DIST= errmsg.txt + dist-hook: for dir in charsets @AVAILABLE_LANGUAGES@; do \ test -d $(distdir)/$$dir || mkdir $(distdir)/$$dir; \ @@ -25,10 +26,14 @@ dist-hook: $(INSTALL_DATA) $(srcdir)/charsets/README $(distdir)/charsets $(INSTALL_DATA) $(srcdir)/charsets/Index.xml $(distdir)/charsets -all-local: @AVAILABLE_LANGUAGES_ERRORS@ +all-local: english/errmsg.sys + +# Use the english errmsg.sys as a flag that all errmsg.sys needs to be +# created. Normally these are created by extra/Makefile -# this is ugly, but portable -@AVAILABLE_LANGUAGES_ERRORS_RULES@ +english/errmsg.sys: errmsg.txt + rm -f $(top_builddir)/include/mysqld_error.h + (cd $(top_builddir)/extra && $(MAKE)) install-data-local: for lang in @AVAILABLE_LANGUAGES@; \ @@ -36,10 +41,10 @@ install-data-local: $(mkinstalldirs) $(DESTDIR)$(pkgdatadir)/$$lang; \ $(INSTALL_DATA) $(srcdir)/$$lang/errmsg.sys \ $(DESTDIR)$(pkgdatadir)/$$lang/errmsg.sys; \ - $(INSTALL_DATA) $(srcdir)/$$lang/errmsg.txt \ - $(DESTDIR)$(pkgdatadir)/$$lang/errmsg.txt; \ done $(mkinstalldirs) $(DESTDIR)$(pkgdatadir)/charsets + $(INSTALL_DATA) $(srcdir)/errmsg.txt \ + $(DESTDIR)$(pkgdatadir)/errmsg.txt; \ $(INSTALL_DATA) $(srcdir)/charsets/README $(DESTDIR)$(pkgdatadir)/charsets/README $(INSTALL_DATA) $(srcdir)/charsets/*.xml $(DESTDIR)$(pkgdatadir)/charsets @@ -47,14 +52,11 @@ install-data-local: uninstall-local: @RM@ -f -r $(DESTDIR)$(pkgdatadir) +distclean-local: + @RM@ -f */errmsg.sys + # Do nothing link_sources: -fix_errors: - for lang in @AVAILABLE_LANGUAGES@; \ - do \ - ../../extra/comp_err -C$(srcdir)/charsets/ $(srcdir)/$$lang/errmsg.txt $(srcdir)/$$lang/errmsg.sys; \ 
- done - # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/sql/share/charsets/Index.xml b/sql/share/charsets/Index.xml index 97fc27e1431..ae72daa8ca2 100644 --- a/sql/share/charsets/Index.xml +++ b/sql/share/charsets/Index.xml @@ -1,14 +1,13 @@ <?xml version='1.0' encoding="utf-8"?> -<charsets max-id="96"> +<charsets max-id="98"> <copyright> Copyright (C) 2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -562,13 +561,9 @@ To make maintaining easier please: <charset name="cp932"> <family>Japanese</family> <description>SJIS for Windows Japanese</description> - <alias>windows-31j</alias> - <alias>cswindows31j</alias> - <alias>sjisms</alias> - <alias>windows-95j</alias> - <alias>x-sjis-cp932</alias> - <alias>ms932</alias> - <alias>sjisms</alias> + <alias>ms_cp932</alias> + <alias>sjis_cp932</alias> + <alias>sjis_ms</alias> <collation name="cp932_japanese_ci" id="95" order="Japanese"> <flag>primary</flag> <flag>compiled</flag> @@ -579,5 +574,22 @@ To make maintaining easier please: </collation> </charset> +<charset name="eucjpms"> + <family>Japanese</family> + <description>UJIS for Windows Japanese</description> + <alias>eucjpms</alias> + <alias>eucJP_ms</alias> + <alias>ujis_ms</alias> + <alias>ujis_cp932</alias> + <collation name="eucjpms_japanese_ci" id="97" order="Japanese"> + <flag>primary</flag> + <flag>compiled</flag> + </collation> + <collation name="eucjpms_bin" id="98" order="Japanese"> + <flag>binary</flag> + <flag>compiled</flag> + </collation> +</charset> + </charsets> diff --git a/sql/share/charsets/armscii8.xml b/sql/share/charsets/armscii8.xml index 
d0ab428345f..714e57bb12e 100644 --- a/sql/share/charsets/armscii8.xml +++ b/sql/share/charsets/armscii8.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/ascii.xml b/sql/share/charsets/ascii.xml index 3813bd42601..97006c53680 100644 --- a/sql/share/charsets/ascii.xml +++ b/sql/share/charsets/ascii.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/cp1250.xml b/sql/share/charsets/cp1250.xml index 1e62e64ad5a..0bda643c910 100644 --- a/sql/share/charsets/cp1250.xml +++ b/sql/share/charsets/cp1250.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/cp1251.xml b/sql/share/charsets/cp1251.xml index 7f94788c0d0..b80db9f8ec0 100644 --- a/sql/share/charsets/cp1251.xml +++ b/sql/share/charsets/cp1251.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/cp1256.xml b/sql/share/charsets/cp1256.xml index 69eb6a68238..64cb253145c 100644 --- a/sql/share/charsets/cp1256.xml +++ b/sql/share/charsets/cp1256.xml @@ -9,8 +9,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/cp1257.xml b/sql/share/charsets/cp1257.xml index 93a1bd47a77..0c2688c264e 100644 --- a/sql/share/charsets/cp1257.xml +++ b/sql/share/charsets/cp1257.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/cp850.xml b/sql/share/charsets/cp850.xml index 79497aa17f1..4076a5f6a56 100644 --- a/sql/share/charsets/cp850.xml +++ b/sql/share/charsets/cp850.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/cp852.xml b/sql/share/charsets/cp852.xml index 73a81e54b02..25b622d2a4b 100644 --- a/sql/share/charsets/cp852.xml +++ b/sql/share/charsets/cp852.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/cp866.xml b/sql/share/charsets/cp866.xml index 1a72b396c7c..fa2e1865de6 100644 --- a/sql/share/charsets/cp866.xml +++ b/sql/share/charsets/cp866.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/dec8.xml b/sql/share/charsets/dec8.xml index 2cb28cb0f4f..2cd52de464a 100644 --- a/sql/share/charsets/dec8.xml +++ b/sql/share/charsets/dec8.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/geostd8.xml b/sql/share/charsets/geostd8.xml index c09aa078fb7..5e3816975d6 100644 --- a/sql/share/charsets/geostd8.xml +++ b/sql/share/charsets/geostd8.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/greek.xml b/sql/share/charsets/greek.xml index 1cfe6b49610..000019a8ce0 100644 --- a/sql/share/charsets/greek.xml +++ b/sql/share/charsets/greek.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/hebrew.xml b/sql/share/charsets/hebrew.xml index 981f308bfb5..20d68487301 100644 --- a/sql/share/charsets/hebrew.xml +++ b/sql/share/charsets/hebrew.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/hp8.xml b/sql/share/charsets/hp8.xml index 35224f8c544..3ab383ef386 100644 --- a/sql/share/charsets/hp8.xml +++ b/sql/share/charsets/hp8.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/keybcs2.xml b/sql/share/charsets/keybcs2.xml index 6332891ef23..7335a0f428d 100644 --- a/sql/share/charsets/keybcs2.xml +++ b/sql/share/charsets/keybcs2.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/koi8r.xml b/sql/share/charsets/koi8r.xml index 033597e9bfc..2d8473f6440 100644 --- a/sql/share/charsets/koi8r.xml +++ b/sql/share/charsets/koi8r.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/koi8u.xml b/sql/share/charsets/koi8u.xml index 4f5fa35af3d..16177627ffe 100644 --- a/sql/share/charsets/koi8u.xml +++ b/sql/share/charsets/koi8u.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/latin1.xml b/sql/share/charsets/latin1.xml index 5814a17b0e1..88ceff440d5 100644 --- a/sql/share/charsets/latin1.xml +++ b/sql/share/charsets/latin1.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/latin2.xml b/sql/share/charsets/latin2.xml index 7f00148a1df..6b887b927a4 100644 --- a/sql/share/charsets/latin2.xml +++ b/sql/share/charsets/latin2.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/latin5.xml b/sql/share/charsets/latin5.xml index 5004f045889..9c23200a46d 100644 --- a/sql/share/charsets/latin5.xml +++ b/sql/share/charsets/latin5.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/latin7.xml b/sql/share/charsets/latin7.xml index dd87a1a2d89..02d3ff8b17e 100644 --- a/sql/share/charsets/latin7.xml +++ b/sql/share/charsets/latin7.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/macce.xml b/sql/share/charsets/macce.xml index 61f6d79b34f..21e303609cf 100644 --- a/sql/share/charsets/macce.xml +++ b/sql/share/charsets/macce.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/macroman.xml b/sql/share/charsets/macroman.xml index 36c8e8cf13a..2b43fe73b07 100644 --- a/sql/share/charsets/macroman.xml +++ b/sql/share/charsets/macroman.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/charsets/swe7.xml b/sql/share/charsets/swe7.xml index 2b8ff4edcce..17fa6b7d9bc 100644 --- a/sql/share/charsets/swe7.xml +++ b/sql/share/charsets/swe7.xml @@ -7,8 +7,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/share/czech/errmsg.txt b/sql/share/czech/errmsg.txt deleted file mode 100644 index d1fcfc5bb60..00000000000 --- a/sql/share/czech/errmsg.txt +++ /dev/null @@ -1,333 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Modifikoval Petr -B©najdr, snajdr@pvt.net, snajdr@cpress.cz v.0.01 - ISO LATIN-8852-2 - Dal-B¹í verze Jan Pazdziora, adelton@fi.muni.cz - Tue Nov 18 17:53:55 MET 1997 - Tue Dec 2 19:08:54 MET 1997 podle 3.21.15c - Thu May 7 17:40:49 MET DST 1998 podle 3.21.29 - Thu Apr 1 20:49:57 CEST 1999 podle 3.22.20 - Mon Aug 9 13:30:09 MET DST 1999 podle 3.23.2 - Thu Nov 30 14:02:52 MET 2000 podle 3.23.28 -*/ - -character-set=latin2 - -"hashchk", -"isamchk", -"NE", -"ANO", -"Nemohu vytvo-Bøit soubor '%-.64s' (chybový kód: %d)", -"Nemohu vytvo-Bøit tabulku '%-.64s' (chybový kód: %d)", -"Nemohu vytvo-Bøit databázi '%-.64s' (chybový kód: %d)", -"Nemohu vytvo-Bøit databázi '%-.64s'; databáze ji¾ existuje", -"Nemohu zru-B¹it databázi '%-.64s', databáze neexistuje", -"Chyba p-Bøi ru¹ení databáze (nemohu vymazat '%-.64s', chyba %d)", -"Chyba p-Bøi ru¹ení databáze (nemohu vymazat adresáø '%-.64s', chyba %d)", -"Chyba p-Bøi výmazu '%-.64s' 
(chybový kód: %d)", -"Nemohu -Bèíst záznam v systémové tabulce", -"Nemohu z-Bískat stav '%-.64s' (chybový kód: %d)", -"Chyba p-Bøi zji¹»ování pracovní adresáø (chybový kód: %d)", -"Nemohu uzamknout soubor (chybov-Bý kód: %d)", -"Nemohu otev-Bøít soubor '%-.64s' (chybový kód: %d)", -"Nemohu naj-Bít soubor '%-.64s' (chybový kód: %d)", -"Nemohu -Bèíst adresáø '%-.64s' (chybový kód: %d)", -"Nemohu zm-Bìnit adresáø na '%-.64s' (chybový kód: %d)", -"Z-Báznam byl zmìnìn od posledního ètení v tabulce '%-.64s'", -"Disk je pln-Bý (%s), èekám na uvolnìní nìjakého místa ...", -"Nemohu zapsat, zdvojen-Bý klíè v tabulce '%-.64s'", -"Chyba p-Bøi zavírání '%-.64s' (chybový kód: %d)", -"Chyba p-Bøi ètení souboru '%-.64s' (chybový kód: %d)", -"Chyba p-Bøi pøejmenování '%-.64s' na '%-.64s' (chybový kód: %d)", -"Chyba p-Bøi zápisu do souboru '%-.64s' (chybový kód: %d)", -"'%-.64s' je zam-Bèen proti zmìnám", -"T-Bøídìní pøeru¹eno", -"Pohled '%-.64s' pro '%-.64s' neexistuje", -"Obsluha tabulky vr-Bátila chybu %d", -"Obsluha tabulky '%-.64s' nem-Bá tento parametr", -"Nemohu naj-Bít záznam v '%-.64s'", -"Nespr-Bávná informace v souboru '%-.64s'", -"Nespr-Bávný klíè pro tabulku '%-.64s'; pokuste se ho opravit", -"Star-Bý klíèový soubor pro '%-.64s'; opravte ho.", -"'%-.64s' je jen pro -Bètení", -"M-Bálo pamìti. Pøestartujte daemona a zkuste znovu (je potøeba %d bytù)", -"M-Bálo pamìti pro tøídìní. 
Zvy¹te velikost tøídícího bufferu", -"Neo-Bèekávaný konec souboru pøi ètení '%-.64s' (chybový kód: %d)", -"P-Bøíli¹ mnoho spojení", -"M-Bálo prostoru/pamìti pro thread", -"Nemohu zjistit jm-Béno stroje pro Va¹i adresu", -"Chyba p-Bøi ustavování spojení", -"P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' k databázi '%-.64s' není povolen", -"P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' (s heslem %s)", -"Nebyla vybr-Bána ¾ádná databáze", -"Nezn-Bámý pøíkaz", -"Sloupec '%-.64s' nem-Bù¾e být null", -"Nezn-Bámá databáze '%-.64s'", -"Tabulka '%-.64s' ji-B¾ existuje", -"Nezn-Bámá tabulka '%-.64s'", -"Sloupec '%-.64s' v %s nen-Bí zcela jasný", -"Prob-Bíhá ukonèování práce serveru", -"Nezn-Bámý sloupec '%-.64s' v %s", -"Pou-B¾ité '%-.64s' nebylo v group by", -"Nemohu pou-B¾ít group na '%-.64s'", -"P-Bøíkaz obsahuje zároveò funkci sum a sloupce", -"Po-Bèet sloupcù neodpovídá zadané hodnotì", -"Jm-Béno identifikátoru '%-.64s' je pøíli¹ dlouhé", -"Zdvojen-Bé jméno sloupce '%-.64s'", -"Zdvojen-Bé jméno klíèe '%-.64s'", -"Zvojen-Bý klíè '%-.64s' (èíslo klíèe %d)", -"Chybn-Bá specifikace sloupce '%-.64s'", -"%s bl-Bízko '%-.64s' na øádku %d", -"V-Býsledek dotazu je prázdný", -"Nejednozna-Bèná tabulka/alias: '%-.64s'", -"Chybn-Bá defaultní hodnota pro '%-.64s'", -"Definov-Báno více primárních klíèù", -"Zad-Báno pøíli¹ mnoho klíèù, je povoleno nejvíce %d klíèù", -"Zad-Báno pøíli¹ mnoho èást klíèù, je povoleno nejvíce %d èástí", -"Zadan-Bý klíè byl pøíli¹ dlouhý, nejvìt¹í délka klíèe je %d", -"Kl-Bíèový sloupec '%-.64s' v tabulce neexistuje", -"Blob sloupec '%-.64s' nem-Bù¾e být pou¾it jako klíè", -"P-Bøíli¹ velká délka sloupce '%-.64s' (nejvíce %d). 
Pou¾ijte BLOB", -"M-Bù¾ete mít pouze jedno AUTO pole a to musí být definováno jako klíè", -"%s: p-Bøipraven na spojení", -"%s: norm-Bální ukonèení\n", -"%s: p-Bøijat signal %d, konèím\n", -"%s: ukon-Bèení práce hotovo\n", -"%s: n-Básilné uzavøení threadu %ld u¾ivatele '%-.64s'\n", -"Nemohu vytvo-Bøit IP socket", -"Tabulka '%-.64s' nem-Bá index odpovídající CREATE INDEX. Vytvoøte tabulku znovu", -"Argument separ-Bátoru polo¾ek nebyl oèekáván. Pøeètìte si manuál", -"Nen-Bí mo¾né pou¾ít pevný rowlength s BLOBem. Pou¾ijte 'fields terminated by'.", -"Soubor '%-.64s' mus-Bí být v adresáøi databáze nebo èitelný pro v¹echny", -"Soubor '%-.64s' ji-B¾ existuje", -"Z-Báznamù: %ld Vymazáno: %ld Pøeskoèeno: %ld Varování: %ld", -"Z-Báznamù: %ld Zdvojených: %ld", -"Chybn-Bá podèást klíèe -- není to øetìzec nebo je del¹í ne¾ délka èásti klíèe", -"Nen-Bí mo¾né vymazat v¹echny polo¾ky s ALTER TABLE. Pou¾ijte DROP TABLE", -"Nemohu zru-B¹it '%-.64s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíèe", -"Z-Báznamù: %ld Zdvojených: %ld Varování: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Nezn-Bámá identifikace threadu: %lu", -"Nejste vlastn-Bíkem threadu %lu", -"Nejsou pou-B¾ity ¾ádné tabulky", -"P-Bøíli¹ mnoho øetìzcù pro sloupec %s a SET", -"Nemohu vytvo-Bøit jednoznaèné jméno logovacího souboru %s.(1-999)\n", -"Tabulka '%-.64s' byla zam-Bèena s READ a nemù¾e být zmìnìna", -"Tabulka '%-.64s' nebyla zam-Bèena s LOCK TABLES", -"Blob polo-B¾ka '%-.64s' nemù¾e mít defaultní hodnotu", -"Nep-Bøípustné jméno databáze '%-.64s'", -"Nep-Bøípustné jméno tabulky '%-.64s'", -"Zadan-Bý SELECT by procházel pøíli¹ mnoho záznamù a trval velmi dlouho. 
Zkontrolujte tvar WHERE a je-li SELECT v poøádku, pou¾ijte SET SQL_BIG_SELECTS=1", -"Nezn-Bámá chyba", -"Nezn-Bámá procedura %s", -"Chybn-Bý poèet parametrù procedury %s", -"Chybn-Bé parametry procedury %s", -"Nezn-Bámá tabulka '%-.64s' v %s", -"Polo-B¾ka '%-.64s' je zadána dvakrát", -"Nespr-Bávné pou¾ití funkce group", -"Tabulka '%-.64s' pou-B¾ívá roz¹íøení, které v této verzi MySQL není", -"Tabulka mus-Bí mít alespoò jeden sloupec", -"Tabulka '%-.64s' je pln-Bá", -"Nezn-Bámá znaková sada: '%-.64s'", -"P-Bøíli¹ mnoho tabulek, MySQL jich mù¾e mít v joinu jen %d", -"P-Bøíli¹ mnoho polo¾ek", -"-BØádek je pøíli¹ velký. Maximální velikost øádku, nepoèítaje polo¾ky blob, je %d. Musíte zmìnit nìkteré polo¾ky na blob", -"P-Bøeteèení zásobníku threadu: pou¾ito %ld z %ld. Pou¾ijte 'mysqld -O thread_stack=#' k zadání vìt¹ího zásobníku", -"V OUTER JOIN byl nalezen k-Bøí¾ový odkaz. Provìøte ON podmínky", -"Sloupec '%-.32s' je pou-B¾it s UNIQUE nebo INDEX, ale není definován jako NOT NULL", -"Nemohu na-Bèíst funkci '%-.64s'", -"Nemohu inicializovat funkci '%-.64s'; %-.80s", -"Pro sd-Bílenou knihovnu nejsou povoleny cesty", -"Funkce '%-.64s' ji-B¾ existuje", -"Nemohu otev-Bøít sdílenou knihovnu '%-.64s' (errno: %d %s)", -"Nemohu naj-Bít funkci '%-.64s' v knihovnì'", -"Funkce '%-.64s' nen-Bí definována", -"Stroj '%-.64s' je zablokov-Bán kvùli mnoha chybám pøi pøipojování. Odblokujete pou¾itím 'mysqladmin flush-hosts'", -"Stroj '%-.64s' nem-Bá povoleno se k tomuto MySQL serveru pøipojit", -"Pou-B¾íváte MySQL jako anonymní u¾ivatel a anonymní u¾ivatelé nemají povoleno mìnit hesla", -"Na zm-Bìnu hesel ostatním musíte mít právo provést update tabulek v databázi mysql", -"V tabulce user nen-Bí ¾ádný odpovídající øádek", -"Nalezen-Bých øádkù: %ld Zmìnìno: %ld Varování: %ld", -"Nemohu vytvo-Bøit nový thread (errno %d). 
Pokud je je¹tì nìjaká volná pamì», podívejte se do manuálu na èást o chybách specifických pro jednotlivé operaèní systémy", -"Po-Bèet sloupcù neodpovídá poètu hodnot na øádku %ld", -"Nemohu znovuotev-Bøít tabulku: '%-.64s", -"Neplatn-Bé u¾ití hodnoty NULL", -"Regul-Bární výraz vrátil chybu '%-.64s'", -"Pokud nen-Bí ¾ádná GROUP BY klauzule, není dovoleno souèasné pou¾ití GROUP polo¾ek (MIN(),MAX(),COUNT()...) s ne GROUP polo¾kami", -"Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s'", -"%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro tabulku '%-.64s'", -"%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'", -"Neplatn-Bý pøíkaz GRANT/REVOKE. Prosím, pøeètìte si v manuálu, jaká privilegia je mo¾né pou¾ít.", -"Argument p-Bøíkazu GRANT u¾ivatel nebo stroj je pøíli¹ dlouhý", -"Tabulka '%-.64s.%s' neexistuje", -"Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s' pro tabulku '%-.64s'", -"Pou-B¾itý pøíkaz není v této verzi MySQL povolen", -"Va-B¹e syntaxe je nìjaká divná", -"Zpo-B¾dìný insert threadu nebyl schopen získat po¾adovaný zámek pro tabulku %-.64s", -"P-Bøíli¹ mnoho zpo¾dìných threadù", -"Zru-B¹eno spojení %ld do databáze: '%-.64s' u¾ivatel: '%-.64s' (%s)", -"Zji-B¹tìn pøíchozí packet del¹í ne¾ 'max_allowed_packet'", -"Zji-B¹tìna chyba pøi ètení z roury spojení", -"Zji-B¹tìna chyba fcntl()", -"P-Bøíchozí packety v chybném poøadí", -"Nemohu rozkomprimovat komunika-Bèní packet", -"Zji-B¹tìna chyba pøi ètení komunikaèního packetu", -"Zji-B¹tìn timeout pøi ètení komunikaèního packetu", -"Zji-B¹tìna chyba pøi zápisu komunikaèního packetu", -"Zji-B¹tìn timeout pøi zápisu komunikaèního packetu", -"V-Býsledný øetìzec je del¹í ne¾ 'max_allowed_packet'", -"Typ pou-B¾ité tabulky nepodporuje BLOB/TEXT sloupce", -"Typ pou-B¾ité tabulky nepodporuje AUTO_INCREMENT sloupce", -"INSERT DELAYED nen-Bí mo¾no s tabulkou '%-.64s' pou¾ít, proto¾e je zamèená pomocí LOCK 
TABLES", -"Nespr-Bávné jméno sloupce '%-.100s'", -"Handler pou-B¾ité tabulky neumí indexovat sloupce '%-.64s'", -"V-B¹echny tabulky v MERGE tabulce nejsou definovány stejnì", -"Kv-Bùli unique constraintu nemozu zapsat do tabulky '%-.64s'", -"BLOB sloupec '%-.64s' je pou-B¾it ve specifikaci klíèe bez délky", -"V-B¹echny èásti primárního klíèe musejí být NOT NULL; pokud potøebujete NULL, pou¾ijte UNIQUE", -"V-Býsledek obsahuje více ne¾ jeden øádek", -"Tento typ tabulky vy-B¾aduje primární klíè", -"Tato verze MySQL nen-Bí zkompilována s podporou RAID", -"Update tabulky bez WHERE s kl-Bíèem není v módu bezpeèných update dovoleno", -"Kl-Bíè '%-.64s' v tabulce '%-.64s' neexistuje", -"Nemohu otev-Bøít tabulku", -"Handler tabulky nepodporuje %s", -"Proveden-Bí tohoto pøíkazu není v transakci dovoleno", -"Chyba %d p-Bøi COMMIT", -"Chyba %d p-Bøi ROLLBACK", -"Chyba %d p-Bøi FLUSH_LOGS", -"Chyba %d p-Bøi CHECKPOINT", -"Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: `%-.64s' (%-.64s) bylo pøeru¹eno", -"Handler tabulky nepodporuje bin-Bární dump", -"Binlog uzav-Bøen pøi pokusu o FLUSH MASTER", -"P-Bøebudování indexu dumpnuté tabulky '%-.64s' nebylo úspì¹né", -"Chyba masteru: '%-.64s'", -"S-Bí»ová chyba pøi ètení z masteru", -"S-Bí»ová chyba pøi zápisu na master", -"-B®ádný sloupec nemá vytvoøen fulltextový index", -"Nemohu prov-Bést zadaný pøíkaz, proto¾e existují aktivní zamèené tabulky nebo aktivní transakce", -"Nezn-Bámá systémová promìnná '%-.64s'", -"Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a mìla by být opravena", -"Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a poslední (automatická?) 
oprava se nezdaøila", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updatable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/danish/errmsg.txt b/sql/share/danish/errmsg.txt deleted file mode 100644 index b708fe88e8a..00000000000 --- a/sql/share/danish/errmsg.txt +++ /dev/null @@ -1,324 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* Knud Riishøjgård knudriis@post.tele.dk 99 && - Carsten H. Pedersen, carsten.pedersen@bitbybit.dk oct. 1999 / aug. 2001. */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NEJ", -"JA", -"Kan ikke oprette filen '%-.64s' (Fejlkode: %d)", -"Kan ikke oprette tabellen '%-.64s' (Fejlkode: %d)", -"Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)", -"Kan ikke oprette databasen '%-.64s'; databasen eksisterer", -"Kan ikke slette (droppe) '%-.64s'; databasen eksisterer ikke", -"Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejlkode %d)", -"Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejlkode %d)", -"Fejl ved sletning af '%-.64s' (Fejlkode: %d)", -"Kan ikke læse posten i systemfolderen", -"Kan ikke læse status af '%-.64s' (Fejlkode: %d)", -"Kan ikke læse aktive folder (Fejlkode: %d)", -"Kan ikke låse fil (Fejlkode: %d)", -"Kan ikke åbne fil: '%-.64s' (Fejlkode: %d)", -"Kan ikke finde fila: '%-.64s' (Fejlkode: %d)", -"Kan ikke læse folder '%-.64s' (Fejlkode: %d)", -"Kan ikke skifte folder til '%-.64s' (Fejlkode: %d)", -"Posten er ændret siden sidste læsning '%-.64s'", -"Ikke mere diskplads (%s). 
Venter på at få frigjort plads...", -"Kan ikke skrive, flere ens nøgler i tabellen '%-.64s'", -"Fejl ved lukning af '%-.64s' (Fejlkode: %d)", -"Fejl ved læsning af '%-.64s' (Fejlkode: %d)", -"Fejl ved omdøbning af '%-.64s' til '%-.64s' (Fejlkode: %d)", -"Fejl ved skriving av filen '%-.64s' (Fejlkode: %d)", -"'%-.64s' er låst mod opdateringer", -"Sortering afbrudt", -"View '%-.64s' eksisterer ikke for '%-.64s'", -"Modtog fejl %d fra tabel håndteringen", -"Denne mulighed eksisterer ikke for tabeltypen '%-.64s'", -"Kan ikke finde posten i '%-.64s'", -"Forkert indhold i: '%-.64s'", -"Fejl i indeksfilen til tabellen '%-.64s'; prøv at reparere den", -"Gammel indeksfil for tabellen '%-.64s'; reparer den", -"'%-.64s' er skrivebeskyttet", -"Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)", -"Ikke mere sorteringshukommelse. Øg sorteringshukommelse (sort buffer size) for serveren", -"Uventet afslutning på fil (eof) ved læsning af filen '%-.64s' (Fejlkode: %d)", -"For mange forbindelser (connections)", -"Udgået for tråde/hukommelse", -"Kan ikke få værtsnavn for din adresse", -"Forkert håndtryk (handshake)", -"Adgang nægtet bruger: '%-.32s'@'%-.64s' til databasen '%-.64s'", -"Adgang nægtet bruger: '%-.32s'@'%-.64s' (Bruger adgangskode: %s)", -"Ingen database valgt", -"Ukendt kommando", -"Kolonne '%-.64s' kan ikke være NULL", -"Ukendt database '%-.64s'", -"Tabellen '%-.64s' findes allerede", -"Ukendt tabel '%-.64s'", -"Felt: '%-.64s' i tabel %s er ikke entydigt", -"Database nedlukning er i gang", -"Ukendt kolonne '%-.64s' i tabel %s", -"Brugte '%-.64s' som ikke var i group by", -"Kan ikke gruppere på '%-.64s'", -"Udtrykket har summer (sum) funktioner og kolonner i samme udtryk", -"Kolonne tæller stemmer ikke med antallet af værdier", -"Navnet '%-.64s' er for langt", -"Feltnavnet '%-.64s' findes allerede", -"Indeksnavnet '%-.64s' findes allerede", -"Ens værdier '%-.64s' for indeks %d", -"Forkert kolonnespecifikaton for felt '%-.64s'", -"%s nær '%-.64s' på 
linje %d", -"Forespørgsel var tom", -"Tabellen/aliaset: '%-.64s' er ikke unikt", -"Ugyldig standardværdi for '%-.64s'", -"Flere primærnøgler specificeret", -"For mange nøgler specificeret. Kun %d nøgler må bruges", -"For mange nøgledele specificeret. Kun %d dele må bruges", -"Specificeret nøgle var for lang. Maksimal nøglelængde er %d", -"Nøglefeltet '%-.64s' eksisterer ikke i tabellen", -"BLOB feltet '%-.64s' kan ikke bruges ved specifikation af indeks", -"For stor feltlængde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet", -"Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret", -"%s: klar til tilslutninger", -"%s: Normal nedlukning\n", -"%s: Fangede signal %d. Afslutter!!\n", -"%s: Server lukket\n", -"%s: Forceret nedlukning af tråd: %ld bruger: '%-.64s'\n", -"Kan ikke oprette IP socket", -"Tabellen '%-.64s' har ikke den nøgle, som blev brugt i CREATE INDEX. Genopret tabellen", -"Felt adskiller er ikke som forventet, se dokumentationen", -"Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'.", -"Filen '%-.64s' skal være i database-folderen og kunne læses af alle", -"Filen '%-.64s' eksisterer allerede", -"Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld", -"Poster: %ld Ens: %ld", -"Forkert indeksdel. Den anvendte nøgledel er ikke en streng eller længden er større end nøglelængden", -"Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet.", -"Kan ikke udføre DROP '%-.64s'. 
Undersøg om feltet/nøglen eksisterer.", -"Poster: %ld Ens: %ld Advarsler: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Ukendt tråd id: %lu", -"Du er ikke ejer af tråden %lu", -"Ingen tabeller i brug", -"For mange tekststrenge til specifikationen af SET i kolonne %-.64s", -"Kan ikke lave unikt log-filnavn %s.(1-999)\n", -"Tabellen '%-.64s' var låst med READ lås og kan ikke opdateres", -"Tabellen '%-.64s' var ikke låst med LOCK TABLES", -"BLOB feltet '%-.64s' kan ikke have en standard værdi", -"Ugyldigt database navn '%-.64s'", -"Ugyldigt tabel navn '%-.64s'", -"SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt", -"Ukendt fejl", -"Ukendt procedure %s", -"Forkert antal parametre til proceduren %s", -"Forkert(e) parametre til proceduren %s", -"Ukendt tabel '%-.64s' i %s", -"Feltet '%-.64s' er anvendt to gange", -"Forkert brug af grupperings-funktion", -"Tabellen '%-.64s' bruger et filtypenavn som ikke findes i denne MySQL version", -"En tabel skal have mindst een kolonne", -"Tabellen '%-.64s' er fuld", -"Ukendt tegnsæt: '%-.64s'", -"For mange tabeller. MySQL kan kun bruge %d tabeller i et join", -"For mange felter", -"For store poster. Max post størrelse, uden BLOB's, er %d. Du må lave nogle felter til BLOB's", -"Thread stack brugt: Brugt: %ld af en %ld stak. 
Brug 'mysqld -O thread_stack=#' for at allokere en større stak om nødvendigt", -"Krydsreferencer fundet i OUTER JOIN; check dine ON conditions", -"Kolonne '%-.32s' bruges som UNIQUE eller INDEX men er ikke defineret som NOT NULL", -"Kan ikke læse funktionen '%-.64s'", -"Kan ikke starte funktionen '%-.64s'; %-.80s", -"Angivelse af sti ikke tilladt for delt bibliotek", -"Funktionen '%-.64s' findes allerede", -"Kan ikke åbne delt bibliotek '%-.64s' (errno: %d %s)", -"Kan ikke finde funktionen '%-.64s' i bibliotek'", -"Funktionen '%-.64s' er ikke defineret", -"Værten er blokeret på grund af mange fejlforespørgsler. Lås op med 'mysqladmin flush-hosts'", -"Værten '%-.64s' kan ikke tilkoble denne MySQL-server", -"Du bruger MySQL som anonym bruger. Anonyme brugere må ikke ændre adgangskoder", -"Du skal have tilladelse til at opdatere tabeller i MySQL databasen for at ændre andres adgangskoder", -"Kan ikke finde nogen tilsvarende poster i bruger tabellen", -"Poster fundet: %ld Ændret: %ld Advarsler: %ld", -"Kan ikke danne en ny tråd (fejl nr. %d). Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl", -"Kolonne antallet stemmer ikke overens med antallet af værdier i post %ld", -"Kan ikke genåbne tabel '%-.64s", -"Forkert brug af nulværdi (NULL)", -"Fik fejl '%-.64s' fra regexp", -"Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prædikat", -"Denne tilladelse findes ikke for brugeren '%-.32s' på vært '%-.64s'", -"%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for tabellen '%-.64s'", -"%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'", -"Forkert GRANT/REVOKE kommando. 
Se i brugervejledningen hvilke privilegier der kan specificeres.", -"Værts- eller brugernavn for langt til GRANT", -"Tabellen '%-.64s.%-.64s' eksisterer ikke", -"Denne tilladelse eksisterer ikke for brugeren '%-.32s' på vært '%-.64s' for tabellen '%-.64s'", -"Den brugte kommando er ikke tilladt med denne udgave af MySQL", -"Der er en fejl i SQL syntaksen", -"Forsinket indsættelse tråden (delayed insert thread) kunne ikke opnå lås på tabellen %-.64s", -"For mange slettede tråde (threads) i brug", -"Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.64s' (%-.64s)", -"Modtog en datapakke som var større end 'max_allowed_packet'", -"Fik læsefejl fra forbindelse (connection pipe)", -"Fik fejlmeddelelse fra fcntl()", -"Modtog ikke datapakker i korrekt rækkefølge", -"Kunne ikke dekomprimere kommunikations-pakke (communication packet)", -"Fik fejlmeddelelse ved læsning af kommunikations-pakker (communication packets)", -"Timeout-fejl ved læsning af kommunukations-pakker (communication packets)", -"Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)", -"Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)", -"Strengen med resultater er større end 'max_allowed_packet'", -"Denne tabeltype understøtter ikke brug af BLOB og TEXT kolonner", -"Denne tabeltype understøtter ikke brug af AUTO_INCREMENT kolonner", -"INSERT DELAYED kan ikke bruges med tabellen '%-.64s', fordi tabellen er låst med LOCK TABLES", -"Forkert kolonnenavn '%-.100s'", -"Den brugte tabeltype kan ikke indeksere kolonnen '%-.64s'", -"Tabellerne i MERGE er ikke defineret ens", -"Kan ikke skrive til tabellen '%-.64s' fordi det vil bryde CONSTRAINT regler", -"BLOB kolonnen '%-.64s' brugt i nøglespecifikation uden nøglelængde", -"Alle dele af en PRIMARY KEY skal være NOT NULL; Hvis du skal bruge NULL i nøglen, brug UNIQUE istedet", -"Resultatet bestod af mere end een række", -"Denne tabeltype kræver en primærnøgle", -"Denne udgave af MySQL er ikke oversat 
med understøttelse af RAID", -"Du bruger sikker opdaterings modus ('safe update mode') og du forsøgte at opdatere en tabel uden en WHERE klausul, der gør brug af et KEY felt", -"Nøglen '%-.64s' eksisterer ikke i tabellen '%-.64s'", -"Kan ikke åbne tabellen", -"Denne tabeltype understøtter ikke %s", -"Du må ikke bruge denne kommando i en transaktion", -"Modtog fejl %d mens kommandoen COMMIT blev udført", -"Modtog fejl %d mens kommandoen ROLLBACK blev udført", -"Modtog fejl %d mens kommandoen FLUSH_LOGS blev udført", -"Modtog fejl %d mens kommandoen CHECKPOINT blev udført", -"Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: `%-.64s' (%-.64s)", -"Denne tabeltype unserstøtter ikke binært tabeldump", -"Binlog blev lukket mens kommandoen FLUSH MASTER blev udført", -"Kunne ikke genopbygge indekset for den dumpede tabel '%-.64s'", -"Fejl fra master: '%-.64s'", -"Netværksfejl ved læsning fra master", -"Netværksfejl ved skrivning til master", -"Kan ikke finde en FULLTEXT nøgle som svarer til kolonne listen", -"Kan ikke udføre den givne kommando fordi der findes aktive, låste tabeller eller fordi der udføres en transaktion", -"Ukendt systemvariabel '%-.64s'", -"Tabellen '%-.64s' er markeret med fejl og bør repareres", -"Tabellen '%-.64s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede", -"Advarsel: Visse data i tabeller der ikke understøtter transaktioner kunne ikke tilbagestilles", -"Fler-udtryks transaktion krævede mere plads en 'max_binlog_cache_size' bytes. Forhøj værdien af denne variabel og prøv igen", -"Denne handling kunne ikke udføres med kørende slave, brug først kommandoen STOP SLAVE", -"Denne handling kræver en kørende slave. Konfigurer en slave og brug kommandoen START SLAVE", -"Denne server er ikke konfigureret som slave. 
Ret in config-filen eller brug kommandoen CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Kunne ikke danne en slave-tråd; check systemressourcerne", -"Brugeren %-.64s har allerede mere end 'max_user_connections' aktive forbindelser", -"Du må kun bruge konstantudtryk med SET", -"Lock wait timeout overskredet", -"Det totale antal låse overstiger størrelsen på låse-tabellen", -"Update lås kan ikke opnås under en READ UNCOMMITTED transaktion", -"DROP DATABASE er ikke tilladt mens en tråd holder på globalt read lock", -"CREATE DATABASE er ikke tilladt mens en tråd holder på globalt read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable 
'%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of 
the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Modtog fejl %d '%-.100s' fra %s", -"Modtog temporary fejl %d '%-.100s' fra %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/dutch/errmsg.txt b/sql/share/dutch/errmsg.txt deleted file mode 100644 index 66a80e5ddda..00000000000 --- a/sql/share/dutch/errmsg.txt +++ /dev/null @@ -1,333 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Dutch error messages (share/dutch/errmsg.txt) - 2001-08-02 - Arjen Lentz (agl@bitbike.com) - Completed earlier partial translation; worked on consistency and spelling. - 2002-01-29 - Arjen Lentz (arjen@mysql.com) - 2002-04-11 - Arjen Lentz (arjen@mysql.com) - 2002-06-13 - Arjen Lentz (arjen@mysql.com) - 2002-08-08 - Arjen Lentz (arjen@mysql.com) - 2002-08-22 - Arjen Lentz (arjen@mysql.com) - Translated new error messages. -*/ - -character-set=latin1 - -"hashchk", -"isamchk", -"NEE", -"JA", -"Kan file '%-.64s' niet aanmaken (Errcode: %d)", -"Kan tabel '%-.64s' niet aanmaken (Errcode: %d)", -"Kan database '%-.64s' niet aanmaken (Errcode: %d)", -"Kan database '%-.64s' niet aanmaken; database bestaat reeds", -"Kan database '%-.64s' niet verwijderen; database bestaat niet", -"Fout bij verwijderen database (kan '%-.64s' niet verwijderen, Errcode: %d)", -"Fout bij verwijderen database (kan rmdir '%-.64s' niet uitvoeren, Errcode: %d)", -"Fout bij het verwijderen van '%-.64s' (Errcode: %d)", -"Kan record niet lezen in de systeem tabel", -"Kan de status niet krijgen van '%-.64s' (Errcode: %d)", -"Kan de werkdirectory niet krijgen (Errcode: %d)", -"Kan de file niet blokeren (Errcode: %d)", -"Kan de file '%-.64s' niet openen (Errcode: %d)", -"Kan de file: '%-.64s' niet vinden (Errcode: %d)", -"Kan de directory niet lezen van '%-.64s' (Errcode: %d)", -"Kan de directory niet veranderen naar '%-.64s' (Errcode: %d)", -"Record is veranderd sinds de laatste lees activiteit in de tabel '%-.64s'", -"Schijf vol (%s). 
Aan het wachten totdat er ruimte vrij wordt gemaakt...", -"Kan niet schrijven, dubbele zoeksleutel in tabel '%-.64s'", -"Fout bij het sluiten van '%-.64s' (Errcode: %d)", -"Fout bij het lezen van file '%-.64s' (Errcode: %d)", -"Fout bij het hernoemen van '%-.64s' naar '%-.64s' (Errcode: %d)", -"Fout bij het wegschrijven van file '%-.64s' (Errcode: %d)", -"'%-.64s' is geblokeerd tegen veranderingen", -"Sorteren afgebroken", -"View '%-.64s' bestaat niet voor '%-.64s'", -"Fout %d van tabel handler", -"Tabel handler voor '%-.64s' heeft deze optie niet", -"Kan record niet vinden in '%-.64s'", -"Verkeerde info in file: '%-.64s'", -"Verkeerde zoeksleutel file voor tabel: '%-.64s'; probeer het te repareren", -"Oude zoeksleutel file voor tabel '%-.64s'; repareer het!", -"'%-.64s' is alleen leesbaar", -"Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)", -"Geen geheugen om te sorteren. Verhoog de server sort buffer size", -"Onverwachte eof gevonden tijdens het lezen van file '%-.64s' (Errcode: %d)", -"Te veel verbindingen", -"Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. 
Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen", -"Kan de hostname niet krijgen van uw adres", -"Verkeerde handshake", -"Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' naar database '%-.64s'", -"Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' (Wachtwoord gebruikt: %s)", -"Geen database geselecteerd", -"Onbekend commando", -"Kolom '%-.64s' kan niet null zijn", -"Onbekende database '%-.64s'", -"Tabel '%-.64s' bestaat al", -"Onbekende tabel '%-.64s'", -"Kolom: '%-.64s' in %s is niet eenduidig", -"Bezig met het stoppen van de server", -"Onbekende kolom '%-.64s' in %s", -"Opdracht gebruikt '%-.64s' dat niet in de GROUP BY voorkomt", -"Kan '%-.64s' niet groeperen", -"Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht", -"Het aantal kolommen komt niet overeen met het aantal opgegeven waardes", -"Naam voor herkenning '%-.64s' is te lang", -"Dubbele kolom naam '%-.64s'", -"Dubbele zoeksleutel naam '%-.64s'", -"Dubbele ingang '%-.64s' voor zoeksleutel %d", -"Verkeerde kolom specificatie voor kolom '%-.64s'", -"%s bij '%-.64s' in regel %d", -"Query was leeg", -"Niet unieke waarde tabel/alias: '%-.64s'", -"Foutieve standaard waarde voor '%-.64s'", -"Meerdere primaire zoeksleutels gedefinieerd", -"Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan", -"Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan", -"Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d", -"Zoeksleutel kolom '%-.64s' bestaat niet in tabel", -"BLOB kolom '%-.64s' kan niet gebruikt worden bij zoeksleutel specificatie", -"Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB", -"Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd.", -"%s: klaar voor verbindingen", -"%s: Normaal afgesloten \n", -"%s: Signaal %d. 
Systeem breekt af!\n", -"%s: Afsluiten afgerond\n", -"%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.64s'\n", -"Kan IP-socket niet openen", -"Tabel '%-.64s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw", -"De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding", -"Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'.", -"Het bestand '%-.64s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn.", -"Het bestand '%-.64s' bestaat reeds", -"Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld", -"Records: %ld Dubbel: %ld", -"Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel", -"Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!", -"Kan '%-.64s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat.", -"Records: %ld Dubbel: %ld Waarschuwing: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Onbekend thread id: %lu", -"U bent geen bezitter van thread %lu", -"Geen tabellen gebruikt.", -"Teveel strings voor kolom %s en SET", -"Het is niet mogelijk een unieke naam te maken voor de logfile %s.(1-999)\n", -"Tabel '%-.64s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen.", -"Tabel '%-.64s' was niet gelocked met LOCK TABLES", -"Blob veld '%-.64s' can geen standaardwaarde bevatten", -"Databasenaam '%-.64s' is niet getoegestaan", -"Niet toegestane tabelnaam '%-.64s'", -"Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. 
Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is.", -"Onbekende Fout", -"Onbekende procedure %s", -"Foutief aantal parameters doorgegeven aan procedure %s", -"Foutieve parameters voor procedure %s", -"Onbekende tabel '%-.64s' in %s", -"Veld '%-.64s' is dubbel gespecificeerd", -"Ongeldig gebruik van GROUP-functie", -"Tabel '%-.64s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt.", -"Een tabel moet minstens 1 kolom bevatten", -"De tabel '%-.64s' is vol", -"Onbekende character set: '%-.64s'", -"Teveel tabellen. MySQL kan slechts %d tabellen in een join bevatten", -"Te veel velden", -"Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %d. U dient sommige velden in blobs te veranderen.", -"Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld -O thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk).", -"Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions", -"Kolom '%-.64s' wordt gebruikt met UNIQUE of INDEX maar is niet gedefinieerd als NOT NULL", -"Kan functie '%-.64s' niet laden", -"Kan functie '%-.64s' niet initialiseren; %-.80s", -"Geen pad toegestaan voor shared library", -"Functie '%-.64s' bestaat reeds", -"Kan shared library '%-.64s' niet openen (Errcode: %d %s)", -"Kan functie '%-.64s' niet in library vinden", -"Functie '%-.64s' is niet gedefinieerd", -"Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'", -"Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MySQL server", -"U gebruikt MySQL als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen", -"U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen", -"Kan geen enkele passende rij vinden in de gebruikers tabel", -"Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld", -"Kan geen nieuwe thread aanmaken (Errcode: %d). 
Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout", -"Kolom aantal komt niet overeen met waarde aantal in rij %ld", -"Kan tabel niet opnieuw openen: '%-.64s", -"Foutief gebruik van de NULL waarde", -"Fout '%-.64s' ontvangen van regexp", -"Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is", -"Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s'", -"%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor tabel '%-.64s'", -"%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor kolom '%-.64s' in tabel '%-.64s'", -"Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden.", -"De host of gebruiker parameter voor GRANT is te lang", -"Tabel '%-.64s.%s' bestaat niet", -"Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s' op tabel '%-.64s'", -"Het used commando is niet toegestaan in deze MySQL versie", -"Er is iets fout in de gebruikte syntax", -"'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.64s", -"Te veel 'delayed' threads in gebruik", -"Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.64s' (%s)", -"Groter pakket ontvangen dan 'max_allowed_packet'", -"Kreeg leesfout van de verbindings pipe", -"Kreeg fout van fcntl()", -"Pakketten in verkeerde volgorde ontvangen", -"Communicatiepakket kon niet worden gedecomprimeerd", -"Fout bij het lezen van communicatiepakketten", -"Timeout bij het lezen van communicatiepakketten", -"Fout bij het schrijven van communicatiepakketten", -"Timeout bij het schrijven van communicatiepakketten", -"Resultaat string is langer dan 'max_allowed_packet'", -"Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen", -"Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen", -"INSERT DELAYED kan niet worden gebruikt bij table '%-.64s', vanwege een 
'lock met LOCK TABLES", -"Incorrecte kolom naam '%-.100s'", -"De gebruikte tabel 'handler' kan kolom '%-.64s' niet indexeren", -"Niet alle tabellen in de MERGE tabel hebben identieke gedefinities", -"Kan niet opslaan naar table '%-.64s' vanwege 'unique' beperking", -"BLOB kolom '%-.64s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte", -"Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken", -"Resultaat bevatte meer dan een rij", -"Dit tabel type heeft een primaire zoeksleutel nodig", -"Deze versie van MySQL is niet gecompileerd met RAID ondersteuning", -"U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom", -"Zoeksleutel '%-.64s' bestaat niet in tabel '%-.64s'", -"Kan tabel niet openen", -"De 'handler' voor de tabel ondersteund geen %s", -"Het is u niet toegestaan dit commando uit te voeren binnen een transactie", -"Kreeg fout %d tijdens COMMIT", -"Kreeg fout %d tijdens ROLLBACK", -"Kreeg fout %d tijdens FLUSH_LOGS", -"Kreeg fout %d tijdens CHECKPOINT", -"Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: `%-.64s' (%-.64s)", -"De 'handler' voor de tabel ondersteund geen binaire tabel dump", -"Binlog gesloten tijdens FLUSH MASTER poging", -"Gefaald tijdens heropbouw index van gedumpte tabel '%-.64s'", -"Fout van master: '%-.64s'", -"Net fout tijdens lezen van master", -"Net fout tijdens schrijven naar master", -"Kan geen FULLTEXT index vinden passend bij de kolom lijst", -"Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie", -"Onbekende systeem variabele '%-.64s'", -"Tabel '%-.64s' staat als gecrashed gemarkeerd en dient te worden gerepareerd", -"Tabel '%-.64s' staat als gecrashed gemarkeerd en de laatste (automatische?) 
reparatie poging mislukte", -"Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen", -"Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. Verhoog deze mysqld variabele en probeer opnieuw", -"Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE", -"Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE", -"De server is niet geconfigureerd als slave, fix in configuratie bestand of met CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Kon slave thread niet aanmaken, controleer systeem resources", -"Gebruiker %-.64s heeft reeds meer dan 'max_user_connections' actieve verbindingen", -"U mag alleen constante expressies gebruiken bij SET", -"Lock wacht tijd overschreden", -"Het totale aantal locks overschrijdt de lock tabel grootte", -"Update locks kunnen niet worden verkregen tijdens een READ UNCOMMITTED transactie", -"DROP DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit", -"CREATE DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit", -"Foutieve parameters voor %s", -"'%-.32s'@'%-.64s' mag geen nieuwe gebruikers creeren", -"Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren", -"Deadlock gevonden tijdens lock-aanvraag poging; Probeer herstart van de transactie", -"Het gebruikte tabel type ondersteund geen FULLTEXT indexen", -"Kan foreign key beperking niet toevoegen", -"Kan onderliggende rij niet toevoegen: foreign key beperking gefaald", -"Kan bovenliggende rij nite verwijderen: foreign key beperking gefaald", -"Fout bij opbouwen verbinding naar master: %-.128s", -"Fout bij uitvoeren query op master: %-.128s", -"Fout tijdens uitvoeren van commando %s: %-.128s", -"Foutief gebruik van %s en %s", -"De gebruikte SELECT commando's hebben een verschillend aantal kolommen", -"Kan de query niet uitvoeren vanwege 
een conflicterende read lock", -"Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld.", -"Optie '%s' tweemaal gebruikt in opdracht", -"Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)", -"Toegang geweigerd. U moet het %-.128s privilege hebben voor deze operatie", -"Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL", -"Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL", -"Variabele '%-.64s' heeft geen standaard waarde", -"Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.64s'", -"Foutief argumenttype voor variabele '%-.64s'", -"Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen", -"Foutieve toepassing/plaatsing van '%s'", -"Deze versie van MySQL ondersteunt nog geen '%s'", -"Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: 
Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt deleted file mode 100644 index 62c8f4f9991..00000000000 --- a/sql/share/english/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NO", -"YES", -"Can't create file '%-.64s' (errno: %d)", -"Can't create table '%-.64s' (errno: %d)", -"Can't create database '%-.64s' (errno: %d)", -"Can't create database '%-.64s'; database exists", -"Can't drop database '%-.64s'; database doesn't exist", -"Error dropping database (can't delete '%-.64s', errno: %d)", -"Error dropping database (can't rmdir '%-.64s', errno: %d)", -"Error on delete of '%-.64s' (errno: %d)", -"Can't read record in system table", -"Can't get status of '%-.64s' (errno: %d)", -"Can't get working directory (errno: %d)", -"Can't lock file (errno: %d)", -"Can't open file: '%-.64s' (errno: %d)", -"Can't find file: '%-.64s' (errno: %d)", -"Can't read dir of '%-.64s' (errno: %d)", -"Can't change dir to '%-.64s' (errno: %d)", -"Record has changed since last read in table '%-.64s'", -"Disk full (%s); waiting for someone to free some space...", -"Can't write; duplicate key in table '%-.64s'", -"Error on close of '%-.64s' (errno: %d)", -"Error reading file '%-.64s' (errno: %d)", -"Error on rename of '%-.64s' to '%-.64s' (errno: %d)", -"Error writing file '%-.64s' (errno: %d)", -"'%-.64s' is locked against change", -"Sort aborted", -"View '%-.64s' doesn't exist for '%-.64s'", -"Got error %d from storage engine", -"Table storage engine for '%-.64s' doesn't have this option", -"Can't find record in '%-.64s'", -"Incorrect information in file: '%-.64s'", -"Incorrect key file for table '%-.64s'; try to repair it", -"Old key file for table '%-.64s'; repair it!", -"Table '%-.64s' is read only", -"Out of memory; restart server and try again (needed %d bytes)", -"Out of sort memory; increase server sort buffer size", -"Unexpected EOF found when reading file '%-.64s' (errno: %d)", -"Too many 
connections", -"Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space", -"Can't get hostname for your address", -"Bad handshake", -"Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'", -"Access denied for user '%-.32s'@'%-.64s' (using password: %s)", -"No database selected", -"Unknown command", -"Column '%-.64s' cannot be null", -"Unknown database '%-.64s'", -"Table '%-.64s' already exists", -"Unknown table '%-.100s'", -"Column '%-.64s' in %-.64s is ambiguous", -"Server shutdown in progress", -"Unknown column '%-.64s' in '%-.64s'", -"'%-.64s' isn't in GROUP BY", -"Can't group on '%-.64s'", -"Statement has sum functions and columns in same statement", -"Column count doesn't match value count", -"Identifier name '%-.100s' is too long", -"Duplicate column name '%-.64s'", -"Duplicate key name '%-.64s'", -"Duplicate entry '%-.64s' for key %d", -"Incorrect column specifier for column '%-.64s'", -"%s near '%-.80s' at line %d", -"Query was empty", -"Not unique table/alias: '%-.64s'", -"Invalid default value for '%-.64s'", -"Multiple primary key defined", -"Too many keys specified; max %d keys allowed", -"Too many key parts specified; max %d parts allowed", -"Specified key was too long; max key length is %d bytes", -"Key column '%-.64s' doesn't exist in table", -"BLOB column '%-.64s' can't be used in key specification with the used table type", -"Column length too big for column '%-.64s' (max = %d); use BLOB or TEXT instead", -"Incorrect table definition; there can be only one auto column and it must be defined as a key", -"%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d", -"%s: Normal shutdown\n", -"%s: Got signal %d. 
Aborting!\n", -"%s: Shutdown complete\n", -"%s: Forcing close of thread %ld user: '%-.32s'\n", -"Can't create IP socket", -"Table '%-.64s' has no index like the one used in CREATE INDEX; recreate the table", -"Field separator argument is not what is expected; check the manual", -"You can't use fixed rowlength with BLOBs; please use 'fields terminated by'", -"The file '%-.64s' must be in the database directory or be readable by all", -"File '%-.80s' already exists", -"Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld", -"Records: %ld Duplicates: %ld", -"Incorrect sub part key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique sub keys", -"You can't delete all columns with ALTER TABLE; use DROP TABLE instead", -"Can't DROP '%-.64s'; check that column/key exists", -"Records: %ld Duplicates: %ld Warnings: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Unknown thread id: %lu", -"You are not owner of thread %lu", -"No tables used", -"Too many strings for column %-.64s and SET", -"Can't generate a unique log-filename %-.64s.(1-999)\n", -"Table '%-.64s' was locked with a READ lock and can't be updated", -"Table '%-.64s' was not locked with LOCK TABLES", -"BLOB/TEXT column '%-.64s' can't have a default value", -"Incorrect database name '%-.100s'", -"Incorrect table name '%-.100s'", -"The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay", -"Unknown error", -"Unknown procedure '%-.64s'", -"Incorrect parameter count to procedure '%-.64s'", -"Incorrect parameters to procedure '%-.64s'", -"Unknown table '%-.64s' in %-.32s", -"Column '%-.64s' specified twice", -"Invalid use of group function", -"Table '%-.64s' uses an extension that doesn't exist in this MySQL version", -"A table must have at least 1 column", -"The table '%-.64s' is full", -"Unknown character set: '%-.64s'", 
-"Too many tables; MySQL can only use %d tables in a join", -"Too many columns", -"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", -"Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL", -"Can't load function '%-.64s'", -"Can't initialize function '%-.64s'; %-.80s", -"No paths allowed for shared library", -"Function '%-.64s' already exists", -"Can't open shared library '%-.64s' (errno: %d %-.64s)", -"Can't find function '%-.64s' in library", -"Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", -"Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", -"You must have privileges to update tables in the mysql database to be able to change passwords for others", -"Can't find any matching row in the user table", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s'", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%-.64s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s' because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used storage engine can't index column '%-.64s'", -"Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB/TEXT column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need 
NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The storage engine for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The storage engine for the table does not support binary table dump", -"Binlog closed, cannot RESET MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add or update a child row: a foreign key constraint fails", -"Cannot delete or update a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has 
exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it 
contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu; new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated; use '%s' instead", -"The target table %-.100s of the %s is not updatable", -"The '%s' feature is disabled; you need MySQL built with '%s' to have it working", -"The MySQL server 
is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated incorrect %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt new file mode 100644 index 00000000000..1230287656e --- /dev/null +++ b/sql/share/errmsg.txt @@ -0,0 +1,5636 @@ +languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, japanese-sjis=jps sjis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u; + +default-language eng + +start-error-number 1000 + +ER_HASHCHK + eng "hashchk" +ER_NISAMCHK + eng "isamchk" +ER_NO + cze "NE" + dan "NEJ" + nla "NEE" + eng "NO" + est "EI" + fre "NON" + ger "Nein" + greek "Ï×É" + hun "NEM" + kor "¾Æ´Ï¿À" + nor "NEI" + norwegian-ny "NEI" + pol "NIE" + por "NÃO" + rum "NU" + rus "îåô" + serbian "NE" + slo "NIE" + ukr "î¶" +ER_YES + cze "ANO" + dan "JA" + nla "JA" + eng "YES" + est "JAH" + fre "OUI" + ger "Ja" + greek "ÍÁÉ" + hun "IGEN" + ita "SI" + kor "¿¹" + nor "JA" + norwegian-ny "JA" + pol "TAK" + por "SIM" + rum "DA" + 
rus "äá" + serbian "DA" + slo "Áno" + spa "SI" + ukr "ôáë" +ER_CANT_CREATE_FILE + cze "Nemohu vytvo-Bøit soubor '%-.200s' (chybový kód: %d)" + dan "Kan ikke oprette filen '%-.200s' (Fejlkode: %d)" + nla "Kan file '%-.200s' niet aanmaken (Errcode: %d)" + eng "Can't create file '%-.200s' (errno: %d)" + est "Ei suuda luua faili '%-.200s' (veakood: %d)" + fre "Ne peut créer le fichier '%-.200s' (Errcode: %d)" + ger "Kann Datei '%-.200s' nicht erzeugen (Fehler: %d)" + greek "Áäýíáôç ç äçìéïõñãßá ôïõ áñ÷åßïõ '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "A '%-.200s' file nem hozhato letre (hibakod: %d)" + ita "Impossibile creare il file '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Õ¥¡¥¤¥ë¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)" + kor "ÈÀÏ '%-.200s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke opprette fila '%-.200s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette fila '%-.200s' (Feilkode: %d)" + pol "Nie mo¿na stworzyæ pliku '%-.200s' (Kod b³êdu: %d)" + por "Não pode criar o arquivo '%-.200s' (erro no. %d)" + rum "Nu pot sa creez fisierul '%-.200s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÆÁÊÌ '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da kreiram file '%-.200s' (errno: %d)" + slo "Nemô¾em vytvori» súbor '%-.200s' (chybový kód: %d)" + spa "No puedo crear archivo '%-.200s' (Error: %d)" + swe "Kan inte skapa filen '%-.200s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÆÁÊÌ '%-.200s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_CREATE_TABLE + cze "Nemohu vytvo-Bøit tabulku '%-.200s' (chybový kód: %d)" + dan "Kan ikke oprette tabellen '%-.200s' (Fejlkode: %d)" + nla "Kan tabel '%-.200s' niet aanmaken (Errcode: %d)" + eng "Can't create table '%-.200s' (errno: %d)" + jps "'%-.200s' ƒe[ƒuƒ‹‚ªì‚ê‚Ü‚¹‚ñ.(errno: %d)", + est "Ei suuda luua tabelit '%-.200s' (veakood: %d)" + fre "Ne peut créer la table '%-.200s' (Errcode: %d)" + ger "Kann Tabelle '%-.200s' nicht erzeugen (Fehler: %d)" + greek "Áäýíáôç ç äçìéïõñãßá ôïõ ðßíáêá '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "A '%-.200s' tabla nem hozhato letre (hibakod: %d)" + ita 
"Impossibile creare la tabella '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Æ¡¼¥Ö¥ë¤¬ºî¤ì¤Þ¤»¤ó.(errno: %d)" + kor "Å×À̺í '%-.200s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke opprette tabellen '%-.200s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette tabellen '%-.200s' (Feilkode: %d)" + pol "Nie mo¿na stworzyæ tabeli '%-.200s' (Kod b³êdu: %d)" + por "Não pode criar a tabela '%-.200s' (erro no. %d)" + rum "Nu pot sa creez tabla '%-.200s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÔÁÂÌÉÃÕ '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da kreiram tabelu '%-.200s' (errno: %d)" + slo "Nemô¾em vytvori» tabuµku '%-.200s' (chybový kód: %d)" + spa "No puedo crear tabla '%-.200s' (Error: %d)" + swe "Kan inte skapa tabellen '%-.200s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÔÁÂÌÉÃÀ '%-.200s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_CREATE_DB + cze "Nemohu vytvo-Bøit databázi '%-.64s' (chybový kód: %d)" + dan "Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)" + nla "Kan database '%-.64s' niet aanmaken (Errcode: %d)" + eng "Can't create database '%-.64s' (errno: %d)" + jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ (errno: %d)", + est "Ei suuda luua andmebaasi '%-.64s' (veakood: %d)" + fre "Ne peut créer la base '%-.64s' (Erreur %d)" + ger "Kann Datenbank '%-.64s' nicht erzeugen (Fehler: %d)" + greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Az '%-.64s' adatbazis nem hozhato letre (hibakod: %d)" + ita "Impossibile creare il database '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)" + kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke opprette databasen '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette databasen '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na stworzyæ bazy danych '%-.64s' (Kod b³êdu: %d)" + por "Não pode criar o banco de dados '%-.64s' (erro no. 
%d)" + rum "Nu pot sa creez baza de date '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da kreiram bazu '%-.64s' (errno: %d)" + slo "Nemô¾em vytvori» databázu '%-.64s' (chybový kód: %d)" + spa "No puedo crear base de datos '%-.64s' (Error: %d)" + swe "Kan inte skapa databasen '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_DB_CREATE_EXISTS + cze "Nemohu vytvo-Bøit databázi '%-.64s'; databáze ji¾ existuje" + dan "Kan ikke oprette databasen '%-.64s'; databasen eksisterer" + nla "Kan database '%-.64s' niet aanmaken; database bestaat reeds" + eng "Can't create database '%-.64s'; database exists" + jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ.Šù‚É‚»‚̃f[ƒ^ƒx[ƒX‚ª‘¶Ý‚µ‚Ü‚·", + est "Ei suuda luua andmebaasi '%-.64s': andmebaas juba eksisteerib" + fre "Ne peut créer la base '%-.64s'; elle existe déjà" + ger "Kann Datenbank '%-.64s' nicht erzeugen. Datenbank existiert bereits" + greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s'; Ç âÜóç äåäïìÝíùí õðÜñ÷åé Þäç" + hun "Az '%-.64s' adatbazis nem hozhato letre Az adatbazis mar letezik" + ita "Impossibile creare il database '%-.64s'; il database esiste" + jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó.´û¤Ë¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬Â¸ºß¤·¤Þ¤¹" + kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÔ" + nor "Kan ikke opprette databasen '%-.64s'; databasen eksisterer" + norwegian-ny "Kan ikkje opprette databasen '%-.64s'; databasen eksisterer" + pol "Nie mo¿na stworzyæ bazy danych '%-.64s'; baza danych ju¿ istnieje" + por "Não pode criar o banco de dados '%-.64s'; este banco de dados já existe" + rum "Nu pot sa creez baza de date '%-.64s'; baza de date exista deja" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. âÁÚÁ ÄÁÎÎÙÈ ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Ne mogu da kreiram bazu '%-.64s'; baza veæ postoji." 
+ slo "Nemô¾em vytvori» databázu '%-.64s'; databáza existuje" + spa "No puedo crear base de datos '%-.64s'; la base de datos ya existe" + swe "Databasen '%-.64s' existerar redan" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ¦ÓÎÕ¤" +ER_DB_DROP_EXISTS + cze "Nemohu zru-B¹it databázi '%-.64s', databáze neexistuje" + dan "Kan ikke slette (droppe) '%-.64s'; databasen eksisterer ikke" + nla "Kan database '%-.64s' niet verwijderen; database bestaat niet" + eng "Can't drop database '%-.64s'; database doesn't exist" + jps "'%-.64s' ƒf[ƒ^ƒx[ƒX‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ. ‚»‚̃f[ƒ^ƒx[ƒX‚ª‚È‚¢‚̂ł·.", + est "Ei suuda kustutada andmebaasi '%-.64s': andmebaasi ei eksisteeri" + fre "Ne peut effacer la base '%-.64s'; elle n'existe pas" + ger "Kann Datenbank '%-.64s' nicht löschen; Datenbank nicht vorhanden" + greek "Áäýíáôç ç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí '%-.64s'. Ç âÜóç äåäïìÝíùí äåí õðÜñ÷åé" + hun "A(z) '%-.64s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik" + ita "Impossibile cancellare '%-.64s'; il database non esiste" + jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤òÇË´þ¤Ç¤¤Þ¤»¤ó. ¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬¤Ê¤¤¤Î¤Ç¤¹." + kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ Á¦°ÅÇÏÁö ¸øÇß½À´Ï´Ù. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÏÁö ¾ÊÀ½ " + nor "Kan ikke fjerne (drop) '%-.64s'; databasen eksisterer ikke" + norwegian-ny "Kan ikkje fjerne (drop) '%-.64s'; databasen eksisterer ikkje" + pol "Nie mo¿na usun?æ bazy danych '%-.64s'; baza danych nie istnieje" + por "Não pode eliminar o banco de dados '%-.64s'; este banco de dados não existe" + rum "Nu pot sa drop baza de date '%-.64s'; baza da date este inexistenta" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. ôÁËÏÊ ÂÁÚÙ ÄÁÎÎÙÈ ÎÅÔ" + serbian "Ne mogu da izbrišem bazu '%-.64s'; baza ne postoji." + slo "Nemô¾em zmaza» databázu '%-.64s'; databáza neexistuje" + spa "No puedo eliminar base de datos '%-.64s'; la base de datos no existe" + swe "Kan inte radera databasen '%-.64s'; databasen finns inte" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. 
âÁÚÁ ÄÁÎÎÉÈ ÎÅ ¦ÓÎÕ¤" +ER_DB_DROP_DELETE + cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat '%-.64s', chyba %d)" + dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejlkode %d)" + nla "Fout bij verwijderen database (kan '%-.64s' niet verwijderen, Errcode: %d)" + eng "Error dropping database (can't delete '%-.64s', errno: %d)" + jps "ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð휂ł«‚Ü‚¹‚ñ, errno: %d)", + est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.64s', veakood: %d)" + fre "Ne peut effacer la base '%-.64s' (erreur %d)" + ger "Fehler beim Löschen der Datenbank ('%-.64s' kann nicht gelöscht werden, Fehler: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ '%-.64s', êùäéêüò ëÜèïõò: %d)" + hun "Adatbazis megszuntetesi hiba ('%-.64s' nem torolheto, hibakod: %d)" + ita "Errore durante la cancellazione del database (impossibile cancellare '%-.64s', errno: %d)" + jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤òºï½ü¤Ç¤¤Þ¤»¤ó, errno: %d)" + kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯('%-.64s'¸¦ »èÁ¦ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)" + nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.64s', feil %d)" + norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.64s', feil %d)" + pol "B³?d podczas usuwania bazy danych (nie mo¿na usun?æ '%-.64s', b³?d %d)" + por "Erro ao eliminar banco de dados (não pode eliminar '%-.64s' - erro no. %d)" + rum "Eroare dropuind baza de date (nu pot sa sterg '%-.64s', Eroare: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ ÂÁÚÙ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ '%-.64s', ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.64s', errno: %d)" + slo "Chyba pri mazaní databázy (nemô¾em zmaza» '%-.64s', chybový kód: %d)" + spa "Error eliminando la base de datos(no puedo borrar '%-.64s', error %d)" + swe "Fel vid radering av databasen (Kan inte radera '%-.64s'. 
Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s', ÐÏÍÉÌËÁ: %d)" +ER_DB_DROP_RMDIR + cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat adresáø '%-.64s', chyba %d)" + dan "Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejlkode %d)" + nla "Fout bij verwijderen database (kan rmdir '%-.64s' niet uitvoeren, Errcode: %d)" + eng "Error dropping database (can't rmdir '%-.64s', errno: %d)" + jps "ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð rmdir ‚Å‚«‚Ü‚¹‚ñ, errno: %d)", + est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.64s', veakood: %d)" + fre "Erreur en effaçant la base (rmdir '%-.64s', erreur %d)" + ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.64s' kann nicht gelöscht werden, Fehler: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ ôïõ öáêÝëëïõ '%-.64s', êùäéêüò ëÜèïõò: %d)" + hun "Adatbazis megszuntetesi hiba ('%-.64s' nem szuntetheto meg, hibakod: %d)" + ita "Errore durante la cancellazione del database (impossibile rmdir '%-.64s', errno: %d)" + jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤ò rmdir ¤Ç¤¤Þ¤»¤ó, errno: %d)" + kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯(rmdir '%-.64s'¸¦ ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)" + nor "Feil ved sletting av database (kan ikke slette katalogen '%-.64s', feil %d)" + norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.64s', feil %d)" + pol "B³?d podczas usuwania bazy danych (nie mo¿na wykonaæ rmdir '%-.64s', b³?d %d)" + por "Erro ao eliminar banco de dados (não pode remover diretório '%-.64s' - erro no. 
%d)" + rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.64s', Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ËÁÔÁÌÏÇ '%-.64s', ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.64s', errno: %d)" + slo "Chyba pri mazaní databázy (nemô¾em vymaza» adresár '%-.64s', chybový kód: %d)" + spa "Error eliminando la base de datos (No puedo borrar directorio '%-.64s', error %d)" + swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÔÅËÕ '%-.64s', ÐÏÍÉÌËÁ: %d)" +ER_CANT_DELETE_FILE + cze "Chyba p-Bøi výmazu '%-.64s' (chybový kód: %d)" + dan "Fejl ved sletning af '%-.64s' (Fejlkode: %d)" + nla "Fout bij het verwijderen van '%-.64s' (Errcode: %d)" + eng "Error on delete of '%-.64s' (errno: %d)" + jps "'%-.64s' ‚Ì휂ªƒGƒ‰[ (errno: %d)", + est "Viga '%-.64s' kustutamisel (veakood: %d)" + fre "Erreur en effaçant '%-.64s' (Errcode: %d)" + ger "Fehler beim Löschen von '%-.64s' (Fehler: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Torlesi hiba: '%-.64s' (hibakod: %d)" + ita "Errore durante la cancellazione di '%-.64s' (errno: %d)" + jpn "'%-.64s' ¤Îºï½ü¤¬¥¨¥é¡¼ (errno: %d)" + kor "'%-.64s' »èÁ¦ Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved sletting av '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved sletting av '%-.64s' (Feilkode: %d)" + pol "B³?d podczas usuwania '%-.64s' (Kod b³êdu: %d)" + por "Erro na remoção de '%-.64s' (erro no. 
%d)" + rum "Eroare incercind sa delete '%-.64s' (Eroare: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri brisanju '%-.64s' (errno: %d)" + slo "Chyba pri mazaní '%-.64s' (chybový kód: %d)" + spa "Error en el borrado de '%-.64s' (Error: %d)" + swe "Kan inte radera filen '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_FIND_SYSTEM_REC + cze "Nemohu -Bèíst záznam v systémové tabulce" + dan "Kan ikke læse posten i systemfolderen" + nla "Kan record niet lezen in de systeem tabel" + eng "Can't read record in system table" + jps "system table ‚̃ŒƒR[ƒh‚ð“ǂގ–‚ª‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½", + est "Ei suuda lugeda kirjet süsteemsest tabelist" + fre "Ne peut lire un enregistrement de la table 'system'" + ger "Datensatz in der Systemtabelle nicht lesbar" + greek "Áäýíáôç ç áíÜãíùóç åããñáöÞò áðü ðßíáêá ôïõ óõóôÞìáôïò" + hun "Nem olvashato rekord a rendszertablaban" + ita "Impossibile leggere il record dalla tabella di sistema" + jpn "system table ¤Î¥ì¥³¡¼¥É¤òÆÉ¤à»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿" + kor "system Å×ÀÌºí¿¡¼ ·¹Äڵ带 ÀÐÀ» ¼ö ¾ø½À´Ï´Ù." + nor "Kan ikke lese posten i systemkatalogen" + norwegian-ny "Kan ikkje lese posten i systemkatalogen" + pol "Nie mo¿na odczytaæ rekordu z tabeli systemowej" + por "Não pode ler um registro numa tabela do sistema" + rum "Nu pot sa citesc cimpurile in tabla de system (system table)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ÚÁÐÉÓØ × ÓÉÓÔÅÍÎÏÊ ÔÁÂÌÉÃÅ" + serbian "Ne mogu da proèitam slog iz sistemske tabele" + slo "Nemô¾em èíta» záznam v systémovej tabuµke" + spa "No puedo leer el registro en la tabla del sistema" + swe "Hittar inte posten i systemregistret" + ukr "îÅ ÍÏÖÕ ÚÞÉÔÁÔÉ ÚÁÐÉÓ Ú ÓÉÓÔÅÍÎϧ ÔÁÂÌÉæ" +ER_CANT_GET_STAT + cze "Nemohu z-Bískat stav '%-.200s' (chybový kód: %d)" + dan "Kan ikke læse status af '%-.200s' (Fejlkode: %d)" + nla "Kan de status niet krijgen van '%-.200s' (Errcode: %d)" + eng "Can't get status of '%-.200s' (errno: %d)" + jps "'%-.200s' ‚̃XƒeƒCƒ^ƒX‚ª“¾‚ç‚ê‚Ü‚¹‚ñ. 
(errno: %d)", + est "Ei suuda lugeda '%-.200s' olekut (veakood: %d)" + fre "Ne peut obtenir le status de '%-.200s' (Errcode: %d)" + ger "Kann Status von '%-.200s' nicht ermitteln (Fehler: %d)" + greek "Áäýíáôç ç ëÞøç ðëçñïöïñéþí ãéá ôçí êáôÜóôáóç ôïõ '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "A(z) '%-.200s' statusza nem allapithato meg (hibakod: %d)" + ita "Impossibile leggere lo stato di '%-.200s' (errno: %d)" + jpn "'%-.200s' ¤Î¥¹¥Æ¥¤¥¿¥¹¤¬ÆÀ¤é¤ì¤Þ¤»¤ó. (errno: %d)" + kor "'%-.200s'ÀÇ »óŸ¦ ¾òÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke lese statusen til '%-.200s' (Feilkode: %d)" + norwegian-ny "Kan ikkje lese statusen til '%-.200s' (Feilkode: %d)" + pol "Nie mo¿na otrzymaæ statusu '%-.200s' (Kod b³êdu: %d)" + por "Não pode obter o status de '%-.200s' (erro no. %d)" + rum "Nu pot sa obtin statusul lui '%-.200s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÓÔÁÔÕÓÎÕÀ ÉÎÆÏÒÍÁÃÉÀ Ï '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da dobijem stanje file-a '%-.200s' (errno: %d)" + slo "Nemô¾em zisti» stav '%-.200s' (chybový kód: %d)" + spa "No puedo obtener el estado de '%-.200s' (Error: %d)" + swe "Kan inte läsa filinformationen (stat) från '%-.200s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÏÔÒÉÍÁÔÉ ÓÔÁÔÕÓ '%-.200s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_GET_WD + cze "Chyba p-Bøi zji¹»ování pracovní adresáø (chybový kód: %d)" + dan "Kan ikke læse aktive folder (Fejlkode: %d)" + nla "Kan de werkdirectory niet krijgen (Errcode: %d)" + eng "Can't get working directory (errno: %d)" + jps "working directory ‚𓾂鎖‚ª‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½ (errno: %d)", + est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)" + fre "Ne peut obtenir le répertoire de travail (Errcode: %d)" + ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)" + greek "Ï öÜêåëëïò åñãáóßáò äåí âñÝèçêå (êùäéêüò ëÜèïõò: %d)" + hun "A munkakonyvtar nem allapithato meg (hibakod: %d)" + ita "Impossibile leggere la directory di lavoro (errno: %d)" + jpn "working directory ¤òÆÀ¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿ (errno: %d)" + kor "¼öÇà µð·ºÅ丮¸¦ ãÁö ¸øÇß½À´Ï´Ù. 
(¿¡·¯¹øÈ£: %d)" + nor "Kan ikke lese aktiv katalog(Feilkode: %d)" + norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %d)" + pol "Nie mo¿na rozpoznaæ aktualnego katalogu (Kod b³êdu: %d)" + por "Não pode obter o diretório corrente (erro no. %d)" + rum "Nu pot sa obtin directorul current (working directory) (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÐÒÅÄÅÌÉÔØ ÒÁÂÏÞÉÊ ËÁÔÁÌÏÇ (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da dobijem trenutni direktorijum (errno: %d)" + slo "Nemô¾em zisti» pracovný adresár (chybový kód: %d)" + spa "No puedo acceder al directorio (Error: %d)" + swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ÒÏÂÏÞÕ ÔÅËÕ (ÐÏÍÉÌËÁ: %d)" +ER_CANT_LOCK + cze "Nemohu uzamknout soubor (chybov-Bý kód: %d)" + dan "Kan ikke låse fil (Fejlkode: %d)" + nla "Kan de file niet blokeren (Errcode: %d)" + eng "Can't lock file (errno: %d)" + jps "ƒtƒ@ƒCƒ‹‚ðƒƒbƒN‚Å‚«‚Ü‚¹‚ñ (errno: %d)", + est "Ei suuda lukustada faili (veakood: %d)" + fre "Ne peut verrouiller le fichier (Errcode: %d)" + ger "Datei kann nicht gesperrt werden (Fehler: %d)" + greek "Ôï áñ÷åßï äåí ìðïñåß íá êëåéäùèåß (êùäéêüò ëÜèïõò: %d)" + hun "A file nem zarolhato. (hibakod: %d)" + ita "Impossibile il locking il file (errno: %d)" + jpn "¥Õ¥¡¥¤¥ë¤ò¥í¥Ã¥¯¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "ÈÀÏÀ» Àá±×Áö(lock) ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke låse fila (Feilkode: %d)" + norwegian-ny "Kan ikkje låse fila (Feilkode: %d)" + pol "Nie mo¿na zablokowaæ pliku (Kod b³êdu: %d)" + por "Não pode travar o arquivo (erro no. %d)" + rum "Nu pot sa lock fisierul (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÓÔÁ×ÉÔØ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÆÁÊÌÅ (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da zakljuèam file (errno: %d)" + slo "Nemô¾em zamknú» súbor (chybový kód: %d)" + spa "No puedo bloquear archivo: (Error: %d)" + swe "Kan inte låsa filen. 
(Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÚÁÂÌÏËÕ×ÁÔÉ ÆÁÊÌ (ÐÏÍÉÌËÁ: %d)" +ER_CANT_OPEN_FILE + cze "Nemohu otev-Bøít soubor '%-.200s' (chybový kód: %d)" + dan "Kan ikke åbne fil: '%-.200s' (Fejlkode: %d)" + nla "Kan de file '%-.200s' niet openen (Errcode: %d)" + eng "Can't open file: '%-.200s' (errno: %d)" + jps "'%-.200s' ƒtƒ@ƒCƒ‹‚ðŠJ‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)", + est "Ei suuda avada faili '%-.200s' (veakood: %d)" + fre "Ne peut ouvrir le fichier: '%-.200s' (Errcode: %d)" + ger "Kann Datei '%-.200s' nicht öffnen (Fehler: %d)" + greek "Äåí åßíáé äõíáôü íá áíïé÷ôåß ôï áñ÷åßï: '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "A '%-.200s' file nem nyithato meg (hibakod: %d)" + ita "Impossibile aprire il file: '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Õ¥¡¥¤¥ë¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "ÈÀÏÀ» ¿Áö ¸øÇß½À´Ï´Ù.: '%-.200s' (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke åpne fila: '%-.200s' (Feilkode: %d)" + norwegian-ny "Kan ikkje åpne fila: '%-.200s' (Feilkode: %d)" + pol "Nie mo¿na otworzyæ pliku: '%-.200s' (Kod b³êdu: %d)" + por "Não pode abrir o arquivo '%-.200s' (erro no. 
%d)" + rum "Nu pot sa deschid fisierul: '%-.200s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÆÁÊÌ: '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da otvorim file: '%-.200s' (errno: %d)" + slo "Nemô¾em otvori» súbor: '%-.200s' (chybový kód: %d)" + spa "No puedo abrir archivo: '%-.200s' (Error: %d)" + swe "Kan inte använda '%-.200s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÆÁÊÌ: '%-.200s' (ÐÏÍÉÌËÁ: %d)" +ER_FILE_NOT_FOUND + cze "Nemohu naj-Bít soubor '%-.200s' (chybový kód: %d)" + dan "Kan ikke finde fila: '%-.200s' (Fejlkode: %d)" + nla "Kan de file: '%-.200s' niet vinden (Errcode: %d)" + eng "Can't find file: '%-.200s' (errno: %d)" + jps "'%-.200s' ƒtƒ@ƒCƒ‹‚ðŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ.(errno: %d)", + est "Ei suuda leida faili '%-.200s' (veakood: %d)" + fre "Ne peut trouver le fichier: '%-.200s' (Errcode: %d)" + ger "Kann Datei '%-.200s' nicht finden (Fehler: %d)" + greek "Äåí âñÝèçêå ôï áñ÷åßï: '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "A(z) '%-.200s' file nem talalhato (hibakod: %d)" + ita "Impossibile trovare il file: '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Õ¥¡¥¤¥ë¤ò¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó.(errno: %d)" + kor "ÈÀÏÀ» ãÁö ¸øÇß½À´Ï´Ù.: '%-.200s' (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke finne fila: '%-.200s' (Feilkode: %d)" + norwegian-ny "Kan ikkje finne fila: '%-.200s' (Feilkode: %d)" + pol "Nie mo¿na znale¥æ pliku: '%-.200s' (Kod b³êdu: %d)" + por "Não pode encontrar o arquivo '%-.200s' (erro no. 
%d)" + rum "Nu pot sa gasesc fisierul: '%-.200s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÆÁÊÌ: '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da pronaðem file: '%-.200s' (errno: %d)" + slo "Nemô¾em nájs» súbor: '%-.200s' (chybový kód: %d)" + spa "No puedo encontrar archivo: '%-.200s' (Error: %d)" + swe "Hittar inte filen '%-.200s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÁÊÌ: '%-.200s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_READ_DIR + cze "Nemohu -Bèíst adresáø '%-.64s' (chybový kód: %d)" + dan "Kan ikke læse folder '%-.64s' (Fejlkode: %d)" + nla "Kan de directory niet lezen van '%-.64s' (Errcode: %d)" + eng "Can't read dir of '%-.64s' (errno: %d)" + jps "'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚ª“ǂ߂܂¹‚ñ.(errno: %d)", + est "Ei suuda lugeda kataloogi '%-.64s' (veakood: %d)" + fre "Ne peut lire le répertoire de '%-.64s' (Errcode: %d)" + ger "Verzeichnis von '%-.64s' nicht lesbar (Fehler: %d)" + greek "Äåí åßíáé äõíáôü íá äéáâáóôåß ï öÜêåëëïò ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "A(z) '%-.64s' konyvtar nem olvashato. (hibakod: %d)" + ita "Impossibile leggere la directory di '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤¬ÆÉ¤á¤Þ¤»¤ó.(errno: %d)" + kor "'%-.64s'µð·ºÅ丮¸¦ ÀÐÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke lese katalogen '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje lese katalogen '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na odczytaæ katalogu '%-.64s' (Kod b³êdu: %d)" + por "Não pode ler o diretório de '%-.64s' (erro no. 
%d)" + rum "Nu pot sa citesc directorul '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da proèitam direktorijum '%-.64s' (errno: %d)" + slo "Nemô¾em èíta» adresár '%-.64s' (chybový kód: %d)" + spa "No puedo leer el directorio de '%-.64s' (Error: %d)" + swe "Kan inte läsa från bibliotek '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_SET_WD + cze "Nemohu zm-Bìnit adresáø na '%-.64s' (chybový kód: %d)" + dan "Kan ikke skifte folder til '%-.64s' (Fejlkode: %d)" + nla "Kan de directory niet veranderen naar '%-.64s' (Errcode: %d)" + eng "Can't change dir to '%-.64s' (errno: %d)" + jps "'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚É chdir ‚Å‚«‚Ü‚¹‚ñ.(errno: %d)", + est "Ei suuda siseneda kataloogi '%-.64s' (veakood: %d)" + fre "Ne peut changer le répertoire pour '%-.64s' (Errcode: %d)" + ger "Kann nicht in das Verzeichnis '%-.64s' wechseln (Fehler: %d)" + greek "Áäýíáôç ç áëëáãÞ ôïõ ôñÝ÷ïíôïò êáôáëüãïõ óå '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Konyvtarvaltas nem lehetseges a(z) '%-.64s'-ba. (hibakod: %d)" + ita "Impossibile cambiare la directory in '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤Ë chdir ¤Ç¤¤Þ¤»¤ó.(errno: %d)" + kor "'%-.64s'µð·ºÅ丮·Î À̵¿ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke skifte katalog til '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje skifte katalog til '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na zmieniæ katalogu na '%-.64s' (Kod b³êdu: %d)" + por "Não pode mudar para o diretório '%-.64s' (erro no. 
%d)" + rum "Nu pot sa schimb directorul '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÅÒÅÊÔÉ × ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da promenim direktorijum na '%-.64s' (errno: %d)" + slo "Nemô¾em vojs» do adresára '%-.64s' (chybový kód: %d)" + spa "No puedo cambiar al directorio de '%-.64s' (Error: %d)" + swe "Kan inte byta till '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÔÉ Õ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CHECKREAD + cze "Z-Báznam byl zmìnìn od posledního ètení v tabulce '%-.64s'" + dan "Posten er ændret siden sidste læsning '%-.64s'" + nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.64s'" + eng "Record has changed since last read in table '%-.64s'" + est "Kirje tabelis '%-.64s' on muutunud viimasest lugemisest saadik" + fre "Enregistrement modifié depuis sa dernière lecture dans la table '%-.64s'" + ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' geändert" + greek "Ç åããñáöÞ Ý÷åé áëëÜîåé áðü ôçí ôåëåõôáßá öïñÜ ðïõ áíáóýñèçêå áðü ôïí ðßíáêá '%-.64s'" + hun "A(z) '%-.64s' tablaban talalhato rekord megvaltozott az utolso olvasas ota" + ita "Il record e` cambiato dall'ultima lettura della tabella '%-.64s'" + kor "Å×À̺í '%-.64s'¿¡¼ ¸¶Áö¸·À¸·Î ÀÐÀº ÈÄ Record°¡ º¯°æµÇ¾ú½À´Ï´Ù." 
+ nor "Posten har blitt endret siden den ble lest '%-.64s'" + norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.64s'" + pol "Rekord zosta³ zmieniony od ostaniego odczytania z tabeli '%-.64s'" + por "Registro alterado desde a última leitura da tabela '%-.64s'" + rum "Cimpul a fost schimbat de la ultima citire a tabelei '%-.64s'" + rus "úÁÐÉÓØ ÉÚÍÅÎÉÌÁÓØ Ó ÍÏÍÅÎÔÁ ÐÏÓÌÅÄÎÅÊ ×ÙÂÏÒËÉ × ÔÁÂÌÉÃÅ '%-.64s'" + serbian "Slog je promenjen od zadnjeg èitanja tabele '%-.64s'" + slo "Záznam bol zmenený od posledného èítania v tabuµke '%-.64s'" + spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.64s'" + swe "Posten har förändrats sedan den lästes i register '%-.64s'" + ukr "úÁÐÉÓ ÂÕÌÏ ÚͦÎÅÎÏ Ú ÞÁÓÕ ÏÓÔÁÎÎØÏÇÏ ÞÉÔÁÎÎÑ Ú ÔÁÂÌÉæ '%-.64s'" +ER_DISK_FULL + cze "Disk je pln-Bý (%s), èekám na uvolnìní nìjakého místa ..." + dan "Ikke mere diskplads (%s). Venter på at få frigjort plads..." + nla "Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt..." + eng "Disk full (%s); waiting for someone to free some space..." + jps "Disk full (%s). ’N‚©‚ª‰½‚©‚ðŒ¸‚ç‚·‚܂ł܂Á‚Ä‚‚¾‚³‚¢...", + est "Ketas täis (%s). Ootame kuni tekib vaba ruumi..." + fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace..." + ger "Festplatte voll (%s). Warte, bis jemand Platz schafft ..." + greek "Äåí õðÜñ÷åé ÷þñïò óôï äßóêï (%s). Ðáñáêáëþ, ðåñéìÝíåôå íá åëåõèåñùèåß ÷þñïò..." + hun "A lemez megtelt (%s)." + ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio..." + jpn "Disk full (%s). 狼¤¬²¿¤«¤ò¸º¤é¤¹¤Þ¤Ç¤Þ¤Ã¤Æ¤¯¤À¤µ¤¤..." + kor "Disk full (%s). ´Ù¸¥ »ç¶÷ÀÌ Áö¿ï¶§±îÁö ±â´Ù¸³´Ï´Ù..." + nor "Ikke mer diskplass (%s). Venter på å få frigjort plass..." + norwegian-ny "Ikkje meir diskplass (%s). Ventar på å få frigjort plass..." + pol "Dysk pe³ny (%s). Oczekiwanie na zwolnienie miejsca..." + por "Disco cheio (%s). Aguardando alguém liberar algum espaço..." + rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu..." 
+ rus "äÉÓË ÚÁÐÏÌÎÅÎ. (%s). ïÖÉÄÁÅÍ, ÐÏËÁ ËÔÏ-ÔÏ ÎÅ ÕÂÅÒÅÔ ÐÏÓÌÅ ÓÅÂÑ ÍÕÓÏÒ..." + serbian "Disk je pun (%s). Èekam nekoga da doðe i oslobodi nešto mesta..." + slo "Disk je plný (%s), èakám na uvoµnenie miesta..." + spa "Disco lleno (%s). Esperando para que se libere algo de espacio..." + swe "Disken är full (%s). Väntar tills det finns ledigt utrymme..." + ukr "äÉÓË ÚÁÐÏ×ÎÅÎÉÊ (%s). ÷ÉÞÉËÕÀ, ÄÏËÉ Ú×¦ÌØÎÉÔØÓÑ ÔÒÏÈÉ Í¦ÓÃÑ..." +ER_DUP_KEY 23000 + cze "Nemohu zapsat, zdvojen-Bý klíè v tabulce '%-.64s'" + dan "Kan ikke skrive, flere ens nøgler i tabellen '%-.64s'" + nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.64s'" + eng "Can't write; duplicate key in table '%-.64s'" + jps "table '%-.64s' ‚É key ‚ªd•¡‚µ‚Ä‚¢‚Ä‘‚«‚±‚߂܂¹‚ñ", + est "Ei saa kirjutada, korduv võti tabelis '%-.64s'" + fre "Ecriture impossible, doublon dans une clé de la table '%-.64s'" + ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.64s'" + greek "Äåí åßíáé äõíáôÞ ç êáôá÷þñçóç, ç ôéìÞ õðÜñ÷åé Þäç óôïí ðßíáêá '%-.64s'" + hun "Irasi hiba, duplikalt kulcs a '%-.64s' tablaban." + ita "Scrittura impossibile: chiave duplicata nella tabella '%-.64s'" + jpn "table '%-.64s' ¤Ë key ¤¬½ÅÊ£¤·¤Æ¤¤¤Æ½ñ¤¤³¤á¤Þ¤»¤ó" + kor "±â·ÏÇÒ ¼ö ¾øÀ¾´Ï´Ù., Å×À̺í '%-.64s'¿¡¼ Áߺ¹ Ű" + nor "Kan ikke skrive, flere like nøkler i tabellen '%-.64s'" + norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.64s'" + pol "Nie mo¿na zapisaæ, powtórzone klucze w tabeli '%-.64s'" + por "Não pode gravar. 
Chave duplicada na tabela '%-.64s'" + rum "Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÚÁÐÉÓØ, ÄÕÂÌÉÒÕÀÝÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉÃÅ '%-.64s'" + serbian "Ne mogu da pišem pošto postoji duplirani kljuè u tabeli '%-.64s'" + slo "Nemô¾em zapísa», duplikát kµúèa v tabuµke '%-.64s'" + spa "No puedo escribir, clave duplicada en la tabla '%-.64s'" + swe "Kan inte skriva, dubbel söknyckel i register '%-.64s'" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ, ÄÕÂÌÀÀÞÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉæ '%-.64s'" +ER_ERROR_ON_CLOSE + cze "Chyba p-Bøi zavírání '%-.64s' (chybový kód: %d)" + dan "Fejl ved lukning af '%-.64s' (Fejlkode: %d)" + nla "Fout bij het sluiten van '%-.64s' (Errcode: %d)" + eng "Error on close of '%-.64s' (errno: %d)" + est "Viga faili '%-.64s' sulgemisel (veakood: %d)" + fre "Erreur a la fermeture de '%-.64s' (Errcode: %d)" + ger "Fehler beim Schließen von '%-.64s' (Fehler: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êëåßíïíôáò ôï '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a(z) '%-.64s' zarasakor. (hibakod: %d)" + ita "Errore durante la chiusura di '%-.64s' (errno: %d)" + kor "'%-.64s'´Ý´Â Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved lukking av '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved lukking av '%-.64s' (Feilkode: %d)" + pol "B³?d podczas zamykania '%-.64s' (Kod b³êdu: %d)" + por "Erro ao fechar '%-.64s' (erro no. 
%d)" + rum "Eroare inchizind '%-.64s' (errno: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÚÁËÒÙÔÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri zatvaranju '%-.64s' (errno: %d)" + slo "Chyba pri zatváraní '%-.64s' (chybový kód: %d)" + spa "Error en el cierre de '%-.64s' (Error: %d)" + swe "Fick fel vid stängning av '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÚÁËÒÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_ERROR_ON_READ + cze "Chyba p-Bøi ètení souboru '%-.200s' (chybový kód: %d)" + dan "Fejl ved læsning af '%-.200s' (Fejlkode: %d)" + nla "Fout bij het lezen van file '%-.200s' (Errcode: %d)" + eng "Error reading file '%-.200s' (errno: %d)" + jps "'%-.200s' ƒtƒ@ƒCƒ‹‚̓ǂݞ‚݃Gƒ‰[ (errno: %d)", + est "Viga faili '%-.200s' lugemisel (veakood: %d)" + fre "Erreur en lecture du fichier '%-.200s' (Errcode: %d)" + ger "Fehler beim Lesen der Datei '%-.200s' (Fehler: %d)" + greek "Ðñüâëçìá êáôÜ ôçí áíÜãíùóç ôïõ áñ÷åßïõ '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a '%-.200s'file olvasasakor. (hibakod: %d)" + ita "Errore durante la lettura del file '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Õ¥¡¥¤¥ë¤ÎÆÉ¤ß¹þ¤ß¥¨¥é¡¼ (errno: %d)" + kor "'%-.200s'ÈÀÏ Àб⠿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved lesing av '%-.200s' (Feilkode: %d)" + norwegian-ny "Feil ved lesing av '%-.200s' (Feilkode: %d)" + pol "B³?d podczas odczytu pliku '%-.200s' (Kod b³êdu: %d)" + por "Erro ao ler arquivo '%-.200s' (erro no. 
%d)" + rum "Eroare citind fisierul '%-.200s' (errno: %d)" + rus "ïÛÉÂËÁ ÞÔÅÎÉÑ ÆÁÊÌÁ '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri èitanju file-a '%-.200s' (errno: %d)" + slo "Chyba pri èítaní súboru '%-.200s' (chybový kód: %d)" + spa "Error leyendo el fichero '%-.200s' (Error: %d)" + swe "Fick fel vid läsning av '%-.200s' (Felkod %d)" + ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÆÁÊÌ '%-.200s' (ÐÏÍÉÌËÁ: %d)" +ER_ERROR_ON_RENAME + cze "Chyba p-Bøi pøejmenování '%-.150s' na '%-.150s' (chybový kód: %d)" + dan "Fejl ved omdøbning af '%-.150s' til '%-.150s' (Fejlkode: %d)" + nla "Fout bij het hernoemen van '%-.150s' naar '%-.150s' (Errcode: %d)" + eng "Error on rename of '%-.150s' to '%-.150s' (errno: %d)" + jps "'%-.150s' ‚ð '%-.150s' ‚É rename ‚Å‚«‚Ü‚¹‚ñ (errno: %d)", + est "Viga faili '%-.150s' ümbernimetamisel '%-.150s'-ks (veakood: %d)" + fre "Erreur en renommant '%-.150s' en '%-.150s' (Errcode: %d)" + ger "Fehler beim Umbenennen von '%-.150s' in '%-.150s' (Fehler: %d)" + greek "Ðñüâëçìá êáôÜ ôçí ìåôïíïìáóßá ôïõ áñ÷åßïõ '%-.150s' to '%-.150s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a '%-.150s' file atnevezesekor '%-.150s'. (hibakod: %d)" + ita "Errore durante la rinominazione da '%-.150s' a '%-.150s' (errno: %d)" + jpn "'%-.150s' ¤ò '%-.150s' ¤Ë rename ¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "'%-.150s'¸¦ '%-.150s'·Î À̸§ º¯°æÁß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved omdøping av '%-.150s' til '%-.150s' (Feilkode: %d)" + norwegian-ny "Feil ved omdøyping av '%-.150s' til '%-.150s' (Feilkode: %d)" + pol "B³?d podczas zmieniania nazwy '%-.150s' na '%-.150s' (Kod b³êdu: %d)" + por "Erro ao renomear '%-.150s' para '%-.150s' (erro no. 
%d)" + rum "Eroare incercind sa renumesc '%-.150s' in '%-.150s' (errno: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÐÅÒÅÉÍÅÎÏ×ÁÎÉÉ '%-.150s' × '%-.150s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri promeni imena '%-.150s' na '%-.150s' (errno: %d)" + slo "Chyba pri premenovávaní '%-.150s' na '%-.150s' (chybový kód: %d)" + spa "Error en el renombrado de '%-.150s' a '%-.150s' (Error: %d)" + swe "Kan inte byta namn från '%-.150s' till '%-.150s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÍÅÎÕ×ÁÔÉ '%-.150s' Õ '%-.150s' (ÐÏÍÉÌËÁ: %d)" +ER_ERROR_ON_WRITE + cze "Chyba p-Bøi zápisu do souboru '%-.200s' (chybový kód: %d)" + dan "Fejl ved skriving av filen '%-.200s' (Fejlkode: %d)" + nla "Fout bij het wegschrijven van file '%-.200s' (Errcode: %d)" + eng "Error writing file '%-.200s' (errno: %d)" + jps "'%-.200s' ƒtƒ@ƒCƒ‹‚ð‘‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)", + est "Viga faili '%-.200s' kirjutamisel (veakood: %d)" + fre "Erreur d'écriture du fichier '%-.200s' (Errcode: %d)" + ger "Fehler beim Speichern der Datei '%-.200s' (Fehler: %d)" + greek "Ðñüâëçìá êáôÜ ôçí áðïèÞêåõóç ôïõ áñ÷åßïõ '%-.200s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a '%-.200s' file irasakor. (hibakod: %d)" + ita "Errore durante la scrittura del file '%-.200s' (errno: %d)" + jpn "'%-.200s' ¥Õ¥¡¥¤¥ë¤ò½ñ¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "'%-.200s'ÈÀÏ ±â·Ï Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved skriving av fila '%-.200s' (Feilkode: %d)" + norwegian-ny "Feil ved skriving av fila '%-.200s' (Feilkode: %d)" + pol "B³?d podczas zapisywania pliku '%-.200s' (Kod b³êdu: %d)" + por "Erro ao gravar arquivo '%-.200s' (erro no. 
%d)" + rum "Eroare scriind fisierul '%-.200s' (errno: %d)" + rus "ïÛÉÂËÁ ÚÁÐÉÓÉ × ÆÁÊÌ '%-.200s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri upisu '%-.200s' (errno: %d)" + slo "Chyba pri zápise do súboru '%-.200s' (chybový kód: %d)" + spa "Error escribiendo el archivo '%-.200s' (Error: %d)" + swe "Fick fel vid skrivning till '%-.200s' (Felkod %d)" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÆÁÊÌ '%-.200s' (ÐÏÍÉÌËÁ: %d)" +ER_FILE_USED + cze "'%-.64s' je zam-Bèen proti zmìnám" + dan "'%-.64s' er låst mod opdateringer" + nla "'%-.64s' is geblokeerd tegen veranderingen" + eng "'%-.64s' is locked against change" + jps "'%-.64s' ‚̓ƒbƒN‚³‚ê‚Ä‚¢‚Ü‚·", + est "'%-.64s' on lukustatud muudatuste vastu" + fre "'%-.64s' est verrouillé contre les modifications" + ger "'%-.64s' ist für Änderungen gesperrt" + greek "'%-.64s' äåí åðéôñÝðïíôáé áëëáãÝò" + hun "'%-.64s' a valtoztatas ellen zarolva" + ita "'%-.64s' e` soggetto a lock contro i cambiamenti" + jpn "'%-.64s' ¤Ï¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤¹" + kor "'%-.64s'°¡ º¯°æÇÒ ¼ö ¾øµµ·Ï Àá°ÜÀÖÀ¾´Ï´Ù." + nor "'%-.64s' er låst mot oppdateringer" + norwegian-ny "'%-.64s' er låst mot oppdateringar" + pol "'%-.64s' jest zablokowany na wypadek zmian" + por "'%-.64s' está com travamento contra alterações" + rum "'%-.64s' este blocat pentry schimbari (loccked against change)" + rus "'%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÄÌÑ ÉÚÍÅÎÅÎÉÊ" + serbian "'%-.64s' je zakljuèan za upis" + slo "'%-.64s' je zamknutý proti zmenám" + spa "'%-.64s' esta bloqueado contra cambios" + swe "'%-.64s' är låst mot användning" + ukr "'%-.64s' ÚÁÂÌÏËÏ×ÁÎÉÊ ÎÁ ×ÎÅÓÅÎÎÑ ÚͦÎ" +ER_FILSORT_ABORT + cze "T-Bøídìní pøeru¹eno" + dan "Sortering afbrudt" + nla "Sorteren afgebroken" + eng "Sort aborted" + jps "Sort ’†’f", + est "Sorteerimine katkestatud" + fre "Tri alphabétique abandonné" + ger "Sortiervorgang abgebrochen" + greek "Ç äéáäéêáóßá ôáîéíüìéóçò áêõñþèçêå" + hun "Sikertelen rendezes" + ita "Operazione di ordinamento abbandonata" + jpn "Sort ÃæÃÇ" + kor "¼ÒÆ®°¡ ÁߴܵǾú½À´Ï´Ù." 
+ nor "Sortering avbrutt" + norwegian-ny "Sortering avbrote" + pol "Sortowanie przerwane" + por "Ordenação abortada" + rum "Sortare intrerupta" + rus "óÏÒÔÉÒÏ×ËÁ ÐÒÅÒ×ÁÎÁ" + serbian "Sortiranje je prekinuto" + slo "Triedenie preru¹ené" + spa "Ordeancion cancelada" + swe "Sorteringen avbruten" + ukr "óÏÒÔÕ×ÁÎÎÑ ÐÅÒÅÒ×ÁÎÏ" +ER_FORM_NOT_FOUND + cze "Pohled '%-.64s' pro '%-.64s' neexistuje" + dan "View '%-.64s' eksisterer ikke for '%-.64s'" + nla "View '%-.64s' bestaat niet voor '%-.64s'" + eng "View '%-.64s' doesn't exist for '%-.64s'" + jps "View '%-.64s' ‚ª '%-.64s' ‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "Vaade '%-.64s' ei eksisteeri '%-.64s' jaoks" + fre "La vue (View) '%-.64s' n'existe pas pour '%-.64s'" + ger "View '%-.64s' existiert für '%-.64s' nicht" + greek "Ôï View '%-.64s' äåí õðÜñ÷åé ãéá '%-.64s'" + hun "A(z) '%-.64s' nezet nem letezik a(z) '%-.64s'-hoz" + ita "La view '%-.64s' non esiste per '%-.64s'" + jpn "View '%-.64s' ¤¬ '%-.64s' ¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "ºä '%-.64s'°¡ '%-.64s'¿¡¼´Â Á¸ÀçÇÏÁö ¾ÊÀ¾´Ï´Ù." 
+ nor "View '%-.64s' eksisterer ikke for '%-.64s'" + norwegian-ny "View '%-.64s' eksisterar ikkje for '%-.64s'" + pol "Widok '%-.64s' nie istnieje dla '%-.64s'" + por "Visão '%-.64s' não existe para '%-.64s'" + rum "View '%-.64s' nu exista pentru '%-.64s'" + rus "ðÒÅÄÓÔÁ×ÌÅÎÉÅ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ ÄÌÑ '%-.64s'" + serbian "View '%-.64s' ne postoji za '%-.64s'" + slo "Pohµad '%-.64s' neexistuje pre '%-.64s'" + spa "La vista '%-.64s' no existe para '%-.64s'" + swe "Formulär '%-.64s' finns inte i '%-.64s'" + ukr "÷ÉÇÌÑÄ '%-.64s' ÎÅ ¦ÓÎÕ¤ ÄÌÑ '%-.64s'" +ER_GET_ERRNO + cze "Obsluha tabulky vr-Bátila chybu %d" + dan "Modtog fejl %d fra tabel håndteringen" + nla "Fout %d van tabel handler" + eng "Got error %d from storage engine" + est "Tabeli handler tagastas vea %d" + fre "Reçu l'erreur %d du handler de la table" + ger "Fehler %d (Speicher-Engine)" + greek "ÅëÞöèç ìÞíõìá ëÜèïõò %d áðü ôïí ÷åéñéóôÞ ðßíáêá (table handler)" + hun "%d hibajelzes a tablakezelotol" + ita "Rilevato l'errore %d dal gestore delle tabelle" + jpn "Got error %d from table handler" + kor "Å×À̺í handler¿¡¼ %d ¿¡·¯°¡ ¹ß»ý ÇÏ¿´½À´Ï´Ù." 
+ nor "Mottok feil %d fra tabell håndterer" + norwegian-ny "Mottok feil %d fra tabell handterar" + pol "Otrzymano b³?d %d z obs³ugi tabeli" + por "Obteve erro %d no manipulador de tabelas" + rum "Eroarea %d obtinuta din handlerul tabelei" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d ÏÔ ÏÂÒÁÂÏÔÞÉËÁ ÔÁÂÌÉÃ" + serbian "Handler tabela je vratio grešku %d" + slo "Obsluha tabuµky vrátila chybu %d" + spa "Error %d desde el manejador de la tabla" + swe "Fick felkod %d från databashanteraren" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d ×¦Ä ÄÅÓËÒÉÐÔÏÒÁ ÔÁÂÌÉæ" +ER_ILLEGAL_HA + cze "Obsluha tabulky '%-.64s' nem-Bá tento parametr" + dan "Denne mulighed eksisterer ikke for tabeltypen '%-.64s'" + nla "Tabel handler voor '%-.64s' heeft deze optie niet" + eng "Table storage engine for '%-.64s' doesn't have this option" + est "Tabeli '%-.64s' handler ei toeta antud operatsiooni" + fre "Le handler de la table '%-.64s' n'a pas cette option" + ger "Diese Option gibt es nicht (Speicher-Engine für '%-.64s')" + greek "Ï ÷åéñéóôÞò ðßíáêá (table handler) ãéá '%-.64s' äåí äéáèÝôåé áõôÞ ôçí åðéëïãÞ" + hun "A(z) '%-.64s' tablakezelonek nincs ilyen opcioja" + ita "Il gestore delle tabelle per '%-.64s' non ha questa opzione" + jpn "Table handler for '%-.64s' doesn't have this option" + kor "'%-.64s'ÀÇ Å×À̺í handler´Â ÀÌ·¯ÇÑ ¿É¼ÇÀ» Á¦°øÇÏÁö ¾ÊÀ¾´Ï´Ù." 
+ nor "Tabell håndtereren for '%-.64s' har ikke denne muligheten" + norwegian-ny "Tabell håndteraren for '%-.64s' har ikkje denne moglegheita" + pol "Obs³uga tabeli '%-.64s' nie posiada tej opcji" + por "Manipulador de tabela para '%-.64s' não tem esta opção" + rum "Handlerul tabelei pentru '%-.64s' nu are aceasta optiune" + rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÕ ×ÏÚÍÏÖÎÏÓÔØ" + serbian "Handler tabela za '%-.64s' nema ovu opciju" + slo "Obsluha tabuµky '%-.64s' nemá tento parameter" + spa "El manejador de la tabla de '%-.64s' no tiene esta opcion" + swe "Tabellhanteraren for tabell '%-.64s' stödjer ej detta" + ukr "äÅÓËÒÉÐÔÏÒ ÔÁÂÌÉæ '%-.64s' ÎÅ ÍÁ¤ 椧 ×ÌÁÓÔÉ×ÏÓÔ¦" +ER_KEY_NOT_FOUND + cze "Nemohu naj-Bít záznam v '%-.64s'" + dan "Kan ikke finde posten i '%-.64s'" + nla "Kan record niet vinden in '%-.64s'" + eng "Can't find record in '%-.64s'" + jps "'%-.64s'‚̂Ȃ©‚ɃŒƒR[ƒh‚ªŒ©•t‚©‚è‚Ü‚¹‚ñ", + est "Ei suuda leida kirjet '%-.64s'-s" + fre "Ne peut trouver l'enregistrement dans '%-.64s'" + ger "Kann Datensatz in '%-.64s' nicht finden" + greek "Áäýíáôç ç áíåýñåóç åããñáöÞò óôï '%-.64s'" + hun "Nem talalhato a rekord '%-.64s'-ben" + ita "Impossibile trovare il record in '%-.64s'" + jpn "'%-.64s'¤Î¤Ê¤«¤Ë¥ì¥³¡¼¥É¤¬¸«ÉÕ¤«¤ê¤Þ¤»¤ó" + kor "'%-.64s'¿¡¼ ·¹Äڵ带 ãÀ» ¼ö ¾øÀ¾´Ï´Ù." 
+ nor "Kan ikke finne posten i '%-.64s'" + norwegian-ny "Kan ikkje finne posten i '%-.64s'" + pol "Nie mo¿na znale¥æ rekordu w '%-.64s'" + por "Não pode encontrar registro em '%-.64s'" + rum "Nu pot sa gasesc recordul in '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÚÁÐÉÓØ × '%-.64s'" + serbian "Ne mogu da pronaðem slog u '%-.64s'" + slo "Nemô¾em nájs» záznam v '%-.64s'" + spa "No puedo encontrar el registro en '%-.64s'" + swe "Hittar inte posten '%-.64s'" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ Õ '%-.64s'" +ER_NOT_FORM_FILE + cze "Nespr-Bávná informace v souboru '%-.200s'" + dan "Forkert indhold i: '%-.200s'" + nla "Verkeerde info in file: '%-.200s'" + eng "Incorrect information in file: '%-.200s'" + jps "ƒtƒ@ƒCƒ‹ '%-.200s' ‚Ì info ‚ªŠÔˆá‚Á‚Ä‚¢‚邿‚¤‚Å‚·", + est "Vigane informatsioon failis '%-.200s'" + fre "Information erronnée dans le fichier: '%-.200s'" + ger "Falsche Information in Datei '%-.200s'" + greek "ËÜèïò ðëçñïöïñßåò óôï áñ÷åßï: '%-.200s'" + hun "Ervenytelen info a file-ban: '%-.200s'" + ita "Informazione errata nel file: '%-.200s'" + jpn "¥Õ¥¡¥¤¥ë '%-.200s' ¤Î info ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹" + kor "ÈÀÏÀÇ ºÎÁ¤È®ÇÑ Á¤º¸: '%-.200s'" + nor "Feil informasjon i filen: '%-.200s'" + norwegian-ny "Feil informasjon i fila: '%-.200s'" + pol "Niew³a?ciwa informacja w pliku: '%-.200s'" + por "Informação incorreta no arquivo '%-.200s'" + rum "Informatie incorecta in fisierul: '%-.200s'" + rus "îÅËÏÒÒÅËÔÎÁÑ ÉÎÆÏÒÍÁÃÉÑ × ÆÁÊÌÅ '%-.200s'" + serbian "Pogrešna informacija u file-u: '%-.200s'" + slo "Nesprávna informácia v súbore: '%-.200s'" + spa "Informacion erronea en el archivo: '%-.200s'" + swe "Felaktig fil: '%-.200s'" + ukr "èÉÂÎÁ ¦ÎÆÏÒÍÁÃ¦Ñ Õ ÆÁÊ̦: '%-.200s'" +ER_NOT_KEYFILE + cze "Nespr-Bávný klíè pro tabulku '%-.200s'; pokuste se ho opravit" + dan "Fejl i indeksfilen til tabellen '%-.200s'; prøv at reparere den" + nla "Verkeerde zoeksleutel file voor tabel: '%-.200s'; probeer het te repareren" + eng "Incorrect key file for table '%-.200s'; try to repair it" + jps "'%-.200s' ƒe[ƒuƒ‹‚Ì key file 
‚ªŠÔˆá‚Á‚Ä‚¢‚邿‚¤‚Å‚·. C•œ‚ð‚µ‚Ä‚‚¾‚³‚¢", + est "Tabeli '%-.200s' võtmefail on vigane; proovi seda parandada" + fre "Index corrompu dans la table: '%-.200s'; essayez de le réparer" + ger "Fehlerhafte Index-Datei für Tabelle '%-.200s'; versuche zu reparieren" + greek "ËÜèïò áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá: '%-.200s'; Ðáñáêáëþ, äéïñèþóôå ôï!" + hun "Ervenytelen kulcsfile a tablahoz: '%-.200s'; probalja kijavitani!" + ita "File chiave errato per la tabella : '%-.200s'; prova a riparalo" + jpn "'%-.200s' ¥Æ¡¼¥Ö¥ë¤Î key file ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹. ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤" + kor "'%-.200s' Å×À̺íÀÇ ºÎÁ¤È®ÇÑ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!" + nor "Tabellen '%-.200s' har feil i nøkkelfilen; forsøk å reparer den" + norwegian-ny "Tabellen '%-.200s' har feil i nykkelfila; prøv å reparere den" + pol "Niew³a?ciwy plik kluczy dla tabeli: '%-.200s'; spróbuj go naprawiæ" + por "Arquivo de índice incorreto para tabela '%-.200s'; tente repará-lo" + rum "Cheia fisierului incorecta pentru tabela: '%-.200s'; incearca s-o repari" + rus "îÅËÏÒÒÅËÔÎÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ: '%-.200s'. ðÏÐÒÏÂÕÊÔÅ ×ÏÓÓÔÁÎÏ×ÉÔØ ÅÇÏ" + serbian "Pogrešan key file za tabelu: '%-.200s'; probajte da ga ispravite" + slo "Nesprávny kµúè pre tabuµku '%-.200s'; pokúste sa ho opravi»" + spa "Clave de archivo erronea para la tabla: '%-.200s'; intente repararlo" + swe "Fatalt fel vid hantering av register '%-.200s'; kör en reparation" + ukr "èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.200s'; óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ" +ER_OLD_KEYFILE + cze "Star-Bý klíèový soubor pro '%-.64s'; opravte ho." + dan "Gammel indeksfil for tabellen '%-.64s'; reparer den" + nla "Oude zoeksleutel file voor tabel '%-.64s'; repareer het!" + eng "Old key file for table '%-.64s'; repair it!" + jps "'%-.64s' ƒe[ƒuƒ‹‚͌¢Œ`Ž®‚Ì key file ‚̂悤‚Å‚·; C•œ‚ð‚µ‚Ä‚‚¾‚³‚¢", + est "Tabeli '%-.64s' võtmefail on aegunud; paranda see!" + fre "Vieux fichier d'index pour la table '%-.64s'; réparez le!" + ger "Alte Index-Datei für Tabelle '%-.64s'. 
Bitte reparieren" + greek "Ðáëáéü áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!" + hun "Regi kulcsfile a '%-.64s'tablahoz; probalja kijavitani!" + ita "File chiave vecchio per la tabella '%-.64s'; riparalo!" + jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤Ï¸Å¤¤·Á¼°¤Î key file ¤Î¤è¤¦¤Ç¤¹; ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤" + kor "'%-.64s' Å×À̺íÀÇ ÀÌÀü¹öÁ¯ÀÇ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!" + nor "Gammel nøkkelfil for tabellen '%-.64s'; reparer den!" + norwegian-ny "Gammel nykkelfil for tabellen '%-.64s'; reparer den!" + pol "Plik kluczy dla tabeli '%-.64s' jest starego typu; napraw go!" + por "Arquivo de índice desatualizado para tabela '%-.64s'; repare-o!" + rum "Cheia fisierului e veche pentru tabela '%-.64s'; repar-o!" + rus "óÔÁÒÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'; ÏÔÒÅÍÏÎÔÉÒÕÊÔÅ ÅÇÏ!" + serbian "Zastareo key file za tabelu '%-.64s'; ispravite ga" + slo "Starý kµúèový súbor pre '%-.64s'; opravte ho!" + spa "Clave de archivo antigua para la tabla '%-.64s'; reparelo!" + swe "Gammal nyckelfil '%-.64s'; reparera registret" + ukr "óÔÁÒÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ '%-.64s'; ÷¦ÄÎÏ×¦ÔØ ÊÏÇÏ!" +ER_OPEN_AS_READONLY + cze "'%-.64s' je jen pro -Bètení" + dan "'%-.64s' er skrivebeskyttet" + nla "'%-.64s' is alleen leesbaar" + eng "Table '%-.64s' is read only" + jps "'%-.64s' ‚͓ǂݞ‚Ýê—p‚Å‚·", + est "Tabel '%-.64s' on ainult lugemiseks" + fre "'%-.64s' est en lecture seulement" + ger "Tabelle '%-.64s' ist nur lesbar" + greek "'%-.64s' åðéôñÝðåôáé ìüíï ç áíÜãíùóç" + hun "'%-.64s' irasvedett" + ita "'%-.64s' e` di sola lettura" + jpn "'%-.64s' ¤ÏÆÉ¤ß¹þ¤ßÀìÍѤǤ¹" + kor "Å×À̺í '%-.64s'´Â ÀбâÀü¿ë ÀÔ´Ï´Ù." 
+ nor "'%-.64s' er skrivebeskyttet" + norwegian-ny "'%-.64s' er skrivetryggja" + pol "'%-.64s' jest tylko do odczytu" + por "Tabela '%-.64s' é somente para leitura" + rum "Tabela '%-.64s' e read-only" + rus "ôÁÂÌÉÃÁ '%-.64s' ÐÒÅÄÎÁÚÎÁÞÅÎÁ ÔÏÌØËÏ ÄÌÑ ÞÔÅÎÉÑ" + serbian "Tabelu '%-.64s' je dozvoljeno samo èitati" + slo "'%-.64s' is èíta» only" + spa "'%-.64s' es de solo lectura" + swe "'%-.64s' är skyddad mot förändring" + ukr "ôÁÂÌÉÃÑ '%-.64s' Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ" +ER_OUTOFMEMORY HY001 S1001 + cze "M-Bálo pamìti. Pøestartujte daemona a zkuste znovu (je potøeba %d bytù)" + dan "Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)" + nla "Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)" + eng "Out of memory; restart server and try again (needed %d bytes)" + jps "Out of memory. ƒf[ƒ‚ƒ“‚ðƒŠƒXƒ^[ƒg‚µ‚Ă݂Ă‚¾‚³‚¢ (%d bytes •K—v)", + est "Mälu sai otsa. Proovi MySQL uuesti käivitada (puudu jäi %d baiti)" + fre "Manque de mémoire. Redémarrez le démon et ré-essayez (%d octets nécessaires)" + ger "Kein Speicher vorhanden (%d Bytes benötigt). Bitte Server neu starten" + greek "Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç. ÐñïóðáèÞóôå ðÜëé, åðáíåêéíþíôáò ôç äéáäéêáóßá (demon) (÷ñåéÜæïíôáé %d bytes)" + hun "Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)" + ita "Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)" + jpn "Out of memory. ¥Ç¡¼¥â¥ó¤ò¥ê¥¹¥¿¡¼¥È¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤ (%d bytes ɬÍ×)" + kor "Out of memory. µ¥¸óÀ» Àç ½ÇÇà ÈÄ ´Ù½Ã ½ÃÀÛÇϽÿÀ (needed %d bytes)" + nor "Ikke mer minne. Star på nytt tjenesten og prøv igjen (trengte %d byter)" + norwegian-ny "Ikkje meir minne. Start på nytt tenesten og prøv igjen (trengte %d bytar)" + pol "Zbyt ma³o pamiêci. Uruchom ponownie demona i spróbuj ponownie (potrzeba %d bajtów)" + por "Sem memória. Reinicie o programa e tente novamente (necessita de %d bytes)" + rum "Out of memory. 
Porneste daemon-ul din nou si incearca inca o data (e nevoie de %d bytes)" + rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ. ðÅÒÅÚÁÐÕÓÔÉÔÅ ÓÅÒ×ÅÒ É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ (ÎÕÖÎÏ %d ÂÁÊÔ)" + serbian "Nema memorije. Restartujte MySQL server i probajte ponovo (potrebno je %d byte-ova)" + slo "Málo pamäti. Re¹tartujte daemona a skúste znova (je potrebných %d bytov)" + spa "Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)" + swe "Oväntat slut på minnet, starta om programmet och försök på nytt (Behövde %d bytes)" + ukr "âÒÁË ÐÁÍ'ÑÔ¦. òÅÓÔÁÒÔÕÊÔÅ ÓÅÒ×ÅÒ ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ (ÐÏÔÒ¦ÂÎÏ %d ÂÁÊÔ¦×)" +ER_OUT_OF_SORTMEMORY HY001 S1001 + cze "M-Bálo pamìti pro tøídìní. Zvy¹te velikost tøídícího bufferu" + dan "Ikke mere sorteringshukommelse. Øg sorteringshukommelse (sort buffer size) for serveren" + nla "Geen geheugen om te sorteren. Verhoog de server sort buffer size" + eng "Out of sort memory; increase server sort buffer size" + jps "Out of sort memory. sort buffer size ‚ª‘«‚è‚È‚¢‚悤‚Å‚·.", + est "Mälu sai sorteerimisel otsa. Suurenda MySQL-i sorteerimispuhvrit" + fre "Manque de mémoire pour le tri. Augmentez-la." + ger "Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte im Server erhöht werden" + greek "Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç ãéá ôáîéíüìéóç. ÁõîÞóôå ôï sort buffer size ãéá ôç äéáäéêáóßá (demon)" + hun "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet" + ita "Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone" + jpn "Out of sort memory. sort buffer size ¤¬Â¤ê¤Ê¤¤¤è¤¦¤Ç¤¹." + kor "Out of sort memory. daemon sort bufferÀÇ Å©±â¸¦ Áõ°¡½ÃŰ¼¼¿ä" + nor "Ikke mer sorteringsminne. Øk sorteringsminnet (sort buffer size) for tjenesten" + norwegian-ny "Ikkje meir sorteringsminne. Auk sorteringsminnet (sorteringsbffer storleik) for tenesten" + pol "Zbyt ma³o pamiêci dla sortowania. Zwiêksz wielko?æ bufora demona dla sortowania" + por "Sem memória para ordenação. 
Aumente tamanho do 'buffer' de ordenação" + rum "Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)" + rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ ÄÌÑ ÓÏÒÔÉÒÏ×ËÉ. õ×ÅÌÉÞØÔÅ ÒÁÚÍÅÒ ÂÕÆÅÒÁ ÓÏÒÔÉÒÏ×ËÉ ÎÁ ÓÅÒ×ÅÒÅ" + serbian "Nema memorije za sortiranje. Poveæajte velièinu sort buffer-a MySQL server-u" + slo "Málo pamäti pre triedenie, zvý¹te veµkos» triediaceho bufferu" + spa "Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion" + swe "Sorteringsbufferten räcker inte till. Kontrollera startparametrarna" + ukr "âÒÁË ÐÁÍ'ÑÔ¦ ÄÌÑ ÓÏÒÔÕ×ÁÎÎÑ. ôÒÅÂÁ ÚÂ¦ÌØÛÉÔÉ ÒÏÚÍ¦Ò ÂÕÆÅÒÁ ÓÏÒÔÕ×ÁÎÎÑ Õ ÓÅÒ×ÅÒÁ" +ER_UNEXPECTED_EOF + cze "Neo-Bèekávaný konec souboru pøi ètení '%-.64s' (chybový kód: %d)" + dan "Uventet afslutning på fil (eof) ved læsning af filen '%-.64s' (Fejlkode: %d)" + nla "Onverwachte eof gevonden tijdens het lezen van file '%-.64s' (Errcode: %d)" + eng "Unexpected EOF found when reading file '%-.64s' (errno: %d)" + jps "'%-.64s' ƒtƒ@ƒCƒ‹‚ð“ǂݞ‚Ý’†‚É EOF ‚ª—\Šú‚¹‚ÊŠ‚ÅŒ»‚ê‚Ü‚µ‚½. (errno: %d)", + est "Ootamatu faililõpumärgend faili '%-.64s' lugemisel (veakood: %d)" + fre "Fin de fichier inattendue en lisant '%-.64s' (Errcode: %d)" + ger "Unerwartetes Ende beim Lesen der Datei '%-.64s' (Fehler: %d)" + greek "ÊáôÜ ôç äéÜñêåéá ôçò áíÜãíùóçò, âñÝèçêå áðñïóäüêçôá ôï ôÝëïò ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Varatlan filevege-jel a '%-.64s'olvasasakor. (hibakod: %d)" + ita "Fine del file inaspettata durante la lettura del file '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤òÆÉ¤ß¹þ¤ßÃæ¤Ë EOF ¤¬Í½´ü¤»¤Ì½ê¤Ç¸½¤ì¤Þ¤·¤¿. 
(errno: %d)" + kor "'%-.64s' ÈÀÏÀ» Àд µµÁß À߸øµÈ eofÀ» ¹ß°ß (¿¡·¯¹øÈ£: %d)" + nor "Uventet slutt på fil (eof) ved lesing av filen '%-.64s' (Feilkode: %d)" + norwegian-ny "Uventa slutt på fil (eof) ved lesing av fila '%-.64s' (Feilkode: %d)" + pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.64s' (Kod b³êdu: %d)" + por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)" + rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.64s' (errno: %d)" + rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Neoèekivani kraj pri èitanju file-a '%-.64s' (errno: %d)" + slo "Neoèakávaný koniec súboru pri èítaní '%-.64s' (chybový kód: %d)" + spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.64s' (Error: %d)" + swe "Oväntat filslut vid läsning från '%-.64s' (Felkod: %d)" + ukr "èÉÂÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CON_COUNT_ERROR 08004 + cze "P-Bøíli¹ mnoho spojení" + dan "For mange forbindelser (connections)" + nla "Te veel verbindingen" + eng "Too many connections" + jps "Ú‘±‚ª‘½‚·‚¬‚Ü‚·", + est "Liiga palju samaaegseid ühendusi" + fre "Trop de connections" + ger "Zu viele Verbindungen" + greek "ÕðÜñ÷ïõí ðïëëÝò óõíäÝóåéò..." + hun "Tul sok kapcsolat" + ita "Troppe connessioni" + jpn "Àܳ¤¬Â¿¤¹¤®¤Þ¤¹" + kor "³Ê¹« ¸¹Àº ¿¬°á... max_connectionÀ» Áõ°¡ ½ÃŰ½Ã¿À..." + nor "For mange tilkoblinger (connections)" + norwegian-ny "For mange tilkoplingar (connections)" + pol "Zbyt wiele po³?czeñ" + por "Excesso de conexões" + rum "Prea multe conectiuni" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÓÏÅÄÉÎÅÎÉÊ" + serbian "Previše konekcija" + slo "Príli¹ mnoho spojení" + spa "Demasiadas conexiones" + swe "För många anslutningar" + ukr "úÁÂÁÇÁÔÏ Ú'¤ÄÎÁÎØ" +ER_OUT_OF_RESOURCES + cze "M-Bálo prostoru/pamìti pro thread" + dan "Udgået for tråde/hukommelse" + nla "Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. 
Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen" + eng "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space" + jps "Out of memory; mysqld ‚©‚»‚Ì‘¼‚̃vƒƒZƒX‚ªƒƒ‚ƒŠ[‚ð‘S‚ÄŽg‚Á‚Ä‚¢‚é‚©Šm”F‚µ‚Ä‚‚¾‚³‚¢. ƒƒ‚ƒŠ[‚ðŽg‚¢Ø‚Á‚Ä‚¢‚È‚¢ê‡A'ulimit' ‚ðݒ肵‚Ä mysqld ‚̃ƒ‚ƒŠ[Žg—pŒÀŠE—ʂ𑽂‚·‚é‚©Aswap space ‚ð‘‚₵‚Ă݂Ă‚¾‚³‚¢", + est "Mälu sai otsa. Võimalik, et aitab swap-i lisamine või käsu 'ulimit' abil MySQL-le rohkema mälu kasutamise lubamine" + fre "Manque de 'threads'/mémoire" + ger "Kein Speicher mehr vorhanden. Prüfen Sie, ob mysqld oder ein anderer Prozess den gesamten Speicher verbraucht. Wenn nicht, sollten Sie mit 'ulimit' dafür sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten" + greek "Ðñüâëçìá ìå ôç äéáèÝóéìç ìíÞìç (Out of thread space/memory)" + hun "Elfogyott a thread-memoria" + ita "Fine dello spazio/memoria per i thread" + jpn "Out of memory; mysqld ¤«¤½¤Î¾¤Î¥×¥í¥»¥¹¤¬¥á¥â¥ê¡¼¤òÁ´¤Æ»È¤Ã¤Æ¤¤¤ë¤«³Îǧ¤·¤Æ¤¯¤À¤µ¤¤. ¥á¥â¥ê¡¼¤ò»È¤¤ÀڤäƤ¤¤Ê¤¤¾ì¹ç¡¢'ulimit' ¤òÀßÄꤷ¤Æ mysqld ¤Î¥á¥â¥ê¡¼»ÈÍѸ³¦Î̤ò¿¤¯¤¹¤ë¤«¡¢swap space ¤òÁý¤ä¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤" + kor "Out of memory; mysqld³ª ¶Ç´Ù¸¥ ÇÁ·Î¼¼¼¿¡¼ »ç¿ë°¡´ÉÇÑ ¸Þ¸ð¸®¸¦ »ç¿ëÇÑÁö äũÇϽÿÀ. ¸¸¾à ±×·¸Áö ¾Ê´Ù¸é ulimit ¸í·ÉÀ» ÀÌ¿¿ëÇÏ¿© ´õ¸¹Àº ¸Þ¸ð¸®¸¦ »ç¿ëÇÒ ¼ö ÀÖµµ·Ï Çϰųª ½º¿Ò ½ºÆÐÀ̽º¸¦ Áõ°¡½ÃŰ½Ã¿À" + nor "Tomt for tråd plass/minne" + norwegian-ny "Tomt for tråd plass/minne" + pol "Zbyt ma³o miejsca/pamiêci dla w?tku" + por "Sem memória. Verifique se o mysqld ou algum outro processo está usando toda memória disponível. Se não, você pode ter que usar 'ulimit' para permitir ao mysqld usar mais memória ou você pode adicionar mais área de 'swap'" + rum "Out of memory; Verifica daca mysqld sau vreun alt proces foloseste toate memoria disponbila. 
Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui mysqld sa foloseasca mai multa memorie ori adauga mai mult spatiu pentru swap (swap space)" + rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ; ÕÄÏÓÔÏ×ÅÒØÔÅÓØ, ÞÔÏ mysqld ÉÌÉ ËÁËÏÊ-ÌÉÂÏ ÄÒÕÇÏÊ ÐÒÏÃÅÓÓ ÎÅ ÚÁÎÉÍÁÅÔ ×ÓÀ ÄÏÓÔÕÐÎÕÀ ÐÁÍÑÔØ. åÓÌÉ ÎÅÔ, ÔÏ ×Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ ulimit, ÞÔÏÂÙ ×ÙÄÅÌÉÔØ ÄÌÑ mysqld ÂÏÌØÛÅ ÐÁÍÑÔÉ, ÉÌÉ Õ×ÅÌÉÞÉÔØ ÏÂßÅÍ ÆÁÊÌÁ ÐÏÄËÁÞËÉ" + serbian "Nema memorije; Proverite da li MySQL server ili neki drugi proces koristi svu slobodnu memoriju. (UNIX: Ako ne, probajte da upotrebite 'ulimit' komandu da biste dozvolili daemon-u da koristi više memorije ili probajte da dodate više swap memorije)" + slo "Málo miesta-pamäti pre vlákno" + spa "Memoria/espacio de tranpaso insuficiente" + swe "Fick slut på minnet. Kontrollera om mysqld eller någon annan process använder allt tillgängligt minne. Om inte, försök använda 'ulimit' eller allokera mera swap" + ukr "âÒÁË ÐÁÍ'ÑÔ¦; ðÅÒÅצÒÔÅ ÞÉ mysqld ÁÂÏ ÑË¦ÓØ ¦ÎÛ¦ ÐÒÏÃÅÓÉ ×ÉËÏÒÉÓÔÏ×ÕÀÔØ ÕÓÀ ÄÏÓÔÕÐÎÕ ÐÁÍ'ÑÔØ. ñË Î¦, ÔÏ ×É ÍÏÖÅÔÅ ÓËÏÒÉÓÔÁÔÉÓÑ 'ulimit', ÁÂÉ ÄÏÚ×ÏÌÉÔÉ mysqld ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ Â¦ÌØÛÅ ÐÁÍ'ÑÔ¦ ÁÂÏ ×É ÍÏÖÅÔÅ ÄÏÄÁÔÉ Â¦ÌØÛŠͦÓÃÑ Ð¦Ä Ó×ÁÐ" +ER_BAD_HOST_ERROR 08S01 + cze "Nemohu zjistit jm-Béno stroje pro Va¹i adresu" + dan "Kan ikke få værtsnavn for din adresse" + nla "Kan de hostname niet krijgen van uw adres" + eng "Can't get hostname for your address" + jps "‚»‚Ì address ‚Ì hostname ‚ªˆø‚¯‚Ü‚¹‚ñ.", + est "Ei suuda lahendada IP aadressi masina nimeks" + fre "Ne peut obtenir de hostname pour votre adresse" + ger "Kann Hostnamen für diese Adresse nicht erhalten" + greek "Äåí Ýãéíå ãíùóôü ôï hostname ãéá ôçí address óáò" + hun "A gepnev nem allapithato meg a cimbol" + ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)" + jpn "¤½¤Î address ¤Î hostname ¤¬°ú¤±¤Þ¤»¤ó." + kor "´ç½ÅÀÇ ÄÄÇ»ÅÍÀÇ È£½ºÆ®À̸§À» ¾òÀ» ¼ö ¾øÀ¾´Ï´Ù." 
+ nor "Kan ikke få tak i vertsnavn for din adresse" + norwegian-ny "Kan ikkje få tak i vertsnavn for di adresse" + pol "Nie mo¿na otrzymaæ nazwy hosta dla twojego adresu" + por "Não pode obter nome do 'host' para seu endereço" + rum "Nu pot sa obtin hostname-ul adresei tale" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÉÍÑ ÈÏÓÔÁ ÄÌÑ ×ÁÛÅÇÏ ÁÄÒÅÓÁ" + serbian "Ne mogu da dobijem ime host-a za vašu IP adresu" + slo "Nemô¾em zisti» meno hostiteµa pre va¹u adresu" + spa "No puedo obtener el nombre de maquina de tu direccion" + swe "Kan inte hitta 'hostname' för din adress" + ukr "îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ¦Í'Ñ ÈÏÓÔÕ ÄÌÑ ×ÁÛϧ ÁÄÒÅÓÉ" +ER_HANDSHAKE_ERROR 08S01 + cze "Chyba p-Bøi ustavování spojení" + dan "Forkert håndtryk (handshake)" + nla "Verkeerde handshake" + eng "Bad handshake" + est "Väär handshake" + fre "Mauvais 'handshake'" + ger "Ungültiger Handshake" + greek "Ç áíáãíþñéóç (handshake) äåí Ýãéíå óùóôÜ" + hun "A kapcsolatfelvetel nem sikerult (Bad handshake)" + ita "Negoziazione impossibile" + nor "Feil håndtrykk (handshake)" + norwegian-ny "Feil handtrykk (handshake)" + pol "Z³y uchwyt(handshake)" + por "Negociação de acesso falhou" + rum "Prost inceput de conectie (bad handshake)" + rus "îÅËÏÒÒÅËÔÎÏÅ ÐÒÉ×ÅÔÓÔ×ÉÅ" + serbian "Loš poèetak komunikacije (handshake)" + slo "Chyba pri nadväzovaní spojenia" + spa "Protocolo erroneo" + swe "Fel vid initiering av kommunikationen med klienten" + ukr "îÅצÒÎÁ ÕÓÔÁÎÏ×ËÁ Ú×'ÑÚËÕ" +ER_DBACCESS_DENIED_ERROR 42000 + cze "P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' k databázi '%-.64s' není povolen" + dan "Adgang nægtet bruger: '%-.32s'@'%-.64s' til databasen '%-.64s'" + nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' naar database '%-.64s'" + eng "Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'" + jps "ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚Ì '%-.64s' ƒf[ƒ^ƒx[ƒX‚ւ̃AƒNƒZƒX‚ð‹‘”Û‚µ‚Ü‚·", + est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' andmebaasile '%-.64s'" + fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s'. 
Base '%-.64s'" + ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.64s'" + greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' óôç âÜóç äåäïìÝíùí '%-.64s'" + hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz." + ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' al database '%-.64s'" + jpn "¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤Î '%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤Ø¤Î¥¢¥¯¥»¥¹¤òµñÈݤ·¤Þ¤¹" + kor "'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â '%-.64s' µ¥ÀÌŸº£À̽º¿¡ Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù." + nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' til databasen '%-.64s' nektet" + norwegian-ny "Tilgang ikkje tillate for brukar: '%-.32s'@'%-.64s' til databasen '%-.64s' nekta" + por "Acesso negado para o usuário '%-.32s'@'%-.64s' ao banco de dados '%-.64s'" + rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' la baza de date '%-.64s'" + rus "äÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' ÄÏÓÔÕÐ Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÚÁËÒÙÔ" + serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' za bazu '%-.64s'" + slo "Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' k databázi '%-.64s'" + spa "Acceso negado para usuario: '%-.32s'@'%-.64s' para la base de datos '%-.64s'" + swe "Användare '%-.32s'@'%-.64s' är ej berättigad att använda databasen %-.64s" + ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ '%-.64s'" +ER_ACCESS_DENIED_ERROR 28000 + cze "P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' (s heslem %s)" + dan "Adgang nægtet bruger: '%-.32s'@'%-.64s' (Bruger adgangskode: %s)" + nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' (Wachtwoord gebruikt: %s)" + eng "Access denied for user '%-.32s'@'%-.64s' (using password: %s)" + jps "ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚ð‹‘”Û‚µ‚Ü‚·.uUsing password: %s)", + est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)" + fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)" + ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes 
Passwort: %s)" + greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' (÷ñÞóç password: %s)" + hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)" + ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)" + jpn "¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤òµñÈݤ·¤Þ¤¹.uUsing password: %s)" + kor "'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù. (using password: %s)" + nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' (Bruker passord: %s)" + norwegian-ny "Tilgang ikke tillate for brukar: '%-.32s'@'%-.64s' (Brukar passord: %s)" + por "Acesso negado para o usuário '%-.32s'@'%-.64s' (senha usada: %s)" + rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' (Folosind parola: %s)" + rus "äÏÓÔÕÐ ÚÁËÒÙÔ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' (ÂÙÌ ÉÓÐÏÌØÚÏ×ÁÎ ÐÁÒÏÌØ: %s)" + serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' (koristi lozinku: '%s')" + slo "Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' (pou¾itie hesla: %s)" + spa "Acceso negado para usuario: '%-.32s'@'%-.64s' (Usando clave: %s)" + swe "Användare '%-.32s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)" + ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' (÷ÉËÏÒÉÓÔÁÎÏ ÐÁÒÏÌØ: %s)" +ER_NO_DB_ERROR 3D000 + cze "Nebyla vybr-Bána ¾ádná databáze" + dan "Ingen database valgt" + nla "Geen database geselecteerd" + eng "No database selected" + jps "ƒf[ƒ^ƒx[ƒX‚ª‘I‘ð‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.", + est "Andmebaasi ei ole valitud" + fre "Aucune base n'a été sélectionnée" + ger "Keine Datenbank ausgewählt" + greek "Äåí åðéëÝ÷èçêå âÜóç äåäïìÝíùí" + hun "Nincs kivalasztott adatbazis" + ita "Nessun database selezionato" + jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ÁªÂò¤µ¤ì¤Æ¤¤¤Þ¤»¤ó." + kor "¼±ÅÃµÈ µ¥ÀÌŸº£À̽º°¡ ¾ø½À´Ï´Ù." 
+ nor "Ingen database valgt" + norwegian-ny "Ingen database vald" + pol "Nie wybrano ¿adnej bazy danych" + por "Nenhum banco de dados foi selecionado" + rum "Nici o baza de data nu a fost selectata inca" + rus "âÁÚÁ ÄÁÎÎÙÈ ÎÅ ×ÙÂÒÁÎÁ" + serbian "Ni jedna baza nije selektovana" + slo "Nebola vybraná databáza" + spa "Base de datos no seleccionada" + swe "Ingen databas i användning" + ukr "âÁÚÕ ÄÁÎÎÉÈ ÎÅ ×ÉÂÒÁÎÏ" +ER_UNKNOWN_COM_ERROR 08S01 + cze "Nezn-Bámý pøíkaz" + dan "Ukendt kommando" + nla "Onbekend commando" + eng "Unknown command" + jps "‚»‚̃Rƒ}ƒ“ƒh‚͉½H", + est "Tundmatu käsk" + fre "Commande inconnue" + ger "Unbekannter Befehl" + greek "Áãíùóôç åíôïëÞ" + hun "Ervenytelen parancs" + ita "Comando sconosciuto" + jpn "¤½¤Î¥³¥Þ¥ó¥É¤Ï²¿¡©" + kor "¸í·É¾î°¡ ¹ºÁö ¸ð¸£°Ú¾î¿ä..." + nor "Ukjent kommando" + norwegian-ny "Ukjent kommando" + pol "Nieznana komenda" + por "Comando desconhecido" + rum "Comanda invalida" + rus "îÅÉÚ×ÅÓÔÎÁÑ ËÏÍÁÎÄÁ ËÏÍÍÕÎÉËÁÃÉÏÎÎÏÇÏ ÐÒÏÔÏËÏÌÁ" + serbian "Nepoznata komanda" + slo "Neznámy príkaz" + spa "Comando desconocido" + swe "Okänt commando" + ukr "îÅצÄÏÍÁ ËÏÍÁÎÄÁ" +ER_BAD_NULL_ERROR 23000 + cze "Sloupec '%-.64s' nem-Bù¾e být null" + dan "Kolonne '%-.64s' kan ikke være NULL" + nla "Kolom '%-.64s' kan niet null zijn" + eng "Column '%-.64s' cannot be null" + jps "Column '%-.64s' ‚Í null ‚ɂ͂ł«‚È‚¢‚̂ł·", + est "Tulp '%-.64s' ei saa omada nullväärtust" + fre "Le champ '%-.64s' ne peut être vide (null)" + ger "Feld '%-.64s' darf nicht NULL sein" + greek "Ôï ðåäßï '%-.64s' äåí ìðïñåß íá åßíáé êåíü (null)" + hun "A(z) '%-.64s' oszlop erteke nem lehet nulla" + ita "La colonna '%-.64s' non puo` essere nulla" + jpn "Column '%-.64s' ¤Ï null ¤Ë¤Ï¤Ç¤¤Ê¤¤¤Î¤Ç¤¹" + kor "Ä®·³ '%-.64s'´Â ³Î(Null)ÀÌ µÇ¸é ¾ÈµË´Ï´Ù. 
" + nor "Kolonne '%-.64s' kan ikke vere null" + norwegian-ny "Kolonne '%-.64s' kan ikkje vere null" + pol "Kolumna '%-.64s' nie mo¿e byæ null" + por "Coluna '%-.64s' não pode ser vazia" + rum "Coloana '%-.64s' nu poate sa fie null" + rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÍÏÖÅÔ ÐÒÉÎÉÍÁÔØ ×ÅÌÉÞÉÎÕ NULL" + serbian "Kolona '%-.64s' ne može biti NULL" + slo "Pole '%-.64s' nemô¾e by» null" + spa "La columna '%-.64s' no puede ser nula" + swe "Kolumn '%-.64s' får inte vara NULL" + ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÎÕÌØÏ×ÉÍ" +ER_BAD_DB_ERROR 42000 + cze "Nezn-Bámá databáze '%-.64s'" + dan "Ukendt database '%-.64s'" + nla "Onbekende database '%-.64s'" + eng "Unknown database '%-.64s'" + jps "'%-.64s' ‚È‚ñ‚ăf[ƒ^ƒx[ƒX‚Í’m‚è‚Ü‚¹‚ñ.", + est "Tundmatu andmebaas '%-.64s'" + fre "Base '%-.64s' inconnue" + ger "Unbekannte Datenbank '%-.64s'" + greek "Áãíùóôç âÜóç äåäïìÝíùí '%-.64s'" + hun "Ervenytelen adatbazis: '%-.64s'" + ita "Database '%-.64s' sconosciuto" + jpn "'%-.64s' ¤Ê¤ó¤Æ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ÏÃΤê¤Þ¤»¤ó." + kor "µ¥ÀÌŸº£À̽º '%-.64s'´Â ¾Ë¼ö ¾øÀ½" + nor "Ukjent database '%-.64s'" + norwegian-ny "Ukjent database '%-.64s'" + pol "Nieznana baza danych '%-.64s'" + por "Banco de dados '%-.64s' desconhecido" + rum "Baza de data invalida '%-.64s'" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÂÁÚÁ ÄÁÎÎÙÈ '%-.64s'" + serbian "Nepoznata baza '%-.64s'" + slo "Neznáma databáza '%-.64s'" + spa "Base de datos desconocida '%-.64s'" + swe "Okänd databas: '%-.64s'" + ukr "îÅצÄÏÍÁ ÂÁÚÁ ÄÁÎÎÉÈ '%-.64s'" +ER_TABLE_EXISTS_ERROR 42S01 + cze "Tabulka '%-.64s' ji-B¾ existuje" + dan "Tabellen '%-.64s' findes allerede" + nla "Tabel '%-.64s' bestaat al" + eng "Table '%-.64s' already exists" + jps "Table '%-.64s' ‚ÍŠù‚É‚ ‚è‚Ü‚·", + est "Tabel '%-.64s' juba eksisteerib" + fre "La table '%-.64s' existe déjà" + ger "Tabelle '%-.64s' bereits vorhanden" + greek "Ï ðßíáêáò '%-.64s' õðÜñ÷åé Þäç" + hun "A(z) '%-.64s' tabla mar letezik" + ita "La tabella '%-.64s' esiste gia`" + jpn "Table '%-.64s' ¤Ï´û¤Ë¤¢¤ê¤Þ¤¹" + kor "Å×À̺í '%-.64s'´Â ÀÌ¹Ì Á¸ÀçÇÔ" + nor 
"Tabellen '%-.64s' eksisterer allerede" + norwegian-ny "Tabellen '%-.64s' eksisterar allereide" + pol "Tabela '%-.64s' ju¿ istnieje" + por "Tabela '%-.64s' já existe" + rum "Tabela '%-.64s' exista deja" + rus "ôÁÂÌÉÃÁ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Tabela '%-.64s' veæ postoji" + slo "Tabuµka '%-.64s' u¾ existuje" + spa "La tabla '%-.64s' ya existe" + swe "Tabellen '%-.64s' finns redan" + ukr "ôÁÂÌÉÃÑ '%-.64s' ×ÖÅ ¦ÓÎÕ¤" +ER_BAD_TABLE_ERROR 42S02 + cze "Nezn-Bámá tabulka '%-.100s'" + dan "Ukendt tabel '%-.100s'" + nla "Onbekende tabel '%-.100s'" + eng "Unknown table '%-.100s'" + jps "table '%-.100s' ‚Í‚ ‚è‚Ü‚¹‚ñ.", + est "Tundmatu tabel '%-.100s'" + fre "Table '%-.100s' inconnue" + ger "Unbekannte Tabelle '%-.100s'" + greek "Áãíùóôïò ðßíáêáò '%-.100s'" + hun "Ervenytelen tabla: '%-.100s'" + ita "Tabella '%-.100s' sconosciuta" + jpn "table '%-.100s' ¤Ï¤¢¤ê¤Þ¤»¤ó." + kor "Å×À̺í '%-.100s'´Â ¾Ë¼ö ¾øÀ½" + nor "Ukjent tabell '%-.100s'" + norwegian-ny "Ukjent tabell '%-.100s'" + pol "Nieznana tabela '%-.100s'" + por "Tabela '%-.100s' desconhecida" + rum "Tabela '%-.100s' este invalida" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.100s'" + serbian "Nepoznata tabela '%-.100s'" + slo "Neznáma tabuµka '%-.100s'" + spa "Tabla '%-.100s' desconocida" + swe "Okänd tabell '%-.100s'" + ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.100s'" +ER_NON_UNIQ_ERROR 23000 + cze "Sloupec '%-.64s' v %-.64s nen-Bí zcela jasný" + dan "Felt: '%-.64s' i tabel %-.64s er ikke entydigt" + nla "Kolom: '%-.64s' in %-.64s is niet eenduidig" + eng "Column '%-.64s' in %-.64s is ambiguous" + est "Väli '%-.64s' %-.64s-s ei ole ühene" + fre "Champ: '%-.64s' dans %-.64s est ambigu" + ger "Feld '%-.64s' in %-.64s ist nicht eindeutig" + greek "Ôï ðåäßï: '%-.64s' óå %-.64s äåí Ý÷åé êáèïñéóôåß" + hun "A(z) '%-.64s' oszlop %-.64s-ben ketertelmu" + ita "Colonna: '%-.64s' di %-.64s e` ambigua" + jpn "Column: '%-.64s' in %-.64s is ambiguous" + kor "Ä®·³: '%-.64s' in '%-.64s' ÀÌ ¸ðÈ£ÇÔ" + nor "Felt: '%-.64s' i tabell %-.64s er ikke entydig" + 
norwegian-ny "Kolonne: '%-.64s' i tabell %-.64s er ikkje eintydig" + pol "Kolumna: '%-.64s' w %-.64s jest dwuznaczna" + por "Coluna '%-.64s' em '%-.64s' é ambígua" + rum "Coloana: '%-.64s' in %-.64s este ambigua" + rus "óÔÏÌÂÅà '%-.64s' × %-.64s ÚÁÄÁÎ ÎÅÏÄÎÏÚÎÁÞÎÏ" + serbian "Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu" + slo "Pole: '%-.64s' v %-.64s je nejasné" + spa "La columna: '%-.64s' en %-.64s es ambigua" + swe "Kolumn '%-.64s' i %-.64s är inte unik" + ukr "óÔÏ×ÂÅÃØ '%-.64s' Õ %-.64s ×ÉÚÎÁÞÅÎÉÊ ÎÅÏÄÎÏÚÎÁÞÎÏ" +ER_SERVER_SHUTDOWN 08S01 + cze "Prob-Bíhá ukonèování práce serveru" + dan "Database nedlukning er i gang" + nla "Bezig met het stoppen van de server" + eng "Server shutdown in progress" + jps "Server ‚ð shutdown ’†...", + est "Serveri seiskamine käib" + fre "Arrêt du serveur en cours" + ger "Der Server wird heruntergefahren" + greek "Åíáñîç äéáäéêáóßáò áðïóýíäåóçò ôïõ åîõðçñåôçôÞ (server shutdown)" + hun "A szerver leallitasa folyamatban" + ita "Shutdown del server in corso" + jpn "Server ¤ò shutdown Ãæ..." + kor "Server°¡ ¼Ë´Ù¿î ÁßÀÔ´Ï´Ù." 
+ nor "Database nedkobling er i gang" + norwegian-ny "Tenar nedkopling er i gang" + pol "Trwa koñczenie dzia³ania serwera" + por "'Shutdown' do servidor em andamento" + rum "Terminarea serverului este in desfasurare" + rus "óÅÒ×ÅÒ ÎÁÈÏÄÉÔÓÑ × ÐÒÏÃÅÓÓÅ ÏÓÔÁÎÏ×ËÉ" + serbian "Gašenje servera je u toku" + slo "Prebieha ukonèovanie práce servera" + spa "Desconexion de servidor en proceso" + swe "Servern går nu ned" + ukr "úÁ×ÅÒÛÕ¤ÔØÓÑ ÒÁÂÏÔÁ ÓÅÒ×ÅÒÁ" +ER_BAD_FIELD_ERROR 42S22 S0022 + cze "Nezn-Bámý sloupec '%-.64s' v %-.64s" + dan "Ukendt kolonne '%-.64s' i tabel %-.64s" + nla "Onbekende kolom '%-.64s' in %-.64s" + eng "Unknown column '%-.64s' in '%-.64s'" + jps "'%-.64s' column ‚Í '%-.64s' ‚ɂ͂ ‚è‚Ü‚¹‚ñ.", + est "Tundmatu tulp '%-.64s' '%-.64s'-s" + fre "Champ '%-.64s' inconnu dans %-.64s" + ger "Unbekanntes Tabellenfeld '%-.64s' in %-.64s" + greek "Áãíùóôï ðåäßï '%-.64s' óå '%-.64s'" + hun "A(z) '%-.64s' oszlop ervenytelen '%-.64s'-ben" + ita "Colonna sconosciuta '%-.64s' in '%-.64s'" + jpn "'%-.64s' column ¤Ï '%-.64s' ¤Ë¤Ï¤¢¤ê¤Þ¤»¤ó." 
+ kor "Unknown Ä®·³ '%-.64s' in '%-.64s'" + nor "Ukjent kolonne '%-.64s' i tabell %-.64s" + norwegian-ny "Ukjent felt '%-.64s' i tabell %-.64s" + pol "Nieznana kolumna '%-.64s' w %-.64s" + por "Coluna '%-.64s' desconhecida em '%-.64s'" + rum "Coloana invalida '%-.64s' in '%-.64s'" + rus "îÅÉÚ×ÅÓÔÎÙÊ ÓÔÏÌÂÅà '%-.64s' × '%-.64s'" + serbian "Nepoznata kolona '%-.64s' u '%-.64s'" + slo "Neznáme pole '%-.64s' v '%-.64s'" + spa "La columna '%-.64s' en %-.64s es desconocida" + swe "Okänd kolumn '%-.64s' i %-.64s" + ukr "îÅצÄÏÍÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' Õ '%-.64s'" +ER_WRONG_FIELD_WITH_GROUP 42000 S1009 + cze "Pou-B¾ité '%-.64s' nebylo v group by" + dan "Brugte '%-.64s' som ikke var i group by" + nla "Opdracht gebruikt '%-.64s' dat niet in de GROUP BY voorkomt" + eng "'%-.64s' isn't in GROUP BY" + jps "'%-.64s' isn't in GROUP BY", + est "'%-.64s' puudub GROUP BY klauslis" + fre "'%-.64s' n'est pas dans 'group by'" + ger "'%-.64s' ist nicht in GROUP BY vorhanden" + greek "×ñçóéìïðïéÞèçêå '%-.64s' ðïõ äåí õðÞñ÷å óôï group by" + hun "Used '%-.64s' with wasn't in group by" + ita "Usato '%-.64s' che non e` nel GROUP BY" + kor "'%-.64s'Àº GROUP BY¼Ó¿¡ ¾øÀ½" + nor "Brukte '%-.64s' som ikke var i group by" + norwegian-ny "Brukte '%-.64s' som ikkje var i group by" + pol "U¿yto '%-.64s' bez umieszczenia w group by" + por "'%-.64s' não está em 'GROUP BY'" + rum "'%-.64s' nu exista in clauza GROUP BY" + rus "'%-.64s' ÎÅ ÐÒÉÓÕÔÓÔ×ÕÅÔ × GROUP BY" + serbian "Entitet '%-.64s' nije naveden u komandi 'GROUP BY'" + slo "Pou¾ité '%-.64s' nebolo v 'group by'" + spa "Usado '%-.64s' el cual no esta group by" + swe "'%-.64s' finns inte i GROUP BY" + ukr "'%-.64s' ÎÅ ¤ Õ GROUP BY" +ER_WRONG_GROUP_FIELD 42000 S1009 + cze "Nemohu pou-B¾ít group na '%-.64s'" + dan "Kan ikke gruppere på '%-.64s'" + nla "Kan '%-.64s' niet groeperen" + eng "Can't group on '%-.64s'" + est "Ei saa grupeerida '%-.64s' järgi" + fre "Ne peut regrouper '%-.64s'" + ger "Gruppierung über '%-.64s' nicht möglich" + greek "Áäýíáôç ç 
ïìáäïðïßçóç (group on) '%-.64s'" + hun "A group nem hasznalhato: '%-.64s'" + ita "Impossibile raggruppare per '%-.64s'" + kor "'%-.64s'¸¦ ±×·ìÇÒ ¼ö ¾øÀ½" + nor "Kan ikke gruppere på '%-.64s'" + norwegian-ny "Kan ikkje gruppere på '%-.64s'" + pol "Nie mo¿na grupowaæ po '%-.64s'" + por "Não pode agrupar em '%-.64s'" + rum "Nu pot sa grupez pe (group on) '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÇÒÕÐÐÉÒÏ×ËÕ ÐÏ '%-.64s'" + serbian "Ne mogu da grupišem po '%-.64s'" + slo "Nemô¾em pou¾i» 'group' na '%-.64s'" + spa "No puedo agrupar por '%-.64s'" + swe "Kan inte använda GROUP BY med '%-.64s'" + ukr "îÅ ÍÏÖÕ ÇÒÕÐÕ×ÁÔÉ ÐÏ '%-.64s'" +ER_WRONG_SUM_SELECT 42000 S1009 + cze "P-Bøíkaz obsahuje zároveò funkci sum a sloupce" + dan "Udtrykket har summer (sum) funktioner og kolonner i samme udtryk" + nla "Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht" + eng "Statement has sum functions and columns in same statement" + est "Lauses on korraga nii tulbad kui summeerimisfunktsioonid" + fre "Vous demandez la fonction sum() et des champs dans la même commande" + ger "Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt" + greek "Ç äéáôýðùóç ðåñéÝ÷åé sum functions êáé columns óôçí ßäéá äéáôýðùóç" + ita "Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY" + kor "Statement °¡ sum±â´ÉÀ» µ¿ÀÛÁßÀ̰í Ä®·³µµ µ¿ÀÏÇÑ statementÀÔ´Ï´Ù." + nor "Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk" + norwegian-ny "Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk" + pol "Zapytanie ma funkcje sumuj?ce i kolumny w tym samym zapytaniu" + por "Cláusula contém funções de soma e colunas juntas" + rum "Comanda are functii suma si coloane in aceeasi comanda" + rus "÷ÙÒÁÖÅÎÉÅ ÓÏÄÅÒÖÉÔ ÇÒÕÐÐÏ×ÙÅ ÆÕÎËÃÉÉ É ÓÔÏÌÂÃÙ, ÎÏ ÎÅ ×ËÌÀÞÁÅÔ GROUP BY. á ËÁË ×Ù ÕÍÕÄÒÉÌÉÓØ ÐÏÌÕÞÉÔØ ÜÔÏ ÓÏÏÂÝÅÎÉÅ Ï ÏÛÉÂËÅ?" 
+ serbian "Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme" + slo "Príkaz obsahuje zároveò funkciu 'sum' a poµa" + spa "El estamento tiene funciones de suma y columnas en el mismo estamento" + swe "Kommandot har både sum functions och enkla funktioner" + ukr "õ ×ÉÒÁÚ¦ ×ÉËÏÒÉÓÔÁÎÏ Ð¦ÄÓÕÍÏ×ÕÀÞ¦ ÆÕÎËæ§ ÐÏÒÑÄ Ú ¦ÍÅÎÁÍÉ ÓÔÏ×Âæ×" +ER_WRONG_VALUE_COUNT 21S01 + cze "Po-Bèet sloupcù neodpovídá zadané hodnotì" + dan "Kolonne tæller stemmer ikke med antallet af værdier" + nla "Het aantal kolommen komt niet overeen met het aantal opgegeven waardes" + eng "Column count doesn't match value count" + est "Tulpade arv erineb väärtuste arvust" + ger "Die Anzahl der Spalten entspricht nicht der Anzahl der Werte" + greek "Ôï Column count äåí ôáéñéÜæåé ìå ôï value count" + hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel" + ita "Il numero delle colonne non e` uguale al numero dei valori" + kor "Ä®·³ÀÇ Ä«¿îÆ®°¡ °ªÀÇ Ä«¿îÆ®¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù." + nor "Felt telling stemmer verdi telling" + norwegian-ny "Kolonne telling stemmer verdi telling" + pol "Liczba kolumn nie odpowiada liczbie warto?ci" + por "Contagem de colunas não confere com a contagem de valores" + rum "Numarul de coloane nu este acelasi cu numarul valoarei" + rus "ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ" + serbian "Broj kolona ne odgovara broju vrednosti" + slo "Poèet polí nezodpovedá zadanej hodnote" + spa "La columna con count no tiene valores para contar" + swe "Antalet kolumner motsvarar inte antalet värden" + ukr "ë¦ÌØË¦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØË¦ÓÔÀ ÚÎÁÞÅÎØ" +ER_TOO_LONG_IDENT 42000 S1009 + cze "Jm-Béno identifikátoru '%-.100s' je pøíli¹ dlouhé" + dan "Navnet '%-.100s' er for langt" + nla "Naam voor herkenning '%-.100s' is te lang" + eng "Identifier name '%-.100s' is too long" + jps "Identifier name '%-.100s' ‚Í’·‚·‚¬‚Ü‚·", + est "Identifikaatori '%-.100s' nimi on liiga pikk" + fre "Le nom de l'identificateur '%-.100s' est trop long" + ger "Name des Bezeichners '%-.100s' 
ist zu lang" + greek "Ôï identifier name '%-.100s' åßíáé ðïëý ìåãÜëï" + hun "A(z) '%-.100s' azonositonev tul hosszu." + ita "Il nome dell'identificatore '%-.100s' e` troppo lungo" + jpn "Identifier name '%-.100s' ¤ÏŤ¹¤®¤Þ¤¹" + kor "Identifier '%-.100s'´Â ³Ê¹« ±æ±º¿ä." + nor "Identifikator '%-.100s' er for lang" + norwegian-ny "Identifikator '%-.100s' er for lang" + pol "Nazwa identyfikatora '%-.100s' jest zbyt d³uga" + por "Nome identificador '%-.100s' é longo demais" + rum "Numele indentificatorului '%-.100s' este prea lung" + rus "óÌÉÛËÏÍ ÄÌÉÎÎÙÊ ÉÄÅÎÔÉÆÉËÁÔÏÒ '%-.100s'" + serbian "Ime '%-.100s' je predugaèko" + slo "Meno identifikátora '%-.100s' je príli¹ dlhé" + spa "El nombre del identificador '%-.100s' es demasiado grande" + swe "Kolumnnamn '%-.100s' är för långt" + ukr "¶Í'Ñ ¦ÄÅÎÔÉÆ¦ËÁÔÏÒÁ '%-.100s' ÚÁÄÏ×ÇÅ" +ER_DUP_FIELDNAME 42S21 S1009 + cze "Zdvojen-Bé jméno sloupce '%-.64s'" + dan "Feltnavnet '%-.64s' findes allerede" + nla "Dubbele kolom naam '%-.64s'" + eng "Duplicate column name '%-.64s'" + jps "'%-.64s' ‚Æ‚¢‚¤ column –¼‚Íd•¡‚µ‚Ă܂·", + est "Kattuv tulba nimi '%-.64s'" + fre "Nom du champ '%-.64s' déjà utilisé" + ger "Doppelter Spaltenname: '%-.64s'" + greek "ÅðáíÜëçøç column name '%-.64s'" + hun "Duplikalt oszlopazonosito: '%-.64s'" + ita "Nome colonna duplicato '%-.64s'" + jpn "'%-.64s' ¤È¤¤¤¦ column ̾¤Ï½ÅÊ£¤·¤Æ¤Þ¤¹" + kor "Áߺ¹µÈ Ä®·³ À̸§: '%-.64s'" + nor "Feltnavnet '%-.64s' eksisterte fra før" + norwegian-ny "Feltnamnet '%-.64s' eksisterte frå før" + pol "Powtórzona nazwa kolumny '%-.64s'" + por "Nome da coluna '%-.64s' duplicado" + rum "Numele coloanei '%-.64s' e duplicat" + rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ÓÔÏÌÂÃÁ '%-.64s'" + serbian "Duplirano ime kolone '%-.64s'" + slo "Opakované meno poµa '%-.64s'" + spa "Nombre de columna duplicado '%-.64s'" + swe "Kolumnnamn '%-.64s finns flera gånger" + ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.64s'" +ER_DUP_KEYNAME 42000 S1009 + cze "Zdvojen-Bé jméno klíèe '%-.64s'" + dan "Indeksnavnet '%-.64s' findes allerede" + nla "Dubbele 
zoeksleutel naam '%-.64s'" + eng "Duplicate key name '%-.64s'" + jps "'%-.64s' ‚Æ‚¢‚¤ key ‚Ì–¼‘O‚Íd•¡‚µ‚Ä‚¢‚Ü‚·", + est "Kattuv võtme nimi '%-.64s'" + fre "Nom de clef '%-.64s' déjà utilisé" + ger "Doppelter Name für Schlüssel vorhanden: '%-.64s'" + greek "ÅðáíÜëçøç key name '%-.64s'" + hun "Duplikalt kulcsazonosito: '%-.64s'" + ita "Nome chiave duplicato '%-.64s'" + jpn "'%-.64s' ¤È¤¤¤¦ key ¤Î̾Á°¤Ï½ÅÊ£¤·¤Æ¤¤¤Þ¤¹" + kor "Áߺ¹µÈ Ű À̸§ : '%-.64s'" + nor "Nøkkelnavnet '%-.64s' eksisterte fra før" + norwegian-ny "Nøkkelnamnet '%-.64s' eksisterte frå før" + pol "Powtórzony nazwa klucza '%-.64s'" + por "Nome da chave '%-.64s' duplicado" + rum "Numele cheiei '%-.64s' e duplicat" + rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ËÌÀÞÁ '%-.64s'" + serbian "Duplirano ime kljuèa '%-.64s'" + slo "Opakované meno kµúèa '%-.64s'" + spa "Nombre de clave duplicado '%-.64s'" + swe "Nyckelnamn '%-.64s' finns flera gånger" + ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ËÌÀÞÁ '%-.64s'" +ER_DUP_ENTRY 23000 S1009 + cze "Zdvojen-Bý klíè '%-.64s' (èíslo klíèe %d)" + dan "Ens værdier '%-.64s' for indeks %d" + nla "Dubbele ingang '%-.64s' voor zoeksleutel %d" + eng "Duplicate entry '%-.64s' for key %d" + jps "'%-.64s' ‚Í key %d ‚É‚¨‚¢‚Äd•¡‚µ‚Ä‚¢‚Ü‚·", + est "Kattuv väärtus '%-.64s' võtmele %d" + fre "Duplicata du champ '%-.64s' pour la clef %d" + ger "Doppelter Eintrag '%-.64s' für Schlüssel %d" + greek "ÄéðëÞ åããñáöÞ '%-.64s' ãéá ôï êëåéäß %d" + hun "Duplikalt bejegyzes '%-.64s' a %d kulcs szerint." 
+ ita "Valore duplicato '%-.64s' per la chiave %d" + jpn "'%-.64s' ¤Ï key %d ¤Ë¤ª¤¤¤Æ½ÅÊ£¤·¤Æ¤¤¤Þ¤¹" + kor "Áߺ¹µÈ ÀÔ·Â °ª '%-.64s': key %d" + nor "Like verdier '%-.64s' for nøkkel %d" + norwegian-ny "Like verdiar '%-.64s' for nykkel %d" + pol "Powtórzone wyst?pienie '%-.64s' dla klucza %d" + por "Entrada '%-.64s' duplicada para a chave %d" + rum "Cimpul '%-.64s' e duplicat pentru cheia %d" + rus "äÕÂÌÉÒÕÀÝÁÑÓÑ ÚÁÐÉÓØ '%-.64s' ÐÏ ËÌÀÞÕ %d" + serbian "Dupliran unos '%-.64s' za kljuè '%d'" + slo "Opakovaný kµúè '%-.64s' (èíslo kµúèa %d)" + spa "Entrada duplicada '%-.64s' para la clave %d" + swe "Dubbel nyckel '%-.64s' för nyckel %d" + ukr "äÕÂÌÀÀÞÉÊ ÚÁÐÉÓ '%-.64s' ÄÌÑ ËÌÀÞÁ %d" +ER_WRONG_FIELD_SPEC 42000 S1009 + cze "Chybn-Bá specifikace sloupce '%-.64s'" + dan "Forkert kolonnespecifikaton for felt '%-.64s'" + nla "Verkeerde kolom specificatie voor kolom '%-.64s'" + eng "Incorrect column specifier for column '%-.64s'" + est "Vigane tulba kirjeldus tulbale '%-.64s'" + fre "Mauvais paramètre de champ pour le champ '%-.64s'" + ger "Falsche Spezifikation für Feld '%-.64s'" + greek "ÅóöáëìÝíï column specifier ãéá ôï ðåäßï '%-.64s'" + hun "Rossz oszlopazonosito: '%-.64s'" + ita "Specifica errata per la colonna '%-.64s'" + kor "Ä®·³ '%-.64s'ÀÇ ºÎÁ¤È®ÇÑ Ä®·³ Á¤ÀÇÀÚ" + nor "Feil kolonne spesifikator for felt '%-.64s'" + norwegian-ny "Feil kolonne spesifikator for kolonne '%-.64s'" + pol "B³êdna specyfikacja kolumny dla kolumny '%-.64s'" + por "Especificador de coluna incorreto para a coluna '%-.64s'" + rum "Specificandul coloanei '%-.64s' este incorect" + rus "îÅËÏÒÒÅËÔÎÙÊ ÏÐÒÅÄÅÌÉÔÅÌØ ÓÔÏÌÂÃÁ ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s'" + serbian "Pogrešan naziv kolone za kolonu '%-.64s'" + slo "Chyba v ¹pecifikácii poµa '%-.64s'" + spa "Especificador de columna erroneo para la columna '%-.64s'" + swe "Felaktigt kolumntyp för kolumn '%-.64s'" + ukr "îÅצÒÎÉÊ ÓÐÅÃÉÆ¦ËÁÔÏÒ ÓÔÏ×ÂÃÑ '%-.64s'" +ER_PARSE_ERROR 42000 + cze "%s bl-Bízko '%-.80s' na øádku %d" + dan "%s nær '%-.80s' på linje %d" + nla "%s bij 
'%-.80s' in regel %d" + eng "%s near '%-.80s' at line %d" + jps "%s : '%-.80s' •t‹ß : %d s–Ú", + est "%s '%-.80s' ligidal real %d" + fre "%s près de '%-.80s' à la ligne %d" + ger "%s bei '%-.80s' in Zeile %d" + greek "%s ðëçóßïí '%-.80s' óôç ãñáììÞ %d" + hun "A %s a '%-.80s'-hez kozeli a %d sorban" + ita "%s vicino a '%-.80s' linea %d" + jpn "%s : '%-.80s' ÉÕ¶á : %d ¹ÔÌÜ" + kor "'%s' ¿¡·¯ °°À¾´Ï´Ù. ('%-.80s' ¸í·É¾î ¶óÀÎ %d)" + nor "%s nær '%-.80s' på linje %d" + norwegian-ny "%s attmed '%-.80s' på line %d" + pol "%s obok '%-.80s' w linii %d" + por "%s próximo a '%-.80s' na linha %d" + rum "%s linga '%-.80s' pe linia %d" + rus "%s ÏËÏÌÏ '%-.80s' ÎÁ ÓÔÒÏËÅ %d" + serbian "'%s' u iskazu '%-.80s' na liniji %d" + slo "%s blízko '%-.80s' na riadku %d" + spa "%s cerca '%-.80s' en la linea %d" + swe "%s nära '%-.80s' på rad %d" + ukr "%s ¦ÌÑ '%-.80s' × ÓÔÒÏæ %d" +ER_EMPTY_QUERY 42000 + cze "V-Býsledek dotazu je prázdný" + dan "Forespørgsel var tom" + nla "Query was leeg" + eng "Query was empty" + jps "Query ‚ª‹ó‚Å‚·.", + est "Tühi päring" + fre "Query est vide" + ger "Leere Abfrage" + greek "Ôï åñþôçìá (query) ðïõ èÝóáôå Þôáí êåíü" + hun "Ures lekerdezes." + ita "La query e` vuota" + jpn "Query ¤¬¶õ¤Ç¤¹." + kor "Äõ¸®°á°ú°¡ ¾ø½À´Ï´Ù." 
+ nor "Forespørsel var tom" + norwegian-ny "Førespurnad var tom" + pol "Zapytanie by³o puste" + por "Consulta (query) estava vazia" + rum "Query-ul a fost gol" + rus "úÁÐÒÏÓ ÏËÁÚÁÌÓÑ ÐÕÓÔÙÍ" + serbian "Upit je bio prazan" + slo "Výsledok po¾iadavky bol prázdny" + spa "La query estaba vacia" + swe "Frågan var tom" + ukr "ðÕÓÔÉÊ ÚÁÐÉÔ" +ER_NONUNIQ_TABLE 42000 S1009 + cze "Nejednozna-Bèná tabulka/alias: '%-.64s'" + dan "Tabellen/aliaset: '%-.64s' er ikke unikt" + nla "Niet unieke waarde tabel/alias: '%-.64s'" + eng "Not unique table/alias: '%-.64s'" + jps "'%-.64s' ‚͈êˆÓ‚Ì table/alias –¼‚ł͂ ‚è‚Ü‚¹‚ñ", + est "Ei ole unikaalne tabel/alias '%-.64s'" + fre "Table/alias: '%-.64s' non unique" + ger "Tabellenname/Alias '%-.64s' nicht eindeutig" + greek "Áäýíáôç ç áíåýñåóç unique table/alias: '%-.64s'" + hun "Nem egyedi tabla/alias: '%-.64s'" + ita "Tabella/alias non unico: '%-.64s'" + jpn "'%-.64s' ¤Ï°ì°Õ¤Î table/alias ̾¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó" + kor "Unique ÇÏÁö ¾ÊÀº Å×À̺í/alias: '%-.64s'" + nor "Ikke unikt tabell/alias: '%-.64s'" + norwegian-ny "Ikkje unikt tabell/alias: '%-.64s'" + pol "Tabela/alias nie s? 
unikalne: '%-.64s'" + por "Tabela/alias '%-.64s' não única" + rum "Tabela/alias: '%-.64s' nu este unic" + rus "ðÏ×ÔÏÒÑÀÝÁÑÓÑ ÔÁÂÌÉÃÁ/ÐÓÅ×ÄÏÎÉÍ '%-.64s'" + serbian "Tabela ili alias nisu bili jedinstveni: '%-.64s'" + slo "Nie jednoznaèná tabuµka/alias: '%-.64s'" + spa "Tabla/alias: '%-.64s' es no unica" + swe "Icke unikt tabell/alias: '%-.64s'" + ukr "îÅÕΦËÁÌØÎÁ ÔÁÂÌÉÃÑ/ÐÓÅ×ÄÏΦÍ: '%-.64s'" +ER_INVALID_DEFAULT 42000 S1009 + cze "Chybn-Bá defaultní hodnota pro '%-.64s'" + dan "Ugyldig standardværdi for '%-.64s'" + nla "Foutieve standaard waarde voor '%-.64s'" + eng "Invalid default value for '%-.64s'" + est "Vigane vaikeväärtus '%-.64s' jaoks" + fre "Valeur par défaut invalide pour '%-.64s'" + ger "Fehlerhafter Vorgabewert (DEFAULT) für '%-.64s'" + greek "ÅóöáëìÝíç ðñïêáèïñéóìÝíç ôéìÞ (default value) ãéá '%-.64s'" + hun "Ervenytelen ertek: '%-.64s'" + ita "Valore di default non valido per '%-.64s'" + kor "'%-.64s'ÀÇ À¯È¿ÇÏÁö ¸øÇÑ µðÆúÆ® °ªÀ» »ç¿ëÇϼ̽À´Ï´Ù." + nor "Ugyldig standardverdi for '%-.64s'" + norwegian-ny "Ugyldig standardverdi for '%-.64s'" + pol "Niew³a?ciwa warto?æ domy?lna dla '%-.64s'" + por "Valor padrão (default) inválido para '%-.64s'" + rum "Valoarea de default este invalida pentru '%-.64s'" + rus "îÅËÏÒÒÅËÔÎÏÅ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ '%-.64s'" + serbian "Loša default vrednost za '%-.64s'" + slo "Chybná implicitná hodnota pre '%-.64s'" + spa "Valor por defecto invalido para '%-.64s'" + swe "Ogiltigt DEFAULT värde för '%-.64s'" + ukr "îÅצÒÎÅ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ ÄÌÑ '%-.64s'" +ER_MULTIPLE_PRI_KEY 42000 S1009 + cze "Definov-Báno více primárních klíèù" + dan "Flere primærnøgler specificeret" + nla "Meerdere primaire zoeksleutels gedefinieerd" + eng "Multiple primary key defined" + jps "•¡”‚Ì primary key ‚ª’è‹`‚³‚ê‚Ü‚µ‚½", + est "Mitut primaarset võtit ei saa olla" + fre "Plusieurs clefs primaires définies" + ger "Mehrere Primärschlüssel (PRIMARY KEY) definiert" + greek "Ðåñéóóüôåñá áðü Ýíá primary key ïñßóôçêáí" + hun "Tobbszoros elsodleges kulcs 
definialas." + ita "Definite piu` chiave primarie" + jpn "Ê£¿ô¤Î primary key ¤¬ÄêµÁ¤µ¤ì¤Þ¤·¤¿" + kor "Multiple primary key°¡ Á¤ÀǵǾî ÀÖ½¿" + nor "Fleire primærnøkle spesifisert" + norwegian-ny "Fleire primærnyklar spesifisert" + pol "Zdefiniowano wiele kluczy podstawowych" + por "Definida mais de uma chave primária" + rum "Chei primare definite de mai multe ori" + rus "õËÁÚÁÎÏ ÎÅÓËÏÌØËÏ ÐÅÒ×ÉÞÎÙÈ ËÌÀÞÅÊ" + serbian "Definisani višestruki primarni kljuèevi" + slo "Zadefinovaných viac primárnych kµúèov" + spa "Multiples claves primarias definidas" + swe "Flera PRIMARY KEY använda" + ukr "ðÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ ×ÉÚÎÁÞÅÎÏ ÎÅÏÄÎÏÒÁÚÏ×Ï" +ER_TOO_MANY_KEYS 42000 S1009 + cze "Zad-Báno pøíli¹ mnoho klíèù, je povoleno nejvíce %d klíèù" + dan "For mange nøgler specificeret. Kun %d nøgler må bruges" + nla "Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan" + eng "Too many keys specified; max %d keys allowed" + jps "key ‚ÌŽw’肪‘½‚·‚¬‚Ü‚·. key ‚ÍÅ‘å %d ‚܂łł·", + est "Liiga palju võtmeid. Maksimaalselt võib olla %d võtit" + fre "Trop de clefs sont définies. Maximum de %d clefs alloué" + ger "Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt" + greek "ÐÜñá ðïëëÜ key ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé" + hun "Tul sok kulcs. Maximum %d kulcs engedelyezett." + ita "Troppe chiavi. Sono ammesse max %d chiavi" + jpn "key ¤Î»ØÄ꤬¿¤¹¤®¤Þ¤¹. key ¤ÏºÇÂç %d ¤Þ¤Ç¤Ç¤¹" + kor "³Ê¹« ¸¹Àº ۰¡ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %dÀÇ Å°°¡ °¡´ÉÇÔ" + nor "For mange nøkler spesifisert. Maks %d nøkler tillatt" + norwegian-ny "For mange nykler spesifisert. Maks %d nyklar tillatt" + pol "Okre?lono zbyt wiele kluczy. Dostêpnych jest maksymalnie %d kluczy" + por "Especificadas chaves demais. O máximo permitido são %d chaves" + rum "Prea multe chei. Numarul de chei maxim este %d" + rus "õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ËÌÀÞÅÊ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ËÌÀÞÅÊ" + serbian "Navedeno je previše kljuèeva. Maksimum %d kljuèeva je dozvoljeno" + slo "Zadaných ríli¹ veµa kµúèov. 
Najviac %d kµúèov je povolených" + spa "Demasiadas claves primarias declaradas. Un maximo de %d claves son permitidas" + swe "För många nycklar använda. Man får ha högst %d nycklar" + ukr "úÁÂÁÇÁÔÏ ËÌÀÞ¦× ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎÅ Â¦ÌØÛÅ %d ËÌÀÞ¦×" +ER_TOO_MANY_KEY_PARTS 42000 S1009 + cze "Zad-Báno pøíli¹ mnoho èást klíèù, je povoleno nejvíce %d èástí" + dan "For mange nøgledele specificeret. Kun %d dele må bruges" + nla "Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan" + eng "Too many key parts specified; max %d parts allowed" + est "Võti koosneb liiga paljudest osadest. Maksimaalselt võib olla %d osa" + fre "Trop de parties specifiées dans la clef. Maximum de %d parties" + ger "Zu viele Teilschlüssel definiert. Maximal %d Teilschlüssel erlaubt" + greek "ÐÜñá ðïëëÜ key parts ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé" + hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett" + ita "Troppe parti di chiave specificate. Sono ammesse max %d parti" + kor "³Ê¹« ¸¹Àº Ű ºÎºÐ(parts)µéÀÌ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %d ºÎºÐÀÌ °¡´ÉÇÔ" + nor "For mange nøkkeldeler spesifisert. Maks %d deler tillatt" + norwegian-ny "For mange nykkeldelar spesifisert. Maks %d delar tillatt" + pol "Okre?lono zbyt wiele czê?ci klucza. Dostêpnych jest maksymalnie %d czê?ci" + por "Especificadas partes de chave demais. O máximo permitido são %d partes" + rum "Prea multe chei. Numarul de chei maxim este %d" + rus "õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÞÁÓÔÅÊ ÓÏÓÔÁ×ÎÏÇÏ ËÌÀÞÁ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ÞÁÓÔÅÊ" + serbian "Navedeno je previše delova kljuèa. Maksimum %d delova je dozvoljeno" + slo "Zadaných ríli¹ veµa èastí kµúèov. Je povolených najviac %d èastí" + spa "Demasiadas partes de clave declaradas. Un maximo de %d partes son permitidas" + swe "För många nyckeldelar använda. Man får ha högst %d nyckeldelar" + ukr "úÁÂÁÇÁÔÏ ÞÁÓÔÉÎ ËÌÀÞÁ ÚÁÚÎÁÞÅÎÏ. 
äÏÚ×ÏÌÅÎÏ ÎÅ Â¦ÌØÛÅ %d ÞÁÓÔÉÎ" +ER_TOO_LONG_KEY 42000 S1009 + cze "Zadan-Bý klíè byl pøíli¹ dlouhý, nejvìt¹í délka klíèe je %d" + dan "Specificeret nøgle var for lang. Maksimal nøglelængde er %d" + nla "Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d" + eng "Specified key was too long; max key length is %d bytes" + jps "key ‚ª’·‚·‚¬‚Ü‚·. key ‚Ì’·‚³‚ÍÅ‘å %d ‚Å‚·", + est "Võti on liiga pikk. Maksimaalne võtmepikkus on %d" + fre "La clé est trop longue. Longueur maximale: %d" + ger "Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d" + greek "Ôï êëåéäß ðïõ ïñßóèçêå åßíáé ðïëý ìåãÜëï. Ôï ìÝãéóôï ìÞêïò åßíáé %d" + hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d" + ita "La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d" + jpn "key ¤¬Ä¹¤¹¤®¤Þ¤¹. key ¤ÎŤµ¤ÏºÇÂç %d ¤Ç¤¹" + kor "Á¤ÀÇµÈ Å°°¡ ³Ê¹« ±é´Ï´Ù. ÃÖ´ë ŰÀÇ ±æÀÌ´Â %dÀÔ´Ï´Ù." + nor "Spesifisert nøkkel var for lang. Maks nøkkellengde er is %d" + norwegian-ny "Spesifisert nykkel var for lang. Maks nykkellengde er %d" + pol "Zdefinowany klucz jest zbyt d³ugi. Maksymaln? d³ugo?ci? klucza jest %d" + por "Chave especificada longa demais. O comprimento de chave máximo permitido é %d" + rum "Cheia specificata este prea lunga. Marimea maxima a unei chei este de %d" + rus "õËÁÚÁÎ ÓÌÉÛËÏÍ ÄÌÉÎÎÙÊ ËÌÀÞ. íÁËÓÉÍÁÌØÎÁÑ ÄÌÉÎÁ ËÌÀÞÁ ÓÏÓÔÁ×ÌÑÅÔ %d ÂÁÊÔ" + serbian "Navedeni kljuè je predug. Maksimalna dužina kljuèa je %d" + slo "Zadaný kµúè je príli¹ dlhý, najväè¹ia då¾ka kµúèa je %d" + spa "Declaracion de clave demasiado larga. La maxima longitud de clave es %d" + swe "För lång nyckel. Högsta tillåtna nyckellängd är %d" + ukr "úÁÚÎÁÞÅÎÉÊ ËÌÀÞ ÚÁÄÏ×ÇÉÊ. 
îÁÊÂ¦ÌØÛÁ ÄÏ×ÖÉÎÁ ËÌÀÞÁ %d ÂÁÊÔ¦×" +ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009 + cze "Kl-Bíèový sloupec '%-.64s' v tabulce neexistuje" + dan "Nøglefeltet '%-.64s' eksisterer ikke i tabellen" + nla "Zoeksleutel kolom '%-.64s' bestaat niet in tabel" + eng "Key column '%-.64s' doesn't exist in table" + jps "Key column '%-.64s' ‚ªƒe[ƒuƒ‹‚É‚ ‚è‚Ü‚¹‚ñ.", + est "Võtme tulp '%-.64s' puudub tabelis" + fre "La clé '%-.64s' n'existe pas dans la table" + ger "In der Tabelle gibt es kein Schlüsselfeld '%-.64s'" + greek "Ôï ðåäßï êëåéäß '%-.64s' äåí õðÜñ÷åé óôïí ðßíáêá" + hun "A(z) '%-.64s'kulcsoszlop nem letezik a tablaban" + ita "La colonna chiave '%-.64s' non esiste nella tabella" + jpn "Key column '%-.64s' ¤¬¥Æ¡¼¥Ö¥ë¤Ë¤¢¤ê¤Þ¤»¤ó." + kor "Key Ä®·³ '%-.64s'´Â Å×ÀÌºí¿¡ Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." + nor "Nøkkel felt '%-.64s' eksiterer ikke i tabellen" + norwegian-ny "Nykkel kolonne '%-.64s' eksiterar ikkje i tabellen" + pol "Kolumna '%-.64s' zdefiniowana w kluczu nie istnieje w tabeli" + por "Coluna chave '%-.64s' não existe na tabela" + rum "Coloana cheie '%-.64s' nu exista in tabela" + rus "ëÌÀÞÅ×ÏÊ ÓÔÏÌÂÅà '%-.64s' × ÔÁÂÌÉÃÅ ÎÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Kljuèna kolona '%-.64s' ne postoji u tabeli" + slo "Kµúèový ståpec '%-.64s' v tabuµke neexistuje" + spa "La columna clave '%-.64s' no existe en la tabla" + swe "Nyckelkolumn '%-.64s' finns inte" + ukr "ëÌÀÞÏ×ÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ¦ÓÎÕ¤ Õ ÔÁÂÌÉæ" +ER_BLOB_USED_AS_KEY 42000 S1009 + cze "Blob sloupec '%-.64s' nem-Bù¾e být pou¾it jako klíè" + dan "BLOB feltet '%-.64s' kan ikke bruges ved specifikation af indeks" + nla "BLOB kolom '%-.64s' kan niet gebruikt worden bij zoeksleutel specificatie" + eng "BLOB column '%-.64s' can't be used in key specification with the used table type" + est "BLOB-tüüpi tulpa '%-.64s' ei saa kasutada võtmena" + fre "Champ BLOB '%-.64s' ne peut être utilisé dans une clé" + ger "BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden" + greek "Ðåäßï ôýðïõ Blob '%-.64s' äåí ìðïñåß íá 
÷ñçóéìïðïéçèåß óôïí ïñéóìü åíüò êëåéäéïý (key specification)" + hun "Blob objektum '%-.64s' nem hasznalhato kulcskent" + ita "La colonna BLOB '%-.64s' non puo` essere usata nella specifica della chiave" + kor "BLOB Ä®·³ '%-.64s'´Â Ű Á¤ÀÇ¿¡¼ »ç¿ëµÉ ¼ö ¾ø½À´Ï´Ù." + nor "Blob felt '%-.64s' kan ikke brukes ved spesifikasjon av nøkler" + norwegian-ny "Blob kolonne '%-.64s' kan ikkje brukast ved spesifikasjon av nyklar" + pol "Kolumna typu Blob '%-.64s' nie mo¿e byæ u¿yta w specyfikacji klucza" + por "Coluna BLOB '%-.64s' não pode ser utilizada na especificação de chave para o tipo de tabela usado" + rum "Coloana de tip BLOB '%-.64s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit" + rus "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ËÁË ÚÎÁÞÅÎÉÅ ËÌÀÞÁ × ÔÁÂÌÉÃÅ ÔÁËÏÇÏ ÔÉÐÁ" + serbian "BLOB kolona '%-.64s' ne može biti upotrebljena za navoðenje kljuèa sa tipom tabele koji se trenutno koristi" + slo "Blob pole '%-.64s' nemô¾e by» pou¾ité ako kµúè" + spa "La columna Blob '%-.64s' no puede ser usada en una declaracion de clave" + swe "En BLOB '%-.64s' kan inte vara nyckel med den använda tabelltypen" + ukr "BLOB ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ × ÃØÏÍÕ ÔÉЦ ÔÁÂÌÉæ" +ER_TOO_BIG_FIELDLENGTH 42000 S1009 + cze "P-Bøíli¹ velká délka sloupce '%-.64s' (nejvíce %d). Pou¾ijte BLOB" + dan "For stor feltlængde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet" + nla "Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB" + eng "Column length too big for column '%-.64s' (max = %d); use BLOB or TEXT instead" + jps "column '%-.64s' ‚Í,Šm•Û‚·‚é column ‚̑傫‚³‚ª‘½‚·‚¬‚Ü‚·. (Å‘å %d ‚Ü‚Å). BLOB ‚ð‚©‚í‚è‚ÉŽg—p‚µ‚Ä‚‚¾‚³‚¢.", + est "Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB väljatüüpi" + fre "Champ '%-.64s' trop long (max = %d). Utilisez un BLOB" + ger "Feldlänge für Feld '%-.64s' zu groß (maximal %d). BLOB- oder TEXT-Spaltentyp verwenden!" 
+ greek "Ðïëý ìåãÜëï ìÞêïò ãéá ôï ðåäßï '%-.64s' (max = %d). Ðáñáêáëþ ÷ñçóéìïðïéåßóôå ôïí ôýðï BLOB" + hun "A(z) '%-.64s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb." + ita "La colonna '%-.64s' e` troppo grande (max=%d). Utilizza un BLOB." + jpn "column '%-.64s' ¤Ï,³ÎÊݤ¹¤ë column ¤ÎÂ礤µ¤¬Â¿¤¹¤®¤Þ¤¹. (ºÇÂç %d ¤Þ¤Ç). BLOB ¤ò¤«¤ï¤ê¤Ë»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤." + kor "Ä®·³ '%-.64s'ÀÇ Ä®·³ ±æÀ̰¡ ³Ê¹« ±é´Ï´Ù (ÃÖ´ë = %d). ´ë½Å¿¡ BLOB¸¦ »ç¿ëÇϼ¼¿ä." + nor "For stor nøkkellengde for kolonne '%-.64s' (maks = %d). Bruk BLOB istedenfor" + norwegian-ny "For stor nykkellengde for felt '%-.64s' (maks = %d). Bruk BLOB istadenfor" + pol "Zbyt du¿a d³ugo?æ kolumny '%-.64s' (maks. = %d). W zamian u¿yj typu BLOB" + por "Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar" + rum "Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). Foloseste BLOB mai bine" + rus "óÌÉÛËÏÍ ÂÏÌØÛÁÑ ÄÌÉÎÁ ÓÔÏÌÂÃÁ '%-.64s' (ÍÁËÓÉÍÕÍ = %d). éÓÐÏÌØÚÕÊÔÅ ÔÉÐ BLOB ÉÌÉ TEXT ×ÍÅÓÔÏ ÔÅËÕÝÅÇÏ" + serbian "Previše podataka za kolonu '%-.64s' (maksimum je %d). Upotrebite BLOB polje" + slo "Príli¹ veµká då¾ka pre pole '%-.64s' (maximum = %d). Pou¾ite BLOB" + spa "Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar" + swe "För stor kolumnlängd angiven för '%-.64s' (max= %d). Använd en BLOB instället" + ukr "úÁÄÏ×ÇÁ ÄÏ×ÖÉÎÁ ÓÔÏ×ÂÃÑ '%-.64s' (max = %d). ÷ÉËÏÒÉÓÔÁÊÔÅ ÔÉÐ BLOB" +ER_WRONG_AUTO_KEY 42000 S1009 + cze "M-Bù¾ete mít pouze jedno AUTO pole a to musí být definováno jako klíè" + dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret" + nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd." 
+ eng "Incorrect table definition; there can be only one auto column and it must be defined as a key" + jps "ƒe[ƒuƒ‹‚Ì’è‹`‚ªˆá‚¢‚Ü‚·; there can be only one auto column and it must be defined as a key", + est "Vigane tabelikirjeldus; Tabelis tohib olla üks auto_increment tüüpi tulp ning see peab olema defineeritud võtmena" + fre "Un seul champ automatique est permis et il doit être indexé" + ger "Falsche Tabellendefinition. Es darf nur eine AUTO_INCREMENT-Spalte geben, und diese muss als Schlüssel definiert werden" + greek "Ìðïñåß íá õðÜñ÷åé ìüíï Ýíá auto field êáé ðñÝðåé íá Ý÷åé ïñéóèåß óáí key" + hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni." + ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave" + jpn "¥Æ¡¼¥Ö¥ë¤ÎÄêµÁ¤¬°ã¤¤¤Þ¤¹; there can be only one auto column and it must be defined as a key" + kor "ºÎÁ¤È®ÇÑ Å×À̺í Á¤ÀÇ; Å×À̺íÀº ÇϳªÀÇ auto Ä®·³ÀÌ Á¸ÀçÇϰí Ű·Î Á¤ÀǵǾîÁ®¾ß ÇÕ´Ï´Ù." + nor "Bare ett auto felt kan være definert som nøkkel." + norwegian-ny "Bare eitt auto felt kan være definert som nøkkel." + pol "W tabeli mo¿e byæ tylko jedno pole auto i musi ono byæ zdefiniowane jako klucz" + por "Definição incorreta de tabela. 
Somente é permitido um único campo auto-incrementado e ele tem que ser definido como chave" + rum "Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie" + rus "îÅËÏÒÒÅËÔÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ: ÍÏÖÅÔ ÓÕÝÅÓÔ×Ï×ÁÔØ ÔÏÌØËÏ ÏÄÉÎ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÊ ÓÔÏÌÂÅÃ, É ÏÎ ÄÏÌÖÅÎ ÂÙÔØ ÏÐÒÅÄÅÌÅÎ ËÁË ËÌÀÞ" + serbian "Pogrešna definicija tabele; U tabeli može postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona kljuèa" + slo "Mô¾ete ma» iba jedno AUTO pole a to musí by» definované ako kµúè" + spa "Puede ser solamente un campo automatico y este debe ser definido como una clave" + swe "Det får finnas endast ett AUTO_INCREMENT-fält och detta måste vara en nyckel" + ukr "îÅצÒÎÅ ×ÉÚÎÁÞÅÎÎÑ ÔÁÂÌÉæ; íÏÖÅ ÂÕÔÉ ÌÉÛÅ ÏÄÉÎ Á×ÔÏÍÁÔÉÞÎÉÊ ÓÔÏ×ÂÅÃØ, ÝÏ ÐÏ×ÉÎÅÎ ÂÕÔÉ ×ÉÚÎÁÞÅÎÉÊ ÑË ËÌÀÞ" +ER_READY + cze "%s: p-Bøipraven na spojení\nVersion: '%s' socket: '%s' port: %d" + dan "%s: klar til tilslutninger\nVersion: '%s' socket: '%s' port: %d" + nla "%s: klaar voor verbindingen\nVersion: '%s' socket: '%s' port: %d" + eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d" + jps "%s: €”õŠ®—¹\nVersion: '%s' socket: '%s' port: %d", + est "%s: ootab ühendusi\nVersion: '%s' socket: '%s' port: %d" + fre "%s: Prêt pour des connections\nVersion: '%s' socket: '%s' port: %d" + ger "%s: Bereit für Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d" + greek "%s: óå áíáìïíÞ óõíäÝóåùí\nVersion: '%s' socket: '%s' port: %d" + hun "%s: kapcsolatra kesz\nVersion: '%s' socket: '%s' port: %d" + ita "%s: Pronto per le connessioni\nVersion: '%s' socket: '%s' port: %d" + jpn "%s: ½àÈ÷´°Î»\nVersion: '%s' socket: '%s' port: %d" + kor "%s: ¿¬°á ÁغñÁßÀÔ´Ï´Ù\nVersion: '%s' socket: '%s' port: %d" + nor "%s: klar for tilkoblinger\nVersion: '%s' socket: '%s' port: %d" + norwegian-ny "%s: klar for tilkoblingar\nVersion: '%s' socket: '%s' port: %d" + pol "%s: gotowe do po³?czenia\nVersion: '%s' socket: '%s' port: %d" + por 
"%s: Pronto para conexões\nVersion: '%s' socket: '%s' port: %d" + rum "%s: sint gata pentru conectii\nVersion: '%s' socket: '%s' port: %d" + rus "%s: çÏÔÏ× ÐÒÉÎÉÍÁÔØ ÓÏÅÄÉÎÅÎÉÑ.\n÷ÅÒÓÉÑ: '%s' ÓÏËÅÔ: '%s' ÐÏÒÔ: %d" + serbian "%s: Spreman za konekcije\nVersion: '%s' socket: '%s' port: %d" + slo "%s: pripravený na spojenie\nVersion: '%s' socket: '%s' port: %d" + spa "%s: preparado para conexiones\nVersion: '%s' socket: '%s' port: %d" + swe "%s: klar att ta emot klienter\nVersion: '%s' socket: '%s' port: %d" + ukr "%s: çÏÔÏ×ÉÊ ÄÌÑ Ú'¤ÄÎÁÎØ!\nVersion: '%s' socket: '%s' port: %d" +ER_NORMAL_SHUTDOWN + cze "%s: norm-Bální ukonèení\n" + dan "%s: Normal nedlukning\n" + nla "%s: Normaal afgesloten \n" + eng "%s: Normal shutdown\n" + est "%s: MySQL lõpetas\n" + fre "%s: Arrêt normal du serveur\n" + ger "%s: Normal heruntergefahren\n" + greek "%s: ÖõóéïëïãéêÞ äéáäéêáóßá shutdown\n" + hun "%s: Normal leallitas\n" + ita "%s: Shutdown normale\n" + kor "%s: Á¤»óÀûÀÎ shutdown\n" + nor "%s: Normal avslutning\n" + norwegian-ny "%s: Normal nedkopling\n" + pol "%s: Standardowe zakoñczenie dzia³ania\n" + por "%s: 'Shutdown' normal\n" + rum "%s: Terminare normala\n" + rus "%s: ëÏÒÒÅËÔÎÁÑ ÏÓÔÁÎÏ×ËÁ\n" + serbian "%s: Normalno gašenje\n" + slo "%s: normálne ukonèenie\n" + spa "%s: Apagado normal\n" + swe "%s: Normal avslutning\n" + ukr "%s: îÏÒÍÁÌØÎÅ ÚÁ×ÅÒÛÅÎÎÑ\n" +ER_GOT_SIGNAL + cze "%s: p-Bøijat signal %d, konèím\n" + dan "%s: Fangede signal %d. Afslutter!!\n" + nla "%s: Signaal %d. Systeem breekt af!\n" + eng "%s: Got signal %d. Aborting!\n" + jps "%s: Got signal %d. ’†’f!\n", + est "%s: sain signaali %d. Lõpetan!\n" + fre "%s: Reçu le signal %d. Abandonne!\n" + ger "%s: Signal %d erhalten. Abbruch!\n" + greek "%s: ÅëÞöèç ôï ìÞíõìá %d. Ç äéáäéêáóßá åãêáôáëåßðåôáé!\n" + hun "%s: %d jelzes. Megszakitva!\n" + ita "%s: Ricevuto segnale %d. Interruzione!\n" + jpn "%s: Got signal %d. ÃæÃÇ!\n" + kor "%s: %d ½ÅÈ£°¡ µé¾î¿ÔÀ½. ÁßÁö!\n" + nor "%s: Oppdaget signal %d. 
Avslutter!\n" + norwegian-ny "%s: Oppdaga signal %d. Avsluttar!\n" + pol "%s: Otrzymano sygna³ %d. Koñczenie dzia³ania!\n" + por "%s: Obteve sinal %d. Abortando!\n" + rum "%s: Semnal %d obtinut. Aborting!\n" + rus "%s: ðÏÌÕÞÅÎ ÓÉÇÎÁÌ %d. ðÒÅËÒÁÝÁÅÍ!\n" + serbian "%s: Dobio signal %d. Prekidam!\n" + slo "%s: prijatý signál %d, ukonèenie (Abort)!\n" + spa "%s: Recibiendo signal %d. Abortando!\n" + swe "%s: Fick signal %d. Avslutar!\n" + ukr "%s: ïÔÒÉÍÁÎÏ ÓÉÇÎÁÌ %d. ðÅÒÅÒÉ×ÁÀÓØ!\n" +ER_SHUTDOWN_COMPLETE + cze "%s: ukon-Bèení práce hotovo\n" + dan "%s: Server lukket\n" + nla "%s: Afsluiten afgerond\n" + eng "%s: Shutdown complete\n" + jps "%s: Shutdown Š®—¹\n", + est "%s: Lõpp\n" + fre "%s: Arrêt du serveur terminé\n" + ger "%s: Herunterfahren beendet\n" + greek "%s: Ç äéáäéêáóßá Shutdown ïëïêëçñþèçêå\n" + hun "%s: A leallitas kesz\n" + ita "%s: Shutdown completato\n" + jpn "%s: Shutdown ´°Î»\n" + kor "%s: Shutdown ÀÌ ¿Ï·áµÊ!\n" + nor "%s: Avslutning komplett\n" + norwegian-ny "%s: Nedkopling komplett\n" + pol "%s: Zakoñczenie dzia³ania wykonane\n" + por "%s: 'Shutdown' completo\n" + rum "%s: Terminare completa\n" + rus "%s: ïÓÔÁÎÏ×ËÁ ÚÁ×ÅÒÛÅÎÁ\n" + serbian "%s: Gašenje završeno\n" + slo "%s: práca ukonèená\n" + spa "%s: Apagado completado\n" + swe "%s: Avslutning klar\n" + ukr "%s: òÏÂÏÔÕ ÚÁ×ÅÒÛÅÎÏ\n" +ER_FORCING_CLOSE 08S01 + cze "%s: n-Básilné uzavøení threadu %ld u¾ivatele '%-.32s'\n" + dan "%s: Forceret nedlukning af tråd: %ld bruger: '%-.32s'\n" + nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.32s'\n" + eng "%s: Forcing close of thread %ld user: '%-.32s'\n" + jps "%s: ƒXƒŒƒbƒh %ld ‹§I—¹ user: '%-.32s'\n", + est "%s: Sulgen jõuga lõime %ld kasutaja: '%-.32s'\n" + fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.32s'\n" + ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.32s'\n" + greek "%s: Ôï thread èá êëåßóåé %ld user: '%-.32s'\n" + hun "%s: A(z) %ld thread kenyszeritett zarasa. 
Felhasznalo: '%-.32s'\n" + ita "%s: Forzata la chiusura del thread %ld utente: '%-.32s'\n" + jpn "%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.32s'\n" + kor "%s: thread %ldÀÇ °Á¦ Á¾·á user: '%-.32s'\n" + nor "%s: Påtvinget avslutning av tråd %ld bruker: '%-.32s'\n" + norwegian-ny "%s: Påtvinga avslutning av tråd %ld brukar: '%-.32s'\n" + pol "%s: Wymuszenie zamkniêcia w?tku %ld u¿ytkownik: '%-.32s'\n" + por "%s: Forçando finalização da 'thread' %ld - usuário '%-.32s'\n" + rum "%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.32s'\n" + rus "%s: ðÒÉÎÕÄÉÔÅÌØÎÏ ÚÁËÒÙ×ÁÅÍ ÐÏÔÏË %ld ÐÏÌØÚÏ×ÁÔÅÌÑ: '%-.32s'\n" + serbian "%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.32s'\n" + slo "%s: násilné ukonèenie vlákna %ld u¾ívateµa '%-.32s'\n" + spa "%s: Forzando a cerrar el thread %ld usuario: '%-.32s'\n" + swe "%s: Stänger av tråd %ld; användare: '%-.32s'\n" + ukr "%s: ðÒÉÓËÏÒÀÀ ÚÁËÒÉÔÔÑ Ç¦ÌËÉ %ld ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'\n" +ER_IPSOCK_ERROR 08S01 + cze "Nemohu vytvo-Bøit IP socket" + dan "Kan ikke oprette IP socket" + nla "Kan IP-socket niet openen" + eng "Can't create IP socket" + jps "IP socket ‚ªì‚ê‚Ü‚¹‚ñ", + est "Ei suuda luua IP socketit" + fre "Ne peut créer la connection IP (socket)" + ger "Kann IP-Socket nicht erzeugen" + greek "Äåí åßíáé äõíáôÞ ç äçìéïõñãßá IP socket" + hun "Az IP socket nem hozhato letre" + ita "Impossibile creare il socket IP" + jpn "IP socket ¤¬ºî¤ì¤Þ¤»¤ó" + kor "IP ¼ÒÄÏÀ» ¸¸µéÁö ¸øÇß½À´Ï´Ù." + nor "Kan ikke opprette IP socket" + norwegian-ny "Kan ikkje opprette IP socket" + pol "Nie mo¿na stworzyæ socket'u IP" + por "Não pode criar o soquete IP" + rum "Nu pot crea IP socket" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ IP-ÓÏËÅÔ" + serbian "Ne mogu da kreiram IP socket" + slo "Nemô¾em vytvori» IP socket" + spa "No puedo crear IP socket" + swe "Kan inte skapa IP-socket" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ IP ÒÏÚ'¤Í" +ER_NO_SUCH_INDEX 42S12 S1009 + cze "Tabulka '%-.64s' nem-Bá index odpovídající CREATE INDEX. 
Vytvoøte tabulku znovu" + dan "Tabellen '%-.64s' har ikke den nøgle, som blev brugt i CREATE INDEX. Genopret tabellen" + nla "Tabel '%-.64s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw" + eng "Table '%-.64s' has no index like the one used in CREATE INDEX; recreate the table" + jps "Table '%-.64s' ‚Í‚»‚̂悤‚È index ‚ðŽ‚Á‚Ä‚¢‚Ü‚¹‚ñ(CREATE INDEX ŽÀsŽž‚ÉŽw’肳‚ê‚Ä‚¢‚Ü‚¹‚ñ). ƒe[ƒuƒ‹‚ðì‚è’¼‚µ‚Ä‚‚¾‚³‚¢", + est "Tabelil '%-.64s' puuduvad võtmed. Loo tabel uuesti" + fre "La table '%-.64s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table" + ger "Tabelle '%-.64s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen" + greek "Ï ðßíáêáò '%-.64s' äåí Ý÷åé åõñåôÞñéï (index) óáí áõôü ðïõ ÷ñçóéìïðïéåßôå óôçí CREATE INDEX. Ðáñáêáëþ, îáíáäçìéïõñãÞóôå ôïí ðßíáêá" + hun "A(z) '%-.64s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat" + ita "La tabella '%-.64s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella" + jpn "Table '%-.64s' ¤Ï¤½¤Î¤è¤¦¤Ê index ¤ò»ý¤Ã¤Æ¤¤¤Þ¤»¤ó(CREATE INDEX ¼Â¹Ô»þ¤Ë»ØÄꤵ¤ì¤Æ¤¤¤Þ¤»¤ó). ¥Æ¡¼¥Ö¥ë¤òºî¤êľ¤·¤Æ¤¯¤À¤µ¤¤" + kor "Å×À̺í '%-.64s'´Â À妽º¸¦ ¸¸µéÁö ¾Ê¾Ò½À´Ï´Ù. alter Å×À̺í¸í·ÉÀ» ÀÌ¿ëÇÏ¿© Å×À̺íÀ» ¼öÁ¤Çϼ¼¿ä..." + nor "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen" + norwegian-ny "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen på nytt" + pol "Tabela '%-.64s' nie ma indeksu takiego jak w CREATE INDEX. Stwórz tabelê" + por "Tabela '%-.64s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela" + rum "Tabela '%-.64s' nu are un index ca acela folosit in CREATE INDEX. Re-creeaza tabela" + rus "÷ ÔÁÂÌÉÃÅ '%-.64s' ÎÅÔ ÔÁËÏÇÏ ÉÎÄÅËÓÁ, ËÁË × CREATE INDEX. óÏÚÄÁÊÔÅ ÔÁÂÌÉÃÕ ÚÁÎÏ×Ï" + serbian "Tabela '%-.64s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. 
Napravite tabelu ponovo" + slo "Tabuµka '%-.64s' nemá index zodpovedajúci CREATE INDEX. Vytvorte tabulku znova" + spa "La tabla '%-.64s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla" + swe "Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen" + ukr "ôÁÂÌÉÃÑ '%-.64s' ÍÁ¤ ¦ÎÄÅËÓ, ÝÏ ÎÅ ÓЦ×ÐÁÄÁ¤ Ú ×ËÁÚÁÎÎÉÍ Õ CREATE INDEX. óÔ×ÏÒ¦ÔØ ÔÁÂÌÉÃÀ ÚÎÏ×Õ" +ER_WRONG_FIELD_TERMINATORS 42000 S1009 + cze "Argument separ-Bátoru polo¾ek nebyl oèekáván. Pøeètìte si manuál" + dan "Felt adskiller er ikke som forventet, se dokumentationen" + nla "De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding" + eng "Field separator argument is not what is expected; check the manual" + est "Väljade eraldaja erineb oodatust. Tutvu kasutajajuhendiga" + fre "Séparateur de champs inconnu. Vérifiez dans le manuel" + ger "Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen" + greek "Ï äéá÷ùñéóôÞò ðåäßùí äåí åßíáé áõôüò ðïõ áíáìåíüôáí. Ðáñáêáëþ áíáôñÝîôå óôï manual" + hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!" + ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale" + kor "ÇÊµå ±¸ºÐÀÚ ÀμöµéÀÌ ¿ÏÀüÇÏÁö ¾Ê½À´Ï´Ù. ¸Þ´º¾óÀ» ã¾Æ º¸¼¼¿ä." + nor "Felt skiller argumentene er ikke som forventet, se dokumentasjonen" + norwegian-ny "Felt skiljer argumenta er ikkje som venta, sjå dokumentasjonen" + pol "Nie oczekiwano separatora. Sprawd¥ podrêcznik" + por "Argumento separador de campos não é o esperado. Cheque o manual" + rum "Argumentul pentru separatorul de cimpuri este diferit de ce ma asteptam. Verifica manualul" + rus "áÒÇÕÍÅÎÔ ÒÁÚÄÅÌÉÔÅÌÑ ÐÏÌÅÊ - ÎÅ ÔÏÔ, ËÏÔÏÒÙÊ ÏÖÉÄÁÌÓÑ. ïÂÒÁÝÁÊÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ" + serbian "Argument separatora polja nije ono što se oèekivalo. Proverite uputstvo MySQL server-a" + slo "Argument oddeµovaè polí nezodpovedá po¾iadavkám. 
Skontrolujte v manuáli" + spa "Los separadores de argumentos del campo no son los especificados. Comprueba el manual" + swe "Fältseparatorerna är vad som förväntades. Kontrollera mot manualen" + ukr "èÉÂÎÉÊ ÒÏÚĦÌÀ×ÁÞ ÐÏ̦×. ðÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ" +ER_BLOBS_AND_NO_TERMINATED 42000 S1009 + cze "Nen-Bí mo¾né pou¾ít pevný rowlength s BLOBem. Pou¾ijte 'fields terminated by'." + dan "Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'." + nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'." + eng "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'" + est "BLOB-tüüpi väljade olemasolul ei saa kasutada fikseeritud väljapikkust. Vajalik 'fields terminated by' määrang." + fre "Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. Utiliser 'fields terminated by'." + ger "Eine feste Zeilenlänge kann für BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden" + greek "Äåí ìðïñåßôå íá ÷ñçóéìïðïéÞóåôå fixed rowlength óå BLOBs. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'fields terminated by'." + hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' ." + ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'." + jpn "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'." + kor "BLOB·Î´Â °íÁ¤±æÀÌÀÇ lowlength¸¦ »ç¿ëÇÒ ¼ö ¾ø½À´Ï´Ù. 'fields terminated by'¸¦ »ç¿ëÇϼ¼¿ä." + nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'." + norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'." + pol "Nie mo¿na u¿yæ sta³ej d³ugo?ci wiersza z polami typu BLOB. U¿yj 'fields terminated by'." + por "Você não pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado." + rum "Nu poti folosi lungime de cimp fix pentru BLOB-uri. 
Foloseste 'fields terminated by'." + rus "æÉËÓÉÒÏ×ÁÎÎÙÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ Ó ÐÏÌÑÍÉ ÔÉÐÁ BLOB ÉÓÐÏÌØÚÏ×ÁÔØ ÎÅÌØÚÑ, ÐÒÉÍÅÎÑÊÔÅ 'fields terminated by'" + serbian "Ne možete koristiti fiksnu velièinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju." + slo "Nie je mo¾né pou¾i» fixnú då¾ku s BLOBom. Pou¾ite 'fields terminated by'." + spa "No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '." + swe "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'" + ukr "îÅ ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÓÔÁÌÕ ÄÏ×ÖÉÎÕ ÓÔÒÏËÉ Ú BLOB. úËÏÒÉÓÔÁÊÔÅÓÑ 'fields terminated by'" +ER_TEXTFILE_NOT_READABLE + cze "Soubor '%-.128s' mus-Bí být v adresáøi databáze nebo èitelný pro v¹echny" + dan "Filen '%-.128s' skal være i database-folderen og kunne læses af alle" + nla "Het bestand '%-.128s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn." + eng "The file '%-.128s' must be in the database directory or be readable by all" + jps "ƒtƒ@ƒCƒ‹ '%-.128s' ‚Í databse ‚Ì directory ‚É‚ ‚é‚©‘S‚Ẵ†[ƒU[‚ª“Ç‚ß‚é‚æ‚¤‚É‹–‰Â‚³‚ê‚Ä‚¢‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.", + est "Fail '%-.128s' peab asuma andmebaasi kataloogis või olema kõigile loetav" + fre "Le fichier '%-.128s' doit être dans le répertoire de la base et lisible par tous" + ger "Datei '%-.128s' muss im Datenbank-Verzeichnis vorhanden oder lesbar für alle sein" + greek "Ôï áñ÷åßï '%-.128s' ðñÝðåé íá õðÜñ÷åé óôï database directory Þ íá ìðïñåß íá äéáâáóôåß áðü üëïõò" + hun "A(z) '%-.128s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak" + ita "Il file '%-.128s' deve essere nella directory del database e deve essere leggibile da tutti" + jpn "¥Õ¥¡¥¤¥ë '%-.128s' ¤Ï databse ¤Î directory ¤Ë¤¢¤ë¤«Á´¤Æ¤Î¥æ¡¼¥¶¡¼¤¬ÆÉ¤á¤ë¤è¤¦¤Ëµö²Ä¤µ¤ì¤Æ¤¤¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó." + kor "'%-.128s' ÈÀÏ´Â µ¥ÀÌŸº£À̽º µð·ºÅ丮¿¡ Á¸ÀçÇϰųª ¸ðµÎ¿¡°Ô Àб⠰¡´ÉÇÏ¿©¾ß ÇÕ´Ï´Ù." 
+ nor "Filen '%-.128s' må være i database-katalogen for å være lesbar for alle" + norwegian-ny "Filen '%-.128s' må være i database-katalogen for å være lesbar for alle" + pol "Plik '%-.128s' musi znajdowaæ sie w katalogu bazy danych lub mieæ prawa czytania przez wszystkich" + por "Arquivo '%-.128s' tem que estar no diretório do banco de dados ou ter leitura possível para todos" + rum "Fisierul '%-.128s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)" + rus "æÁÊÌ '%-.128s' ÄÏÌÖÅÎ ÎÁÈÏÄÉÔØÓÑ × ÔÏÍ ÖÅ ËÁÔÁÌÏÇÅ, ÞÔÏ É ÂÁÚÁ ÄÁÎÎÙÈ, ÉÌÉ ÂÙÔØ ÏÂÝÅÄÏÓÔÕÐÎÙÍ ÄÌÑ ÞÔÅÎÉÑ" + serbian "File '%-.128s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuæa prava pristupa" + slo "Súbor '%-.128s' musí by» v adresári databázy, alebo èitateµný pre v¹etkých" + spa "El archivo '%-.128s' debe estar en el directorio de la base de datos o ser de lectura por todos" + swe "Textfilen '%-.128s' måste finnas i databasbiblioteket eller vara läsbar för alla" + ukr "æÁÊÌ '%-.128s' ÐÏ×ÉÎÅÎ ÂÕÔÉ Õ ÔÅæ ÂÁÚÉ ÄÁÎÎÉÈ ÁÂÏ ÍÁÔÉ ×ÓÔÁÎÏ×ÌÅÎÅ ÐÒÁ×Ï ÎÁ ÞÉÔÁÎÎÑ ÄÌÑ ÕÓ¦È" +ER_FILE_EXISTS_ERROR + cze "Soubor '%-.200s' ji-B¾ existuje" + dan "Filen '%-.200s' eksisterer allerede" + nla "Het bestand '%-.200s' bestaat reeds" + eng "File '%-.200s' already exists" + jps "File '%-.200s' ‚ÍŠù‚É‘¶Ý‚µ‚Ü‚·", + est "Fail '%-.200s' juba eksisteerib" + fre "Le fichier '%-.200s' existe déjà" + ger "Datei '%-.200s' bereits vorhanden" + greek "Ôï áñ÷åßï '%-.200s' õðÜñ÷åé Þäç" + hun "A '%-.200s' file mar letezik." + ita "Il file '%-.200s' esiste gia`" + jpn "File '%-.200s' ¤Ï´û¤Ë¸ºß¤·¤Þ¤¹" + kor "'%-.200s' ÈÀÏÀº ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù." 
+ nor "Filen '%-.200s' eksisterte allerede" + norwegian-ny "Filen '%-.200s' eksisterte allereide" + pol "Plik '%-.200s' ju¿ istnieje" + por "Arquivo '%-.200s' já existe" + rum "Fisierul '%-.200s' exista deja" + rus "æÁÊÌ '%-.200s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "File '%-.200s' veæ postoji" + slo "Súbor '%-.200s' u¾ existuje" + spa "El archivo '%-.200s' ya existe" + swe "Filen '%-.200s' existerar redan" + ukr "æÁÊÌ '%-.200s' ×ÖÅ ¦ÓÎÕ¤" +ER_LOAD_INFO + cze "Z-Báznamù: %ld Vymazáno: %ld Pøeskoèeno: %ld Varování: %ld" + dan "Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld" + nla "Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld" + eng "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld" + jps "ƒŒƒR[ƒh”: %ld íœ: %ld Skipped: %ld Warnings: %ld", + est "Kirjeid: %ld Kustutatud: %ld Vahele jäetud: %ld Hoiatusi: %ld" + fre "Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld" + ger "Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld" + greek "ÅããñáöÝò: %ld ÄéáãñáöÝò: %ld ÐáñåêÜìöèçóáí: %ld ÐñïåéäïðïéÞóåéò: %ld" + hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld" + ita "Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld" + jpn "¥ì¥³¡¼¥É¿ô: %ld ºï½ü: %ld Skipped: %ld Warnings: %ld" + kor "·¹ÄÚµå: %ld°³ »èÁ¦: %ld°³ ½ºÅµ: %ld°³ °æ°í: %ld°³" + nor "Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld" + norwegian-ny "Poster: %ld Fjerna: %ld Hoppa over: %ld Åtvaringar: %ld" + pol "Recordów: %ld Usuniêtych: %ld Pominiêtych: %ld Ostrze¿eñ: %ld" + por "Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld" + rum "Recorduri: %ld Sterse: %ld Sarite (skipped): %ld Atentionari (warnings): %ld" + rus "úÁÐÉÓÅÊ: %ld õÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld" + serbian "Slogova: %ld Izbrisano: %ld Preskoèeno: %ld Upozorenja: %ld" + slo "Záznamov: %ld Zmazaných: %ld Preskoèených: %ld Varovania: %ld" + spa "Registros: %ld Borrados: %ld Saltados: %ld Peligros: %ld" + swe "Rader: %ld Bortagna: %ld 
Dubletter: %ld Varningar: %ld" + ukr "úÁÐÉÓ¦×: %ld ÷ÉÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld" +ER_ALTER_INFO + cze "Z-Báznamù: %ld Zdvojených: %ld" + dan "Poster: %ld Ens: %ld" + nla "Records: %ld Dubbel: %ld" + eng "Records: %ld Duplicates: %ld" + jps "ƒŒƒR[ƒh”: %ld d•¡: %ld", + est "Kirjeid: %ld Kattuvaid: %ld" + fre "Enregistrements: %ld Doublons: %ld" + ger "Datensätze: %ld Duplikate: %ld" + greek "ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld" + hun "Rekordok: %ld Duplikalva: %ld" + ita "Records: %ld Duplicati: %ld" + jpn "¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£: %ld" + kor "·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³" + nor "Poster: %ld Like: %ld" + norwegian-ny "Poster: %ld Like: %ld" + pol "Rekordów: %ld Duplikatów: %ld" + por "Registros: %ld - Duplicados: %ld" + rum "Recorduri: %ld Duplicate: %ld" + rus "úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld" + serbian "Slogova: %ld Duplikata: %ld" + slo "Záznamov: %ld Opakovaných: %ld" + spa "Registros: %ld Duplicados: %ld" + swe "Rader: %ld Dubletter: %ld" + ukr "úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld" +ER_WRONG_SUB_KEY + cze "Chybn-Bá podèást klíèe -- není to øetìzec nebo je del¹í ne¾ délka èásti klíèe" + dan "Forkert indeksdel. Den anvendte nøgledel er ikke en streng eller længden er større end nøglelængden" + nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel" + eng "Incorrect sub part key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique sub keys" + est "Vigane võtme osa. Kasutatud võtmeosa ei ole string tüüpi, määratud pikkus on pikem kui võtmeosa või tabelihandler ei toeta seda tüüpi võtmeid" + fre "Mauvaise sous-clef. Ce n'est pas un 'string' ou la longueur dépasse celle définie dans la clef" + ger "Falscher Unterteilschlüssel. 
Der verwendete Schlüsselteil ist entweder kein String, die verwendete Länge ist länger als der Teilschlüssel oder die Speicher-Engine unterstützt keine Unterteilschlüssel" + greek "ÅóöáëìÝíï sub part key. Ôï ÷ñçóéìïðïéïýìåíï key part äåí åßíáé string Þ ôï ìÞêïò ôïõ åßíáé ìåãáëýôåñï" + hun "Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz" + ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave." + jpn "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part" + kor "ºÎÁ¤È®ÇÑ ¼¹ö ÆÄÆ® Ű. »ç¿ëµÈ Ű ÆÄÆ®°¡ ½ºÆ®¸µÀÌ ¾Æ´Ï°Å³ª Ű ÆÄÆ®ÀÇ ±æÀ̰¡ ³Ê¹« ±é´Ï´Ù." + nor "Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden" + norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden" + pol "B³êdna podczê?æ klucza. U¿yta czê?æ klucza nie jest ³añcuchem lub u¿yta d³ugo?æ jest wiêksza ni¿ czê?æ klucza" + por "Sub parte da chave incorreta. A parte da chave usada não é uma 'string' ou o comprimento usado é maior que parte da chave ou o manipulador de tabelas não suporta sub chaves únicas" + rum "Componentul cheii este incorrect. Componentul folosit al cheii nu este un sir sau lungimea folosita este mai lunga decit lungimea cheii" + rus "îÅËÏÒÒÅËÔÎÁÑ ÞÁÓÔØ ËÌÀÞÁ. éÓÐÏÌØÚÕÅÍÁÑ ÞÁÓÔØ ËÌÀÞÁ ÎÅ Ñ×ÌÑÅÔÓÑ ÓÔÒÏËÏÊ, ÕËÁÚÁÎÎÁÑ ÄÌÉÎÁ ÂÏÌØÛÅ, ÞÅÍ ÄÌÉÎÁ ÞÁÓÔÉ ËÌÀÞÁ, ÉÌÉ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÕÎÉËÁÌØÎÙÅ ÞÁÓÔÉ ËÌÀÞÁ" + serbian "Pogrešan pod-kljuè dela kljuèa. Upotrebljeni deo kljuèa nije string, upotrebljena dužina je veæa od dela kljuèa ili handler tabela ne podržava jedinstvene pod-kljuèeve" + slo "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part" + spa "Parte de la clave es erronea. 
Una parte de la clave no es una cadena o la longitud usada es tan grande como la parte de la clave" + swe "Felaktig delnyckel. Nyckeldelen är inte en sträng eller den angivna längden är längre än kolumnlängden" + ukr "îÅצÒÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ. ÷ÉËÏÒÉÓÔÁÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ ÎÅ ¤ ÓÔÒÏËÏÀ, ÚÁÄÏ×ÇÁ ÁÂÏ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ÕΦËÁÌØÎÉÈ ÞÁÓÔÉÎ ËÌÀÞÅÊ" +ER_CANT_REMOVE_ALL_FIELDS 42000 + cze "Nen-Bí mo¾né vymazat v¹echny polo¾ky s ALTER TABLE. Pou¾ijte DROP TABLE" + dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet." + nla "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!" + eng "You can't delete all columns with ALTER TABLE; use DROP TABLE instead" + jps "ALTER TABLE ‚Å‘S‚Ä‚Ì column ‚Í휂ł«‚Ü‚¹‚ñ. DROP TABLE ‚ðŽg—p‚µ‚Ä‚‚¾‚³‚¢", + est "ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil" + fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE" + ger "Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden" + greek "Äåí åßíáé äõíáôÞ ç äéáãñáöÞ üëùí ôùí ðåäßùí ìå ALTER TABLE. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå DROP TABLE" + hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette" + ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE" + jpn "ALTER TABLE ¤ÇÁ´¤Æ¤Î column ¤Ïºï½ü¤Ç¤¤Þ¤»¤ó. DROP TABLE ¤ò»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤" + kor "ALTER TABLE ¸í·ÉÀ¸·Î´Â ¸ðµç Ä®·³À» Áö¿ï ¼ö ¾ø½À´Ï´Ù. DROP TABLE ¸í·ÉÀ» ÀÌ¿ëÇϼ¼¿ä." + nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden." + norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor." + pol "Nie mo¿na usun?æ wszystkich pól wykorzystuj?c ALTER TABLE. W zamian u¿yj DROP TABLE" + por "Você não pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar" + rum "Nu poti sterge toate coloanele cu ALTER TABLE. 
Foloseste DROP TABLE in schimb" + rus "îÅÌØÚÑ ÕÄÁÌÉÔØ ×ÓÅ ÓÔÏÌÂÃÙ Ó ÐÏÍÏÝØÀ ALTER TABLE. éÓÐÏÌØÚÕÊÔÅ DROP TABLE" + serbian "Ne možete da izbrišete sve kolone pomoæu komande 'ALTER TABLE'. Upotrebite komandu 'DROP TABLE' ako želite to da uradite" + slo "One nemô¾em zmaza» all fields with ALTER TABLE; use DROP TABLE instead" + spa "No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo" + swe "Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället" + ukr "îÅ ÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ×Ó¦ ÓÔÏ×Âæ ÚÁ ÄÏÐÏÍÏÇÏÀ ALTER TABLE. äÌÑ ÃØÏÇÏ ÓËÏÒÉÓÔÁÊÔÅÓÑ DROP TABLE" +ER_CANT_DROP_FIELD_OR_KEY 42000 + cze "Nemohu zru-B¹it '%-.64s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíèe" + dan "Kan ikke udføre DROP '%-.64s'. Undersøg om feltet/nøglen eksisterer." + nla "Kan '%-.64s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat." + eng "Can't DROP '%-.64s'; check that column/key exists" + jps "'%-.64s' ‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½; check that column/key exists", + est "Ei suuda kustutada '%-.64s'. Kontrolli kas tulp/võti eksisteerib" + fre "Ne peut effacer (DROP) '%-.64s'. Vérifiez s'il existe" + ger "Kann '%-.64s' nicht löschen. Existiert die Spalte oder der Schlüssel?" + greek "Áäýíáôç ç äéáãñáöÞ (DROP) '%-.64s'. Ðáñáêáëþ åëÝãîôå áí ôï ðåäßï/êëåéäß õðÜñ÷åé" + hun "A DROP '%-.64s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e" + ita "Impossibile cancellare '%-.64s'. Controllare che il campo chiave esista" + jpn "'%-.64s' ¤òÇË´þ¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿; check that column/key exists" + kor "'%-.64s'¸¦ DROPÇÒ ¼ö ¾ø½À´Ï´Ù. Ä®·³À̳ª ۰¡ Á¸ÀçÇÏ´ÂÁö äũÇϼ¼¿ä." + nor "Kan ikke DROP '%-.64s'. Undersøk om felt/nøkkel eksisterer." + norwegian-ny "Kan ikkje DROP '%-.64s'. Undersøk om felt/nøkkel eksisterar." + pol "Nie mo¿na wykonaæ operacji DROP '%-.64s'. Sprawd¥, czy to pole/klucz istnieje" + por "Não se pode fazer DROP '%-.64s'. Confira se esta coluna/chave existe" + rum "Nu pot sa DROP '%-.64s'. 
Verifica daca coloana/cheia exista" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ (DROP) '%-.64s'. õÂÅÄÉÔÅÓØ ÞÔÏ ÓÔÏÌÂÅÃ/ËÌÀÞ ÄÅÊÓÔ×ÉÔÅÌØÎÏ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Ne mogu da izvršim komandu drop 'DROP' na '%-.64s'. Proverite da li ta kolona (odnosno kljuè) postoji" + slo "Nemô¾em zru¹i» (DROP) '%-.64s'. Skontrolujte, èi neexistujú záznamy/kµúèe" + spa "No puedo ELIMINAR '%-.64s'. compuebe que el campo/clave existe" + swe "Kan inte ta bort '%-.64s'. Kontrollera att fältet/nyckel finns" + ukr "îÅ ÍÏÖÕ DROP '%-.64s'. ðÅÒÅצÒÔÅ, ÞÉ ÃÅÊ ÓÔÏ×ÂÅÃØ/ËÌÀÞ ¦ÓÎÕ¤" +ER_INSERT_INFO + cze "Z-Báznamù: %ld Zdvojených: %ld Varování: %ld" + dan "Poster: %ld Ens: %ld Advarsler: %ld" + nla "Records: %ld Dubbel: %ld Waarschuwing: %ld" + eng "Records: %ld Duplicates: %ld Warnings: %ld" + jps "ƒŒƒR[ƒh”: %ld d•¡”: %ld Warnings: %ld", + est "Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld" + fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld" + ger "Datensätze: %ld Duplikate: %ld Warnungen: %ld" + greek "ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld ÐñïåéäïðïéÞóåéò: %ld" + hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld" + ita "Records: %ld Duplicati: %ld Avvertimenti: %ld" + jpn "¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£¿ô: %ld Warnings: %ld" + kor "·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³ °æ°í: %ld°³" + nor "Poster: %ld Like: %ld Advarsler: %ld" + norwegian-ny "Postar: %ld Like: %ld Åtvaringar: %ld" + pol "Rekordów: %ld Duplikatów: %ld Ostrze¿eñ: %ld" + por "Registros: %ld - Duplicados: %ld - Avisos: %ld" + rum "Recorduri: %ld Duplicate: %ld Atentionari (warnings): %ld" + rus "úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld" + serbian "Slogova: %ld Duplikata: %ld Upozorenja: %ld" + slo "Záznamov: %ld Opakovaných: %ld Varovania: %ld" + spa "Registros: %ld Duplicados: %ld Peligros: %ld" + swe "Rader: %ld Dubletter: %ld Varningar: %ld" + ukr "úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld úÁÓÔÅÒÅÖÅÎØ: %ld" +ER_UPDATE_TABLE_USED + eng "You can't specify target table '%-.64s' for update in FROM clause" + ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.64s' ist in der 
FROM-Klausel nicht zulässig." + rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ ÕËÁÚÁÎÉÅ ÔÁÂÌÉÃÙ '%-.64s' × ÓÐÉÓËÅ ÔÁÂÌÉà FROM ÄÌÑ ×ÎÅÓÅÎÉÑ × ÎÅÅ ÉÚÍÅÎÅÎÉÊ" + swe "INSERT-table '%-.64s' får inte finnas i FROM tabell-listan" + ukr "ôÁÂÌÉÃÑ '%-.64s' ÝÏ ÚͦÎÀ¤ÔØÓÑ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ ÐÅÒÅ̦ËÕ ÔÁÂÌÉÃØ FROM" +ER_NO_SUCH_THREAD + cze "Nezn-Bámá identifikace threadu: %lu" + dan "Ukendt tråd id: %lu" + nla "Onbekend thread id: %lu" + eng "Unknown thread id: %lu" + jps "thread id: %lu ‚Í‚ ‚è‚Ü‚¹‚ñ", + est "Tundmatu lõim: %lu" + fre "Numéro de tâche inconnu: %lu" + ger "Unbekannte Thread-ID: %lu" + greek "Áãíùóôï thread id: %lu" + hun "Ervenytelen szal (thread) id: %lu" + ita "Thread id: %lu sconosciuto" + jpn "thread id: %lu ¤Ï¤¢¤ê¤Þ¤»¤ó" + kor "¾Ë¼ö ¾ø´Â ¾²·¹µå id: %lu" + nor "Ukjent tråd id: %lu" + norwegian-ny "Ukjent tråd id: %lu" + pol "Nieznany identyfikator w?tku: %lu" + por "'Id' de 'thread' %lu desconhecido" + rum "Id-ul: %lu thread-ului este necunoscut" + rus "îÅÉÚ×ÅÓÔÎÙÊ ÎÏÍÅÒ ÐÏÔÏËÁ: %lu" + serbian "Nepoznat thread identifikator: %lu" + slo "Neznáma identifikácia vlákna: %lu" + spa "Identificador del thread: %lu desconocido" + swe "Finns ingen tråd med id %lu" + ukr "îÅצÄÏÍÉÊ ¦ÄÅÎÔÉÆ¦ËÁÔÏÒ Ç¦ÌËÉ: %lu" +ER_KILL_DENIED_ERROR + cze "Nejste vlastn-Bíkem threadu %lu" + dan "Du er ikke ejer af tråden %lu" + nla "U bent geen bezitter van thread %lu" + eng "You are not owner of thread %lu" + jps "thread %lu ‚̃I[ƒi[‚ł͂ ‚è‚Ü‚¹‚ñ", + est "Ei ole lõime %lu omanik" + fre "Vous n'êtes pas propriétaire de la tâche no: %lu" + ger "Sie sind nicht Eigentümer von Thread %lu" + greek "Äåí åßóèå owner ôïõ thread %lu" + hun "A %lu thread-nek mas a tulajdonosa" + ita "Utente non proprietario del thread %lu" + jpn "thread %lu ¤Î¥ª¡¼¥Ê¡¼¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó" + kor "¾²·¹µå(Thread) %luÀÇ ¼ÒÀ¯ÀÚ°¡ ¾Æ´Õ´Ï´Ù." + nor "Du er ikke eier av tråden %lu" + norwegian-ny "Du er ikkje eigar av tråd %lu" + pol "Nie jeste? 
w³a?cicielem w?tku %lu" + por "Você não é proprietário da 'thread' %lu" + rum "Nu sinteti proprietarul threadului %lu" + rus "÷Ù ÎÅ Ñ×ÌÑÅÔÅÓØ ×ÌÁÄÅÌØÃÅÍ ÐÏÔÏËÁ %lu" + serbian "Vi niste vlasnik thread-a %lu" + slo "Nie ste vlastníkom vlákna %lu" + spa "Tu no eres el propietario del thread%lu" + swe "Du är inte ägare till tråd %lu" + ukr "÷É ÎÅ ×ÏÌÏÄÁÒ Ç¦ÌËÉ %lu" +ER_NO_TABLES_USED + cze "Nejsou pou-B¾ity ¾ádné tabulky" + dan "Ingen tabeller i brug" + nla "Geen tabellen gebruikt." + eng "No tables used" + est "Ühtegi tabelit pole kasutusel" + fre "Aucune table utilisée" + ger "Keine Tabellen verwendet" + greek "Äåí ÷ñçóéìïðïéÞèçêáí ðßíáêåò" + hun "Nincs hasznalt tabla" + ita "Nessuna tabella usata" + kor "¾î¶² Å×ÀÌºíµµ »ç¿ëµÇÁö ¾Ê¾Ò½À´Ï´Ù." + nor "Ingen tabeller i bruk" + norwegian-ny "Ingen tabellar i bruk" + pol "Nie ma ¿adej u¿ytej tabeli" + por "Nenhuma tabela usada" + rum "Nici o tabela folosita" + rus "îÉËÁËÉÅ ÔÁÂÌÉÃÙ ÎÅ ÉÓÐÏÌØÚÏ×ÁÎÙ" + serbian "Nema upotrebljenih tabela" + slo "Nie je pou¾itá ¾iadna tabuµka" + spa "No ha tablas usadas" + swe "Inga tabeller angivna" + ukr "îÅ ×ÉËÏÒÉÓÔÁÎÏ ÔÁÂÌÉÃØ" +ER_TOO_BIG_SET + cze "P-Bøíli¹ mnoho øetìzcù pro sloupec %-.64s a SET" + dan "For mange tekststrenge til specifikationen af SET i kolonne %-.64s" + nla "Teveel strings voor kolom %-.64s en SET" + eng "Too many strings for column %-.64s and SET" + est "Liiga palju string tulbale %-.64s tüübile SET" + fre "Trop de chaînes dans la colonne %-.64s avec SET" + ger "Zu viele Strings für Feld %-.64s und SET angegeben" + greek "ÐÜñá ðïëëÜ strings ãéá ôï ðåäßï %-.64s êáé SET" + hun "Tul sok karakter: %-.64s es SET" + ita "Troppe stringhe per la colonna %-.64s e la SET" + kor "Ä®·³ %-.64s¿Í SET¿¡¼ ½ºÆ®¸µÀÌ ³Ê¹« ¸¹½À´Ï´Ù." 
+ nor "For mange tekststrenger kolonne %-.64s og SET" + norwegian-ny "For mange tekststrengar felt %-.64s og SET" + pol "Zbyt wiele ³añcuchów dla kolumny %-.64s i polecenia SET" + por "'Strings' demais para coluna '%-.64s' e SET" + rum "Prea multe siruri pentru coloana %-.64s si SET" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÚÎÁÞÅÎÉÊ ÄÌÑ ÓÔÏÌÂÃÁ %-.64s × SET" + serbian "Previše string-ova za kolonu '%-.64s' i komandu 'SET'" + slo "Príli¹ mnoho re»azcov pre pole %-.64s a SET" + spa "Muchas strings para columna %-.64s y SET" + swe "För många alternativ till kolumn %-.64s för SET" + ukr "úÁÂÁÇÁÔÏ ÓÔÒÏË ÄÌÑ ÓÔÏ×ÂÃÑ %-.64s ÔÁ SET" +ER_NO_UNIQUE_LOGFILE + cze "Nemohu vytvo-Bøit jednoznaèné jméno logovacího souboru %-.200s.(1-999)\n" + dan "Kan ikke lave unikt log-filnavn %-.200s.(1-999)\n" + nla "Het is niet mogelijk een unieke naam te maken voor de logfile %-.200s.(1-999)\n" + eng "Can't generate a unique log-filename %-.200s.(1-999)\n" + est "Ei suuda luua unikaalset logifaili nime %-.200s.(1-999)\n" + fre "Ne peut générer un unique nom de journal %-.200s.(1-999)\n" + ger "Kann keinen eindeutigen Dateinamen für die Logdatei %-.200s(1-999) erzeugen\n" + greek "Áäýíáôç ç äçìéïõñãßá unique log-filename %-.200s.(1-999)\n" + hun "Egyedi log-filenev nem generalhato: %-.200s.(1-999)\n" + ita "Impossibile generare un nome del file log unico %-.200s.(1-999)\n" + kor "Unique ·Î±×ÈÀÏ '%-.200s'¸¦ ¸¸µé¼ö ¾ø½À´Ï´Ù.(1-999)\n" + nor "Kan ikke lage unikt loggfilnavn %-.200s.(1-999)\n" + norwegian-ny "Kan ikkje lage unikt loggfilnavn %-.200s.(1-999)\n" + pol "Nie mo¿na stworzyæ unikalnej nazwy pliku z logiem %-.200s.(1-999)\n" + por "Não pode gerar um nome de arquivo de 'log' único '%-.200s'.(1-999)\n" + rum "Nu pot sa generez un nume de log unic %-.200s.(1-999)\n" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÕÎÉËÁÌØÎÏÅ ÉÍÑ ÆÁÊÌÁ ÖÕÒÎÁÌÁ %-.200s.(1-999)\n" + serbian "Ne mogu da generišem jedinstveno ime log-file-a: '%-.200s.(1-999)'\n" + slo "Nemô¾em vytvori» unikátne meno log-súboru %-.200s.(1-999)\n" + spa "No puede crear 
un unico archivo log %-.200s.(1-999)\n" + swe "Kan inte generera ett unikt filnamn %-.200s.(1-999)\n" + ukr "îÅ ÍÏÖÕ ÚÇÅÎÅÒÕ×ÁÔÉ ÕΦËÁÌØÎÅ ¦Í'Ñ log-ÆÁÊÌÕ %-.200s.(1-999)\n" +ER_TABLE_NOT_LOCKED_FOR_WRITE + cze "Tabulka '%-.64s' byla zam-Bèena s READ a nemù¾e být zmìnìna" + dan "Tabellen '%-.64s' var låst med READ lås og kan ikke opdateres" + nla "Tabel '%-.64s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen." + eng "Table '%-.64s' was locked with a READ lock and can't be updated" + jps "Table '%-.64s' ‚Í READ lock ‚ɂȂÁ‚Ä‚¢‚ÄAXV‚͂ł«‚Ü‚¹‚ñ", + est "Tabel '%-.64s' on lukustatud READ lukuga ning ei ole muudetav" + fre "Table '%-.64s' verrouillée lecture (READ): modification impossible" + ger "Tabelle '%-.64s' ist mit Lesesperre versehen und kann nicht aktualisiert werden" + greek "Ï ðßíáêáò '%-.64s' Ý÷åé êëåéäùèåß ìå READ lock êáé äåí åðéôñÝðïíôáé áëëáãÝò" + hun "A(z) '%-.64s' tabla zarolva lett (READ lock) es nem lehet frissiteni" + ita "La tabella '%-.64s' e` soggetta a lock in lettura e non puo` essere aggiornata" + jpn "Table '%-.64s' ¤Ï READ lock ¤Ë¤Ê¤Ã¤Æ¤¤¤Æ¡¢¹¹¿·¤Ï¤Ç¤¤Þ¤»¤ó" + kor "Å×À̺í '%-.64s'´Â READ ¶ôÀÌ Àá°ÜÀÖ¾î¼ °»½ÅÇÒ ¼ö ¾ø½À´Ï´Ù." 
+ nor "Tabellen '%-.64s' var låst med READ lås og kan ikke oppdateres" + norwegian-ny "Tabellen '%-.64s' var låst med READ lås og kan ikkje oppdaterast" + pol "Tabela '%-.64s' zosta³a zablokowana przez READ i nie mo¿e zostaæ zaktualizowana" + por "Tabela '%-.64s' foi travada com trava de leitura e não pode ser atualizada" + rum "Tabela '%-.64s' a fost locked cu un READ lock si nu poate fi actualizata" + rus "ôÁÂÌÉÃÁ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎÁ ÕÒÏ×ÎÅÍ READ lock É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ" + serbian "Tabela '%-.64s' je zakljuèana READ lock-om; iz nje se može samo èitati ali u nju se ne može pisati" + slo "Tabuµka '%-.64s' bola zamknutá s READ a nemô¾e by» zmenená" + spa "Tabla '%-.64s' fue trabada con un READ lock y no puede ser actualizada" + swe "Tabell '%-.64s' kan inte uppdateras emedan den är låst för läsning" + ukr "ôÁÂÌÉÃÀ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ, ÔÏÍÕ §§ ÎÅ ÍÏÖÎÁ ÏÎÏ×ÉÔÉ" +ER_TABLE_NOT_LOCKED + cze "Tabulka '%-.64s' nebyla zam-Bèena s LOCK TABLES" + dan "Tabellen '%-.64s' var ikke låst med LOCK TABLES" + nla "Tabel '%-.64s' was niet gelocked met LOCK TABLES" + eng "Table '%-.64s' was not locked with LOCK TABLES" + jps "Table '%-.64s' ‚Í LOCK TABLES ‚É‚æ‚Á‚ăƒbƒN‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "Tabel '%-.64s' ei ole lukustatud käsuga LOCK TABLES" + fre "Table '%-.64s' non verrouillée: utilisez LOCK TABLES" + ger "Tabelle '%-.64s' wurde nicht mit LOCK TABLES gesperrt" + greek "Ï ðßíáêáò '%-.64s' äåí Ý÷åé êëåéäùèåß ìå LOCK TABLES" + hun "A(z) '%-.64s' tabla nincs zarolva a LOCK TABLES-szel" + ita "Non e` stato impostato il lock per la tabella '%-.64s' con LOCK TABLES" + jpn "Table '%-.64s' ¤Ï LOCK TABLES ¤Ë¤è¤Ã¤Æ¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "Å×À̺í '%-.64s'´Â LOCK TABLES ¸í·ÉÀ¸·Î Àá±âÁö ¾Ê¾Ò½À´Ï´Ù." 
+ nor "Tabellen '%-.64s' var ikke låst med LOCK TABLES" + norwegian-ny "Tabellen '%-.64s' var ikkje låst med LOCK TABLES" + pol "Tabela '%-.64s' nie zosta³a zablokowana poleceniem LOCK TABLES" + por "Tabela '%-.64s' não foi travada com LOCK TABLES" + rum "Tabela '%-.64s' nu a fost locked cu LOCK TABLES" + rus "ôÁÂÌÉÃÁ '%-.64s' ÎÅ ÂÙÌÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES" + serbian "Tabela '%-.64s' nije bila zakljuèana komandom 'LOCK TABLES'" + slo "Tabuµka '%-.64s' nebola zamknutá s LOCK TABLES" + spa "Tabla '%-.64s' no fue trabada con LOCK TABLES" + swe "Tabell '%-.64s' är inte låst med LOCK TABLES" + ukr "ôÁÂÌÉÃÀ '%-.64s' ÎÅ ÂÕÌÏ ÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES" +ER_BLOB_CANT_HAVE_DEFAULT 42000 + cze "Blob polo-B¾ka '%-.64s' nemù¾e mít defaultní hodnotu" + dan "BLOB feltet '%-.64s' kan ikke have en standard værdi" + nla "Blob veld '%-.64s' can geen standaardwaarde bevatten" + eng "BLOB/TEXT column '%-.64s' can't have a default value" + est "BLOB-tüüpi tulp '%-.64s' ei saa omada vaikeväärtust" + fre "BLOB '%-.64s' ne peut avoir de valeur par défaut" + ger "BLOB/TEXT-Feld '%-.64s' darf keinen Vorgabewert (DEFAULT) haben" + greek "Ôá Blob ðåäßá '%-.64s' äåí ìðïñïýí íá Ý÷ïõí ðñïêáèïñéóìÝíåò ôéìÝò (default value)" + hun "A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke" + ita "Il campo BLOB '%-.64s' non puo` avere un valore di default" + jpn "BLOB column '%-.64s' can't have a default value" + kor "BLOB Ä®·³ '%-.64s' ´Â µðÆúÆ® °ªÀ» °¡Áú ¼ö ¾ø½À´Ï´Ù." 
+ nor "Blob feltet '%-.64s' kan ikke ha en standard verdi" + norwegian-ny "Blob feltet '%-.64s' kan ikkje ha ein standard verdi" + pol "Pole typu blob '%-.64s' nie mo¿e mieæ domy?lnej warto?ci" + por "Coluna BLOB '%-.64s' não pode ter um valor padrão (default)" + rum "Coloana BLOB '%-.64s' nu poate avea o valoare default" + rus "îÅ×ÏÚÍÏÖÎÏ ÕËÁÚÙ×ÁÔØ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ ÓÔÏÌÂÃÁ BLOB '%-.64s'" + serbian "BLOB kolona '%-.64s' ne može imati default vrednost" + slo "Pole BLOB '%-.64s' nemô¾e ma» implicitnú hodnotu" + spa "Campo Blob '%-.64s' no puede tener valores patron" + swe "BLOB fält '%-.64s' kan inte ha ett DEFAULT-värde" + ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ÎÅ ÍÏÖÅ ÍÁÔÉ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ" +ER_WRONG_DB_NAME 42000 + cze "Nep-Bøípustné jméno databáze '%-.100s'" + dan "Ugyldigt database navn '%-.100s'" + nla "Databasenaam '%-.100s' is niet getoegestaan" + eng "Incorrect database name '%-.100s'" + jps "Žw’肵‚½ database –¼ '%-.100s' ‚ªŠÔˆá‚Á‚Ä‚¢‚Ü‚·", + est "Vigane andmebaasi nimi '%-.100s'" + fre "Nom de base de donnée illégal: '%-.100s'" + ger "Unerlaubter Datenbankname '%-.100s'" + greek "ËÜèïò üíïìá âÜóçò äåäïìÝíùí '%-.100s'" + hun "Hibas adatbazisnev: '%-.100s'" + ita "Nome database errato '%-.100s'" + jpn "»ØÄꤷ¤¿ database ̾ '%-.100s' ¤¬´Ö°ã¤Ã¤Æ¤¤¤Þ¤¹" + kor "'%-.100s' µ¥ÀÌŸº£À̽ºÀÇ À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù." 
+ nor "Ugyldig database navn '%-.100s'" + norwegian-ny "Ugyldig database namn '%-.100s'" + pol "Niedozwolona nazwa bazy danych '%-.100s'" + por "Nome de banco de dados '%-.100s' incorreto" + rum "Numele bazei de date este incorect '%-.100s'" + rus "îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÂÁÚÙ ÄÁÎÎÙÈ '%-.100s'" + serbian "Pogrešno ime baze '%-.100s'" + slo "Neprípustné meno databázy '%-.100s'" + spa "Nombre de base de datos ilegal '%-.100s'" + swe "Felaktigt databasnamn '%-.100s'" + ukr "îÅצÒÎÅ ¦Í'Ñ ÂÁÚÉ ÄÁÎÎÉÈ '%-.100s'" +ER_WRONG_TABLE_NAME 42000 + cze "Nep-Bøípustné jméno tabulky '%-.100s'" + dan "Ugyldigt tabel navn '%-.100s'" + nla "Niet toegestane tabelnaam '%-.100s'" + eng "Incorrect table name '%-.100s'" + jps "Žw’肵‚½ table –¼ '%-.100s' ‚͂܂¿‚ª‚Á‚Ä‚¢‚Ü‚·", + est "Vigane tabeli nimi '%-.100s'" + fre "Nom de table illégal: '%-.100s'" + ger "Unerlaubter Tabellenname '%-.100s'" + greek "ËÜèïò üíïìá ðßíáêá '%-.100s'" + hun "Hibas tablanev: '%-.100s'" + ita "Nome tabella errato '%-.100s'" + jpn "»ØÄꤷ¤¿ table ̾ '%-.100s' ¤Ï¤Þ¤Á¤¬¤Ã¤Æ¤¤¤Þ¤¹" + kor "'%-.100s' Å×À̺í À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù." + nor "Ugyldig tabell navn '%-.100s'" + norwegian-ny "Ugyldig tabell namn '%-.100s'" + pol "Niedozwolona nazwa tabeli '%-.100s'..." + por "Nome de tabela '%-.100s' incorreto" + rum "Numele tabelei este incorect '%-.100s'" + rus "îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÔÁÂÌÉÃÙ '%-.100s'" + serbian "Pogrešno ime tabele '%-.100s'" + slo "Neprípustné meno tabuµky '%-.100s'" + spa "Nombre de tabla ilegal '%-.100s'" + swe "Felaktigt tabellnamn '%-.100s'" + ukr "îÅצÒÎÅ ¦Í'Ñ ÔÁÂÌÉæ '%-.100s'" +ER_TOO_BIG_SELECT 42000 + cze "Zadan-Bý SELECT by procházel pøíli¹ mnoho záznamù a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v poøádku, pou¾ijte SET SQL_BIG_SELECTS=1" + dan "SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt" + nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. 
Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is." + eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay" + est "SELECT lause peab läbi vaatama suure hulga kirjeid ja võtaks tõenäoliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada käsku SET SQL_BIG_SELECTS=1" + fre "SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. Vérifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien" + ger "Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen und gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden" + greek "Ôï SELECT èá åîåôÜóåé ìåãÜëï áñéèìü åããñáöþí êáé ðéèáíþò èá êáèõóôåñÞóåé. Ðáñáêáëþ åîåôÜóôå ôéò ðáñáìÝôñïõò ôïõ WHERE êáé ÷ñçóéìïðïéåßóôå SET SQL_BIG_SELECTS=1 áí ôï SELECT åßíáé óùóôü" + hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay" + ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto." + kor "SELECT ¸í·É¿¡¼ ³Ê¹« ¸¹Àº ·¹Äڵ带 ã±â ¶§¹®¿¡ ¸¹Àº ½Ã°£ÀÌ ¼Ò¿äµË´Ï´Ù. µû¶ó¼ WHERE ¹®À» Á¡°ËÇϰųª, ¸¸¾à SELECT°¡ okµÇ¸é SET SQL_BIG_SELECTS=1 ¿É¼ÇÀ» »ç¿ëÇϼ¼¿ä." + nor "SELECT ville undersøke for mange poster og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt" + norwegian-ny "SELECT ville undersøkje for mange postar og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt" + pol "Operacja SELECT bêdzie dotyczy³a zbyt wielu rekordów i prawdopodobnie zajmie bardzo du¿o czasu. 
Sprawd¥ warunek WHERE i u¿yj SQL_OPTION BIG_SELECTS=1 je?li operacja SELECT jest poprawna" + por "O SELECT examinaria registros demais e provavelmente levaria muito tempo. Cheque sua cláusula WHERE e use SET SQL_BIG_SELECTS=1, se o SELECT estiver correto" + rum "SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp; verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e okay" + rus "äÌÑ ÔÁËÏÊ ×ÙÂÏÒËÉ SELECT ÄÏÌÖÅÎ ÂÕÄÅÔ ÐÒÏÓÍÏÔÒÅÔØ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÚÁÐÉÓÅÊ É, ×ÉÄÉÍÏ, ÜÔÏ ÚÁÊÍÅÔ ÏÞÅÎØ ÍÎÏÇÏ ×ÒÅÍÅÎÉ. ðÒÏ×ÅÒØÔÅ ×ÁÛÅ ÕËÁÚÁÎÉÅ WHERE, É, ÅÓÌÉ × ÎÅÍ ×ÓÅ × ÐÏÒÑÄËÅ, ÕËÁÖÉÔÅ SET SQL_BIG_SELECTS=1" + serbian "Komanda 'SELECT' æe ispitati previše slogova i potrošiti previše vremena. Proverite vaš 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako želite baš ovakvu komandu" + slo "Zadaná po¾iadavka SELECT by prechádzala príli¹ mnoho záznamov a trvala by príli¹ dlho. Skontrolujte tvar WHERE a ak je v poriadku, pou¾ite SET SQL_BIG_SELECTS=1" + spa "El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto" + swe "Den angivna frågan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins" + ukr "úÁÐÉÔÕ SELECT ÐÏÔÒ¦ÂÎÏ ÏÂÒÏÂÉÔÉ ÂÁÇÁÔÏ ÚÁÐÉÓ¦×, ÝÏ, ÐÅ×ÎÅ, ÚÁÊÍÅ ÄÕÖÅ ÂÁÇÁÔÏ ÞÁÓÕ. ðÅÒÅצÒÔÅ ×ÁÛÅ WHERE ÔÁ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ SET SQL_BIG_SELECTS=1, ÑËÝÏ ÃÅÊ ÚÁÐÉÔ SELECT ¤ צÒÎÉÍ" +ER_UNKNOWN_ERROR + cze "Nezn-Bámá chyba" + dan "Ukendt fejl" + nla "Onbekende Fout" + eng "Unknown error" + est "Tundmatu viga" + fre "Erreur inconnue" + ger "Unbekannter Fehler" + greek "ÐñïÝêõøå Üãíùóôï ëÜèïò" + hun "Ismeretlen hiba" + ita "Errore sconosciuto" + kor "¾Ë¼ö ¾ø´Â ¿¡·¯ÀÔ´Ï´Ù." 
+ nor "Ukjent feil" + norwegian-ny "Ukjend feil" + por "Erro desconhecido" + rum "Eroare unknown" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÏÛÉÂËÁ" + serbian "Nepoznata greška" + slo "Neznámá chyba" + spa "Error desconocido" + swe "Oidentifierat fel" + ukr "îÅצÄÏÍÁ ÐÏÍÉÌËÁ" +ER_UNKNOWN_PROCEDURE 42000 + cze "Nezn-Bámá procedura %-.64s" + dan "Ukendt procedure %-.64s" + nla "Onbekende procedure %-.64s" + eng "Unknown procedure '%-.64s'" + est "Tundmatu protseduur '%-.64s'" + fre "Procédure %-.64s inconnue" + ger "Unbekannte Prozedur '%-.64s'" + greek "Áãíùóôç äéáäéêáóßá '%-.64s'" + hun "Ismeretlen eljaras: '%-.64s'" + ita "Procedura '%-.64s' sconosciuta" + kor "¾Ë¼ö ¾ø´Â ¼öÇ๮ : '%-.64s'" + nor "Ukjent prosedyre %-.64s" + norwegian-ny "Ukjend prosedyre %-.64s" + pol "Unkown procedure %-.64s" + por "'Procedure' '%-.64s' desconhecida" + rum "Procedura unknown '%-.64s'" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÐÒÏÃÅÄÕÒÁ '%-.64s'" + serbian "Nepoznata procedura '%-.64s'" + slo "Neznámá procedúra '%-.64s'" + spa "Procedimiento desconocido %-.64s" + swe "Okänd procedur: %-.64s" + ukr "îÅצÄÏÍÁ ÐÒÏÃÅÄÕÒÁ '%-.64s'" +ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000 + cze "Chybn-Bý poèet parametrù procedury %-.64s" + dan "Forkert antal parametre til proceduren %-.64s" + nla "Foutief aantal parameters doorgegeven aan procedure %-.64s" + eng "Incorrect parameter count to procedure '%-.64s'" + est "Vale parameetrite hulk protseduurile '%-.64s'" + fre "Mauvais nombre de paramètres pour la procedure %-.64s" + ger "Falsche Parameterzahl für Prozedur '%-.64s'" + greek "ËÜèïò áñéèìüò ðáñáìÝôñùí óôç äéáäéêáóßá '%-.64s'" + hun "Rossz parameter a(z) '%-.64s'eljaras szamitasanal" + ita "Numero di parametri errato per la procedura '%-.64s'" + kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆÄ¶ó¸ÞÅÍ" + nor "Feil parameter antall til prosedyren %-.64s" + norwegian-ny "Feil parameter tal til prosedyra %-.64s" + pol "Incorrect parameter count to procedure %-.64s" + por "Número de parâmetros incorreto para a 'procedure' '%-.64s'" + rum "Procedura '%-.64s' are un 
numar incorect de parametri" + rus "îÅËÏÒÒÅËÔÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÐÁÒÁÍÅÔÒÏ× ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'" + serbian "Pogrešan broj parametara za proceduru '%-.64s'" + slo "Chybný poèet parametrov procedúry '%-.64s'" + spa "Equivocado parametro count para procedimiento %-.64s" + swe "Felaktigt antal parametrar till procedur %-.64s" + ukr "èÉÂÎÁ Ë¦ÌØË¦ÓÔØ ÐÁÒÁÍÅÔÒ¦× ÐÒÏÃÅÄÕÒÉ '%-.64s'" +ER_WRONG_PARAMETERS_TO_PROCEDURE + cze "Chybn-Bé parametry procedury %-.64s" + dan "Forkert(e) parametre til proceduren %-.64s" + nla "Foutieve parameters voor procedure %-.64s" + eng "Incorrect parameters to procedure '%-.64s'" + est "Vigased parameetrid protseduurile '%-.64s'" + fre "Paramètre erroné pour la procedure %-.64s" + ger "Falsche Parameter für Prozedur '%-.64s'" + greek "ËÜèïò ðáñÜìåôñïé óôçí äéáäéêáóßá '%-.64s'" + hun "Rossz parameter a(z) '%-.64s' eljarasban" + ita "Parametri errati per la procedura '%-.64s'" + kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆÄ¶ó¸ÞÅÍ" + nor "Feil parametre til prosedyren %-.64s" + norwegian-ny "Feil parameter til prosedyra %-.64s" + pol "Incorrect parameters to procedure %-.64s" + por "Parâmetros incorretos para a 'procedure' '%-.64s'" + rum "Procedura '%-.64s' are parametrii incorecti" + rus "îÅËÏÒÒÅËÔÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'" + serbian "Pogrešni parametri prosleðeni proceduri '%-.64s'" + slo "Chybné parametre procedúry '%-.64s'" + spa "Equivocados parametros para procedimiento %-.64s" + swe "Felaktiga parametrar till procedur %-.64s" + ukr "èÉÂÎÉÊ ÐÁÒÁÍÅÔÅÒ ÐÒÏÃÅÄÕÒÉ '%-.64s'" +ER_UNKNOWN_TABLE 42S02 + cze "Nezn-Bámá tabulka '%-.64s' v %-.32s" + dan "Ukendt tabel '%-.64s' i %-.32s" + nla "Onbekende tabel '%-.64s' in %-.32s" + eng "Unknown table '%-.64s' in %-.32s" + est "Tundmatu tabel '%-.64s' %-.32s-s" + fre "Table inconnue '%-.64s' dans %-.32s" + ger "Unbekannte Tabelle '%-.64s' in '%-.32s'" + greek "Áãíùóôïò ðßíáêáò '%-.64s' óå %-.32s" + hun "Ismeretlen tabla: '%-.64s' %-.32s-ban" + ita "Tabella '%-.64s' sconosciuta in %-.32s" + jpn "Unknown 
table '%-.64s' in %-.32s" + kor "¾Ë¼ö ¾ø´Â Å×À̺í '%-.64s' (µ¥ÀÌŸº£À̽º %-.32s)" + nor "Ukjent tabell '%-.64s' i %-.32s" + norwegian-ny "Ukjend tabell '%-.64s' i %-.32s" + pol "Unknown table '%-.64s' in %-.32s" + por "Tabela '%-.64s' desconhecida em '%-.32s'" + rum "Tabla '%-.64s' invalida in %-.32s" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.64s' × %-.32s" + serbian "Nepoznata tabela '%-.64s' u '%-.32s'" + slo "Neznáma tabuµka '%-.64s' v %-.32s" + spa "Tabla desconocida '%-.64s' in %-.32s" + swe "Okänd tabell '%-.64s' i '%-.32s'" + ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.64s' Õ %-.32s" +ER_FIELD_SPECIFIED_TWICE 42000 + cze "Polo-B¾ka '%-.64s' je zadána dvakrát" + dan "Feltet '%-.64s' er anvendt to gange" + nla "Veld '%-.64s' is dubbel gespecificeerd" + eng "Column '%-.64s' specified twice" + est "Tulp '%-.64s' on määratletud topelt" + fre "Champ '%-.64s' spécifié deux fois" + ger "Feld '%-.64s' wurde zweimal angegeben" + greek "Ôï ðåäßï '%-.64s' Ý÷åé ïñéóèåß äýï öïñÝò" + hun "A(z) '%-.64s' mezot ketszer definialta" + ita "Campo '%-.64s' specificato 2 volte" + kor "Ä®·³ '%-.64s'´Â µÎ¹ø Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù." 
+ nor "Feltet '%-.64s' er spesifisert to ganger" + norwegian-ny "Feltet '%-.64s' er spesifisert to gangar" + pol "Field '%-.64s' specified twice" + por "Coluna '%-.64s' especificada duas vezes" + rum "Coloana '%-.64s' specificata de doua ori" + rus "óÔÏÌÂÅà '%-.64s' ÕËÁÚÁÎ Ä×ÁÖÄÙ" + serbian "Kolona '%-.64s' je navedena dva puta" + slo "Pole '%-.64s' je zadané dvakrát" + spa "Campo '%-.64s' especificado dos veces" + swe "Fält '%-.64s' är redan använt" + ukr "óÔÏ×ÂÅÃØ '%-.64s' ÚÁÚÎÁÞÅÎÏ Äצަ" +ER_INVALID_GROUP_FUNC_USE + cze "Nespr-Bávné pou¾ití funkce group" + dan "Forkert brug af grupperings-funktion" + nla "Ongeldig gebruik van GROUP-functie" + eng "Invalid use of group function" + est "Vigane grupeerimisfunktsiooni kasutus" + fre "Utilisation invalide de la clause GROUP" + ger "Falsche Verwendung einer Gruppierungsfunktion" + greek "ÅóöáëìÝíç ÷ñÞóç ôçò group function" + hun "A group funkcio ervenytelen hasznalata" + ita "Uso non valido di una funzione di raggruppamento" + kor "À߸øµÈ ±×·ì ÇÔ¼ö¸¦ »ç¿ëÇÏ¿´½À´Ï´Ù." + por "Uso inválido de função de agrupamento (GROUP)" + rum "Folosire incorecta a functiei group" + rus "îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÇÒÕÐÐÏ×ÙÈ ÆÕÎËÃÉÊ" + serbian "Pogrešna upotreba 'GROUP' funkcije" + slo "Nesprávne pou¾itie funkcie GROUP" + spa "Invalido uso de función en grupo" + swe "Felaktig användning av SQL grupp function" + ukr "èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÆÕÎËæ§ ÇÒÕÐÕ×ÁÎÎÑ" +ER_UNSUPPORTED_EXTENSION 42000 + cze "Tabulka '%-.64s' pou-B¾ívá roz¹íøení, které v této verzi MySQL není" + dan "Tabellen '%-.64s' bruger et filtypenavn som ikke findes i denne MySQL version" + nla "Tabel '%-.64s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt." 
+ eng "Table '%-.64s' uses an extension that doesn't exist in this MySQL version" + est "Tabel '%-.64s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis" + fre "Table '%-.64s' : utilise une extension invalide pour cette version de MySQL" + ger "Tabelle '%-.64s' verwendet eine Erweiterung, die in dieser MySQL-Version nicht verfügbar ist" + greek "Ï ðßíáêò '%-.64s' ÷ñçóéìïðïéåß êÜðïéï extension ðïõ äåí õðÜñ÷åé óôçí Ýêäïóç áõôÞ ôçò MySQL" + hun "A(z) '%-.64s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban." + ita "La tabella '%-.64s' usa un'estensione che non esiste in questa versione di MySQL" + kor "Å×À̺í '%-.64s'´Â È®Àå¸í·ÉÀ» ÀÌ¿ëÇÏÁö¸¸ ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." + nor "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" + norwegian-ny "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" + pol "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" + por "Tabela '%-.64s' usa uma extensão que não existe nesta versão do MySQL" + rum "Tabela '%-.64s' foloseste o extensire inexistenta in versiunea curenta de MySQL" + rus "÷ ÔÁÂÌÉÃÅ '%-.64s' ÉÓÐÏÌØÚÕÀÔÓÑ ×ÏÚÍÏÖÎÏÓÔÉ, ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÍÙÅ × ÜÔÏÊ ×ÅÒÓÉÉ MySQL" + serbian "Tabela '%-.64s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a" + slo "Tabuµka '%-.64s' pou¾íva roz¹írenie, ktoré v tejto verzii MySQL nie je" + spa "Tabla '%-.64s' usa una extensión que no existe en esta MySQL versión" + swe "Tabell '%-.64s' har en extension som inte finns i denna version av MySQL" + ukr "ôÁÂÌÉÃÑ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ ÒÏÚÛÉÒÅÎÎÑ, ÝÏ ÎÅ ¦ÓÎÕ¤ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL" +ER_TABLE_MUST_HAVE_COLUMNS 42000 + cze "Tabulka mus-Bí mít alespoò jeden sloupec" + dan "En tabel skal have mindst een kolonne" + nla "Een tabel moet minstens 1 kolom bevatten" + eng "A table must have at least 1 column" + jps "ƒe[ƒuƒ‹‚ÍÅ’á 1 ŒÂ‚Ì column ‚ª•K—v‚Å‚·", + est "Tabelis peab olema vähemalt üks tulp" + fre "Une table doit comporter au moins une 
colonne" + ger "Eine Tabelle muss mindestens eine Spalte besitzen" + greek "Åíáò ðßíáêáò ðñÝðåé íá Ý÷åé ôïõëÜ÷éóôïí Ýíá ðåäßï" + hun "A tablanak legalabb egy oszlopot tartalmazni kell" + ita "Una tabella deve avere almeno 1 colonna" + jpn "¥Æ¡¼¥Ö¥ë¤ÏºÇÄã 1 ¸Ä¤Î column ¤¬É¬ÍפǤ¹" + kor "ÇϳªÀÇ Å×ÀÌºí¿¡¼´Â Àû¾îµµ ÇϳªÀÇ Ä®·³ÀÌ Á¸ÀçÇÏ¿©¾ß ÇÕ´Ï´Ù." + por "Uma tabela tem que ter pelo menos uma (1) coluna" + rum "O tabela trebuie sa aiba cel putin o coloana" + rus "÷ ÔÁÂÌÉÃÅ ÄÏÌÖÅÎ ÂÙÔØ ËÁË ÍÉÎÉÍÕÍ ÏÄÉÎ ÓÔÏÌÂÅÃ" + serbian "Tabela mora imati najmanje jednu kolonu" + slo "Tabuµka musí ma» aspoò 1 pole" + spa "Una tabla debe tener al menos 1 columna" + swe "Tabeller måste ha minst 1 kolumn" + ukr "ôÁÂÌÉÃÑ ÐÏ×ÉÎÎÁ ÍÁÔÉ ÈÏÞÁ ÏÄÉÎ ÓÔÏ×ÂÅÃØ" +ER_RECORD_FILE_FULL + cze "Tabulka '%-.64s' je pln-Bá" + dan "Tabellen '%-.64s' er fuld" + nla "De tabel '%-.64s' is vol" + eng "The table '%-.64s' is full" + jps "table '%-.64s' ‚Í‚¢‚Á‚Ï‚¢‚Å‚·", + est "Tabel '%-.64s' on täis" + fre "La table '%-.64s' est pleine" + ger "Tabelle '%-.64s' ist voll" + greek "Ï ðßíáêáò '%-.64s' åßíáé ãåìÜôïò" + hun "A '%-.64s' tabla megtelt" + ita "La tabella '%-.64s' e` piena" + jpn "table '%-.64s' ¤Ï¤¤¤Ã¤Ñ¤¤¤Ç¤¹" + kor "Å×À̺í '%-.64s'°¡ full³µ½À´Ï´Ù. 
" + por "Tabela '%-.64s' está cheia" + rum "Tabela '%-.64s' e plina" + rus "ôÁÂÌÉÃÁ '%-.64s' ÐÅÒÅÐÏÌÎÅÎÁ" + serbian "Tabela '%-.64s' je popunjena do kraja" + slo "Tabuµka '%-.64s' je plná" + spa "La tabla '%-.64s' está llena" + swe "Tabellen '%-.64s' är full" + ukr "ôÁÂÌÉÃÑ '%-.64s' ÚÁÐÏ×ÎÅÎÁ" +ER_UNKNOWN_CHARACTER_SET 42000 + cze "Nezn-Bámá znaková sada: '%-.64s'" + dan "Ukendt tegnsæt: '%-.64s'" + nla "Onbekende character set: '%-.64s'" + eng "Unknown character set: '%-.64s'" + jps "character set '%-.64s' ‚̓Tƒ|[ƒg‚µ‚Ä‚¢‚Ü‚¹‚ñ", + est "Vigane kooditabel '%-.64s'" + fre "Jeu de caractères inconnu: '%-.64s'" + ger "Unbekannter Zeichensatz: '%-.64s'" + greek "Áãíùóôï character set: '%-.64s'" + hun "Ervenytelen karakterkeszlet: '%-.64s'" + ita "Set di caratteri '%-.64s' sconosciuto" + jpn "character set '%-.64s' ¤Ï¥µ¥Ý¡¼¥È¤·¤Æ¤¤¤Þ¤»¤ó" + kor "¾Ë¼ö¾ø´Â ¾ð¾î Set: '%-.64s'" + por "Conjunto de caracteres '%-.64s' desconhecido" + rum "Set de caractere invalid: '%-.64s'" + rus "îÅÉÚ×ÅÓÔÎÁÑ ËÏÄÉÒÏ×ËÁ '%-.64s'" + serbian "Nepoznati karakter-set: '%-.64s'" + slo "Neznáma znaková sada: '%-.64s'" + spa "Juego de caracteres desconocido: '%-.64s'" + swe "Okänd teckenuppsättning: '%-.64s'" + ukr "îÅצÄÏÍÁ ËÏÄÏ×Á ÔÁÂÌÉÃÑ: '%-.64s'" +ER_TOO_MANY_TABLES + cze "P-Bøíli¹ mnoho tabulek, MySQL jich mù¾e mít v joinu jen %d" + dan "For mange tabeller. MySQL kan kun bruge %d tabeller i et join" + nla "Teveel tabellen. MySQL kan slechts %d tabellen in een join bevatten" + eng "Too many tables; MySQL can only use %d tables in a join" + jps "ƒe[ƒuƒ‹‚ª‘½‚·‚¬‚Ü‚·; MySQL can only use %d tables in a join", + est "Liiga palju tabeleid. MySQL suudab JOINiga ühendada kuni %d tabelit" + fre "Trop de tables. MySQL ne peut utiliser que %d tables dans un JOIN" + ger "Zu viele Tabellen. MySQL kann in einem Join maximal %d Tabellen verwenden" + greek "Ðïëý ìåãÜëïò áñéèìüò ðéíÜêùí. Ç MySQL ìðïñåß íá ÷ñçóéìïðïéÞóåé %d ðßíáêåò óå äéáäéêáóßá join" + hun "Tul sok tabla. 
A MySQL csak %d tablat tud kezelni osszefuzeskor" + ita "Troppe tabelle. MySQL puo` usare solo %d tabelle in una join" + jpn "¥Æ¡¼¥Ö¥ë¤¬Â¿¤¹¤®¤Þ¤¹; MySQL can only use %d tables in a join" + kor "³Ê¹« ¸¹Àº Å×À̺íÀÌ JoinµÇ¾ú½À´Ï´Ù. MySQL¿¡¼´Â JOIN½Ã %d°³ÀÇ Å×ÀÌºí¸¸ »ç¿ëÇÒ ¼ö ÀÖ½À´Ï´Ù." + por "Tabelas demais. O MySQL pode usar somente %d tabelas em uma junção (JOIN)" + rum "Prea multe tabele. MySQL nu poate folosi mai mult de %d tabele intr-un join" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÔÁÂÌÉÃ. MySQL ÍÏÖÅÔ ÉÓÐÏÌØÚÏ×ÁÔØ ÔÏÌØËÏ %d ÔÁÂÌÉÃ × ÓÏÅÄÉÎÅÎÉÉ" + serbian "Previše tabela. MySQL može upotrebiti maksimum %d tabela pri 'JOIN' operaciji" + slo "Príli¹ mnoho tabuliek. MySQL mô¾e pou¾i» len %d v JOIN-e" + spa "Muchas tablas. MySQL solamente puede usar %d tablas en un join" + swe "För många tabeller. MySQL can ha högst %d tabeller i en och samma join" + ukr "úÁÂÁÇÁÔÏ ÔÁÂÌÉÃØ. MySQL ÍÏÖÅ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ %d ÔÁÂÌÉÃØ Õ ÏÂ'¤ÄÎÁÎΦ" +ER_TOO_MANY_FIELDS + cze "P-Bøíli¹ mnoho polo¾ek" + dan "For mange felter" + nla "Te veel velden" + eng "Too many columns" + jps "column ‚ª‘½‚·‚¬‚Ü‚·", + est "Liiga palju tulpasid" + fre "Trop de champs" + ger "Zu viele Felder" + greek "Ðïëý ìåãÜëïò áñéèìüò ðåäßùí" + hun "Tul sok mezo" + ita "Troppi campi" + jpn "column ¤¬Â¿¤¹¤®¤Þ¤¹" + kor "Ä®·³ÀÌ ³Ê¹« ¸¹½À´Ï´Ù." + por "Colunas demais" + rum "Prea multe coloane" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÓÔÏÌÂÃÏ×" + serbian "Previše kolona" + slo "Príli¹ mnoho polí" + spa "Muchos campos" + swe "För många fält" + ukr "úÁÂÁÇÁÔÏ ÓÔÏ×Âæ×" +ER_TOO_BIG_ROWSIZE 42000 + cze "-BØádek je pøíli¹ velký. Maximální velikost øádku, nepoèítaje polo¾ky blob, je %ld. Musíte zmìnit nìkteré polo¾ky na blob" + dan "For store poster. Max post størrelse, uden BLOB's, er %ld. Du må lave nogle felter til BLOB's" + nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %ld. U dient sommige velden in blobs te veranderen." + eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. 
You have to change some columns to TEXT or BLOBs" + jps "row size ‚ª‘å‚«‚·‚¬‚Ü‚·. BLOB ‚ðŠÜ‚܂Ȃ¢ê‡‚Ì row size ‚ÌÅ‘å‚Í %ld ‚Å‚·. ‚¢‚‚‚©‚Ì field ‚ð BLOB ‚ɕς¦‚Ä‚‚¾‚³‚¢.", + est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %ld. Muuda mõned väljad BLOB-tüüpi väljadeks" + fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %ld. Changez le type de quelques colonnes en BLOB" + ger "Zeilenlänge zu groß. Die maximale Zeilenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %ld. Einige Felder müssen in BLOB oder TEXT umgewandelt werden" + greek "Ðïëý ìåãÜëï ìÝãåèïò åããñáöÞò. Ôï ìÝãéóôï ìÝãåèïò åããñáöÞò, ÷ùñßò íá õðïëïãßæïíôáé ôá blobs, åßíáé %ld. ÐñÝðåé íá ïñßóåôå êÜðïéá ðåäßá óáí blobs" + hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %ld. Nehany mezot meg kell valtoztatnia" + ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %ld. Devi cambiare alcuni campi in BLOB" + jpn "row size ¤¬Â礤¹¤®¤Þ¤¹. BLOB ¤ò´Þ¤Þ¤Ê¤¤¾ì¹ç¤Î row size ¤ÎºÇÂç¤Ï %ld ¤Ç¤¹. ¤¤¤¯¤Ä¤«¤Î field ¤ò BLOB ¤ËÊѤ¨¤Æ¤¯¤À¤µ¤¤." + kor "³Ê¹« Å« row »çÀÌÁîÀÔ´Ï´Ù. BLOB¸¦ °è»êÇÏÁö ¾Ê°í ÃÖ´ë row »çÀÌÁî´Â %ldÀÔ´Ï´Ù. ¾ó¸¶°£ÀÇ ÇʵåµéÀ» BLOB·Î ¹Ù²Ù¼Å¾ß °Ú±º¿ä.." + por "Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %ld. Você tem que mudar alguns campos para BLOBs" + rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %ld. Trebuie sa schimbati unele cimpuri in BLOB-uri" + rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ. íÁËÓÉÍÁÌØÎÙÊ ÒÁÚÍÅÒ ÓÔÒÏËÉ, ÉÓËÌÀÞÁÑ ÐÏÌÑ BLOB, - %ld. ÷ÏÚÍÏÖÎÏ, ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÍÅÎÉÔØ ÔÉÐ ÎÅËÏÔÏÒÙÈ ÐÏÌÅÊ ÎÁ BLOB" + serbian "Prevelik slog. Maksimalna velièina sloga, ne raèunajuæi BLOB polja, je %ld. Trebali bi da promenite tip nekih polja u BLOB" + slo "Riadok je príli¹ veµký. Maximálna veµkos» riadku, okrem 'BLOB', je %ld. Musíte zmeni» niektoré polo¾ky na BLOB" + spa "Tamaño de línea muy grande. 
Máximo tamaño de línea, no contando blob, es %ld. Tu tienes que cambiar algunos campos para blob" + swe "För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %ld. Ändra några av dina fält till BLOB" + ukr "úÁÄÏ×ÇÁ ÓÔÒÏËÁ. îÁÊÂ¦ÌØÛÏÀ ÄÏ×ÖÉÎÏÀ ÓÔÒÏËÉ, ÎÅ ÒÁÈÕÀÞÉ BLOB, ¤ %ld. ÷ÁÍ ÐÏÔÒ¦ÂÎÏ ÐÒÉ×ÅÓÔÉ ÄÅÑ˦ ÓÔÏ×Âæ ÄÏ ÔÉÐÕ BLOB" +ER_STACK_OVERRUN + cze "P-Bøeteèení zásobníku threadu: pou¾ito %ld z %ld. Pou¾ijte 'mysqld -O thread_stack=#' k zadání vìt¹ího zásobníku" + dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld -O thread_stack=#' for at allokere en større stak om nødvendigt" + nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld -O thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)." + eng "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed" + jps "Thread stack overrun: Used: %ld of a %ld stack. ƒXƒ^ƒbƒN—̈æ‚𑽂‚Ƃ肽‚¢ê‡A'mysqld -O thread_stack=#' ‚ÆŽw’肵‚Ä‚‚¾‚³‚¢", + fre "Débordement de la pile des tâches (Thread stack). Utilisées: %ld pour une pile de %ld. Essayez 'mysqld -O thread_stack=#' pour indiquer une plus grande valeur" + ger "Thread-Stack-Überlauf. Benutzt: %ld von %ld Stack. 'mysqld -O thread_stack=#' verwenden, um bei Bedarf einen größeren Stack anzulegen" + greek "Stack overrun óôï thread: Used: %ld of a %ld stack. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'mysqld -O thread_stack=#' ãéá íá ïñßóåôå Ýíá ìåãáëýôåñï stack áí ÷ñåéÜæåôáé" + hun "Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld -O thread_stack=#' nagyobb verem definialasahoz" + ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld -O thread_stack=#' per specificare uno stack piu` grande." + jpn "Thread stack overrun: Used: %ld of a %ld stack. ¥¹¥¿¥Ã¥¯Îΰè¤ò¿¤¯¤È¤ê¤¿¤¤¾ì¹ç¡¢'mysqld -O thread_stack=#' ¤È»ØÄꤷ¤Æ¤¯¤À¤µ¤¤" + kor "¾²·¹µå ½ºÅÃÀÌ ³ÑÃÆ½À´Ï´Ù. »ç¿ë: %ld°³ ½ºÅÃ: %ld°³. 
¸¸¾à ÇÊ¿ä½Ã ´õÅ« ½ºÅÃÀ» ¿øÇÒ¶§¿¡´Â 'mysqld -O thread_stack=#' ¸¦ Á¤ÀÇÇϼ¼¿ä" + por "Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário" + rum "Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld -O thread_stack=#' ca sa specifici un stack mai mare" + rus "óÔÅË ÐÏÔÏËÏ× ÐÅÒÅÐÏÌÎÅÎ: ÉÓÐÏÌØÚÏ×ÁÎÏ: %ld ÉÚ %ld ÓÔÅËÁ. ðÒÉÍÅÎÑÊÔÅ 'mysqld -O thread_stack=#' ÄÌÑ ÕËÁÚÁÎÉÑ ÂÏÌØÛÅÇÏ ÒÁÚÍÅÒÁ ÓÔÅËÁ, ÅÓÌÉ ÎÅÏÂÈÏÄÉÍÏ" + serbian "Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. Upotrebite 'mysqld -O thread_stack=#' da navedete veæi stack ako je potrebno" + slo "Preteèenie zásobníku vlákna: pou¾ité: %ld z %ld. Pou¾ite 'mysqld -O thread_stack=#' k zadaniu väè¹ieho zásobníka" + spa "Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario" + swe "Trådstacken tog slut: Har använt %ld av %ld bytes. Använd 'mysqld -O thread_stack=#' ifall du behöver en större stack" + ukr "óÔÅË Ç¦ÌÏË ÐÅÒÅÐÏ×ÎÅÎÏ: ÷ÉËÏÒÉÓÔÁÎÏ: %ld Ú %ld. ÷ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqld -O thread_stack=#' ÁÂÉ ÚÁÚÎÁÞÉÔÉ Â¦ÌØÛÉÊ ÓÔÅË, ÑËÝÏ ÎÅÏÂȦÄÎÏ" +ER_WRONG_OUTER_JOIN 42000 + cze "V OUTER JOIN byl nalezen k-Bøí¾ový odkaz. Provìøte ON podmínky" + dan "Krydsreferencer fundet i OUTER JOIN; check dine ON conditions" + nla "Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions" + eng "Cross dependency found in OUTER JOIN; examine your ON conditions" + est "Ristsõltuvus OUTER JOIN klauslis. Kontrolli oma ON tingimusi" + fre "Dépendance croisée dans une clause OUTER JOIN. Vérifiez la condition ON" + ger "OUTER JOIN enthält fehlerhafte Abhängigkeiten. In ON verwendete Bedingungen überprüfen" + greek "Cross dependency âñÝèçêå óå OUTER JOIN. Ðáñáêáëþ åîåôÜóôå ôéò óõíèÞêåò ðïõ èÝóáôå óôï ON" + hun "Keresztfuggoseg van az OUTER JOIN-ban. 
Ellenorizze az ON felteteleket" + ita "Trovata una dipendenza incrociata nella OUTER JOIN. Controlla le condizioni ON" + por "Dependência cruzada encontrada em junção externa (OUTER JOIN); examine as condições utilizadas nas cláusulas 'ON'" + rum "Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. Examinati conditiile ON" + rus "÷ OUTER JOIN ÏÂÎÁÒÕÖÅÎÁ ÐÅÒÅËÒÅÓÔÎÁÑ ÚÁ×ÉÓÉÍÏÓÔØ. ÷ÎÉÍÁÔÅÌØÎÏ ÐÒÏÁÎÁÌÉÚÉÒÕÊÔÅ Ó×ÏÉ ÕÓÌÏ×ÉÑ ON" + serbian "Unakrsna zavisnost pronaðena u komandi 'OUTER JOIN'. Istražite vaše 'ON' uslove" + slo "V OUTER JOIN bol nájdený krí¾ový odkaz. Skontrolujte podmienky ON" + spa "Dependencia cruzada encontrada en OUTER JOIN. Examine su condición ON" + swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket" + ukr "ðÅÒÅÈÒÅÓÎÁ ÚÁÌÅÖΦÓÔØ Õ OUTER JOIN. ðÅÒÅצÒÔÅ ÕÍÏ×Õ ON" +ER_NULL_COLUMN_IN_INDEX 42000 + cze "Sloupec '%-.64s' je pou-B¾it s UNIQUE nebo INDEX, ale není definován jako NOT NULL" + dan "Kolonne '%-.64s' bruges som UNIQUE eller INDEX men er ikke defineret som NOT NULL" + nla "Kolom '%-.64s' wordt gebruikt met UNIQUE of INDEX maar is niet gedefinieerd als NOT NULL" + eng "Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL" + jps "Column '%-.64s' ‚ª UNIQUE ‚© INDEX ‚ÅŽg—p‚³‚ê‚Ü‚µ‚½. ‚±‚̃Jƒ‰ƒ€‚Í NOT NULL ‚Æ’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.", + est "Tulp '%-.64s' on kasutusel indeksina, kuid ei ole määratletud kui NOT NULL" + fre "La colonne '%-.64s' fait partie d'un index UNIQUE ou INDEX mais n'est pas définie comme NOT NULL" + ger "Spalte '%-.64s' wurde mit UNIQUE oder INDEX benutzt, ist aber nicht als NOT NULL definiert" + greek "Ôï ðåäßï '%-.64s' ÷ñçóéìïðïéåßôáé óáí UNIQUE Þ INDEX áëëÜ äåí Ý÷åé ïñéóèåß óáí NOT NULL" + hun "A(z) '%-.64s' oszlop INDEX vagy UNIQUE (egyedi), de a definicioja szerint nem NOT NULL" + ita "La colonna '%-.64s' e` usata con UNIQUE o INDEX ma non e` definita come NOT NULL" + jpn "Column '%-.64s' ¤¬ UNIQUE ¤« INDEX ¤Ç»ÈÍѤµ¤ì¤Þ¤·¤¿. ¤³¤Î¥«¥é¥à¤Ï NOT NULL ¤ÈÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó." 
+ kor "'%-.64s' Ä®·³ÀÌ UNIQUE³ª INDEX¸¦ »ç¿ëÇÏ¿´Áö¸¸ NOT NULLÀÌ Á¤ÀǵÇÁö ¾Ê¾Ò±º¿ä..." + nor "Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL" + norwegian-ny "Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL" + pol "Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL" + por "Coluna '%-.64s' é usada com única (UNIQUE) ou índice (INDEX), mas não está definida como não-nula (NOT NULL)" + rum "Coloana '%-.64s' e folosita cu UNIQUE sau INDEX dar fara sa fie definita ca NOT NULL" + rus "óÔÏÌÂÅà '%-.64s' ÉÓÐÏÌØÚÕÅÔÓÑ × UNIQUE ÉÌÉ × INDEX, ÎÏ ÎÅ ÏÐÒÅÄÅÌÅÎ ËÁË NOT NULL" + serbian "Kolona '%-.64s' je upotrebljena kao 'UNIQUE' ili 'INDEX' ali nije definisana kao 'NOT NULL'" + slo "Pole '%-.64s' je pou¾ité s UNIQUE alebo INDEX, ale nie je zadefinované ako NOT NULL" + spa "Columna '%-.64s' es usada con UNIQUE o INDEX pero no está definida como NOT NULL" + swe "Kolumn '%-.64s' är använd med UNIQUE eller INDEX men är inte definerad med NOT NULL" + ukr "óÔÏ×ÂÅÃØ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ Ú UNIQUE ÁÂÏ INDEX, ÁÌÅ ÎÅ ×ÉÚÎÁÞÅÎÉÊ ÑË NOT NULL" +ER_CANT_FIND_UDF + cze "Nemohu na-Bèíst funkci '%-.64s'" + dan "Kan ikke læse funktionen '%-.64s'" + nla "Kan functie '%-.64s' niet laden" + eng "Can't load function '%-.64s'" + jps "function '%-.64s' ‚ð ƒ[ƒh‚Å‚«‚Ü‚¹‚ñ", + est "Ei suuda avada funktsiooni '%-.64s'" + fre "Imposible de charger la fonction '%-.64s'" + ger "Kann Funktion '%-.64s' nicht laden" + greek "Äåí åßíáé äõíáôÞ ç äéáäéêáóßá load ãéá ôç óõíÜñôçóç '%-.64s'" + hun "A(z) '%-.64s' fuggveny nem toltheto be" + ita "Impossibile caricare la funzione '%-.64s'" + jpn "function '%-.64s' ¤ò ¥í¡¼¥É¤Ç¤¤Þ¤»¤ó" + kor "'%-.64s' ÇÔ¼ö¸¦ ·ÎµåÇÏÁö ¸øÇß½À´Ï´Ù." 
+ por "Não pode carregar a função '%-.64s'" + rum "Nu pot incarca functia '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÇÒÕÚÉÔØ ÆÕÎËÃÉÀ '%-.64s'" + serbian "Ne mogu da uèitam funkciju '%-.64s'" + slo "Nemô¾em naèíta» funkciu '%-.64s'" + spa "No puedo cargar función '%-.64s'" + swe "Kan inte ladda funktionen '%-.64s'" + ukr "îÅ ÍÏÖÕ ÚÁ×ÁÎÔÁÖÉÔÉ ÆÕÎËæÀ '%-.64s'" +ER_CANT_INITIALIZE_UDF + cze "Nemohu inicializovat funkci '%-.64s'; %-.80s" + dan "Kan ikke starte funktionen '%-.64s'; %-.80s" + nla "Kan functie '%-.64s' niet initialiseren; %-.80s" + eng "Can't initialize function '%-.64s'; %-.80s" + jps "function '%-.64s' ‚ð‰Šú‰»‚Å‚«‚Ü‚¹‚ñ; %-.80s", + est "Ei suuda algväärtustada funktsiooni '%-.64s'; %-.80s" + fre "Impossible d'initialiser la fonction '%-.64s'; %-.80s" + ger "Kann Funktion '%-.64s' nicht initialisieren: %-.80s" + greek "Äåí åßíáé äõíáôÞ ç Ýíáñîç ôçò óõíÜñôçóçò '%-.64s'; %-.80s" + hun "A(z) '%-.64s' fuggveny nem inicializalhato; %-.80s" + ita "Impossibile inizializzare la funzione '%-.64s'; %-.80s" + jpn "function '%-.64s' ¤ò½é´ü²½¤Ç¤¤Þ¤»¤ó; %-.80s" + kor "'%-.64s' ÇÔ¼ö¸¦ ÃʱâÈ ÇÏÁö ¸øÇß½À´Ï´Ù.; %-.80s" + por "Não pode inicializar a função '%-.64s' - '%-.80s'" + rum "Nu pot initializa functia '%-.64s'; %-.80s" + rus "îÅ×ÏÚÍÏÖÎÏ ÉÎÉÃÉÁÌÉÚÉÒÏ×ÁÔØ ÆÕÎËÃÉÀ '%-.64s'; %-.80s" + serbian "Ne mogu da inicijalizujem funkciju '%-.64s'; %-.80s" + slo "Nemô¾em inicializova» funkciu '%-.64s'; %-.80s" + spa "No puedo inicializar función '%-.64s'; %-.80s" + swe "Kan inte initialisera funktionen '%-.64s'; '%-.80s'" + ukr "îÅ ÍÏÖÕ ¦Î¦Ã¦Á̦ÚÕ×ÁÔÉ ÆÕÎËæÀ '%-.64s'; %-.80s" +ER_UDF_NO_PATHS + cze "Pro sd-Bílenou knihovnu nejsou povoleny cesty" + dan "Angivelse af sti ikke tilladt for delt bibliotek" + nla "Geen pad toegestaan voor shared library" + eng "No paths allowed for shared library" + jps "shared library ‚ւ̃pƒX‚ª’Ê‚Á‚Ä‚¢‚Ü‚¹‚ñ", + est "Teegi nimes ei tohi olla kataloogi" + fre "Chemin interdit pour les bibliothèques partagées" + ger "Keine Pfade gestattet für Shared Library" + greek 
"Äåí âñÝèçêáí paths ãéá ôçí shared library" + hun "Nincs ut a megosztott konyvtarakhoz (shared library)" + ita "Non sono ammessi path per le librerie condivisa" + jpn "shared library ¤Ø¤Î¥Ñ¥¹¤¬Ä̤äƤ¤¤Þ¤»¤ó" + kor "°øÀ¯ ¶óÀ̹ö·¯¸®¸¦ À§ÇÑ ÆÐ½º°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù." + por "Não há caminhos (paths) permitidos para biblioteca compartilhada" + rum "Nici un paths nu e permis pentru o librarie shared" + rus "îÅÄÏÐÕÓÔÉÍÏ ÕËÁÚÙ×ÁÔØ ÐÕÔÉ ÄÌÑ ÄÉÎÁÍÉÞÅÓËÉÈ ÂÉÂÌÉÏÔÅË" + serbian "Ne postoje dozvoljene putanje do share-ovane biblioteke" + slo "Neprípustné ¾iadne cesty k zdieµanej kni¾nici" + spa "No pasos permitidos para librarias conjugadas" + swe "Man får inte ange sökväg för dynamiska bibliotek" + ukr "îÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÐÕÔ¦ ÄÌÑ ÒÏÚĦÌÀ×ÁÎÉÈ Â¦Â̦ÏÔÅË" +ER_UDF_EXISTS + cze "Funkce '%-.64s' ji-B¾ existuje" + dan "Funktionen '%-.64s' findes allerede" + nla "Functie '%-.64s' bestaat reeds" + eng "Function '%-.64s' already exists" + jps "Function '%-.64s' ‚ÍŠù‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚·", + est "Funktsioon '%-.64s' juba eksisteerib" + fre "La fonction '%-.64s' existe déjà" + ger "Funktion '%-.64s' existiert schon" + greek "Ç óõíÜñôçóç '%-.64s' õðÜñ÷åé Þäç" + hun "A '%-.64s' fuggveny mar letezik" + ita "La funzione '%-.64s' esiste gia`" + jpn "Function '%-.64s' ¤Ï´û¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤¹" + kor "'%-.64s' ÇÔ¼ö´Â ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù." 
+ por "Função '%-.64s' já existe" + rum "Functia '%-.64s' exista deja" + rus "æÕÎËÃÉÑ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Funkcija '%-.64s' veæ postoji" + slo "Funkcia '%-.64s' u¾ existuje" + spa "Función '%-.64s' ya existe" + swe "Funktionen '%-.64s' finns redan" + ukr "æÕÎËÃ¦Ñ '%-.64s' ×ÖÅ ¦ÓÎÕ¤" +ER_CANT_OPEN_LIBRARY + cze "Nemohu otev-Bøít sdílenou knihovnu '%-.64s' (errno: %d %-.128s)" + dan "Kan ikke åbne delt bibliotek '%-.64s' (errno: %d %-.128s)" + nla "Kan shared library '%-.64s' niet openen (Errcode: %d %-.128s)" + eng "Can't open shared library '%-.64s' (errno: %d %-.128s)" + jps "shared library '%-.64s' ‚ðŠJ‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d %-.128s)", + est "Ei suuda avada jagatud teeki '%-.64s' (veakood: %d %-.128s)" + fre "Impossible d'ouvrir la bibliothèque partagée '%-.64s' (errno: %d %-.128s)" + ger "Kann Shared Library '%-.64s' nicht öffnen (Fehler: %d %-.128s)" + greek "Äåí åßíáé äõíáôÞ ç áíÜãíùóç ôçò shared library '%-.64s' (êùäéêüò ëÜèïõò: %d %-.128s)" + hun "A(z) '%-.64s' megosztott konyvtar nem hasznalhato (hibakod: %d %-.128s)" + ita "Impossibile aprire la libreria condivisa '%-.64s' (errno: %d %-.128s)" + jpn "shared library '%-.64s' ¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d %-.128s)" + kor "'%-.64s' °øÀ¯ ¶óÀ̹ö·¯¸®¸¦ ¿¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£: %d %-.128s)" + nor "Can't open shared library '%-.64s' (errno: %d %-.128s)" + norwegian-ny "Can't open shared library '%-.64s' (errno: %d %-.128s)" + pol "Can't open shared library '%-.64s' (errno: %d %-.128s)" + por "Não pode abrir biblioteca compartilhada '%-.64s' (erro no. 
'%d' - '%-.128s')" + rum "Nu pot deschide libraria shared '%-.64s' (Eroare: %d %-.128s)" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÄÉÎÁÍÉÞÅÓËÕÀ ÂÉÂÌÉÏÔÅËÕ '%-.64s' (ÏÛÉÂËÁ: %d %-.128s)" + serbian "Ne mogu da otvorim share-ovanu biblioteku '%-.64s' (errno: %d %-.128s)" + slo "Nemô¾em otvori» zdieµanú kni¾nicu '%-.64s' (chybový kód: %d %-.128s)" + spa "No puedo abrir libraria conjugada '%-.64s' (errno: %d %-.128s)" + swe "Kan inte öppna det dynamiska biblioteket '%-.64s' (Felkod: %d %-.128s)" + ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÒÏÚĦÌÀ×ÁÎÕ Â¦Â̦ÏÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d %-.128s)" +ER_CANT_FIND_DL_ENTRY + cze "Nemohu naj-Bít funkci '%-.128s' v knihovnì" + dan "Kan ikke finde funktionen '%-.128s' i bibliotek" + nla "Kan functie '%-.128s' niet in library vinden" + eng "Can't find function '%-.128s' in library" + jps "function '%-.128s' ‚ðƒ‰ƒCƒuƒ‰ƒŠ[’†‚ÉŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ", + est "Ei leia funktsiooni '%-.128s' antud teegis" + fre "Impossible de trouver la fonction '%-.128s' dans la bibliothèque" + ger "Kann Funktion '%-.128s' in der Library nicht finden" + greek "Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò óõíÜñôçóçò '%-.128s' óôçí âéâëéïèÞêç" + hun "A(z) '%-.128s' fuggveny nem talalhato a konyvtarban" + ita "Impossibile trovare la funzione '%-.128s' nella libreria" + jpn "function '%-.128s' ¤ò¥é¥¤¥Ö¥é¥ê¡¼Ãæ¤Ë¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó" + kor "¶óÀ̹ö·¯¸®¿¡¼ '%-.128s' ÇÔ¼ö¸¦ ãÀ» ¼ö ¾ø½À´Ï´Ù." 
+ por "Não pode encontrar a função '%-.128s' na biblioteca" + rum "Nu pot gasi functia '%-.128s' in libraria" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÆÕÎËÃÉÀ '%-.128s' × ÂÉÂÌÉÏÔÅËÅ" + serbian "Ne mogu da pronadjem funkciju '%-.128s' u biblioteci" + slo "Nemô¾em nájs» funkciu '%-.128s' v kni¾nici" + spa "No puedo encontrar función '%-.128s' en libraria" + swe "Hittar inte funktionen '%-.128s' in det dynamiska biblioteket" + ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÕÎËæÀ '%-.128s' Õ Â¦Â̦ÏÔÅæ" +ER_FUNCTION_NOT_DEFINED + cze "Funkce '%-.64s' nen-Bí definována" + dan "Funktionen '%-.64s' er ikke defineret" + nla "Functie '%-.64s' is niet gedefinieerd" + eng "Function '%-.64s' is not defined" + jps "Function '%-.64s' ‚Í’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "Funktsioon '%-.64s' ei ole defineeritud" + fre "La fonction '%-.64s' n'est pas définie" + ger "Funktion '%-.64s' ist nicht definiert" + greek "Ç óõíÜñôçóç '%-.64s' äåí Ý÷åé ïñéóèåß" + hun "A '%-.64s' fuggveny nem definialt" + ita "La funzione '%-.64s' non e` definita" + jpn "Function '%-.64s' ¤ÏÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.64s' ÇÔ¼ö°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù." + por "Função '%-.64s' não está definida" + rum "Functia '%-.64s' nu e definita" + rus "æÕÎËÃÉÑ '%-.64s' ÎÅ ÏÐÒÅÄÅÌÅÎÁ" + serbian "Funkcija '%-.64s' nije definisana" + slo "Funkcia '%-.64s' nie je definovaná" + spa "Función '%-.64s' no está definida" + swe "Funktionen '%-.64s' är inte definierad" + ukr "æÕÎËæÀ '%-.64s' ÎÅ ×ÉÚÎÁÞÅÎÏ" +ER_HOST_IS_BLOCKED + cze "Stroj '%-.64s' je zablokov-Bán kvùli mnoha chybám pøi pøipojování. Odblokujete pou¾itím 'mysqladmin flush-hosts'" + dan "Værten '%-.64s' er blokeret på grund af mange fejlforespørgsler. Lås op med 'mysqladmin flush-hosts'" + nla "Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'" + eng "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'" + jps "Host '%-.64s' ‚Í many connection error ‚Ì‚½‚ßA‹‘”Û‚³‚ê‚Ü‚µ‚½. 
'mysqladmin flush-hosts' ‚ʼn𜂵‚Ä‚‚¾‚³‚¢", + est "Masin '%-.64s' on blokeeritud hulgaliste ühendusvigade tõttu. Blokeeringu saab tühistada 'mysqladmin flush-hosts' käsuga" + fre "L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connection. Débloquer le par 'mysqladmin flush-hosts'" + ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'" + greek "Ï õðïëïãéóôÞò '%-.64s' Ý÷åé áðïêëåéóèåß ëüãù ðïëëáðëþí ëáèþí óýíäåóçò. ÐñïóðáèÞóôå íá äéïñþóåôå ìå 'mysqladmin flush-hosts'" + hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot" + ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'" + jpn "Host '%-.64s' ¤Ï many connection error ¤Î¤¿¤á¡¢µñÈݤµ¤ì¤Þ¤·¤¿. 'mysqladmin flush-hosts' ¤Ç²ò½ü¤·¤Æ¤¯¤À¤µ¤¤" + kor "³Ê¹« ¸¹Àº ¿¬°á¿À·ù·Î ÀÎÇÏ¿© È£½ºÆ® '%-.64s'´Â ºí¶ôµÇ¾ú½À´Ï´Ù. 'mysqladmin flush-hosts'¸¦ ÀÌ¿ëÇÏ¿© ºí¶ôÀ» ÇØÁ¦Çϼ¼¿ä" + por "'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'" + rum "Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'" + rus "èÏÓÔ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÉÚ-ÚÁ ÓÌÉÛËÏÍ ÂÏÌØÛÏÇÏ ËÏÌÉÞÅÓÔ×Á ÏÛÉÂÏË ÓÏÅÄÉÎÅÎÉÑ. òÁÚÂÌÏËÉÒÏ×ÁÔØ ÅÇÏ ÍÏÖÎÏ Ó ÐÏÍÏÝØÀ 'mysqladmin flush-hosts'" + serbian "Host '%-.64s' je blokiran zbog previše grešaka u konekciji. Možete ga odblokirati pomoæu komande 'mysqladmin flush-hosts'" + spa "Servidor '%-.64s' está bloqueado por muchos errores de conexión. Desbloquear con 'mysqladmin flush-hosts'" + swe "Denna dator, '%-.64s', är blockerad pga många felaktig paket. Gör 'mysqladmin flush-hosts' för att ta bort alla blockeringarna" + ukr "èÏÓÔ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ú ÐÒÉÞÉÎÉ ×ÅÌÉËϧ Ë¦ÌØËÏÓÔ¦ ÐÏÍÉÌÏË Ú'¤ÄÎÁÎÎÑ. 
äÌÑ ÒÏÚÂÌÏËÕ×ÁÎÎÑ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqladmin flush-hosts'" +ER_HOST_NOT_PRIVILEGED + cze "Stroj '%-.64s' nem-Bá povoleno se k tomuto MySQL serveru pøipojit" + dan "Værten '%-.64s' kan ikke tilkoble denne MySQL-server" + nla "Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MySQL server" + eng "Host '%-.64s' is not allowed to connect to this MySQL server" + jps "Host '%-.64s' ‚Í MySQL server ‚ÉÚ‘±‚ð‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "Masinal '%-.64s' puudub ligipääs sellele MySQL serverile" + fre "Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MySQL" + ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden" + greek "Ï õðïëïãéóôÞò '%-.64s' äåí Ý÷åé äéêáßùìá óýíäåóçò ìå ôïí MySQL server" + hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MySQL szerverhez" + ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MySQL" + jpn "Host '%-.64s' ¤Ï MySQL server ¤ËÀܳ¤òµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.64s' È£½ºÆ®´Â ÀÌ MySQL¼¹ö¿¡ Á¢¼ÓÇÒ Çã°¡¸¦ ¹ÞÁö ¸øÇß½À´Ï´Ù." + por "'Host' '%-.64s' não tem permissão para se conectar com este servidor MySQL" + rum "Host-ul '%-.64s' nu este permis a se conecta la aceste server MySQL" + rus "èÏÓÔÕ '%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÐÏÄËÌÀÞÁÔØÓÑ Ë ÜÔÏÍÕ ÓÅÒ×ÅÒÕ MySQL" + serbian "Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MySQL server" + spa "Servidor '%-.64s' no está permitido para conectar con este servidor MySQL" + swe "Denna dator, '%-.64s', har inte privileger att använda denna MySQL server" + ukr "èÏÓÔÕ '%-.64s' ÎÅ ÄÏ×ÏÌÅÎÏ Ú×'ÑÚÕ×ÁÔÉÓØ Ú ÃÉÍ ÓÅÒ×ÅÒÏÍ MySQL" +ER_PASSWORD_ANONYMOUS_USER 42000 + cze "Pou-B¾íváte MySQL jako anonymní u¾ivatel a anonymní u¾ivatelé nemají povoleno mìnit hesla" + dan "Du bruger MySQL som anonym bruger. 
Anonyme brugere må ikke ændre adgangskoder" + nla "U gebruikt MySQL als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen" + eng "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords" + jps "MySQL ‚ð anonymous users ‚ÅŽg—p‚µ‚Ä‚¢‚éó‘Ô‚Å‚ÍAƒpƒXƒ[ƒh‚Ì•ÏX‚͂ł«‚Ü‚¹‚ñ", + est "Te kasutate MySQL-i anonüümse kasutajana, kelledel pole parooli muutmise õigust" + fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe" + ger "Sie benutzen MySQL als anonymer Benutzer und dürfen daher keine Passwörter ändern" + greek "×ñçóéìïðïéåßôå ôçí MySQL óáí anonymous user êáé Ýôóé äåí ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí" + hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas" + ita "Impossibile cambiare la password usando MySQL come utente anonimo" + jpn "MySQL ¤ò anonymous users ¤Ç»ÈÍѤ·¤Æ¤¤¤ë¾õÂ֤Ǥϡ¢¥Ñ¥¹¥ï¡¼¥É¤ÎÊѹ¹¤Ï¤Ç¤¤Þ¤»¤ó" + kor "´ç½ÅÀº MySQL¼¹ö¿¡ À͸íÀÇ »ç¿ëÀÚ·Î Á¢¼ÓÀ» Çϼ̽À´Ï´Ù.À͸íÀÇ »ç¿ëÀÚ´Â ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ¾ø½À´Ï´Ù." 
+ por "Você está usando o MySQL como usuário anônimo e usuários anônimos não têm permissão para mudar senhas" + rum "Dumneavoastra folositi MySQL ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele" + rus "÷Ù ÉÓÐÏÌØÚÕÅÔÅ MySQL ÏÔ ÉÍÅÎÉ ÁÎÏÎÉÍÎÏÇÏ ÐÏÌØÚÏ×ÁÔÅÌÑ, Á ÁÎÏÎÉÍÎÙÍ ÐÏÌØÚÏ×ÁÔÅÌÑÍ ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÍÅÎÑÔØ ÐÁÒÏÌÉ" + serbian "Vi koristite MySQL kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke" + spa "Tu estás usando MySQL como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves" + swe "Du använder MySQL som en anonym användare och som sådan får du inte ändra ditt lösenord" + ukr "÷É ×ÉËÏÒÉÓÔÏ×Õ¤ÔÅ MySQL ÑË ÁÎÏΦÍÎÉÊ ËÏÒÉÓÔÕ×ÁÞ, ÔÏÍÕ ×ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ÚͦÎÀ×ÁÔÉ ÐÁÒÏ̦" +ER_PASSWORD_NOT_ALLOWED 42000 + cze "Na zm-Bìnu hesel ostatním musíte mít právo provést update tabulek v databázi mysql" + dan "Du skal have tilladelse til at opdatere tabeller i MySQL databasen for at ændre andres adgangskoder" + nla "U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen" + eng "You must have privileges to update tables in the mysql database to be able to change passwords for others" + jps "‘¼‚̃†[ƒU[‚̃pƒXƒ[ƒh‚ð•ÏX‚·‚邽‚߂ɂÍ, mysql ƒf[ƒ^ƒx[ƒX‚ɑ΂µ‚Ä update ‚Ì‹–‰Â‚ª‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.", + est "Teiste paroolide muutmiseks on nõutav tabelite muutmisõigus 'mysql' andmebaasis" + fre "Vous devez avoir le privilège update sur les tables de la base de donnée mysql pour pouvoir changer les mots de passe des autres" + ger "Sie benötigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwörter anderer Benutzer ändern zu können" + greek "ÐñÝðåé íá Ý÷åôå äéêáßùìá äéüñèùóçò ðéíÜêùí (update) óôç âÜóç äåäïìÝíùí mysql ãéá íá ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí" + hun "Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz" + ita "E` necessario il privilegio di update sulle tabelle 
del database mysql per cambiare le password per gli altri utenti" + jpn "¾¤Î¥æ¡¼¥¶¡¼¤Î¥Ñ¥¹¥ï¡¼¥É¤òÊѹ¹¤¹¤ë¤¿¤á¤Ë¤Ï, mysql ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ËÂФ·¤Æ update ¤Îµö²Ä¤¬¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó." + kor "´ç½ÅÀº ´Ù¸¥»ç¿ëÀÚµéÀÇ ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ÀÖµµ·Ï µ¥ÀÌŸº£À̽º º¯°æ±ÇÇÑÀ» °¡Á®¾ß ÇÕ´Ï´Ù." + por "Você deve ter privilégios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros" + rum "Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora" + rus "äÌÑ ÔÏÇÏ ÞÔÏÂÙ ÉÚÍÅÎÑÔØ ÐÁÒÏÌÉ ÄÒÕÇÉÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ, Õ ×ÁÓ ÄÏÌÖÎÙ ÂÙÔØ ÐÒÉ×ÉÌÅÇÉÉ ÎÁ ÉÚÍÅÎÅÎÉÅ ÔÁÂÌÉÃ × ÂÁÚÅ ÄÁÎÎÙÈ mysql" + serbian "Morate imati privilegije da možete da update-ujete odreðene tabele ako želite da menjate lozinke za druge korisnike" + spa "Tu debes de tener permiso para actualizar tablas en la base de datos mysql para cambiar las claves para otros" + swe "För att ändra lösenord för andra måste du ha rättigheter att uppdatera mysql-databasen" + ukr "÷É ÐÏ×ÉΦ ÍÁÔÉ ÐÒÁ×Ï ÎÁ ÏÎÏ×ÌÅÎÎÑ ÔÁÂÌÉÃØ Õ ÂÁÚ¦ ÄÁÎÎÉÈ mysql, ÁÂÉ ÍÁÔÉ ÍÏÖÌÉצÓÔØ ÚͦÎÀ×ÁÔÉ ÐÁÒÏÌØ ¦ÎÛÉÍ" +ER_PASSWORD_NO_MATCH 42000 + cze "V tabulce user nen-Bí ¾ádný odpovídající øádek" + dan "Kan ikke finde nogen tilsvarende poster i bruger tabellen" + nla "Kan geen enkele passende rij vinden in de gebruikers tabel" + eng "Can't find any matching row in the user table" + est "Ei leia vastavat kirjet kasutajate tabelis" + fre "Impossible de trouver un enregistrement correspondant dans la table user" + ger "Kann keinen passenden Datensatz in Tabelle 'user' finden" + greek "Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò áíôßóôïé÷çò åããñáöÞò óôïí ðßíáêá ôùí ÷ñçóôþí" + hun "Nincs megegyezo sor a user tablaban" + ita "Impossibile trovare la riga corrispondente nella tabella user" + kor "»ç¿ëÀÚ Å×ÀÌºí¿¡¼ ÀÏÄ¡ÇÏ´Â °ÍÀ» ãÀ» ¼ö ¾øÀ¾´Ï´Ù." 
+ por "Não pode encontrar nenhuma linha que combine na tabela usuário (user table)" + rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÄÈÏÄÑÝÕÀ ÚÁÐÉÓØ × ÔÁÂÌÉÃÅ ÐÏÌØÚÏ×ÁÔÅÌÅÊ" + serbian "Ne mogu da pronaðem odgovarajuæi slog u 'user' tabeli" + spa "No puedo encontrar una línea correponsdiente en la tabla user" + swe "Hittade inte användaren i 'user'-tabellen" + ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ×¦ÄÐÏצÄÎÉÈ ÚÁÐÉÓ¦× Õ ÔÁÂÌÉæ ËÏÒÉÓÔÕ×ÁÞÁ" +ER_UPDATE_INFO + cze "Nalezen-Bých øádkù: %ld Zmìnìno: %ld Varování: %ld" + dan "Poster fundet: %ld Ændret: %ld Advarsler: %ld" + nla "Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld" + eng "Rows matched: %ld Changed: %ld Warnings: %ld" + jps "ˆê’v”(Rows matched): %ld •ÏX: %ld Warnings: %ld", + est "Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld" + fre "Enregistrements correspondants: %ld Modifiés: %ld Warnings: %ld" + ger "Datensätze gefunden: %ld Geändert: %ld Warnungen: %ld" + hun "Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld" + ita "Rows riconosciute: %ld Cambiate: %ld Warnings: %ld" + jpn "°ìÃ׿ô(Rows matched): %ld Êѹ¹: %ld Warnings: %ld" + kor "ÀÏÄ¡ÇÏ´Â Rows : %ld°³ º¯°æµÊ: %ld°³ °æ°í: %ld°³" + por "Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld" + rum "Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld" + rus "óÏ×ÐÁÌÏ ÚÁÐÉÓÅÊ: %ld éÚÍÅÎÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld" + serbian "Odgovarajuæih slogova: %ld Promenjeno: %ld Upozorenja: %ld" + spa "Líneas correspondientes: %ld Cambiadas: %ld Avisos: %ld" + swe "Rader: %ld Uppdaterade: %ld Varningar: %ld" + ukr "úÁÐÉÓ¦× ×¦ÄÐÏצÄÁ¤: %ld úͦÎÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld" +ER_CANT_CREATE_THREAD + cze "Nemohu vytvo-Bøit nový thread (errno %d). Pokud je je¹tì nìjaká volná pamì», podívejte se do manuálu na èást o chybách specifických pro jednotlivé operaèní systémy" + dan "Kan ikke danne en ny tråd (fejl nr. %d). 
Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl" + nla "Kan geen nieuwe thread aanmaken (Errcode: %d). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout" + eng "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug" + jps "V‹K‚ɃXƒŒƒbƒh‚ªì‚ê‚Ü‚¹‚ñ‚Å‚µ‚½ (errno %d). ‚à‚µÅ‘åŽg—p‹–‰Âƒƒ‚ƒŠ[”‚ð‰z‚¦‚Ä‚¢‚È‚¢‚̂ɃGƒ‰[‚ª”¶‚µ‚Ä‚¢‚é‚È‚ç, ƒ}ƒjƒ…ƒAƒ‹‚Ì’†‚©‚ç 'possible OS-dependent bug' ‚Æ‚¢‚¤•¶Žš‚ð’T‚µ‚Ä‚‚݂Ă¾‚³‚¢.", + est "Ei suuda luua uut lõime (veakood %d). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga" + fre "Impossible de créer une nouvelle tâche (errno %d). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS" + ger "Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen" + hun "Uj thread letrehozasa nem lehetseges (Hibakod: %d). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet" + ita "Impossibile creare un nuovo thread (errno %d). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO" + jpn "¿·µ¬¤Ë¥¹¥ì¥Ã¥É¤¬ºî¤ì¤Þ¤»¤ó¤Ç¤·¤¿ (errno %d). ¤â¤·ºÇÂç»ÈÍѵö²Ä¥á¥â¥ê¡¼¿ô¤ò±Û¤¨¤Æ¤¤¤Ê¤¤¤Î¤Ë¥¨¥é¡¼¤¬È¯À¸¤·¤Æ¤¤¤ë¤Ê¤é, ¥Þ¥Ë¥å¥¢¥ë¤ÎÃæ¤«¤é 'possible OS-dependent bug' ¤È¤¤¤¦Ê¸»ú¤òõ¤·¤Æ¤¯¤ß¤Æ¤À¤µ¤¤." + kor "»õ·Î¿î ¾²·¹µå¸¦ ¸¸µé ¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£ %d). ¸¸¾à ¿©À¯¸Þ¸ð¸®°¡ ÀÖ´Ù¸é OS-dependent¹ö±× ÀÇ ¸Þ´º¾ó ºÎºÐÀ» ã¾Æº¸½Ã¿À." 
+ nor "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" + norwegian-ny "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" + pol "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" + por "Não pode criar uma nova 'thread' (erro no. %d). Se você não estiver sem memória disponível, você pode consultar o manual sobre um possível 'bug' dependente do sistema operacional" + rum "Nu pot crea un thread nou (Eroare %d). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÎÏ×ÙÊ ÐÏÔÏË (ÏÛÉÂËÁ %d). åÓÌÉ ÜÔÏ ÎÅ ÓÉÔÕÁÃÉÑ, Ó×ÑÚÁÎÎÁÑ Ó ÎÅÈ×ÁÔËÏÊ ÐÁÍÑÔÉ, ÔÏ ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÕÞÉÔØ ÄÏËÕÍÅÎÔÁÃÉÀ ÎÁ ÐÒÅÄÍÅÔ ÏÐÉÓÁÎÉÑ ×ÏÚÍÏÖÎÏÊ ÏÛÉÂËÉ ÒÁÂÏÔÙ × ËÏÎËÒÅÔÎÏÊ ïó" + serbian "Ne mogu da kreiram novi thread (errno %d). Ako imate još slobodne memorije, trebali biste da pogledate u priruèniku da li je ovo specifièna greška vašeg operativnog sistema" + spa "No puedo crear un nuevo thread (errno %d). Si tu está con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO" + swe "Kan inte skapa en ny tråd (errno %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÎÏ×Õ Ç¦ÌËÕ (ÐÏÍÉÌËÁ %d). 
ñËÝÏ ×É ÎÅ ×ÉËÏÒÉÓÔÁÌÉ ÕÓÀ ÐÁÍ'ÑÔØ, ÔÏ ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÄÏ ×ÁÛϧ ïó - ÍÏÖÌÉ×Ï ÃÅ ÐÏÍÉÌËÁ ïó" +ER_WRONG_VALUE_COUNT_ON_ROW 21S01 + cze "Po-Bèet sloupcù neodpovídá poètu hodnot na øádku %ld" + dan "Kolonne antallet stemmer ikke overens med antallet af værdier i post %ld" + nla "Kolom aantal komt niet overeen met waarde aantal in rij %ld" + eng "Column count doesn't match value count at row %ld" + est "Tulpade hulk erineb väärtuste hulgast real %ld" + ger "Anzahl der Felder stimmt nicht mit der Anzahl der Werte in Zeile %ld überein" + hun "Az oszlopban talalhato ertek nem egyezik meg a %ld sorban szamitott ertekkel" + ita "Il numero delle colonne non corrisponde al conteggio alla riga %ld" + kor "Row %ld¿¡¼ Ä®·³ Ä«¿îÆ®¿Í value Ä«¿îÅÍ¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù." + por "Contagem de colunas não confere com a contagem de valores na linha %ld" + rum "Numarul de coloane nu corespunde cu numarul de valori la linia %ld" + rus "ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ × ÚÁÐÉÓÉ %ld" + serbian "Broj kolona ne odgovara broju vrednosti u slogu %ld" + spa "El número de columnas no corresponde al número en la línea %ld" + swe "Antalet kolumner motsvarar inte antalet värden på rad: %ld" + ukr "ë¦ÌØË¦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØË¦ÓÔÀ ÚÎÁÞÅÎØ Õ ÓÔÒÏæ %ld" +ER_CANT_REOPEN_TABLE + cze "Nemohu znovuotev-Bøít tabulku: '%-.64s" + dan "Kan ikke genåbne tabel '%-.64s" + nla "Kan tabel niet opnieuw openen: '%-.64s" + eng "Can't reopen table: '%-.64s'" + est "Ei suuda taasavada tabelit '%-.64s'" + fre "Impossible de réouvrir la table: '%-.64s" + ger "Kann Tabelle'%-.64s' nicht erneut öffnen" + hun "Nem lehet ujra-megnyitni a tablat: '%-.64s" + ita "Impossibile riaprire la tabella: '%-.64s'" + kor "Å×À̺íÀ» ´Ù½Ã ¿¼ö ¾ø±º¿ä: '%-.64s" + nor "Can't reopen table: '%-.64s" + norwegian-ny "Can't reopen table: '%-.64s" + pol "Can't reopen table: '%-.64s" + por "Não pode reabrir a tabela '%-.64s" + rum "Nu pot redeschide tabela: '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÎÏ×Ï ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ 
'%-.64s'" + serbian "Ne mogu da ponovo otvorim tabelu '%-.64s'" + slo "Can't reopen table: '%-.64s" + spa "No puedo reabrir tabla: '%-.64s" + swe "Kunde inte stänga och öppna tabell '%-.64s" + ukr "îÅ ÍÏÖÕ ÐÅÒÅצÄËÒÉÔÉ ÔÁÂÌÉÃÀ: '%-.64s'" +ER_INVALID_USE_OF_NULL 22004 + cze "Neplatn-Bé u¾ití hodnoty NULL" + dan "Forkert brug af nulværdi (NULL)" + nla "Foutief gebruik van de NULL waarde" + eng "Invalid use of NULL value" + jps "NULL ’l‚ÌŽg—p•û–@‚ª•s“K؂ł·", + est "NULL väärtuse väärkasutus" + fre "Utilisation incorrecte de la valeur NULL" + ger "Unerlaubte Verwendung eines NULL-Werts" + hun "A NULL ervenytelen hasznalata" + ita "Uso scorretto del valore NULL" + jpn "NULL ÃͤλÈÍÑÊýË¡¤¬ÉÔŬÀڤǤ¹" + kor "NULL °ªÀ» À߸ø »ç¿ëÇϼ̱º¿ä..." + por "Uso inválido do valor NULL" + rum "Folosirea unei value NULL e invalida" + rus "îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ×ÅÌÉÞÉÎÙ NULL" + serbian "Pogrešna upotreba vrednosti NULL" + spa "Invalido uso de valor NULL" + swe "Felaktig använding av NULL" + ukr "èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÚÎÁÞÅÎÎÑ NULL" +ER_REGEXP_ERROR 42000 + cze "Regul-Bární výraz vrátil chybu '%-.64s'" + dan "Fik fejl '%-.64s' fra regexp" + nla "Fout '%-.64s' ontvangen van regexp" + eng "Got error '%-.64s' from regexp" + est "regexp tagastas vea '%-.64s'" + fre "Erreur '%-.64s' provenant de regexp" + ger "regexp lieferte Fehler '%-.64s'" + hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)" + ita "Errore '%-.64s' da regexp" + kor "regexp¿¡¼ '%-.64s'°¡ ³µ½À´Ï´Ù." + por "Obteve erro '%-.64s' em regexp" + rum "Eroarea '%-.64s' obtinuta din expresia regulara (regexp)" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ '%-.64s' ÏÔ ÒÅÇÕÌÑÒÎÏÇÏ ×ÙÒÁÖÅÎÉÑ" + serbian "Funkcija regexp je vratila grešku '%-.64s'" + spa "Obtenido error '%-.64s' de regexp" + swe "Fick fel '%-.64s' från REGEXP" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ '%-.64s' ×¦Ä ÒÅÇÕÌÑÒÎÏÇÏ ×ÉÒÁÚÕ" +ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000 + cze "Pokud nen-Bí ¾ádná GROUP BY klauzule, není dovoleno souèasné pou¾ití GROUP polo¾ek (MIN(),MAX(),COUNT()...) 
s ne GROUP polo¾kami" + dan "Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prædikat" + nla "Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is" + eng "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause" + est "GROUP tulpade (MIN(),MAX(),COUNT()...) kooskasutamine tavaliste tulpadega ilma GROUP BY klauslita ei ole lubatud" + fre "Mélanger les colonnes GROUP (MIN(),MAX(),COUNT()...) avec des colonnes normales est interdit s'il n'y a pas de clause GROUP BY" + ger "Das Vermischen von GROUP-Feldern (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Feldern ist nicht zulässig, wenn keine GROUP-BY-Klausel vorhanden ist" + hun "A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul" + ita "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY" + kor "Mixing of GROUP Ä®·³s (MIN(),MAX(),COUNT(),...) with no GROUP Ä®·³s is illegal if there is no GROUP BY clause" + por "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)" + rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY" + rus "ïÄÎÏ×ÒÅÍÅÎÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÈ (GROUP) ÓÔÏÌÂÃÏ× (MIN(),MAX(),COUNT(),...) Ó ÎÅÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÍÉ ÓÔÏÌÂÃÁÍÉ Ñ×ÌÑÅÔÓÑ ÎÅËÏÒÒÅËÔÎÙÍ, ÅÓÌÉ × ×ÙÒÁÖÅÎÉÉ ÅÓÔØ GROUP BY" + serbian "Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrešna ako ne postoji 'GROUP BY' iskaz" + spa "Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con no GROUP columnas es ilegal si no hat la clausula GROUP BY" + swe "Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) 
och fält i en fråga om man inte har en GROUP BY-del" + ukr "úͦÛÕ×ÁÎÎÑ GROUP ÓÔÏ×ÂÃ¦× (MIN(),MAX(),COUNT()...) Ú ÎÅ GROUP ÓÔÏ×ÂÃÑÍÉ ¤ ÚÁÂÏÒÏÎÅÎÉÍ, ÑËÝÏ ÎÅ ÍÁ¤ GROUP BY" +ER_NONEXISTING_GRANT 42000 + cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.64s' na stroji '%-.64s'" + dan "Denne tilladelse findes ikke for brugeren '%-.64s' på vært '%-.64s'" + nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.64s' op host '%-.64s'" + eng "There is no such grant defined for user '%-.64s' on host '%-.64s'" + jps "ƒ†[ƒU[ '%-.64s' (ƒzƒXƒg '%-.64s' ‚̃†[ƒU[) ‚Í‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "Sellist õigust ei ole defineeritud kasutajale '%-.64s' masinast '%-.64s'" + fre "Un tel droit n'est pas défini pour l'utilisateur '%-.64s' sur l'hôte '%-.64s'" + ger "Für Benutzer '%-.64s' auf Host '%-.64s' gibt es keine solche Berechtigung" + hun "A '%-.64s' felhasznalonak nincs ilyen joga a '%-.64s' host-on" + ita "GRANT non definita per l'utente '%-.64s' dalla macchina '%-.64s'" + jpn "¥æ¡¼¥¶¡¼ '%-.64s' (¥Û¥¹¥È '%-.64s' ¤Î¥æ¡¼¥¶¡¼) ¤Ïµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "»ç¿ëÀÚ '%-.64s' (È£½ºÆ® '%-.64s')¸¦ À§ÇÏ¿© Á¤ÀÇµÈ ±×·± ½ÂÀÎÀº ¾ø½À´Ï´Ù." 
+ por "Não existe tal permissão (grant) definida para o usuário '%-.64s' no 'host' '%-.64s'" + rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.64s' de pe host-ul '%-.64s'" + rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.64s' ÎÁ ÈÏÓÔÅ '%-.64s'" + serbian "Ne postoji odobrenje za pristup korisniku '%-.64s' na host-u '%-.64s'" + spa "No existe permiso definido para usuario '%-.64s' en el servidor '%-.64s'" + swe "Det finns inget privilegium definierat för användare '%-.64s' på '%-.64s'" + ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.64s' Ú ÈÏÓÔÕ '%-.64s'" +ER_TABLEACCESS_DENIED_ERROR 42000 + cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro tabulku '%-.64s'" + dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for tabellen '%-.64s'" + nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor tabel '%-.64s'" + eng "%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'" + jps "ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s' ,ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tabelis '%-.64s'" + fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la table '%-.64s'" + ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' auf Tabelle '%-.64s'" + hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban" + ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla tabella '%-.64s'" + jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ,¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. 
: '%-.32s'@'%-.64s' for Å×À̺í '%-.64s'" + por "Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na tabela '%-.64s'" + rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru tabela '%-.64s'" + rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'" + serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za tabelu '%-.64s'" + spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para tabla '%-.64s'" + swe "%-.16s ej tillåtet för '%-.32s'@'%-.64s' för tabell '%-.64s'" + ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' Õ ÔÁÂÌÉæ '%-.64s'" +ER_COLUMNACCESS_DENIED_ERROR 42000 + cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'" + dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'" + nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor kolom '%-.64s' in tabel '%-.64s'" + eng "%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'" + jps "ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s'\n ƒJƒ‰ƒ€ '%-.64s' ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", + est "%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tulbale '%-.64s' tabelis '%-.64s'" + fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'" + ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und Feld '%-.64s' in Tabelle '%-.64s'" + hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban" + ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'" + jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s'\n ¥«¥é¥à '%-.64s' ¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. 
: '%-.32s'@'%-.64s' for Ä®·³ '%-.64s' in Å×À̺í '%-.64s'" + por "Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na coluna '%-.64s', na tabela '%-.64s'" + rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'" + rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s' × ÔÁÂÌÉÃÅ '%-.64s'" + serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za kolonu '%-.64s' iz tabele '%-.64s'" + spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para columna '%-.64s' en la tabla '%-.64s'" + swe "%-.16s ej tillåtet för '%-.32s'@'%-.64s' för kolumn '%-.64s' i tabell '%-.64s'" + ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏ×ÂÃÑ '%-.64s' Õ ÔÁÂÌÉæ '%-.64s'" +ER_ILLEGAL_GRANT_FOR_TABLE 42000 + cze "Neplatn-Bý pøíkaz GRANT/REVOKE. Prosím, pøeètìte si v manuálu, jaká privilegia je mo¾né pou¾ít." + dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres." + nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden." + eng "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used" + est "Vigane GRANT/REVOKE käsk. Tutvu kasutajajuhendiga" + fre "Commande GRANT/REVOKE incorrecte. Consultez le manuel." + ger "Unzulässiger GRANT- oder REVOKE-Befehl. Verfügbare Berechtigungen sind im Handbuch aufgeführt" + greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used." + hun "Ervenytelen GRANT/REVOKE parancs. Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek" + ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati." + jpn "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + kor "À߸øµÈ GRANT/REVOKE ¸í·É. ¾î¶² ±Ç¸®¿Í ½ÂÀÎÀÌ »ç¿ëµÇ¾î Áú ¼ö ÀÖ´ÂÁö ¸Þ´º¾óÀ» º¸½Ã¿À." 
+ nor "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + pol "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + por "Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados." + rum "Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite." + rus "îÅ×ÅÒÎÁÑ ËÏÍÁÎÄÁ GRANT ÉÌÉ REVOKE. ïÂÒÁÔÉÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ, ÞÔÏÂÙ ×ÙÑÓÎÉÔØ, ËÁËÉÅ ÐÒÉ×ÉÌÅÇÉÉ ÍÏÖÎÏ ÉÓÐÏÌØÚÏ×ÁÔØ" + serbian "Pogrešna 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priruèniku koje vrednosti mogu biti upotrebljene." + slo "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + spa "Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados." + swe "Felaktigt GRANT-privilegium använt" + ukr "èÉÂÎÁ GRANT/REVOKE ËÏÍÁÎÄÁ; ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÓÔÏÓÏ×ÎÏ ÔÏÇÏ, Ñ˦ ÐÒÁ×Á ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ" +ER_GRANT_WRONG_HOST_OR_USER 42000 + cze "Argument p-Bøíkazu GRANT u¾ivatel nebo stroj je pøíli¹ dlouhý" + dan "Værts- eller brugernavn for langt til GRANT" + nla "De host of gebruiker parameter voor GRANT is te lang" + eng "The host or user argument to GRANT is too long" + est "Masina või kasutaja nimi GRANT lauses on liiga pikk" + fre "L'hôte ou l'utilisateur donné en argument à GRANT est trop long" + ger "Das Host- oder User-Argument für GRANT ist zu lang" + hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban" + ita "L'argomento host o utente per la GRANT e` troppo lungo" + kor "½ÂÀÎ(GRANT)À» À§ÇÏ¿© »ç¿ëÇÑ »ç¿ëÀÚ³ª È£½ºÆ®ÀÇ °ªµéÀÌ ³Ê¹« ±é´Ï´Ù." 
+ por "Argumento de 'host' ou de usuário para o GRANT é longo demais" + rum "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung" + rus "óÌÉÛËÏÍ ÄÌÉÎÎÏÅ ÉÍÑ ÐÏÌØÚÏ×ÁÔÅÌÑ/ÈÏÓÔÁ ÄÌÑ GRANT" + serbian "Argument 'host' ili 'korisnik' prosleðen komandi 'GRANT' je predugaèak" + spa "El argumento para servidor o usuario para GRANT es demasiado grande" + swe "Felaktigt maskinnamn eller användarnamn använt med GRANT" + ukr "áÒÇÕÍÅÎÔ host ÁÂÏ user ÄÌÑ GRANT ÚÁÄÏ×ÇÉÊ" +ER_NO_SUCH_TABLE 42S02 + cze "Tabulka '%-.64s.%-.64s' neexistuje" + dan "Tabellen '%-.64s.%-.64s' eksisterer ikke" + nla "Tabel '%-.64s.%-.64s' bestaat niet" + eng "Table '%-.64s.%-.64s' doesn't exist" + est "Tabelit '%-.64s.%-.64s' ei eksisteeri" + fre "La table '%-.64s.%-.64s' n'existe pas" + ger "Tabelle '%-.64s.%-.64s' existiert nicht" + hun "A '%-.64s.%-.64s' tabla nem letezik" + ita "La tabella '%-.64s.%-.64s' non esiste" + jpn "Table '%-.64s.%-.64s' doesn't exist" + kor "Å×À̺í '%-.64s.%-.64s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." 
+ nor "Table '%-.64s.%-.64s' doesn't exist" + norwegian-ny "Table '%-.64s.%-.64s' doesn't exist" + pol "Table '%-.64s.%-.64s' doesn't exist" + por "Tabela '%-.64s.%-.64s' não existe" + rum "Tabela '%-.64s.%-.64s' nu exista" + rus "ôÁÂÌÉÃÁ '%-.64s.%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Tabela '%-.64s.%-.64s' ne postoji" + slo "Table '%-.64s.%-.64s' doesn't exist" + spa "Tabla '%-.64s.%-.64s' no existe" + swe "Det finns ingen tabell som heter '%-.64s.%-.64s'" + ukr "ôÁÂÌÉÃÑ '%-.64s.%-.64s' ÎÅ ¦ÓÎÕ¤" +ER_NONEXISTING_TABLE_GRANT 42000 + cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s' pro tabulku '%-.64s'" + dan "Denne tilladelse eksisterer ikke for brugeren '%-.32s' på vært '%-.64s' for tabellen '%-.64s'" + nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s' op tabel '%-.64s'" + eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'" + est "Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s' tabelile '%-.64s'" + fre "Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s' sur la table '%-.64s'" + ger "Eine solche Berechtigung ist für User '%-.32s' auf Host '%-.64s' an Tabelle '%-.64s' nicht definiert" + hun "A '%-.32s' felhasznalo szamara a '%-.64s' host '%-.64s' tablajaban ez a parancs nem engedelyezett" + ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s' sulla tabella '%-.64s'" + kor "»ç¿ëÀÚ '%-.32s'(È£½ºÆ® '%-.64s')´Â Å×À̺í '%-.64s'¸¦ »ç¿ëÇϱâ À§ÇÏ¿© Á¤ÀÇµÈ ½ÂÀÎÀº ¾ø½À´Ï´Ù. 
" + por "Não existe tal permissão (grant) definido para o usuário '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'" + rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.32s' de pe host-ul '%-.64s' pentru tabela '%-.64s'" + rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ËÏÍÐØÀÔÅÒÅ '%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'" + serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s' tabeli '%-.64s'" + spa "No existe tal permiso definido para usuario '%-.32s' en el servidor '%-.64s' en la tabla '%-.64s'" + swe "Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s' för tabell '%-.64s'" + ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s' ÄÌÑ ÔÁÂÌÉæ '%-.64s'" +ER_NOT_ALLOWED_COMMAND 42000 + cze "Pou-B¾itý pøíkaz není v této verzi MySQL povolen" + dan "Den brugte kommando er ikke tilladt med denne udgave af MySQL" + nla "Het used commando is niet toegestaan in deze MySQL versie" + eng "The used command is not allowed with this MySQL version" + est "Antud käsk ei ole lubatud käesolevas MySQL versioonis" + fre "Cette commande n'existe pas dans cette version de MySQL" + ger "Der verwendete Befehl ist in dieser MySQL-Version nicht zulässig" + hun "A hasznalt parancs nem engedelyezett ebben a MySQL verzioban" + ita "Il comando utilizzato non e` supportato in questa versione di MySQL" + kor "»ç¿ëµÈ ¸í·ÉÀº ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼´Â ÀÌ¿ëµÇÁö ¾Ê½À´Ï´Ù." 
+ por "Comando usado não é permitido para esta versão do MySQL" + rum "Comanda folosita nu este permisa pentru aceasta versiune de MySQL" + rus "üÔÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÐÕÓËÁÅÔÓÑ × ÄÁÎÎÏÊ ×ÅÒÓÉÉ MySQL" + serbian "Upotrebljena komanda nije dozvoljena sa ovom verzijom MySQL servera" + spa "El comando usado no es permitido con esta versión de MySQL" + swe "Du kan inte använda detta kommando med denna MySQL version" + ukr "÷ÉËÏÒÉÓÔÏ×Õ×ÁÎÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL" +ER_SYNTAX_ERROR 42000 + cze "Va-B¹e syntaxe je nìjaká divná" + dan "Der er en fejl i SQL syntaksen" + nla "Er is iets fout in de gebruikte syntax" + eng "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use" + est "Viga SQL süntaksis" + fre "Erreur de syntaxe" + ger "Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen" + greek "You have an error in your SQL syntax" + hun "Szintaktikai hiba" + ita "Errore di sintassi nella query SQL" + jpn "Something is wrong in your syntax" + kor "SQL ±¸¹®¿¡ ¿À·ù°¡ ÀÖ½À´Ï´Ù." + nor "Something is wrong in your syntax" + norwegian-ny "Something is wrong in your syntax" + pol "Something is wrong in your syntax" + por "Você tem um erro de sintaxe no seu SQL" + rum "Aveti o eroare in sintaxa RSQL" + rus "õ ×ÁÓ ÏÛÉÂËÁ × ÚÁÐÒÏÓÅ. 
éÚÕÞÉÔÅ ÄÏËÕÍÅÎÔÁÃÉÀ ÐÏ ÉÓÐÏÌØÚÕÅÍÏÊ ×ÅÒÓÉÉ MySQL ÎÁ ÐÒÅÄÍÅÔ ËÏÒÒÅËÔÎÏÇÏ ÓÉÎÔÁËÓÉÓÁ" + serbian "Imate grešku u vašoj SQL sintaksi" + slo "Something is wrong in your syntax" + spa "Algo está equivocado en su sintax" + swe "Du har något fel i din syntax" + ukr "õ ×ÁÓ ÐÏÍÉÌËÁ Õ ÓÉÎÔÁËÓÉÓ¦ SQL" +ER_DELAYED_CANT_CHANGE_LOCK + cze "Zpo-B¾dìný insert threadu nebyl schopen získat po¾adovaný zámek pro tabulku %-.64s" + dan "Forsinket indsættelse tråden (delayed insert thread) kunne ikke opnå lås på tabellen %-.64s" + nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.64s" + eng "Delayed insert thread couldn't get requested lock for table %-.64s" + est "INSERT DELAYED lõim ei suutnud saada soovitud lukku tabelile %-.64s" + fre "La tâche 'delayed insert' n'a pas pu obtenir le verrou démandé sur la table %-.64s" + ger "Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.64s' nicht erhalten" + hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.64s tablahoz" + ita "Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.64s" + kor "Áö¿¬µÈ insert ¾²·¹µå°¡ Å×À̺í %-.64sÀÇ ¿ä±¸µÈ ¶ôÅ·À» ó¸®ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù." 
+ por "'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.64s'" + rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.64s" + rus "ðÏÔÏË, ÏÂÓÌÕÖÉ×ÁÀÝÉÊ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert), ÎÅ ÓÍÏÇ ÐÏÌÕÞÉÔØ ÚÁÐÒÁÛÉ×ÁÅÍÕÀ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÔÁÂÌÉÃÕ %-.64s" + serbian "Prolongirani 'INSERT' thread nije mogao da dobije traženo zakljuèavanje tabele '%-.64s'" + spa "Thread de inserción retarda no pudiendo bloquear para la tabla %-.64s" + swe "DELAYED INSERT-tråden kunde inte låsa tabell '%-.64s'" + ukr "ç¦ÌËÁ ÄÌÑ INSERT DELAYED ÎÅ ÍÏÖÅ ÏÔÒÉÍÁÔÉ ÂÌÏËÕ×ÁÎÎÑ ÄÌÑ ÔÁÂÌÉæ %-.64s" +ER_TOO_MANY_DELAYED_THREADS + cze "P-Bøíli¹ mnoho zpo¾dìných threadù" + dan "For mange slettede tråde (threads) i brug" + nla "Te veel 'delayed' threads in gebruik" + eng "Too many delayed threads in use" + est "Liiga palju DELAYED lõimesid kasutusel" + fre "Trop de tâche 'delayed' en cours" + ger "Zu viele verzögerte (DELAYED) Threads in Verwendung" + hun "Tul sok kesletetett thread (delayed)" + ita "Troppi threads ritardati in uso" + kor "³Ê¹« ¸¹Àº Áö¿¬ ¾²·¹µå¸¦ »ç¿ëÇϰí ÀÖ½À´Ï´Ù." 
+ por "Excesso de 'threads' retardadas (atrasadas) em uso" + rum "Prea multe threaduri aminate care sint in uz" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÐÏÔÏËÏ×, ÏÂÓÌÕÖÉ×ÁÀÝÉÈ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert)" + serbian "Previše prolongiranih thread-ova je u upotrebi" + spa "Muchos threads retardados en uso" + swe "Det finns redan 'max_delayed_threads' trådar i använding" + ukr "úÁÂÁÇÁÔÏ ÚÁÔÒÉÍÁÎÉÈ Ç¦ÌÏË ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ" +ER_ABORTING_CONNECTION 08S01 + cze "Zru-B¹eno spojení %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' (%-.64s)" + dan "Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.32s' (%-.64s)" + nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' (%-.64s)" + eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + est "Ühendus katkestatud %ld andmebaasile: '%-.64s' kasutajale: '%-.32s' (%-.64s)" + fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' (%-.64s)" + ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s' (%-.64s)" + hun "Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.32s' (%-.64s)" + ita "Interrotta la connessione %ld al db: '%-.64s' utente: '%-.32s' (%-.64s)" + jpn "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + kor "µ¥ÀÌŸº£À̽º Á¢¼ÓÀ» À§ÇÑ ¿¬°á %ld°¡ Áß´ÜµÊ : '%-.64s' »ç¿ëÀÚ: '%-.32s' (%-.64s)" + nor "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + norwegian-ny "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + pol "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + por "Conexão %ld abortou para o banco de dados '%-.64s' - usuário '%-.32s' (%-.64s)" + rum "Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)" + rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' (%-.64s)" + serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)" + slo "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + spa "Conexión 
abortada %ld para db: '%-.64s' usuario: '%-.32s' (%-.64s)" + swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s' (%-.64s)" + ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s' (%-.64s)" +ER_NET_PACKET_TOO_LARGE 08S01 + cze "Zji-B¹tìn pøíchozí packet del¹í ne¾ 'max_allowed_packet'" + dan "Modtog en datapakke som var større end 'max_allowed_packet'" + nla "Groter pakket ontvangen dan 'max_allowed_packet'" + eng "Got a packet bigger than 'max_allowed_packet' bytes" + est "Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga" + fre "Paquet plus grand que 'max_allowed_packet' reçu" + ger "Empfangenes Paket ist größer als 'max_allowed_packet' Bytes" + hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'" + ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'" + kor "'max_allowed_packet'º¸´Ù ´õÅ« ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù." + por "Obteve um pacote maior do que a taxa máxima de pacotes definida (max_allowed_packet)" + rum "Un packet mai mare decit 'max_allowed_packet' a fost primit" + rus "ðÏÌÕÞÅÎÎÙÊ ÐÁËÅÔ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'" + serbian "Primio sam mrežni paket veæi od definisane vrednosti 'max_allowed_packet'" + spa "Obtenido un paquete mayor que 'max_allowed_packet'" + swe "Kommunkationspaketet är större än 'max_allowed_packet'" + ukr "ïÔÒÉÍÁÎÏ ÐÁËÅÔ Â¦ÌØÛÉÊ Î¦Ö max_allowed_packet" +ER_NET_READ_ERROR_FROM_PIPE 08S01 + cze "Zji-B¹tìna chyba pøi ètení z roury spojení" + dan "Fik læsefejl fra forbindelse (connection pipe)" + nla "Kreeg leesfout van de verbindings pipe" + eng "Got a read error from the connection pipe" + est "Viga ühendustoru lugemisel" + fre "Erreur de lecture reçue du pipe de connection" + ger "Lese-Fehler bei einer Verbindungs-Pipe" + hun "Olvasasi hiba a kapcsolat soran" + ita "Rilevato un errore di lettura dalla pipe di connessione" + kor "¿¬°á ÆÄÀÌÇÁ·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù." 
+ por "Obteve um erro de leitura no 'pipe' da conexão" + rum "Eroare la citire din cauza lui 'connection pipe'" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ ÏÔ ÐÏÔÏËÁ ÓÏÅÄÉÎÅÎÉÑ (connection pipe)" + serbian "Greška pri èitanju podataka sa pipe-a" + spa "Obtenido un error de lectura de la conexión pipe" + swe "Fick läsfel från klienten vid läsning från 'PIPE'" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ Ú ËÏÍÕΦËÁæÊÎÏÇÏ ËÁÎÁÌÕ" +ER_NET_FCNTL_ERROR 08S01 + cze "Zji-B¹tìna chyba fcntl()" + dan "Fik fejlmeddelelse fra fcntl()" + nla "Kreeg fout van fcntl()" + eng "Got an error from fcntl()" + est "fcntl() tagastas vea" + fre "Erreur reçue de fcntl() " + ger "fcntl() lieferte einen Fehler" + hun "Hiba a fcntl() fuggvenyben" + ita "Rilevato un errore da fcntl()" + kor "fcntl() ÇÔ¼ö·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù." + por "Obteve um erro em fcntl()" + rum "Eroare obtinuta de la fcntl()" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÏÔ fcntl()" + serbian "Greška pri izvršavanju funkcije fcntl()" + spa "Obtenido un error de fcntl()" + swe "Fick fatalt fel från 'fcntl()'" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËËÕ ×¦Ä fcntl()" +ER_NET_PACKETS_OUT_OF_ORDER 08S01 + cze "P-Bøíchozí packety v chybném poøadí" + dan "Modtog ikke datapakker i korrekt rækkefølge" + nla "Pakketten in verkeerde volgorde ontvangen" + eng "Got packets out of order" + est "Paketid saabusid vales järjekorras" + fre "Paquets reçus dans le désordre" + ger "Pakete nicht in der richtigen Reihenfolge empfangen" + hun "Helytelen sorrendben erkezett adatcsomagok" + ita "Ricevuti pacchetti non in ordine" + kor "¼ø¼°¡ ¸ÂÁö¾Ê´Â ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù." 
+ por "Obteve pacotes fora de ordem" + rum "Packets care nu sint ordonati au fost gasiti" + rus "ðÁËÅÔÙ ÐÏÌÕÞÅÎÙ × ÎÅ×ÅÒÎÏÍ ÐÏÒÑÄËÅ" + serbian "Primio sam mrežne pakete van reda" + spa "Obtenido paquetes desordenados" + swe "Kommunikationspaketen kom i fel ordning" + ukr "ïÔÒÉÍÁÎÏ ÐÁËÅÔÉ Õ ÎÅÎÁÌÅÖÎÏÍÕ ÐÏÒÑÄËÕ" +ER_NET_UNCOMPRESS_ERROR 08S01 + cze "Nemohu rozkomprimovat komunika-Bèní packet" + dan "Kunne ikke dekomprimere kommunikations-pakke (communication packet)" + nla "Communicatiepakket kon niet worden gedecomprimeerd" + eng "Couldn't uncompress communication packet" + est "Viga andmepaketi lahtipakkimisel" + fre "Impossible de décompresser le paquet reçu" + ger "Kommunikationspaket lässt sich nicht entpacken" + hun "A kommunikacios adatcsomagok nem tomorithetok ki" + ita "Impossibile scompattare i pacchetti di comunicazione" + kor "Åë½Å ÆÐŶÀÇ ¾ÐÃàÇØÁ¦¸¦ ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù." + por "Não conseguiu descomprimir pacote de comunicação" + rum "Nu s-a putut decompresa pachetul de comunicatie (communication packet)" + rus "îÅ×ÏÚÍÏÖÎÏ ÒÁÓÐÁËÏ×ÁÔØ ÐÁËÅÔ, ÐÏÌÕÞÅÎÎÙÊ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ" + serbian "Ne mogu da dekompresujem mrežne pakete" + spa "No puedo descomprimir paquetes de comunicación" + swe "Kunde inte packa up kommunikationspaketet" + ukr "îÅ ÍÏÖÕ ÄÅËÏÍÐÒÅÓÕ×ÁÔÉ ËÏÍÕΦËÁæÊÎÉÊ ÐÁËÅÔ" +ER_NET_READ_ERROR 08S01 + cze "Zji-B¹tìna chyba pøi ètení komunikaèního packetu" + dan "Fik fejlmeddelelse ved læsning af kommunikations-pakker (communication packets)" + nla "Fout bij het lezen van communicatiepakketten" + eng "Got an error reading communication packets" + est "Viga andmepaketi lugemisel" + fre "Erreur de lecture des paquets reçus" + ger "Fehler beim Lesen eines Kommunikationspakets" + hun "HIba a kommunikacios adatcsomagok olvasasa soran" + ita "Rilevato un errore ricevendo i pacchetti di comunicazione" + kor "Åë½Å ÆÐŶÀ» Àд Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù." 
+ por "Obteve um erro na leitura de pacotes de comunicação" + rum "Eroare obtinuta citind pachetele de comunicatie (communication packets)" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ " + serbian "Greška pri primanju mrežnih paketa" + spa "Obtenido un error leyendo paquetes de comunicación" + swe "Fick ett fel vid läsning från klienten" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×" +ER_NET_READ_INTERRUPTED 08S01 + cze "Zji-B¹tìn timeout pøi ètení komunikaèního packetu" + dan "Timeout-fejl ved læsning af kommunukations-pakker (communication packets)" + nla "Timeout bij het lezen van communicatiepakketten" + eng "Got timeout reading communication packets" + est "Kontrollaja ületamine andmepakettide lugemisel" + fre "Timeout en lecture des paquets reçus" + ger "Zeitüberschreitung beim Lesen eines Kommunikationspakets" + hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran" + ita "Rilevato un timeout ricevendo i pacchetti di comunicazione" + kor "Åë½Å ÆÐŶÀ» Àд Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù." 
+ por "Obteve expiração de tempo (timeout) na leitura de pacotes de comunicação" + rum "Timeout obtinut citind pachetele de comunicatie (communication packets)" + rus "ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ " + serbian "Vremenski limit za èitanje mrežnih paketa je istekao" + spa "Obtenido timeout leyendo paquetes de comunicación" + swe "Fick 'timeout' vid läsning från klienten" + ukr "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×" +ER_NET_ERROR_ON_WRITE 08S01 + cze "Zji-B¹tìna chyba pøi zápisu komunikaèního packetu" + dan "Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)" + nla "Fout bij het schrijven van communicatiepakketten" + eng "Got an error writing communication packets" + est "Viga andmepaketi kirjutamisel" + fre "Erreur d'écriture des paquets envoyés" + ger "Fehler beim Schreiben eines Kommunikationspakets" + hun "Hiba a kommunikacios csomagok irasa soran" + ita "Rilevato un errore inviando i pacchetti di comunicazione" + kor "Åë½Å ÆÐŶÀ» ±â·ÏÇÏ´Â Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù." 
+ por "Obteve um erro na escrita de pacotes de comunicação" + rum "Eroare in scrierea pachetelor de comunicatie (communication packets)" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÐÒÉ ÐÅÒÅÄÁÞÅ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ " + serbian "Greška pri slanju mrežnih paketa" + spa "Obtenido un error de escribiendo paquetes de comunicación" + swe "Fick ett fel vid skrivning till klienten" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×" +ER_NET_WRITE_INTERRUPTED 08S01 + cze "Zji-B¹tìn timeout pøi zápisu komunikaèního packetu" + dan "Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)" + nla "Timeout bij het schrijven van communicatiepakketten" + eng "Got timeout writing communication packets" + est "Kontrollaja ületamine andmepakettide kirjutamisel" + fre "Timeout d'écriture des paquets envoyés" + ger "Zeitüberschreitung beim Schreiben eines Kommunikationspakets" + hun "Idotullepes a kommunikacios csomagok irasa soran" + ita "Rilevato un timeout inviando i pacchetti di comunicazione" + kor "Åë½Å ÆÐÆÂÀ» ±â·ÏÇÏ´Â Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù." 
+ por "Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação" + rum "Timeout obtinut scriind pachetele de comunicatie (communication packets)" + rus "ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ × ÐÒÏÃÅÓÓÅ ÐÅÒÅÄÁÞÉ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ " + serbian "Vremenski limit za slanje mrežnih paketa je istekao" + spa "Obtenido timeout escribiendo paquetes de comunicación" + swe "Fick 'timeout' vid skrivning till klienten" + ukr "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×" +ER_TOO_LONG_STRING 42000 + cze "V-Býsledný øetìzec je del¹í ne¾ 'max_allowed_packet'" + dan "Strengen med resultater er større end 'max_allowed_packet'" + nla "Resultaat string is langer dan 'max_allowed_packet'" + eng "Result string is longer than 'max_allowed_packet' bytes" + est "Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga" + fre "La chaîne résultat est plus grande que 'max_allowed_packet'" + ger "Ergebnis-String ist länger als 'max_allowed_packet' Bytes" + hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'" + ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'" + por "'String' resultante é mais longa do que 'max_allowed_packet'" + rum "Sirul rezultat este mai lung decit 'max_allowed_packet'" + rus "òÅÚÕÌØÔÉÒÕÀÝÁÑ ÓÔÒÏËÁ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'" + serbian "Rezultujuèi string je duži nego što to dozvoljava parametar servera 'max_allowed_packet'" + spa "La string resultante es mayor que max_allowed_packet" + swe "Resultatsträngen är längre än max_allowed_packet" + ukr "óÔÒÏËÁ ÒÅÚÕÌØÔÁÔÕ ÄÏ×ÛÁ Î¦Ö max_allowed_packet" +ER_TABLE_CANT_HANDLE_BLOB 42000 + cze "Typ pou-B¾ité tabulky nepodporuje BLOB/TEXT sloupce" + dan "Denne tabeltype understøtter ikke brug af BLOB og TEXT kolonner" + nla "Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen" + eng "The used table type doesn't support BLOB/TEXT columns" + est "Valitud tabelitüüp ei toeta BLOB/TEXT tüüpi välju" + fre "Ce type de table ne supporte pas les colonnes 
BLOB/TEXT" + ger "Der verwendete Tabellentyp unterstützt keine BLOB- und TEXT-Felder" + hun "A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket" + ita "Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT" + por "Tipo de tabela usado não permite colunas BLOB/TEXT" + rum "Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT" + rus "éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÔÉÐÙ BLOB/TEXT" + serbian "Iskorišteni tip tabele ne podržava kolone tipa 'BLOB' odnosno 'TEXT'" + spa "El tipo de tabla usada no permite soporte para columnas BLOB/TEXT" + swe "Den använda tabelltypen kan inte hantera BLOB/TEXT-kolumner" + ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ BLOB/TEXT ÓÔÏ×Âæ" +ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000 + cze "Typ pou-B¾ité tabulky nepodporuje AUTO_INCREMENT sloupce" + dan "Denne tabeltype understøtter ikke brug af AUTO_INCREMENT kolonner" + nla "Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen" + eng "The used table type doesn't support AUTO_INCREMENT columns" + est "Valitud tabelitüüp ei toeta AUTO_INCREMENT tüüpi välju" + fre "Ce type de table ne supporte pas les colonnes AUTO_INCREMENT" + ger "Der verwendete Tabellentyp unterstützt keine AUTO_INCREMENT-Felder" + hun "A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket" + ita "Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT" + por "Tipo de tabela usado não permite colunas AUTO_INCREMENT" + rum "Tipul de tabela folosit nu suporta coloane de tip AUTO_INCREMENT" + rus "éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÅ ÓÔÏÌÂÃÙ" + serbian "Iskorišteni tip tabele ne podržava kolone tipa 'AUTO_INCREMENT'" + spa "El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT" + swe "Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner" + ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ AUTO_INCREMENT ÓÔÏ×Âæ" +ER_DELAYED_INSERT_TABLE_LOCKED + cze "INSERT DELAYED nen-Bí mo¾no s tabulkou '%-.64s' pou¾ít, proto¾e je zamèená pomocí 
LOCK TABLES" + dan "INSERT DELAYED kan ikke bruges med tabellen '%-.64s', fordi tabellen er låst med LOCK TABLES" + nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.64s', vanwege een 'lock met LOCK TABLES" + eng "INSERT DELAYED can't be used with table '%-.64s' because it is locked with LOCK TABLES" + est "INSERT DELAYED ei saa kasutada tabeli '%-.64s' peal, kuna see on lukustatud LOCK TABLES käsuga" + fre "INSERT DELAYED ne peut être utilisé avec la table '%-.64s', car elle est verrouée avec LOCK TABLES" + ger "INSERT DELAYED kann für Tabelle '%-.64s' nicht verwendet werden, da sie mit LOCK TABLES gesperrt ist" + greek "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + hun "Az INSERT DELAYED nem hasznalhato a '%-.64s' tablahoz, mert a tabla zarolt (LOCK TABLES)" + ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.64s', perche` soggetta a lock da 'LOCK TABLES'" + jpn "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + kor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + nor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + norwegian-ny "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + pol "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + por "INSERT DELAYED não pode ser usado com a tabela '%-.64s', porque ela está travada com LOCK TABLES" + rum "INSERT DELAYED nu poate fi folosit cu tabela '%-.64s', deoarece este locked folosing LOCK TABLES" + rus "îÅÌØÚÑ ÉÓÐÏÌØÚÏ×ÁÔØ INSERT DELAYED ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s', ÐÏÔÏÍÕ ÞÔÏ ÏÎÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES" + serbian "Komanda 'INSERT DELAYED' ne može biti iskorištena u tabeli '%-.64s', zbog toga što je zakljuèana komandom 'LOCK TABLES'" + slo "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK 
TABLES" + spa "INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES" + swe "INSERT DELAYED kan inte användas med tabell '%-.64s', emedan den är låst med LOCK TABLES" + ukr "INSERT DELAYED ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÏ Ú ÔÁÂÌÉÃÅÀ '%-.64s', ÔÏÍÕ ÝÏ §§ ÚÁÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES" +ER_WRONG_COLUMN_NAME 42000 + cze "Nespr-Bávné jméno sloupce '%-.100s'" + dan "Forkert kolonnenavn '%-.100s'" + nla "Incorrecte kolom naam '%-.100s'" + eng "Incorrect column name '%-.100s'" + est "Vigane tulba nimi '%-.100s'" + fre "Nom de colonne '%-.100s' incorrect" + ger "Falscher Spaltenname '%-.100s'" + hun "Ervenytelen mezonev: '%-.100s'" + ita "Nome colonna '%-.100s' non corretto" + por "Nome de coluna '%-.100s' incorreto" + rum "Nume increct de coloana '%-.100s'" + rus "îÅ×ÅÒÎÏÅ ÉÍÑ ÓÔÏÌÂÃÁ '%-.100s'" + serbian "Pogrešno ime kolone '%-.100s'" + spa "Incorrecto nombre de columna '%-.100s'" + swe "Felaktigt kolumnnamn '%-.100s'" + ukr "îÅצÒÎÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.100s'" +ER_WRONG_KEY_COLUMN 42000 + cze "Handler pou-B¾ité tabulky neumí indexovat sloupce '%-.64s'" + dan "Den brugte tabeltype kan ikke indeksere kolonnen '%-.64s'" + nla "De gebruikte tabel 'handler' kan kolom '%-.64s' niet indexeren" + eng "The used storage engine can't index column '%-.64s'" + est "Tabelihandler ei oska indekseerida tulpa '%-.64s'" + fre "Le handler de la table ne peut indexé la colonne '%-.64s'" + ger "Die verwendete Speicher-Engine kann die Spalte '%-.64s' nicht indizieren" + greek "The used table handler can't index column '%-.64s'" + hun "A hasznalt tablakezelo nem tudja a '%-.64s' mezot indexelni" + ita "Il gestore delle tabelle non puo` indicizzare la colonna '%-.64s'" + jpn "The used table handler can't index column '%-.64s'" + kor "The used table handler can't index column '%-.64s'" + nor "The used table handler can't index column '%-.64s'" + norwegian-ny "The used table handler can't index column '%-.64s'" + pol "The used table handler can't index column '%-.64s'" + 
por "O manipulador de tabela usado não pode indexar a coluna '%-.64s'" + rum "Handler-ul tabelei folosite nu poate indexa coloana '%-.64s'" + rus "éÓÐÏÌØÚÏ×ÁÎÎÙÊ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÍÏÖÅÔ ÐÒÏÉÎÄÅËÓÉÒÏ×ÁÔØ ÓÔÏÌÂÅà '%-.64s'" + serbian "Handler tabele ne može da indeksira kolonu '%-.64s'" + slo "The used table handler can't index column '%-.64s'" + spa "El manipulador de tabla usado no puede indexar columna '%-.64s'" + swe "Den använda tabelltypen kan inte indexera kolumn '%-.64s'" + ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ÎÅ ÍÏÖÅ ¦ÎÄÅËÓÕ×ÁÔÉ ÓÔÏ×ÂÅÃØ '%-.64s'" +ER_WRONG_MRG_TABLE + cze "V-B¹echny tabulky v MERGE tabulce nejsou definovány stejnì" + dan "Tabellerne i MERGE er ikke defineret ens" + nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities" + eng "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist" + est "Kõik tabelid MERGE tabeli määratluses ei ole identsed" + fre "Toutes les tables de la table de type MERGE n'ont pas la même définition" + ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert" + hun "A MERGE tablaban talalhato tablak definicioja nem azonos" + ita "Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica" + jpn "All tables in the MERGE table are not defined identically" + kor "All tables in the MERGE table are not defined identically" + nor "All tables in the MERGE table are not defined identically" + norwegian-ny "All tables in the MERGE table are not defined identically" + pol "All tables in the MERGE table are not defined identically" + por "Todas as tabelas contidas na tabela fundida (MERGE) não estão definidas identicamente" + rum "Toate tabelele din tabela MERGE nu sint definite identic" + rus "îÅ ×ÓÅ ÔÁÂÌÉÃÙ × MERGE ÏÐÒÅÄÅÌÅÎÙ ÏÄÉÎÁËÏ×Ï" + serbian "Tabele iskorištene u 'MERGE' tabeli nisu definisane na isti naèin" + slo "All tables in the MERGE table are not defined identically" + spa "Todas las tablas en la MERGE tabla no estan 
definidas identicamente" + swe "Tabellerna i MERGE-tabellen är inte identiskt definierade" + ukr "ôÁÂÌÉæ Õ MERGE TABLE ÍÁÀÔØ Ò¦ÚÎÕ ÓÔÒÕËÔÕÒÕ" +ER_DUP_UNIQUE 23000 + cze "Kv-Bùli unique constraintu nemozu zapsat do tabulky '%-.64s'" + dan "Kan ikke skrive til tabellen '%-.64s' fordi det vil bryde CONSTRAINT regler" + nla "Kan niet opslaan naar table '%-.64s' vanwege 'unique' beperking" + eng "Can't write, because of unique constraint, to table '%-.64s'" + est "Ei suuda kirjutada tabelisse '%-.64s', kuna see rikub ühesuse kitsendust" + fre "Écriture impossible à cause d'un index UNIQUE sur la table '%-.64s'" + ger "Schreiben in Tabelle '%-.64s' nicht möglich wegen einer Eindeutigkeitsbeschränkung (unique constraint)" + hun "A '%-.64s' nem irhato, az egyedi mezok miatt" + ita "Impossibile scrivere nella tabella '%-.64s' per limitazione di unicita`" + por "Não pode gravar, devido à restrição UNIQUE, na tabela '%-.64s'" + rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÐÉÓÁÔØ × ÔÁÂÌÉÃÕ '%-.64s' ÉÚ-ÚÁ ÏÇÒÁÎÉÞÅÎÉÊ ÕÎÉËÁÌØÎÏÇÏ ËÌÀÞÁ" + serbian "Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu '%-.64s'" + spa "No puedo escribir, debido al único constraint, para tabla '%-.64s'" + swe "Kan inte skriva till tabell '%-.64s'; UNIQUE-test" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÄÏ ÔÁÂÌÉæ '%-.64s', Ú ÐÒÉÞÉÎÉ ×ÉÍÏÇ ÕΦËÁÌØÎÏÓÔ¦" +ER_BLOB_KEY_WITHOUT_LENGTH 42000 + cze "BLOB sloupec '%-.64s' je pou-B¾it ve specifikaci klíèe bez délky" + dan "BLOB kolonnen '%-.64s' brugt i nøglespecifikation uden nøglelængde" + nla "BLOB kolom '%-.64s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte" + eng "BLOB/TEXT column '%-.64s' used in key specification without a key length" + est "BLOB-tüüpi tulp '%-.64s' on kasutusel võtmes ilma pikkust määratlemata" + fre "La colonne '%-.64s' de type BLOB est utilisée dans une définition d'index sans longueur d'index" + ger "BLOB- oder TEXT-Spalte '%-.64s' wird in der 
Schlüsseldefinition ohne Schlüssellängenangabe verwendet" + greek "BLOB column '%-.64s' used in key specification without a key length" + hun "BLOB mezo '%-.64s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul" + ita "La colonna '%-.64s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza" + jpn "BLOB column '%-.64s' used in key specification without a key length" + kor "BLOB column '%-.64s' used in key specification without a key length" + nor "BLOB column '%-.64s' used in key specification without a key length" + norwegian-ny "BLOB column '%-.64s' used in key specification without a key length" + pol "BLOB column '%-.64s' used in key specification without a key length" + por "Coluna BLOB '%-.64s' usada na especificação de chave sem o comprimento da chave" + rum "Coloana BLOB '%-.64s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita" + rus "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÂÙÌ ÕËÁÚÁÎ × ÏÐÒÅÄÅÌÅÎÉÉ ËÌÀÞÁ ÂÅÚ ÕËÁÚÁÎÉÑ ÄÌÉÎÙ ËÌÀÞÁ" + serbian "BLOB kolona '%-.64s' je upotrebljena u specifikaciji kljuèa bez navoðenja dužine kljuèa" + slo "BLOB column '%-.64s' used in key specification without a key length" + spa "Columna BLOB column '%-.64s' usada en especificación de clave sin tamaño de la clave" + swe "Du har inte angett någon nyckellängd för BLOB '%-.64s'" + ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ×ÉËÏÒÉÓÔÁÎÏ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ ÂÅÚ ×ËÁÚÁÎÎÑ ÄÏ×ÖÉÎÉ ËÌÀÞÁ" +ER_PRIMARY_CANT_HAVE_NULL 42000 + cze "V-B¹echny èásti primárního klíèe musejí být NOT NULL; pokud potøebujete NULL, pou¾ijte UNIQUE" + dan "Alle dele af en PRIMARY KEY skal være NOT NULL; Hvis du skal bruge NULL i nøglen, brug UNIQUE istedet" + nla "Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken" + eng "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead" + est "Kõik PRIMARY KEY peavad olema määratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tüüpi 
võtit" + fre "Toutes les parties d'un index PRIMARY KEY doivent être NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE" + ger "Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. Wenn NULL in einem Schlüssel benötigt wird, muss ein UNIQUE-Schlüssel verwendet werden" + hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot" + ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE" + por "Todas as partes de uma chave primária devem ser não-nulas. Se você precisou usar um valor nulo (NULL) em uma chave, use a cláusula UNIQUE em seu lugar" + rum "Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb" + rus "÷ÓÅ ÞÁÓÔÉ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ (PRIMARY KEY) ÄÏÌÖÎÙ ÂÙÔØ ÏÐÒÅÄÅÌÅÎÙ ËÁË NOT NULL; åÓÌÉ ×ÁÍ ÎÕÖÎÁ ÐÏÄÄÅÒÖËÁ ×ÅÌÉÞÉÎ NULL × ËÌÀÞÅ, ×ÏÓÐÏÌØÚÕÊÔÅÓØ ÉÎÄÅËÓÏÍ UNIQUE" + serbian "Svi delovi primarnog kljuèa moraju biti razlièiti od NULL; Ako Vam ipak treba NULL vrednost u kljuèu, upotrebite 'UNIQUE'" + spa "Todas las partes de un PRIMARY KEY deben ser NOT NULL; Si necesitas NULL en una clave, use UNIQUE" + swe "Alla delar av en PRIMARY KEY måste vara NOT NULL; Om du vill ha en nyckel med NULL, använd UNIQUE istället" + ukr "õÓ¦ ÞÁÓÔÉÎÉ PRIMARY KEY ÐÏ×ÉÎΦ ÂÕÔÉ NOT NULL; ñËÝÏ ×É ÐÏÔÒÅÂÕ¤ÔÅ NULL Õ ËÌÀÞ¦, ÓËÏÒÉÓÔÁÊÔÅÓÑ UNIQUE" +ER_TOO_MANY_ROWS 42000 + cze "V-Býsledek obsahuje více ne¾ jeden øádek" + dan "Resultatet bestod af mere end een række" + nla "Resultaat bevatte meer dan een rij" + eng "Result consisted of more than one row" + est "Tulemis oli rohkem kui üks kirje" + fre "Le résultat contient plus d'un enregistrement" + ger "Ergebnis besteht aus mehr als einer Zeile" + hun "Az eredmeny tobb, mint egy sort tartalmaz" + ita "Il risultato consiste di piu` di una riga" + por "O resultado 
consistiu em mais do que uma linha" + rum "Resultatul constista din mai multe linii" + rus "÷ ÒÅÚÕÌØÔÁÔÅ ×ÏÚ×ÒÁÝÅÎÁ ÂÏÌÅÅ ÞÅÍ ÏÄÎÁ ÓÔÒÏËÁ" + serbian "Rezultat je saèinjen od više slogova" + spa "Resultado compuesto de mas que una línea" + swe "Resultet bestod av mera än en rad" + ukr "òÅÚÕÌØÔÁÔ ÚÎÁÈÏÄÉÔØÓÑ Õ Â¦ÌØÛÅ Î¦Ö ÏÄÎ¦Ê ÓÔÒÏæ" +ER_REQUIRES_PRIMARY_KEY 42000 + cze "Tento typ tabulky vy-B¾aduje primární klíè" + dan "Denne tabeltype kræver en primærnøgle" + nla "Dit tabel type heeft een primaire zoeksleutel nodig" + eng "This table type requires a primary key" + est "Antud tabelitüüp nõuab primaarset võtit" + fre "Ce type de table nécessite une clé primaire (PRIMARY KEY)" + ger "Dieser Tabellentyp benötigt einen Primärschlüssel (PRIMARY KEY)" + hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo" + ita "Questo tipo di tabella richiede una chiave primaria" + por "Este tipo de tabela requer uma chave primária" + rum "Aceast tip de tabela are nevoie de o cheie primara" + rus "üÔÏÔ ÔÉÐ ÔÁÂÌÉÃÙ ÔÒÅÂÕÅÔ ÏÐÒÅÄÅÌÅÎÉÑ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ" + serbian "Ovaj tip tabele zahteva da imate definisan primarni kljuè" + spa "Este tipo de tabla necesita de una primary key" + swe "Denna tabelltyp kräver en PRIMARY KEY" + ukr "ãÅÊ ÔÉÐ ÔÁÂÌÉæ ÐÏÔÒÅÂÕ¤ ÐÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ" +ER_NO_RAID_COMPILED + cze "Tato verze MySQL nen-Bí zkompilována s podporou RAID" + dan "Denne udgave af MySQL er ikke oversat med understøttelse af RAID" + nla "Deze versie van MySQL is niet gecompileerd met RAID ondersteuning" + eng "This version of MySQL is not compiled with RAID support" + est "Antud MySQL versioon on kompileeritud ilma RAID toeta" + fre "Cette version de MySQL n'est pas compilée avec le support RAID" + ger "Diese MySQL-Version ist nicht mit RAID-Unterstützung kompiliert" + hun "Ezen leforditott MySQL verzio nem tartalmaz RAID support-ot" + ita "Questa versione di MYSQL non e` compilata con il supporto RAID" + por "Esta versão do MySQL não foi compilada com suporte a RAID" + rum "Aceasta 
versiune de MySQL, nu a fost compilata cu suport pentru RAID" + rus "üÔÁ ×ÅÒÓÉÑ MySQL ÓËÏÍÐÉÌÉÒÏ×ÁÎÁ ÂÅÚ ÐÏÄÄÅÒÖËÉ RAID" + serbian "Ova verzija MySQL servera nije kompajlirana sa podrškom za RAID ureðaje" + spa "Esta versión de MySQL no es compilada con soporte RAID" + swe "Denna version av MySQL är inte kompilerad med RAID" + ukr "ãÑ ×ÅÒÓ¦Ñ MySQL ÎÅ ÚËÏÍÐ¦ÌØÏ×ÁÎÁ Ú Ð¦ÄÔÒÉÍËÏÀ RAID" +ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE + cze "Update tabulky bez WHERE s kl-Bíèem není v módu bezpeèných update dovoleno" + dan "Du bruger sikker opdaterings modus ('safe update mode') og du forsøgte at opdatere en tabel uden en WHERE klausul, der gør brug af et KEY felt" + nla "U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom" + eng "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column" + est "Katse muuta tabelit turvalises rezhiimis ilma WHERE klauslita" + fre "Vous êtes en mode 'safe update' et vous essayez de faire un UPDATE sans clause WHERE utilisant un index" + ger "MySQL läuft im sicheren Aktualisierungsmodus (safe update mode). 
Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel ein KEY-Feld anzugeben" + hun "On a biztonsagos update modot hasznalja, es WHERE that uses a KEY column" + ita "In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave" + por "Você está usando modo de atualização seguro e tentou atualizar uma tabela sem uma cláusula WHERE que use uma coluna chave" + rus "÷Ù ÒÁÂÏÔÁÅÔÅ × ÒÅÖÉÍÅ ÂÅÚÏÐÁÓÎÙÈ ÏÂÎÏ×ÌÅÎÉÊ (safe update mode) É ÐÏÐÒÏÂÏ×ÁÌÉ ÉÚÍÅÎÉÔØ ÔÁÂÌÉÃÕ ÂÅÚ ÉÓÐÏÌØÚÏ×ÁÎÉÑ ËÌÀÞÅ×ÏÇÏ ÓÔÏÌÂÃÁ × ÞÁÓÔÉ WHERE" + serbian "Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu kljuèa" + spa "Tu estás usando modo de actualización segura y tentado actualizar una tabla sin un WHERE que usa una KEY columna" + swe "Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel" + ukr "÷É Õ ÒÅÖÉͦ ÂÅÚÐÅÞÎÏÇÏ ÏÎÏ×ÌÅÎÎÑ ÔÁ ÎÁÍÁÇÁ¤ÔÅÓØ ÏÎÏ×ÉÔÉ ÔÁÂÌÉÃÀ ÂÅÚ ÏÐÅÒÁÔÏÒÁ WHERE, ÝÏ ×ÉËÏÒÉÓÔÏ×Õ¤ KEY ÓÔÏ×ÂÅÃØ" +ER_KEY_DOES_NOT_EXITS + cze "Kl-Bíè '%-.64s' v tabulce '%-.64s' neexistuje" + dan "Nøglen '%-.64s' eksisterer ikke i tabellen '%-.64s'" + nla "Zoeksleutel '%-.64s' bestaat niet in tabel '%-.64s'" + eng "Key '%-.64s' doesn't exist in table '%-.64s'" + est "Võti '%-.64s' ei eksisteeri tabelis '%-.64s'" + fre "L'index '%-.64s' n'existe pas sur la table '%-.64s'" + ger "Schlüssel '%-.64s' existiert in der Tabelle '%-.64s' nicht" + hun "A '%-.64s' kulcs nem letezik a '%-.64s' tablaban" + ita "La chiave '%-.64s' non esiste nella tabella '%-.64s'" + por "Chave '%-.64s' não existe na tabela '%-.64s'" + rus "ëÌÀÞ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ × ÔÁÂÌÉÃÅ '%-.64s'" + serbian "Kljuè '%-.64s' ne postoji u tabeli '%-.64s'" + spa "Clave '%-.64s' no existe en la tabla '%-.64s'" + swe "Nyckel '%-.64s' finns inte in tabell '%-.64s'" + ukr "ëÌÀÞ '%-.64s' ÎÅ ¦ÓÎÕ¤ × ÔÁÂÌÉæ '%-.64s'" +ER_CHECK_NO_SUCH_TABLE 42000 + cze "Nemohu otev-Bøít 
tabulku" + dan "Kan ikke åbne tabellen" + nla "Kan tabel niet openen" + eng "Can't open table" + est "Ei suuda avada tabelit" + fre "Impossible d'ouvrir la table" + ger "Kann Tabelle nicht öffnen" + hun "Nem tudom megnyitni a tablat" + ita "Impossibile aprire la tabella" + por "Não pode abrir a tabela" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ" + serbian "Ne mogu da otvorim tabelu" + spa "No puedo abrir tabla" + swe "Kan inte öppna tabellen" + ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÔÁÂÌÉÃÀ" +ER_CHECK_NOT_IMPLEMENTED 42000 + cze "Handler tabulky nepodporuje %s" + dan "Denne tabeltype understøtter ikke %s" + nla "De 'handler' voor de tabel ondersteund geen %s" + eng "The storage engine for the table doesn't support %s" + est "Antud tabelitüüp ei toeta %s käske" + fre "Ce type de table ne supporte pas les %s" + ger "Die Speicher-Engine für diese Tabelle unterstützt kein %s" + greek "The handler for the table doesn't support %s" + hun "A tabla kezeloje (handler) nem tamogatja az %s" + ita "Il gestore per la tabella non supporta il %s" + jpn "The handler for the table doesn't support %s" + kor "The handler for the table doesn't support %s" + nor "The handler for the table doesn't support %s" + norwegian-ny "The handler for the table doesn't support %s" + pol "The handler for the table doesn't support %s" + por "O manipulador de tabela não suporta %s" + rum "The handler for the table doesn't support %s" + rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÏÇÏ: %s" + serbian "Handler za ovu tabelu ne dozvoljava %s komande" + slo "The handler for the table doesn't support %s" + spa "El manipulador de la tabla no permite soporte para %s" + swe "Tabellhanteraren för denna tabell kan inte göra %s" + ukr "÷ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕÅ %s" +ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000 + cze "Proveden-Bí tohoto pøíkazu není v transakci dovoleno" + dan "Du må ikke bruge denne kommando i en transaktion" + nla "Het is u niet toegestaan dit commando uit te voeren binnen een transactie" + eng "You are not allowed to 
execute this command in a transaction" + est "Seda käsku ei saa kasutada transaktsiooni sees" + fre "Vous n'êtes pas autorisé à exécute cette commande dans une transaction" + ger "Sie dürfen diesen Befehl nicht in einer Transaktion ausführen" + hun "Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban" + ita "Non puoi eseguire questo comando in una transazione" + por "Não lhe é permitido executar este comando em uma transação" + rus "÷ÁÍ ÎÅ ÒÁÚÒÅÛÅÎÏ ×ÙÐÏÌÎÑÔØ ÜÔÕ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËÃÉÉ" + serbian "Nije Vam dozvoljeno da izvršite ovu komandu u transakciji" + spa "No tienes el permiso para ejecutar este comando en una transición" + swe "Du får inte utföra detta kommando i en transaktion" + ukr "÷ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÎÕ×ÁÔÉ ÃÀ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËæ§" +ER_ERROR_DURING_COMMIT + cze "Chyba %d p-Bøi COMMIT" + dan "Modtog fejl %d mens kommandoen COMMIT blev udført" + nla "Kreeg fout %d tijdens COMMIT" + eng "Got error %d during COMMIT" + est "Viga %d käsu COMMIT täitmisel" + fre "Erreur %d lors du COMMIT" + ger "Fehler %d beim COMMIT" + hun "%d hiba a COMMIT vegrehajtasa soran" + ita "Rilevato l'errore %d durante il COMMIT" + por "Obteve erro %d durante COMMIT" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ COMMIT" + serbian "Greška %d za vreme izvršavanja komande 'COMMIT'" + spa "Obtenido error %d durante COMMIT" + swe "Fick fel %d vid COMMIT" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ COMMIT" +ER_ERROR_DURING_ROLLBACK + cze "Chyba %d p-Bøi ROLLBACK" + dan "Modtog fejl %d mens kommandoen ROLLBACK blev udført" + nla "Kreeg fout %d tijdens ROLLBACK" + eng "Got error %d during ROLLBACK" + est "Viga %d käsu ROLLBACK täitmisel" + fre "Erreur %d lors du ROLLBACK" + ger "Fehler %d beim ROLLBACK" + hun "%d hiba a ROLLBACK vegrehajtasa soran" + ita "Rilevato l'errore %d durante il ROLLBACK" + por "Obteve erro %d durante ROLLBACK" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ ROLLBACK" + serbian "Greška %d za vreme izvršavanja komande 'ROLLBACK'" + spa "Obtenido error %d durante ROLLBACK" + swe 
"Fick fel %d vid ROLLBACK" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ ROLLBACK" +ER_ERROR_DURING_FLUSH_LOGS + cze "Chyba %d p-Bøi FLUSH_LOGS" + dan "Modtog fejl %d mens kommandoen FLUSH_LOGS blev udført" + nla "Kreeg fout %d tijdens FLUSH_LOGS" + eng "Got error %d during FLUSH_LOGS" + est "Viga %d käsu FLUSH_LOGS täitmisel" + fre "Erreur %d lors du FLUSH_LOGS" + ger "Fehler %d bei FLUSH_LOGS" + hun "%d hiba a FLUSH_LOGS vegrehajtasa soran" + ita "Rilevato l'errore %d durante il FLUSH_LOGS" + por "Obteve erro %d durante FLUSH_LOGS" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ FLUSH_LOGS" + serbian "Greška %d za vreme izvršavanja komande 'FLUSH_LOGS'" + spa "Obtenido error %d durante FLUSH_LOGS" + swe "Fick fel %d vid FLUSH_LOGS" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ FLUSH_LOGS" +ER_ERROR_DURING_CHECKPOINT + cze "Chyba %d p-Bøi CHECKPOINT" + dan "Modtog fejl %d mens kommandoen CHECKPOINT blev udført" + nla "Kreeg fout %d tijdens CHECKPOINT" + eng "Got error %d during CHECKPOINT" + est "Viga %d käsu CHECKPOINT täitmisel" + fre "Erreur %d lors du CHECKPOINT" + ger "Fehler %d bei CHECKPOINT" + hun "%d hiba a CHECKPOINT vegrehajtasa soran" + ita "Rilevato l'errore %d durante il CHECKPOINT" + por "Obteve erro %d durante CHECKPOINT" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ CHECKPOINT" + serbian "Greška %d za vreme izvršavanja komande 'CHECKPOINT'" + spa "Obtenido error %d durante CHECKPOINT" + swe "Fick fel %d vid CHECKPOINT" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ CHECKPOINT" +ER_NEW_ABORTING_CONNECTION 08S01 + cze "Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: '%-.64s' (%-.64s) bylo pøeru¹eno" + dan "Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: '%-.64s' (%-.64s)" + nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: '%-.64s' (%-.64s)" + eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: '%-.64s' (%-.64s)" + est "Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: '%-.64s' (%-.64s)" + fre "Connection %ld 
avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: '%-.64s' (%-.64s)" + ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: '%-.64s' (%-.64s)" + ita "Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)" + por "Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' '%-.64s' ('%-.64s')" + rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ '%-.64s' (%-.64s)" + serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: '%-.64s' (%-.64s)" + spa "Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: '%-.64s' (%-.64s)" + swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%-.64s)" + ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: '%-.64s' (%-.64s)" +ER_DUMP_NOT_IMPLEMENTED + cze "Handler tabulky nepodporuje bin-Bární dump" + dan "Denne tabeltype unserstøtter ikke binært tabeldump" + nla "De 'handler' voor de tabel ondersteund geen binaire tabel dump" + eng "The storage engine for the table does not support binary table dump" + fre "Ce type de table ne supporte pas les copies binaires" + ger "Die Speicher-Engine für die Tabelle unterstützt keinen binären Tabellen-Dump" + ita "Il gestore per la tabella non supporta il dump binario" + jpn "The handler for the table does not support binary table dump" + por "O manipulador de tabela não suporta 'dump' binário de tabela" + rum "The handler for the table does not support binary table dump" + rus "ïÂÒÁÂÏÔÞÉË ÜÔÏÊ ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Ä×ÏÉÞÎÏÇÏ ÓÏÈÒÁÎÅÎÉÑ ÏÂÒÁÚÁ ÔÁÂÌÉÃÙ (dump)" + serbian "Handler tabele ne podržava binarni dump tabele" + spa "El manipulador de tabla no soporta dump para tabla binaria" + swe "Tabellhanteraren klarar inte en binär kopiering av tabellen" + ukr "ãÅÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ¦ÎÁÒÎÕ ÐÅÒÅÄÁÞÕ ÔÁÂÌÉæ" +ER_FLUSH_MASTER_BINLOG_CLOSED + eng "Binlog closed, cannot RESET MASTER" 
+ ger "Binlog geschlossen. Kann RESET MASTER nicht ausführen" + por "Binlog fechado. Não pode fazer RESET MASTER" + rus "ä×ÏÉÞÎÙÊ ÖÕÒÎÁÌ ÏÂÎÏ×ÌÅÎÉÑ ÚÁËÒÙÔ, ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ RESET MASTER" + serbian "Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET MASTER'" + ukr "òÅÐ̦ËÁæÊÎÉÊ ÌÏÇ ÚÁËÒÉÔÏ, ÎÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ RESET MASTER" +ER_INDEX_REBUILD + cze "P-Bøebudování indexu dumpnuté tabulky '%-.64s' nebylo úspì¹né" + dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.64s'" + nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.64s'" + eng "Failed rebuilding the index of dumped table '%-.64s'" + fre "La reconstruction de l'index de la table copiée '%-.64s' a échoué" + ger "Neuerstellung des Index der Dump-Tabelle '%-.64s' fehlgeschlagen" + greek "Failed rebuilding the index of dumped table '%-.64s'" + hun "Failed rebuilding the index of dumped table '%-.64s'" + ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.64s'" + por "Falhou na reconstrução do índice da tabela 'dumped' '%-.64s'" + rus "ïÛÉÂËÁ ÐÅÒÅÓÔÒÏÊËÉ ÉÎÄÅËÓÁ ÓÏÈÒÁÎÅÎÎÏÊ ÔÁÂÌÉÃÙ '%-.64s'" + serbian "Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela" + spa "Falla reconstruyendo el indice de la tabla dumped '%-.64s'" + ukr "îÅ×ÄÁÌŠצÄÎÏ×ÌÅÎÎÑ ¦ÎÄÅËÓÁ ÐÅÒÅÄÁÎϧ ÔÁÂÌÉæ '%-.64s'" +ER_MASTER + cze "Chyba masteru: '%-.64s'" + dan "Fejl fra master: '%-.64s'" + nla "Fout van master: '%-.64s'" + eng "Error from master: '%-.64s'" + fre "Erreur reçue du maître: '%-.64s'" + ger "Fehler vom Master: '%-.64s'" + ita "Errore dal master: '%-.64s" + por "Erro no 'master' '%-.64s'" + rus "ïÛÉÂËÁ ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ: '%-.64s'" + serbian "Greška iz glavnog servera '%-.64s' u klasteru" + spa "Error del master: '%-.64s'" + swe "Fick en master: '%-.64s'" + ukr "ðÏÍÉÌËÁ ×¦Ä ÇÏÌÏ×ÎÏÇÏ: '%-.64s'" +ER_MASTER_NET_READ 08S01 + cze "S-Bí»ová chyba pøi ètení z masteru" + dan "Netværksfejl ved læsning fra master" + nla "Net fout tijdens lezen van master" + eng "Net error reading from master" + 
fre "Erreur de lecture réseau reçue du maître" + ger "Netzfehler beim Lesen vom Master" + ita "Errore di rete durante la ricezione dal master" + por "Erro de rede lendo do 'master'" + rus "÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ" + serbian "Greška u primanju mrežnih paketa sa glavnog servera u klasteru" + spa "Error de red leyendo del master" + swe "Fick nätverksfel vid läsning från master" + ukr "íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÞÉÔÁÎÎÑ ×¦Ä ÇÏÌÏ×ÎÏÇÏ" +ER_MASTER_NET_WRITE 08S01 + cze "S-Bí»ová chyba pøi zápisu na master" + dan "Netværksfejl ved skrivning til master" + nla "Net fout tijdens schrijven naar master" + eng "Net error writing to master" + fre "Erreur d'écriture réseau reçue du maître" + ger "Netzfehler beim Schreiben zum Master" + ita "Errore di rete durante l'invio al master" + por "Erro de rede gravando no 'master'" + rus "÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÚÁÐÉÓÉ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ" + serbian "Greška u slanju mrežnih paketa na glavni server u klasteru" + spa "Error de red escribiendo para el master" + swe "Fick nätverksfel vid skrivning till master" + ukr "íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÚÁÐÉÓÕ ÄÏ ÇÏÌÏ×ÎÏÇÏ" +ER_FT_MATCHING_KEY_NOT_FOUND + cze "-B®ádný sloupec nemá vytvoøen fulltextový index" + dan "Kan ikke finde en FULLTEXT nøgle som svarer til kolonne listen" + nla "Kan geen FULLTEXT index vinden passend bij de kolom lijst" + eng "Can't find FULLTEXT index matching the column list" + est "Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega" + fre "Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes" + ger "Kann keinen FULLTEXT-Index finden, der der Feldliste entspricht" + ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne" + por "Não pode encontrar um índice para o texto todo que combine com a lista de colunas" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÌÎÏÔÅËÓÔÏ×ÙÊ (FULLTEXT) ÉÎÄÅËÓ, ÓÏÏÔ×ÅÔÓÔ×ÕÀÝÉÊ ÓÐÉÓËÕ ÓÔÏÌÂÃÏ×" + serbian "Ne mogu da pronaðem 'FULLTEXT' indeks koli odgovara 
listi kolona" + spa "No puedo encontrar índice FULLTEXT correspondiendo a la lista de columnas" + swe "Hittar inte ett FULLTEXT-index i kolumnlistan" + ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ FULLTEXT ¦ÎÄÅËÓ, ÝÏ ×¦ÄÐÏצÄÁ¤ ÐÅÒÅ̦ËÕ ÓÔÏ×Âæ×" +ER_LOCK_OR_ACTIVE_TRANSACTION + cze "Nemohu prov-Bést zadaný pøíkaz, proto¾e existují aktivní zamèené tabulky nebo aktivní transakce" + dan "Kan ikke udføre den givne kommando fordi der findes aktive, låste tabeller eller fordi der udføres en transaktion" + nla "Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie" + eng "Can't execute the given command because you have active locked tables or an active transaction" + est "Ei suuda täita antud käsku kuna on aktiivseid lukke või käimasolev transaktsioon" + fre "Impossible d'exécuter la commande car vous avez des tables verrouillées ou une transaction active" + ger "Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausführen" + ita "Impossibile eseguire il comando richiesto: tabelle sotto lock o transazione in atto" + por "Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa" + rus "îÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ ÕËÁÚÁÎÎÕÀ ËÏÍÁÎÄÕ, ÐÏÓËÏÌØËÕ Õ ×ÁÓ ÐÒÉÓÕÔÓÔ×ÕÀÔ ÁËÔÉ×ÎÏ ÚÁÂÌÏËÉÒÏ×ÁÎÎÙÅ ÔÁÂÌÉÃÁ ÉÌÉ ÏÔËÒÙÔÁÑ ÔÒÁÎÚÁËÃÉÑ" + serbian "Ne mogu da izvršim datu komandu zbog toga što su tabele zakljuèane ili je transakcija u toku" + spa "No puedo ejecutar el comando dado porque tienes tablas bloqueadas o una transición activa" + swe "Kan inte utföra kommandot emedan du har en låst tabell eller an aktiv transaktion" + ukr "îÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ ÐÏÄÁÎÕ ËÏÍÁÎÄÕ ÔÏÍÕ, ÝÏ ÔÁÂÌÉÃÑ ÚÁÂÌÏËÏ×ÁÎÁ ÁÂÏ ×ÉËÏÎÕ¤ÔØÓÑ ÔÒÁÎÚÁËæÑ" +ER_UNKNOWN_SYSTEM_VARIABLE + cze "Nezn-Bámá systémová promìnná '%-.64s'" + dan "Ukendt systemvariabel '%-.64s'" + nla "Onbekende systeem variabele '%-.64s'" + eng "Unknown system variable '%-.64s'" + est "Tundmatu süsteemne muutuja '%-.64s'" + fre "Variable système '%-.64s' 
inconnue" + ger "Unbekannte Systemvariable '%-.64s'" + ita "Variabile di sistema '%-.64s' sconosciuta" + por "Variável de sistema '%-.64s' desconhecida" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÓÉÓÔÅÍÎÁÑ ÐÅÒÅÍÅÎÎÁÑ '%-.64s'" + serbian "Nepoznata sistemska promenljiva '%-.64s'" + spa "Desconocida variable de sistema '%-.64s'" + swe "Okänd systemvariabel: '%-.64s'" + ukr "îÅצÄÏÍÁ ÓÉÓÔÅÍÎÁ ÚͦÎÎÁ '%-.64s'" +ER_CRASHED_ON_USAGE + cze "Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a mìla by být opravena" + dan "Tabellen '%-.64s' er markeret med fejl og bør repareres" + nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en dient te worden gerepareerd" + eng "Table '%-.64s' is marked as crashed and should be repaired" + est "Tabel '%-.64s' on märgitud vigaseks ja tuleb parandada" + fre "La table '%-.64s' est marquée 'crashed' et devrait être réparée" + ger "Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden" + ita "La tabella '%-.64s' e` segnalata come corrotta e deve essere riparata" + por "Tabela '%-.64s' está marcada como danificada e deve ser reparada" + rus "ôÁÂÌÉÃÁ '%-.64s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÄÏÌÖÎÁ ÐÒÏÊÔÉ ÐÒÏ×ÅÒËÕ É ÒÅÍÏÎÔ" + serbian "Tabela '%-.64s' je markirana kao ošteæena i trebala bi biti popravljena" + spa "Tabla '%-.64s' está marcada como crashed y debe ser reparada" + swe "Tabell '%-.64s' är trasig och bör repareras med REPAIR TABLE" + ukr "ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ §§ ÐÏÔÒ¦ÂÎÏ ×¦ÄÎÏ×ÉÔÉ" +ER_CRASHED_ON_REPAIR + cze "Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a poslední (automatická?) oprava se nezdaøila" + dan "Tabellen '%-.64s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede" + nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte" + eng "Table '%-.64s' is marked as crashed and last (automatic?) repair failed" + est "Tabel '%-.64s' on märgitud vigaseks ja viimane (automaatne?) 
parandus ebaõnnestus" + fre "La table '%-.64s' est marquée 'crashed' et le dernier 'repair' a échoué" + ger "Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl" + ita "La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita" + por "Tabela '%-.64s' está marcada como danificada e a última reparação (automática?) falhou" + rus "ôÁÂÌÉÃÁ '%-.64s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÐÏÓÌÅÄÎÉÊ (Á×ÔÏÍÁÔÉÞÅÓËÉÊ?) ÒÅÍÏÎÔ ÎÅ ÂÙÌ ÕÓÐÅÛÎÙÍ" + serbian "Tabela '%-.64s' je markirana kao ošteæena, a zadnja (automatska?) popravka je bila neuspela" + spa "Tabla '%-.64s' está marcada como crashed y la última reparación (automactica?) falló" + swe "Tabell '%-.64s' är trasig och senast (automatiska?) reparation misslyckades" + ukr "ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ ÏÓÔÁÎΤ (Á×ÔÏÍÁÔÉÞÎÅ?) צÄÎÏ×ÌÅÎÎÑ ÎÅ ×ÄÁÌÏÓÑ" +ER_WARNING_NOT_COMPLETE_ROLLBACK + dan "Advarsel: Visse data i tabeller der ikke understøtter transaktioner kunne ikke tilbagestilles" + nla "Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen" + eng "Some non-transactional changed tables couldn't be rolled back" + est "Hoiatus: mõnesid transaktsioone mittetoetavaid tabeleid ei suudetud tagasi kerida" + fre "Attention: certaines tables ne supportant pas les transactions ont été changées et elles ne pourront pas être restituées" + ger "Änderungen an einigen nicht transaktionalen Tabellen konnten nicht zurückgerollt werden" + ita "Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)" + por "Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas (rolled back)" + rus "÷ÎÉÍÁÎÉÅ: ÐÏ ÎÅËÏÔÏÒÙÍ ÉÚÍÅÎÅÎÎÙÍ ÎÅÔÒÁÎÚÁËÃÉÏÎÎÙÍ ÔÁÂÌÉÃÁÍ ÎÅ×ÏÚÍÏÖÎÏ ÂÕÄÅÔ ÐÒÏÉÚ×ÅÓÔÉ ÏÔËÁÔ ÔÒÁÎÚÁËÃÉÉ" + serbian "Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'" + spa "Aviso: Algunas tablas no transancionales no pueden tener rolled 
back" + swe "Warning: Några icke transaktionella tabeller kunde inte återställas vid ROLLBACK" + ukr "úÁÓÔÅÒÅÖÅÎÎÑ: äÅÑ˦ ÎÅÔÒÁÎÚÁËæÊΦ ÚͦÎÉ ÔÁÂÌÉÃØ ÎÅ ÍÏÖÎÁ ÂÕÄÅ ÐÏ×ÅÒÎÕÔÉ" +ER_TRANS_CACHE_FULL + dan "Fler-udtryks transaktion krævede mere plads en 'max_binlog_cache_size' bytes. Forhøj værdien af denne variabel og prøv igen" + nla "Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. Verhoog deze mysqld variabele en probeer opnieuw" + eng "Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again" + est "Mitme lausendiga transaktsioon nõudis rohkem ruumi kui lubatud 'max_binlog_cache_size' muutujaga. Suurenda muutuja väärtust ja proovi uuesti" + fre "Cette transaction à commandes multiples nécessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et réessayez" + ger "Transaktionen, die aus mehreren Befehlen bestehen, benötigten mehr als 'max_binlog_cache_size' Bytes an Speicher. Btte vergrössern Sie diese Server-Variable versuchen Sie es noch einmal" + ita "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare" + por "Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mysqld e tente novamente" + rus "ôÒÁÎÚÁËÃÉÉ, ×ËÌÀÞÁÀÝÅÊ ÂÏÌØÛÏÅ ËÏÌÉÞÅÓÔ×Ï ËÏÍÁÎÄ, ÐÏÔÒÅÂÏ×ÁÌÏÓØ ÂÏÌÅÅ ÞÅÍ 'max_binlog_cache_size' ÂÁÊÔ. õ×ÅÌÉÞØÔÅ ÜÔÕ ÐÅÒÅÍÅÎÎÕÀ ÓÅÒ×ÅÒÁ mysqld É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ" + spa "Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo" + swe "Transaktionen krävde mera än 'max_binlog_cache_size' minne. 
Öka denna mysqld-variabel och försök på nytt" + ukr "ôÒÁÎÚÁËÃ¦Ñ Ú ÂÁÇÁÔØÍÁ ×ÉÒÁÚÁÍÉ ×ÉÍÁÇÁ¤ Â¦ÌØÛÅ Î¦Ö 'max_binlog_cache_size' ÂÁÊÔ¦× ÄÌÑ ÚÂÅÒ¦ÇÁÎÎÑ. úÂ¦ÌØÛÔÅ ÃÀ ÚͦÎÎÕ mysqld ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ" +ER_SLAVE_MUST_STOP + dan "Denne handling kunne ikke udføres med kørende slave, brug først kommandoen STOP SLAVE" + nla "Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE" + eng "This operation cannot be performed with a running slave; run STOP SLAVE first" + fre "Cette opération ne peut être réalisée avec un esclave actif, faites STOP SLAVE d'abord" + ger "Diese Operation kann bei einem aktiven Slave nicht durchgeführt werden. Bitte zuerst STOP SLAVE ausführen" + ita "Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE" + por "Esta operação não pode ser realizada com um 'slave' em execução. Execute STOP SLAVE primeiro" + rus "üÔÕ ÏÐÅÒÁÃÉÀ ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ ÐÒÉ ÒÁÂÏÔÁÀÝÅÍ ÐÏÔÏËÅ ÐÏÄÞÉÎÅÎÎÏÇÏ ÓÅÒ×ÅÒÁ. óÎÁÞÁÌÁ ×ÙÐÏÌÎÉÔÅ STOP SLAVE" + serbian "Ova operacija ne može biti izvršena dok je aktivan podreðeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podreðeni server." + spa "Esta operación no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE" + swe "Denna operation kan inte göras under replikering; Gör STOP SLAVE först" + ukr "ïÐÅÒÁÃ¦Ñ ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÎÁÎÁ Ú ÚÁÐÕÝÅÎÉÍ Ð¦ÄÌÅÇÌÉÍ, ÓÐÏÞÁÔËÕ ×ÉËÏÎÁÊÔÅ STOP SLAVE" +ER_SLAVE_NOT_RUNNING + dan "Denne handling kræver en kørende slave. Konfigurer en slave og brug kommandoen START SLAVE" + nla "Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE" + eng "This operation requires a running slave; configure slave and do START SLAVE" + fre "Cette opération nécessite un esclave actif, configurez les esclaves et faites START SLAVE" + ger "Diese Operation benötigt einen aktiven Slave. 
Bitte Slave konfigurieren und mittels START SLAVE aktivieren" + ita "Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE" + por "Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE" + rus "äÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ ÔÒÅÂÕÅÔÓÑ ÒÁÂÏÔÁÀÝÉÊ ÐÏÄÞÉÎÅÎÎÙÊ ÓÅÒ×ÅÒ. óÎÁÞÁÌÁ ×ÙÐÏÌÎÉÔÅ START SLAVE" + serbian "Ova operacija zahteva da je aktivan podreðeni server. Konfigurišite prvo podreðeni server i onda izvršite komandu 'START SLAVE'" + spa "Esta operación necesita el esclavo funcionando, configure esclavo y haga el START SLAVE" + swe "Denna operation kan endast göras under replikering; Konfigurera slaven och gör START SLAVE" + ukr "ïÐÅÒÁÃ¦Ñ ×ÉÍÁÇÁ¤ ÚÁÐÕÝÅÎÏÇÏ Ð¦ÄÌÅÇÌÏÇÏ, ÚËÏÎÆ¦ÇÕÒÕÊÔŠЦÄÌÅÇÌÏÇÏ ÔÁ ×ÉËÏÎÁÊÔÅ START SLAVE" +ER_BAD_SLAVE + dan "Denne server er ikke konfigureret som slave. Ret in config-filen eller brug kommandoen CHANGE MASTER TO" + nla "De server is niet geconfigureerd als slave, fix in configuratie bestand of met CHANGE MASTER TO" + eng "The server is not configured as slave; fix in config file or with CHANGE MASTER TO" + fre "Le server n'est pas configuré comme un esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO" + ger "Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben" + ita "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO" + por "O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO" + rus "üÔÏÔ ÓÅÒ×ÅÒ ÎÅ ÎÁÓÔÒÏÅÎ ËÁË ÐÏÄÞÉÎÅÎÎÙÊ. ÷ÎÅÓÉÔÅ ÉÓÐÒÁ×ÌÅÎÉÑ × ËÏÎÆÉÇÕÒÁÃÉÏÎÎÏÍ ÆÁÊÌÅ ÉÌÉ Ó ÐÏÍÏÝØÀ CHANGE MASTER TO" + serbian "Server nije konfigurisan kao podreðeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'" + spa "El servidor no está configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO" + swe "Servern är inte konfigurerade som en replikationsslav. 
Ändra konfigurationsfilen eller gör CHANGE MASTER TO" + ukr "óÅÒ×ÅÒ ÎÅ ÚËÏÎÆ¦ÇÕÒÏ×ÁÎÏ ÑË Ð¦ÄÌÅÇÌÉÊ, ×ÉÐÒÁ×ÔÅ ÃÅ Õ ÆÁÊ̦ ËÏÎÆ¦ÇÕÒÁæ§ ÁÂÏ Ú CHANGE MASTER TO" +ER_MASTER_INFO + eng "Could not initialize master info structure; more error messages can be found in the MySQL error log" + fre "Impossible d'initialiser les structures d'information de maître, vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MySQL" + ger "Konnte Master-Info-Struktur nicht initialisieren. Weitere Fehlermeldungen können im MySQL-Error-Log eingesehen werden" + serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'" + swe "Kunde inte initialisera replikationsstrukturerna. See MySQL fel fil för mera information" +ER_SLAVE_THREAD + dan "Kunne ikke danne en slave-tråd; check systemressourcerne" + nla "Kon slave thread niet aanmaken, controleer systeem resources" + eng "Could not create slave thread; check system resources" + fre "Impossible de créer une tâche esclave, vérifiez les ressources système" + ger "Konnte Slave-Thread nicht starten. Bitte System-Ressourcen überprüfen" + ita "Impossibile creare il thread 'slave', controllare le risorse di sistema" + por "Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÐÏÔÏË ÐÏÄÞÉÎÅÎÎÏÇÏ ÓÅÒ×ÅÒÁ. 
ðÒÏ×ÅÒØÔÅ ÓÉÓÔÅÍÎÙÅ ÒÅÓÕÒÓÙ" + serbian "Nisam mogao da startujem thread za podreðeni server, proverite sistemske resurse" + spa "No puedo crear el thread esclavo, verifique recursos del sistema" + swe "Kunde inte starta en tråd för replikering" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ Ð¦ÄÌÅÇÌÕ Ç¦ÌËÕ, ÐÅÒÅצÒÔÅ ÓÉÓÔÅÍΦ ÒÅÓÕÒÓÉ" +ER_TOO_MANY_USER_CONNECTIONS 42000 + dan "Brugeren %-.64s har allerede mere end 'max_user_connections' aktive forbindelser" + nla "Gebruiker %-.64s heeft reeds meer dan 'max_user_connections' actieve verbindingen" + eng "User %-.64s already has more than 'max_user_connections' active connections" + est "Kasutajal %-.64s on juba rohkem ühendusi kui lubatud 'max_user_connections' muutujaga" + fre "L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connections actives" + ger "Benutzer '%-.64s' hat mehr als 'max_user_connections' aktive Verbindungen" + ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive" + por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas" + rus "õ ÐÏÌØÚÏ×ÁÔÅÌÑ %-.64s ÕÖÅ ÂÏÌØÛÅ ÞÅÍ 'max_user_connections' ÁËÔÉ×ÎÙÈ ÓÏÅÄÉÎÅÎÉÊ" + serbian "Korisnik %-.64s veæ ima više aktivnih konekcija nego što je to odreðeno 'max_user_connections' promenljivom" + spa "Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas" + swe "Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar" + ukr "ëÏÒÉÓÔÕ×ÁÞ %-.64s ×ÖÅ ÍÁ¤ Â¦ÌØÛÅ Î¦Ö 'max_user_connections' ÁËÔÉ×ÎÉÈ Ú'¤ÄÎÁÎØ" +ER_SET_CONSTANTS_ONLY + dan "Du må kun bruge konstantudtryk med SET" + nla "U mag alleen constante expressies gebruiken bij SET" + eng "You may only use constant expressions with SET" + est "Ainult konstantsed suurused on lubatud SET klauslis" + fre "Seules les expressions constantes sont autorisées avec SET" + ger "Bei SET dürfen nur konstante Ausdrücke verwendet werden" + ita "Si possono usare solo espressioni costanti con SET" + por "Você pode usar apenas expressões 
constantes com SET" + rus "÷Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ × SET ÔÏÌØËÏ ËÏÎÓÔÁÎÔÎÙÅ ×ÙÒÁÖÅÎÉÑ" + serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'" + spa "Tu solo debes usar expresiones constantes con SET" + swe "Man kan endast använda konstantuttryck med SET" + ukr "íÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ ×ÉÒÁÚÉ Ú¦ ÓÔÁÌÉÍÉ Õ SET" +ER_LOCK_WAIT_TIMEOUT + dan "Lock wait timeout overskredet" + nla "Lock wacht tijd overschreden" + eng "Lock wait timeout exceeded; try restarting transaction" + est "Kontrollaeg ületatud luku järel ootamisel; Proovi transaktsiooni otsast alata" + fre "Timeout sur l'obtention du verrou" + ger "Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten" + ita "E' scaduto il timeout per l'attesa del lock" + por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação." + rus "ôÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÂÌÏËÉÒÏ×ËÉ ÉÓÔÅË; ÐÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ" + serbian "Vremenski limit za zakljuèavanje tabele je istekao; Probajte da ponovo startujete transakciju" + spa "Tiempo de bloqueo de espera excedido" + swe "Fick inte ett lås i tid ; Försök att starta om transaktionen" + ukr "úÁÔÒÉÍËÕ ÏÞ¦ËÕ×ÁÎÎÑ ÂÌÏËÕ×ÁÎÎÑ ×ÉÞÅÒÐÁÎÏ" +ER_LOCK_TABLE_FULL + dan "Det totale antal låse overstiger størrelsen på låse-tabellen" + nla "Het totale aantal locks overschrijdt de lock tabel grootte" + eng "The total number of locks exceeds the lock table size" + est "Lukkude koguarv ületab lukutabeli suuruse" + fre "Le nombre total de verrou dépasse la taille de la table des verrous" + ger "Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle" + ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock" + por "O número total de travamentos excede o tamanho da tabela de travamentos" + rus "ïÂÝÅÅ ËÏÌÉÞÅÓÔ×Ï ÂÌÏËÉÒÏ×ÏË ÐÒÅ×ÙÓÉÌÏ ÒÁÚÍÅÒÙ ÔÁÂÌÉÃÙ ÂÌÏËÉÒÏ×ÏË" + serbian "Broj totalnih zakljuèavanja tabele premašuje velièinu tabele zakljuèavanja" + spa "El número 
total de bloqueos excede el tamaño de bloqueo de la tabla" + swe "Antal lås överskrider antalet reserverade lås" + ukr "úÁÇÁÌØÎÁ Ë¦ÌØË¦ÓÔØ ÂÌÏËÕ×ÁÎØ ÐÅÒÅ×ÉÝÉÌÁ ÒÏÚÍ¦Ò ÂÌÏËÕ×ÁÎØ ÄÌÑ ÔÁÂÌÉæ" +ER_READ_ONLY_TRANSACTION 25000 + dan "Update lås kan ikke opnås under en READ UNCOMMITTED transaktion" + nla "Update locks kunnen niet worden verkregen tijdens een READ UNCOMMITTED transactie" + eng "Update locks cannot be acquired during a READ UNCOMMITTED transaction" + est "Uuenduslukke ei saa kasutada READ UNCOMMITTED transaktsiooni käigus" + fre "Un verrou en update ne peut être acquit pendant une transaction READ UNCOMMITTED" + ger "Während einer READ-UNCOMMITTED-Transaktion können keine UPDATE-Sperren angefordert werden" + ita "I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'" + por "Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED" + rus "âÌÏËÉÒÏ×ËÉ ÏÂÎÏ×ÌÅÎÉÊ ÎÅÌØÚÑ ÐÏÌÕÞÉÔØ × ÐÒÏÃÅÓÓÅ ÞÔÅÎÉÑ ÎÅ ÐÒÉÎÑÔÏÊ (× ÒÅÖÉÍÅ READ UNCOMMITTED) ÔÒÁÎÚÁËÃÉÉ" + serbian "Zakljuèavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija" + spa "Bloqueos de actualización no pueden ser adqueridos durante una transición READ UNCOMMITTED" + swe "Updateringslås kan inte göras när man använder READ UNCOMMITTED" + ukr "ïÎÏ×ÉÔÉ ÂÌÏËÕ×ÁÎÎÑ ÎÅ ÍÏÖÌÉ×Ï ÎÁ ÐÒÏÔÑÚ¦ ÔÒÁÎÚÁËæ§ READ UNCOMMITTED" +ER_DROP_DB_WITH_READ_LOCK + dan "DROP DATABASE er ikke tilladt mens en tråd holder på globalt read lock" + nla "DROP DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit" + eng "DROP DATABASE not allowed while thread is holding global read lock" + est "DROP DATABASE ei ole lubatud kui lõim omab globaalset READ lukku" + fre "DROP DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture" + ger "DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält" + ita "DROP DATABASE non e' permesso mentre il thread ha un lock globale di 
lettura" + por "DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura" + rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ DROP DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ" + serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka" + spa "DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global" + swe "DROP DATABASE är inte tillåtet när man har ett globalt läslås" + ukr "DROP DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ" +ER_CREATE_DB_WITH_READ_LOCK + dan "CREATE DATABASE er ikke tilladt mens en tråd holder på globalt read lock" + nla "CREATE DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit" + eng "CREATE DATABASE not allowed while thread is holding global read lock" + est "CREATE DATABASE ei ole lubatud kui lõim omab globaalset READ lukku" + fre "CREATE DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture" + ger "CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält" + ita "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura" + por "CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura" + rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ CREATE DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ" + serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka" + spa "CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global" + swe "CREATE DATABASE är inte tillåtet när man har ett globalt läslås" + ukr "CREATE DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ" +ER_WRONG_ARGUMENTS + nla "Foutieve parameters voor %s" + eng "Incorrect arguments to %s" + est "Vigased parameetrid %s-le" + fre "Mauvais arguments à %s" + ger "Falsche Argumente für %s" + ita "Argomenti errati a %s" + por "Argumentos errados para %s" + 
rus "îÅ×ÅÒÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ %s" + serbian "Pogrešni argumenti prosleðeni na %s" + spa "Argumentos errados para %s" + swe "Felaktiga argument till %s" + ukr "èÉÂÎÉÊ ÁÒÇÕÍÅÎÔ ÄÌÑ %s" +ER_NO_PERMISSION_TO_CREATE_USER 42000 + nla "'%-.32s'@'%-.64s' mag geen nieuwe gebruikers creeren" + eng "'%-.32s'@'%-.64s' is not allowed to create new users" + est "Kasutajal '%-.32s'@'%-.64s' ei ole lubatud luua uusi kasutajaid" + fre "'%-.32s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs" + ger "'%-.32s'@'%-.64s' ist nicht berechtigt, neue Benutzer hinzuzufügen" + ita "A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti" + por "Não é permitido a '%-.32s'@'%-.64s' criar novos usuários" + rus "'%-.32s'@'%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÓÏÚÄÁ×ÁÔØ ÎÏ×ÙÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ" + serbian "Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike" + spa "'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios" + swe "'%-.32s'@'%-.64s' har inte rättighet att skapa nya användare" + ukr "ëÏÒÉÓÔÕ×ÁÞÕ '%-.32s'@'%-.64s' ÎÅ ÄÏÚ×ÏÌÅÎÏ ÓÔ×ÏÒÀ×ÁÔÉ ÎÏ×ÉÈ ËÏÒÉÓÔÕ×ÁÞ¦×" +ER_UNION_TABLES_IN_DIFFERENT_DIR + nla "Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren" + eng "Incorrect table definition; all MERGE tables must be in the same database" + est "Vigane tabelimääratlus; kõik MERGE tabeli liikmed peavad asuma samas andmebaasis" + fre "Définition de table incorrecte; toutes les tables MERGE doivent être dans la même base de donnée" + ger "Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden" + ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database" + por "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados." 
+ rus "îÅ×ÅÒÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ; ÷ÓÅ ÔÁÂÌÉÃÙ × MERGE ÄÏÌÖÎÙ ÐÒÉÎÁÄÌÅÖÁÔØ ÏÄÎÏÊ É ÔÏÊ ÖÅ ÂÁÚÅ ÄÁÎÎÙÈ" + serbian "Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka" + spa "Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos" + swe "Felaktig tabelldefinition; alla tabeller i en MERGE-tabell måste vara i samma databas" +ER_LOCK_DEADLOCK 40001 + nla "Deadlock gevonden tijdens lock-aanvraag poging; Probeer herstart van de transactie" + eng "Deadlock found when trying to get lock; try restarting transaction" + est "Lukustamisel tekkis tupik (deadlock); alusta transaktsiooni otsast" + fre "Deadlock découvert en essayant d'obtenir les verrous : essayez de redémarrer la transaction" + ger "Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. Versuchen Sie, die Transaktion neu zu starten" + ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione" + por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação." + rus "÷ÏÚÎÉËÌÁ ÔÕÐÉËÏ×ÁÑ ÓÉÔÕÁÃÉÑ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÂÌÏËÉÒÏ×ËÉ; ðÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ" + serbian "Unakrsno zakljuèavanje pronaðeno kada sam pokušao da dobijem pravo na zakljuèavanje; Probajte da restartujete transakciju" + spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición" + swe "Fick 'DEADLOCK' vid låsförsök av block/rad. 
Försök att starta om transaktionen" +ER_TABLE_CANT_HANDLE_FT + nla "Het gebruikte tabel type ondersteund geen FULLTEXT indexen" + eng "The used table type doesn't support FULLTEXT indexes" + est "Antud tabelitüüp ei toeta FULLTEXT indekseid" + fre "Le type de table utilisé ne supporte pas les index FULLTEXT" + ger "Der verwendete Tabellentyp unterstützt keine FULLTEXT-Indizes" + ita "La tabella usata non supporta gli indici FULLTEXT" + por "O tipo de tabela utilizado não suporta índices de texto completo (fulltext indexes)" + rus "éÓÐÏÌØÚÕÅÍÙÊ ÔÉÐ ÔÁÂÌÉà ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÐÏÌÎÏÔÅËÓÔÏ×ÙÈ ÉÎÄÅËÓÏ×" + serbian "Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse" + spa "El tipo de tabla usada no soporta índices FULLTEXT" + swe "Tabelltypen har inte hantering av FULLTEXT-index" + ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ FULLTEXT ¦ÎÄÅËÓ¦×" +ER_CANNOT_ADD_FOREIGN + nla "Kan foreign key beperking niet toevoegen" + eng "Cannot add foreign key constraint" + fre "Impossible d'ajouter des contraintes d'index externe" + ger "Fremdschlüssel-Beschränkung kann nicht hinzugefügt werden" + ita "Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)" + por "Não pode acrescentar uma restrição de chave estrangeira" + rus "îÅ×ÏÚÍÏÖÎÏ ÄÏÂÁ×ÉÔØ ÏÇÒÁÎÉÞÅÎÉÑ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ" + serbian "Ne mogu da dodam proveru spoljnog kljuèa" + spa "No puede adicionar clave extranjera constraint" + swe "Kan inte lägga till 'FOREIGN KEY constraint'" +ER_NO_REFERENCED_ROW 23000 + nla "Kan onderliggende rij niet toevoegen: foreign key beperking gefaald" + eng "Cannot add or update a child row: a foreign key constraint fails" + fre "Impossible d'ajouter un enregistrement fils : une constrainte externe l'empèche" + ger "Hinzufügen oder Aktualisieren eines Kind-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl" + greek "Cannot add a child row: a foreign key constraint fails" + hun "Cannot add a child row: a foreign key constraint fails" + ita "Impossibile aggiungere 
la riga: un vincolo d'integrita' referenziale non e' soddisfatto" + norwegian-ny "Cannot add a child row: a foreign key constraint fails" + por "Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou" + rus "îÅ×ÏÚÍÏÖÎÏ ÄÏÂÁ×ÉÔØ ÉÌÉ ÏÂÎÏ×ÉÔØ ÄÏÞÅÒÎÀÀ ÓÔÒÏËÕ: ÐÒÏ×ÅÒËÁ ÏÇÒÁÎÉÞÅÎÉÊ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ ÎÅ ×ÙÐÏÌÎÑÅÔÓÑ" + spa "No puede adicionar una línea hijo: falla de clave extranjera constraint" + swe "FOREIGN KEY-konflikt: Kan inte skriva barn" +ER_ROW_IS_REFERENCED 23000 + eng "Cannot delete or update a parent row: a foreign key constraint fails" + fre "Impossible de supprimer un enregistrement père : une constrainte externe l'empèche" + ger "Löschen oder Aktualisieren eines Eltern-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl" + greek "Cannot delete a parent row: a foreign key constraint fails" + hun "Cannot delete a parent row: a foreign key constraint fails" + ita "Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto" + por "Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÉÌÉ ÏÂÎÏ×ÉÔØ ÒÏÄÉÔÅÌØÓËÕÀ ÓÔÒÏËÕ: ÐÒÏ×ÅÒËÁ ÏÇÒÁÎÉÞÅÎÉÊ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ ÎÅ ×ÙÐÏÌÎÑÅÔÓÑ" + serbian "Ne mogu da izbrišem roditeljski slog: provera spoljnog kljuèa je neuspela" + spa "No puede deletar una línea padre: falla de clave extranjera constraint" + swe "FOREIGN KEY-konflikt: Kan inte radera fader" +ER_CONNECT_TO_MASTER 08S01 + nla "Fout bij opbouwen verbinding naar master: %-.128s" + eng "Error connecting to master: %-.128s" + ger "Fehler bei der Verbindung zum Master: %-.128s" + ita "Errore durante la connessione al master: %-.128s" + por "Erro conectando com o master: %-.128s" + rus "ïÛÉÂËÁ ÓÏÅÄÉÎÅÎÉÑ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ: %-.128s" + spa "Error de coneccion a master: %-.128s" + swe "Fick fel vid anslutning till master: %-.128s" +ER_QUERY_ON_MASTER + nla "Fout bij uitvoeren query op master: %-.128s" + eng "Error running query on master: %-.128s" + ger "Beim Ausführen 
einer Abfrage auf dem Master trat ein Fehler auf: %-.128s" + ita "Errore eseguendo una query sul master: %-.128s" + por "Erro rodando consulta no master: %-.128s" + rus "ïÛÉÂËÁ ×ÙÐÏÌÎÅÎÉÑ ÚÁÐÒÏÓÁ ÎÁ ÇÏÌÏ×ÎÏÍ ÓÅÒ×ÅÒÅ: %-.128s" + spa "Error executando el query en master: %-.128s" + swe "Fick fel vid utförande av command på mastern: %-.128s" +ER_ERROR_WHEN_EXECUTING_COMMAND + nla "Fout tijdens uitvoeren van commando %s: %-.128s" + eng "Error when executing command %s: %-.128s" + est "Viga käsu %s täitmisel: %-.128s" + ger "Fehler beim Ausführen des Befehls %s: %-.128s" + ita "Errore durante l'esecuzione del comando %s: %-.128s" + por "Erro quando executando comando %s: %-.128s" + rus "ïÛÉÂËÁ ÐÒÉ ×ÙÐÏÌÎÅÎÉÉ ËÏÍÁÎÄÙ %s: %-.128s" + serbian "Greška pri izvršavanju komande %s: %-.128s" + spa "Error de %s: %-.128s" + swe "Fick fel vid utförande av %s: %-.128s" +ER_WRONG_USAGE + nla "Foutief gebruik van %s en %s" + eng "Incorrect usage of %s and %s" + est "Vigane %s ja %s kasutus" + ger "Falsche Verwendung von %s und %s" + ita "Uso errato di %s e %s" + por "Uso errado de %s e %s" + rus "îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ %s É %s" + serbian "Pogrešna upotreba %s i %s" + spa "Equivocado uso de %s y %s" + swe "Felaktig använding av %s and %s" + ukr "Wrong usage of %s and %s" +ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT 21000 + nla "De gebruikte SELECT commando's hebben een verschillend aantal kolommen" + eng "The used SELECT statements have a different number of columns" + est "Tulpade arv kasutatud SELECT lausetes ei kattu" + ger "Die verwendeten SELECT-Befehle liefern unterschiedliche Anzahlen von Feldern zurück" + ita "La SELECT utilizzata ha un numero di colonne differente" + por "Os comandos SELECT usados têm diferente número de colunas" + rus "éÓÐÏÌØÚÏ×ÁÎÎÙÅ ÏÐÅÒÁÔÏÒÙ ×ÙÂÏÒËÉ (SELECT) ÄÁÀÔ ÒÁÚÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ×" + serbian "Upotrebljene 'SELECT' komande adresiraju razlièit broj kolona" + spa "El comando SELECT usado tiene diferente número de columnas" + swe "SELECT-kommandona har olika 
antal kolumner" +ER_CANT_UPDATE_WITH_READLOCK + nla "Kan de query niet uitvoeren vanwege een conflicterende read lock" + eng "Can't execute the query because you have a conflicting read lock" + est "Ei suuda täita päringut konfliktse luku tõttu" + ger "Augrund eines READ-LOCK-Konflikts kann die Abfrage nicht ausgeführt werden" + ita "Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura" + por "Não posso executar a consulta porque você tem um conflito de travamento de leitura" + rus "îÅ×ÏÚÍÏÖÎÏ ÉÓÐÏÌÎÉÔØ ÚÁÐÒÏÓ, ÐÏÓËÏÌØËÕ Õ ×ÁÓ ÕÓÔÁÎÏ×ÌÅÎÙ ËÏÎÆÌÉËÔÕÀÝÉÅ ÂÌÏËÉÒÏ×ËÉ ÞÔÅÎÉÑ" + serbian "Ne mogu da izvršim upit zbog toga što imate zakljuèavanja èitanja podataka u konfliktu" + spa "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura" + swe "Kan inte utföra kommandot emedan du har ett READ-lås" +ER_MIXING_NOT_ALLOWED + nla "Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld." + eng "Mixing of transactional and non-transactional tables is disabled" + est "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud" + ger "Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert" + ita "E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali" + por "Mistura de tabelas transacional e não-transacional está desabilitada" + rus "éÓÐÏÌØÚÏ×ÁÎÉÅ ÔÒÁÎÚÁËÃÉÏÎÎÙÈ ÔÁÂÌÉà ÎÁÒÑÄÕ Ó ÎÅÔÒÁÎÚÁËÃÉÏÎÎÙÍÉ ÚÁÐÒÅÝÅÎÏ" + serbian "Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je iskljuèeno" + spa "Mezla de transancional y no-transancional tablas está deshabilitada" + swe "Blandning av transaktionella och icke-transaktionella tabeller är inaktiverat" +ER_DUP_ARGUMENT + nla "Optie '%s' tweemaal gebruikt in opdracht" + eng "Option '%s' used twice in statement" + est "Määrangut '%s' on lauses kasutatud topelt" + ger "Option '%s' wird im Befehl zweimal verwendet" + ita "L'opzione '%s' e' stata 
usata due volte nel comando" + por "Opção '%s' usada duas vezes no comando" + rus "ïÐÃÉÑ '%s' Ä×ÁÖÄÙ ÉÓÐÏÌØÚÏ×ÁÎÁ × ×ÙÒÁÖÅÎÉÉ" + spa "Opción '%s' usada dos veces en el comando" + swe "Option '%s' användes två gånger" +ER_USER_LIMIT_REACHED 42000 + nla "Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)" + eng "User '%-.64s' has exceeded the '%s' resource (current value: %ld)" + ger "Benutzer '%-.64s' hat die Ressourcenbeschränkung '%s' überschritten (aktueller Wert: %ld)" + ita "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)" + por "Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)" + rus "ðÏÌØÚÏ×ÁÔÅÌØ '%-.64s' ÐÒÅ×ÙÓÉÌ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÒÅÓÕÒÓÁ '%s' (ÔÅËÕÝÅÅ ÚÎÁÞÅÎÉÅ: %ld)" + spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)" + swe "Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)" +ER_SPECIFIC_ACCESS_DENIED_ERROR 42000 + nla "Toegang geweigerd. U moet het %-.128s privilege hebben voor deze operatie" + eng "Access denied; you need the %-.128s privilege for this operation" + ger "Kein Zugriff. Hierfür wird die Berechtigung %-.128s benötigt" + ita "Accesso non consentito. Serve il privilegio %-.128s per questa operazione" + por "Acesso negado. Você precisa o privilégio %-.128s para essa operação" + rus "÷ ÄÏÓÔÕÐÅ ÏÔËÁÚÁÎÏ. ÷ÁÍ ÎÕÖÎÙ ÐÒÉ×ÉÌÅÇÉÉ %-.128s ÄÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ" + spa "Acceso negado. Usted necesita el privilegio %-.128s para esta operación" + swe "Du har inte privlegiet '%-.128s' som behövs för denna operation" + ukr "Access denied. 
You need the %-.128s privilege for this operation" +ER_LOCAL_VARIABLE + nla "Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL" + eng "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL" + ger "Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verändert werden" + ita "La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL" + por "Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÐÏÔÏËÏ×ÏÊ (SESSION) ÐÅÒÅÍÅÎÎÏÊ É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ Ó ÐÏÍÏÝØÀ SET GLOBAL" + spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL" + swe "Variabel '%-.64s' är en SESSION variabel och kan inte ändrad med SET GLOBAL" +ER_GLOBAL_VARIABLE + nla "Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL" + eng "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL" + ger "Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verändert werden" + ita "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL" + por "Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÇÌÏÂÁÌØÎÏÊ (GLOBAL) ÐÅÒÅÍÅÎÎÏÊ, É ÅÅ ÓÌÅÄÕÅÔ ÉÚÍÅÎÑÔØ Ó ÐÏÍÏÝØÀ SET GLOBAL" + spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL" + swe "Variabel '%-.64s' är en GLOBAL variabel och bör sättas med SET GLOBAL" +ER_NO_DEFAULT 42000 + nla "Variabele '%-.64s' heeft geen standaard waarde" + eng "Variable '%-.64s' doesn't have a default value" + ger "Variable '%-.64s' hat keinen Vorgabewert" + ita "La variabile '%-.64s' non ha un valore di default" + por "Variável '%-.64s' não tem um valor padrão" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÉÍÅÅÔ ÚÎÁÞÅÎÉÑ ÐÏ ÕÍÏÌÞÁÎÉÀ" + spa "Variable '%-.64s' no tiene un valor patrón" + swe "Variabel '%-.64s' har inte ett 
DEFAULT-värde" +ER_WRONG_VALUE_FOR_VAR 42000 + nla "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.64s'" + eng "Variable '%-.64s' can't be set to the value of '%-.64s'" + ger "Variable '%-.64s' kann nicht auf '%-.64s' gesetzt werden" + ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'" + por "Variável '%-.64s' não pode ser configurada para o valor de '%-.64s'" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÕÓÔÁÎÏ×ÌÅÎÁ × ÚÎÁÞÅÎÉÅ '%-.64s'" + spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'" + swe "Variabel '%-.64s' kan inte sättas till '%-.64s'" +ER_WRONG_TYPE_FOR_VAR 42000 + nla "Foutief argumenttype voor variabele '%-.64s'" + eng "Incorrect argument type to variable '%-.64s'" + ger "Falscher Argumenttyp für Variable '%-.64s'" + ita "Tipo di valore errato per la variabile '%-.64s'" + por "Tipo errado de argumento para variável '%-.64s'" + rus "îÅ×ÅÒÎÙÊ ÔÉÐ ÁÒÇÕÍÅÎÔÁ ÄÌÑ ÐÅÒÅÍÅÎÎÏÊ '%-.64s'" + spa "Tipo de argumento equivocado para variable '%-.64s'" + swe "Fel typ av argument till variabel '%-.64s'" +ER_VAR_CANT_BE_READ + nla "Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen" + eng "Variable '%-.64s' can only be set, not read" + ger "Variable '%-.64s' kann nur verändert, nicht gelesen werden" + ita "Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto" + por "Variável '%-.64s' somente pode ser configurada, não lida" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÍÏÖÅÔ ÂÙÔØ ÔÏÌØËÏ ÕÓÔÁÎÏ×ÌÅÎÁ, ÎÏ ÎÅ ÓÞÉÔÁÎÁ" + spa "Variable '%-.64s' solamente puede ser configurada, no leída" + swe "Variabeln '%-.64s' kan endast sättas, inte läsas" +ER_CANT_USE_OPTION_HERE 42000 + nla "Foutieve toepassing/plaatsing van '%s'" + eng "Incorrect usage/placement of '%s'" + ger "Falsche Verwendung oder Platzierung von '%s'" + ita "Uso/posizione di '%s' sbagliato" + por "Errado uso/colocação de '%s'" + rus "îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÉÌÉ × ÎÅ×ÅÒÎÏÍ ÍÅÓÔÅ ÕËÁÚÁÎ '%s'" + spa "Equivocado 
uso/colocación de '%s'" + swe "Fel använding/placering av '%s'" +ER_NOT_SUPPORTED_YET 42000 + nla "Deze versie van MySQL ondersteunt nog geen '%s'" + eng "This version of MySQL doesn't yet support '%s'" + ger "Diese MySQL-Version unterstützt '%s' nicht" + ita "Questa versione di MySQL non supporta ancora '%s'" + por "Esta versão de MySQL não suporta ainda '%s'" + rus "üÔÁ ×ÅÒÓÉÑ MySQL ÐÏËÁ ÅÝÅ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ '%s'" + spa "Esta versión de MySQL no soporta todavia '%s'" + swe "Denna version av MySQL kan ännu inte utföra '%s'" +ER_MASTER_FATAL_ERROR_READING_BINLOG + nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log" + eng "Got fatal error %d: '%-.128s' from master when reading data from binary log" + ger "Schwerer Fehler %d: '%-.128s' vom Master beim Lesen des binären Logs" + ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario" + por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log" + rus "ðÏÌÕÞÅÎÁ ÎÅÉÓÐÒÁ×ÉÍÁÑ ÏÛÉÂËÁ %d: '%-.128s' ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ × ÐÒÏÃÅÓÓÅ ×ÙÂÏÒËÉ ÄÁÎÎÙÈ ÉÚ Ä×ÏÉÞÎÏÇÏ ÖÕÒÎÁÌÁ" + spa "Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log" + swe "Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen" +ER_SLAVE_IGNORED_TABLE + eng "Slave SQL thread ignored the query because of replicate-*-table rules" + ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert" + por "Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela" + spa "Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla" + swe "Slav SQL tråden ignorerade frågan pga en replicate-*-table regel" +ER_INCORRECT_GLOBAL_LOCAL_VAR + eng "Variable '%-.64s' is a %s variable" + serbian "Variable '%-.64s' is a %s variable" + ger "Variable '%-.64s' ist eine %s-Variable" + spa "Variable '%-.64s' es una %s variable" + swe "Variabel '%-.64s' är av typ %s" +ER_WRONG_FK_DEF 42000 + eng 
"Incorrect foreign key definition for '%-.64s': %s" + ger "Falsche Fremdschlüssel-Definition für '%-.64s': %s" + por "Definição errada da chave estrangeira para '%-.64s': %s" + spa "Equivocada definición de llave extranjera para '%-.64s': %s" + swe "Felaktig FOREIGN KEY-definition för '%-.64s': %s" +ER_KEY_REF_DO_NOT_MATCH_TABLE_REF + eng "Key reference and table reference don't match" + ger "Schlüssel- und Tabellenverweis passen nicht zusammen" + por "Referência da chave e referência da tabela não coincidem" + spa "Referencia de llave y referencia de tabla no coinciden" + swe "Nyckelreferensen och tabellreferensen stämmer inte överens" +ER_OPERAND_COLUMNS 21000 + eng "Operand should contain %d column(s)" + ger "Operand sollte %d Spalte(n) enthalten" + rus "ïÐÅÒÁÎÄ ÄÏÌÖÅÎ ÓÏÄÅÒÖÁÔØ %d ËÏÌÏÎÏË" + spa "Operando debe tener %d columna(s)" + ukr "ïÐÅÒÁÎÄ ÍÁ¤ ÓËÌÁÄÁÔÉÓÑ Ú %d ÓÔÏ×Âæ×" +ER_SUBQUERY_NO_1_ROW 21000 + eng "Subquery returns more than 1 row" + ger "Unterabfrage lieferte mehr als einen Datensatz zurück" + por "Subconsulta retorna mais que 1 registro" + rus "ðÏÄÚÁÐÒÏÓ ×ÏÚ×ÒÁÝÁÅÔ ÂÏÌÅÅ ÏÄÎÏÊ ÚÁÐÉÓÉ" + spa "Subconsulta retorna mas que 1 línea" + swe "Subquery returnerade mer än 1 rad" + ukr "ð¦ÄÚÁÐÉÔ ÐÏ×ÅÒÔÁ¤ Â¦ÌØÛ ÎiÖ 1 ÚÁÐÉÓ" +ER_UNKNOWN_STMT_HANDLER + dan "Unknown prepared statement handler (%.*s) given to %s" + eng "Unknown prepared statement handler (%.*s) given to %s" + ger "Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben" + por "Desconhecido manipulador de declaração preparado (%.*s) determinado para %s" + spa "Desconocido preparado comando handler (%.*s) dado para %s" + swe "Okänd PREPARED STATEMENT id (%.*s) var given till %s" + ukr "Unknown prepared statement handler (%.*s) given to %s" +ER_CORRUPT_HELP_DB + eng "Help database is corrupt or does not exist" + ger "Die Hilfe-Datenbank ist beschädigt oder existiert nicht" + por "Banco de dado de ajuda corrupto ou não existente" + spa "Base de datos Help está corrupto o no existe" + swe 
"Hjälpdatabasen finns inte eller är skadad" +ER_CYCLIC_REFERENCE + eng "Cyclic reference on subqueries" + ger "Zyklischer Verweis in Unterabfragen" + por "Referência cíclica em subconsultas" + rus "ãÉËÌÉÞÅÓËÁÑ ÓÓÙÌËÁ ÎÁ ÐÏÄÚÁÐÒÏÓ" + spa "Cíclica referencia en subconsultas" + swe "Cyklisk referens i subqueries" + ukr "ãÉË̦ÞÎÅ ÐÏÓÉÌÁÎÎÑ ÎÁ ЦÄÚÁÐÉÔ" +ER_AUTO_CONVERT + eng "Converting column '%s' from %s to %s" + ger "Feld '%s' wird von %s nach %s umgewandelt" + por "Convertendo coluna '%s' de %s para %s" + rus "ðÒÅÏÂÒÁÚÏ×ÁÎÉÅ ÐÏÌÑ '%s' ÉÚ %s × %s" + spa "Convirtiendo columna '%s' de %s para %s" + swe "Konvertar kolumn '%s' från %s till %s" + ukr "ðÅÒÅÔ×ÏÒÅÎÎÑ ÓÔÏ×ÂÃÁ '%s' Ú %s Õ %s" +ER_ILLEGAL_REFERENCE 42S22 + eng "Reference '%-.64s' not supported (%s)" + ger "Verweis '%-.64s' wird nicht unterstützt (%s)" + por "Referência '%-.64s' não suportada (%s)" + rus "óÓÙÌËÁ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔÓÑ (%s)" + spa "Referencia '%-.64s' no soportada (%s)" + swe "Referens '%-.64s' stöds inte (%s)" + ukr "ðÏÓÉÌÁÎÎÑ '%-.64s' ÎÅ ÐiÄÔÒÉÍÕÅÔÓÑ (%s)" +ER_DERIVED_MUST_HAVE_ALIAS 42000 + eng "Every derived table must have its own alias" + ger "Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden" + por "Cada tabela derivada deve ter seu próprio alias" + spa "Cada tabla derivada debe tener su propio alias" + swe "Varje 'derived table' måste ha sitt eget alias" +ER_SELECT_REDUCED 01000 + eng "Select %u was reduced during optimization" + ger "Select %u wurde während der Optimierung reduziert" + por "Select %u foi reduzido durante otimização" + rus "Select %u ÂÙÌ ÕÐÒÁÚÄÎÅÎ × ÐÒÏÃÅÓÓÅ ÏÐÔÉÍÉÚÁÃÉÉ" + spa "Select %u fué reducido durante optimización" + swe "Select %u reducerades vid optimiering" + ukr "Select %u was ÓËÁÓÏ×ÁÎÏ ÐÒÉ ÏÐÔÉÍiÚÁÃii" +ER_TABLENAME_NOT_ALLOWED_HERE 42000 + eng "Table '%-.64s' from one of the SELECTs cannot be used in %-.32s" + ger "Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden" + por "Tabela 
'%-.64s' de um dos SELECTs não pode ser usada em %-.32s" + spa "Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s" + swe "Tabell '%-.64s' från en SELECT kan inte användas i %-.32s" +ER_NOT_SUPPORTED_AUTH_MODE 08004 + eng "Client does not support authentication protocol requested by server; consider upgrading MySQL client" + ger "Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client" + por "Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MySQL" + spa "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MySQL" + swe "Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet." +ER_SPATIAL_CANT_HAVE_NULL 42000 + eng "All parts of a SPATIAL index must be NOT NULL" + ger "Alle Teile eines SPATIAL-Index müssen als NOT NULL deklariert sein" + por "Todas as partes de uma SPATIAL index devem ser NOT NULL" + spa "Todas las partes de una SPATIAL index deben ser NOT NULL" + swe "Alla delar av en SPATIAL index måste vara NOT NULL" +ER_COLLATION_CHARSET_MISMATCH 42000 + eng "COLLATION '%s' is not valid for CHARACTER SET '%s'" + ger "COLLATION '%s' ist für CHARACTER SET '%s' ungültig" + por "COLLATION '%s' não é válida para CHARACTER SET '%s'" + spa "COLLATION '%s' no es válido para CHARACTER SET '%s'" + swe "COLLATION '%s' är inte tillåtet för CHARACTER SET '%s'" +ER_SLAVE_WAS_RUNNING + eng "Slave is already running" + ger "Slave läuft bereits" + por "O slave já está rodando" + spa "Slave ya está funcionando" + swe "Slaven har redan startat" +ER_SLAVE_WAS_NOT_RUNNING + eng "Slave already has been stopped" + ger "Slave wurde bereits angehalten" + por "O slave já está parado" + spa "Slave ya fué parado" + swe "Slaven har redan stoppat" +ER_TOO_BIG_FOR_UNCOMPRESS + eng "Uncompressed data size too large; the maximum size is %d (probably, 
length of uncompressed data was corrupted)" + ger "Unkomprimierte Daten sind zu groß. Die maximale Größe beträgt %d (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)" + por "Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)" + spa "Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. (probablemente, extensión de datos descomprimidos fué corrompida)" +ER_ZLIB_Z_MEM_ERROR + eng "ZLIB: Not enough memory" + ger "ZLIB: Nicht genug Speicher" + por "ZLIB: Não suficiente memória disponível" + spa "Z_MEM_ERROR: No suficiente memoria para zlib" +ER_ZLIB_Z_BUF_ERROR + eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)" + ger "ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)" + por "ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)" + spa "Z_BUF_ERROR: No suficiente espacio en el búfer de salida para zlib (probablemente, extensión de datos descomprimidos fué corrompida)" +ER_ZLIB_Z_DATA_ERROR + eng "ZLIB: Input data corrupted" + ger "ZLIB: Eingabedaten beschädigt" + por "ZLIB: Dados de entrada está corrupto" + spa "ZLIB: Dato de entrada fué corrompido para zlib" +ER_CUT_VALUE_GROUP_CONCAT + eng "%d line(s) were cut by GROUP_CONCAT()" + ger "%d Zeile(n) durch GROUP_CONCAT() abgeschnitten" + por "%d linha(s) foram cortada(s) por GROUP_CONCAT()" + spa "%d línea(s) fue(fueron) cortadas por group_concat()" + swe "%d rad(er) kapades av group_concat()" + ukr "%d line(s) was(were) cut by group_concat()" +ER_WARN_TOO_FEW_RECORDS 01000 + eng "Row %ld doesn't contain data for all columns" + ger "Zeile %ld enthält nicht für alle Felder Daten" + por "Conta de registro é menor que a conta de coluna na linha %ld" + spa "Línea %ld no contiene datos para todas las columnas" 
+ER_WARN_TOO_MANY_RECORDS 01000 + eng "Row %ld was truncated; it contained more data than there were input columns" + ger "Zeile %ld gekürzt, die Zeile enthielt mehr Daten, als es Eingabefelder gibt" + por "Conta de registro é maior que a conta de coluna na linha %ld" + spa "Línea %ld fué truncada; La misma contine mas datos que las que existen en las columnas de entrada" +ER_WARN_NULL_TO_NOTNULL 22004 + eng "Column was set to data type implicit default; NULL supplied for NOT NULL column '%s' at row %ld" + ger "Feld auf Datentyp-spezifischen Vorgabewert gesetzt; da NULL für NOT-NULL-Feld '%s' in Zeile %ld angegeben" + por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld" + spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %ld" +ER_WARN_DATA_OUT_OF_RANGE 22003 + eng "Out of range value adjusted for column '%s' at row %ld" + ger "Daten abgeschnitten, außerhalb des Wertebereichs für Feld '%s' in Zeile %ld" + por "Dado truncado, fora de alcance para coluna '%s' na linha %ld" + spa "Datos truncados, fuera de gama para columna '%s' en la línea %ld" +WARN_DATA_TRUNCATED 01000 + eng "Data truncated for column '%s' at row %ld" + ger "Daten abgeschnitten für Feld '%s' in Zeile %ld" + por "Dado truncado para coluna '%s' na linha %ld" + spa "Datos truncados para columna '%s' en la línea %ld" +ER_WARN_USING_OTHER_HANDLER + eng "Using storage engine %s for table '%s'" + ger "Für Tabelle '%s' wird Speicher-Engine %s benutzt" + por "Usando engine de armazenamento %s para tabela '%s'" + spa "Usando motor de almacenamiento %s para tabla '%s'" + swe "Använder handler %s för tabell '%s'" +ER_CANT_AGGREGATE_2COLLATIONS + eng "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'" + ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s) und (%s, %s) für Operation '%s'" + por "Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'" + spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'" 
+ER_DROP_USER + eng "Cannot drop one or more of the requested users" + ger "Kann einen oder mehrere der angegebenen Benutzer nicht löschen" +ER_REVOKE_GRANTS + eng "Can't revoke all privileges for one or more of the requested users" + ger "Kann nicht alle Berechtigungen widerrufen, die für einen oder mehrere Benutzer gewährt wurden" + por "Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos" + spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados" +ER_CANT_AGGREGATE_3COLLATIONS + eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'" + ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s), (%s, %s), (%s, %s) für Operation '%s'" + por "Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'" + spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'" +ER_CANT_AGGREGATE_NCOLLATIONS + eng "Illegal mix of collations for operation '%s'" + ger "Unerlaubte Mischung von Sortierreihenfolgen für Operation '%s'" + por "Ilegal combinação de collations para operação '%s'" + spa "Ilegal mezcla de collations para operación '%s'" +ER_VARIABLE_IS_NOT_STRUCT + eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)" + ger "Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)" + por "Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)" + spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)" +ER_UNKNOWN_COLLATION + eng "Unknown collation: '%-.64s'" + ger "Unbekannte Sortierreihenfolge: '%-.64s'" + por "Collation desconhecida: '%-.64s'" + spa "Collation desconocida: '%-.64s'" +ER_SLAVE_IGNORED_SSL_PARAMS + eng "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started" + ger 
"SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn ein MySQL-Slave mit SSL gestartet wird" + por "SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MySQL foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado." + spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado" +ER_SERVER_IS_IN_SECURE_AUTH_MODE + eng "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format" + ger "Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ändern" + por "Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato" + rus "óÅÒ×ÅÒ ÚÁÐÕÝÅÎ × ÒÅÖÉÍÅ --secure-auth (ÂÅÚÏÐÁÓÎÏÊ Á×ÔÏÒÉÚÁÃÉÉ), ÎÏ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%s'@'%s' ÐÁÒÏÌØ ÓÏÈÒÁÎ£Î × ÓÔÁÒÏÍ ÆÏÒÍÁÔÅ; ÎÅÏÂÈÏÄÉÍÏ ÏÂÎÏ×ÉÔØ ÆÏÒÍÁÔ ÐÁÒÏÌÑ" + spa "Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato" +ER_WARN_FIELD_RESOLVED + eng "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d" + ger "Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. 
%d aufgelöst" + por "Campo ou referência '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d" + rus "ðÏÌÅ ÉÌÉ ÓÓÙÌËÁ '%-.64s%s%-.64s%s%-.64s' ÉÚ SELECTÁ #%d ÂÙÌÁ ÎÁÊÄÅÎÁ × SELECTÅ #%d" + spa "Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d" + ukr "óÔÏ×ÂÅÃØ ÁÂÏ ÐÏÓÉÌÁÎÎÑ '%-.64s%s%-.64s%s%-.64s' ¦Ú SELECTÕ #%d ÂÕÌÏ ÚÎÁÊÄÅÎÅ Õ SELECT¦ #%d" +ER_BAD_SLAVE_UNTIL_COND + eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL" + ger "Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL" + por "Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL" + spa "Parametro equivocado o combinación de parametros para START SLAVE UNTIL" +ER_MISSING_SKIP_SLAVE + eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart" + ger "Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. Ansonsten gibt es Probleme, wenn ein Slave-Server unerwartet neu startet" + por "É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo" + spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave" +ER_UNTIL_COND_IGNORED + eng "SQL thread is not to be started so UNTIL options are ignored" + ger "SQL-Thread soll nicht gestartet werden. 
Daher werden UNTIL-Optionen ignoriert" + por "Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas" + spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas" +ER_WRONG_NAME_FOR_INDEX 42000 + eng "Incorrect index name '%-.100s'" + ger "Falscher Indexname '%-.100s'" + por "Incorreto nome de índice '%-.100s'" + spa "Nombre de índice incorrecto '%-.100s'" + swe "Felaktigt index namn '%-.100s'" +ER_WRONG_NAME_FOR_CATALOG 42000 + eng "Incorrect catalog name '%-.100s'" + ger "Falscher Katalogname '%-.100s'" + por "Incorreto nome de catálogo '%-.100s'" + spa "Nombre de catalog incorrecto '%-.100s'" + swe "Felaktigt katalog namn '%-.100s'" +ER_WARN_QC_RESIZE + eng "Query cache failed to set size %lu; new query cache size is %lu" + ger "Änderung der Query-Cache-Größe auf %lu fehlgeschlagen; neue Query-Cache-Größe ist %lu" + por "Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu" + rus "ëÅÛ ÚÁÐÒÏÓÏ× ÎÅ ÍÏÖÅÔ ÕÓÔÁÎÏ×ÉÔØ ÒÁÚÍÅÒ %lu, ÎÏ×ÙÊ ÒÁÚÍÅÒ ËÅÛÁ ÚÐÒÏÓÏ× - %lu" + spa "Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu" + swe "Storleken av 'Query cache' kunde inte sättas till %lu, ny storlek är %lu" + ukr "ëÅÛ ÚÁÐÉÔ¦× ÎÅÓÐÒÏÍÏÖÅÎ ×ÓÔÁÎÏ×ÉÔÉ ÒÏÚÍ¦Ò %lu, ÎÏ×ÉÊ ÒÏÚÍ¦Ò ËÅÛÁ ÚÁÐÉÔ¦× - %lu" +ER_BAD_FT_COLUMN + eng "Column '%-.64s' cannot be part of FULLTEXT index" + ger "Feld '%-.64s' kann nicht Teil eines FULLTEXT-Index sein" + por "Coluna '%-.64s' não pode ser parte de índice FULLTEXT" + spa "Columna '%-.64s' no puede ser parte de FULLTEXT index" + swe "Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index" +ER_UNKNOWN_KEY_CACHE + eng "Unknown key cache '%-.100s'" + ger "Unbekannter Schlüssel-Cache '%-.100s'" + por "Key cache desconhecida '%-.100s'" + spa "Desconocida key cache '%-.100s'" + swe "Okänd nyckel cache '%-.100s'" +ER_WARN_HOSTNAME_WONT_WORK + eng "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work" + ger 
"MySQL wurde mit --skip-name-resolve gestartet. Diese Option darf nicht verwendet werden, damit diese Rechtevergabe möglich ist" + por "MySQL foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar" + spa "MySQL esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opción para este derecho funcionar" +ER_UNKNOWN_STORAGE_ENGINE 42000 + eng "Unknown table engine '%s'" + ger "Unbekannte Speicher-Engine '%s'" + por "Motor de tabela desconhecido '%s'" + spa "Desconocido motor de tabla '%s'" +ER_WARN_DEPRECATED_SYNTAX + eng "'%s' is deprecated; use '%s' instead" + ger "'%s' ist veraltet. Bitte benutzen Sie '%s'" + por "'%s' é desatualizado. Use '%s' em seu lugar" + spa "'%s' está desaprobado, use '%s' en su lugar" +ER_NON_UPDATABLE_TABLE + eng "The target table %-.100s of the %s is not updatable" + ger "Die Zieltabelle %-.100s von %s ist nicht aktualisierbar" + por "A tabela destino %-.100s do %s não é atualizável" + rus "ôÁÂÌÉÃÁ %-.100s × %s ÎÅ ÍÏÖÅÔ ÉÚÍÅÎÑÔÓÑ" + spa "La tabla destino %-.100s del %s no es actualizable" + swe "Tabell %-.100s använd med '%s' är inte uppdateringsbar" + ukr "ôÁÂÌÉÃÑ %-.100s Õ %s ÎÅ ÍÏÖÅ ÏÎÏ×ÌÀ×ÁÔÉÓØ" +ER_FEATURE_DISABLED + eng "The '%s' feature is disabled; you need MySQL built with '%s' to have it working" + ger "Das Feature '%s' ist ausgeschaltet, Sie müssen MySQL mit '%s' übersetzen, damit es verfügbar ist" + por "O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando" + spa "El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando" + swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad" +ER_OPTION_PREVENTS_STATEMENT + eng "The MySQL server is running with the %s option so it cannot execute this statement" + ger "Der MySQL-Server läuft mit der Option %s und kann diese Anweisung deswegen nicht ausführen" + por "O 
servidor MySQL está rodando com a opção %s razão pela qual não pode executar esse commando" + spa "El servidor MySQL está rodando con la opción %s tal que no puede ejecutar este comando" + swe "MySQL är startad med %s. Pga av detta kan du inte använda detta kommando" +ER_DUPLICATED_VALUE_IN_TYPE + eng "Column '%-.100s' has duplicated value '%-.64s' in %s" + ger "Feld '%-.100s' hat doppelten Wert '%-.64s' in %s" + por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s" + spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s" +ER_TRUNCATED_WRONG_VALUE 22007 + eng "Truncated incorrect %-.32s value: '%-.128s'" + ger "Falscher %-.32s-Wert gekürzt: '%-.128s'" + por "Truncado errado %-.32s valor: '%-.128s'" + spa "Equivocado truncado %-.32s valor: '%-.128s'" +ER_TOO_MUCH_AUTO_TIMESTAMP_COLS + eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" + ger "Fehlerhafte Tabellendefinition. Es kann nur eine einzige TIMESTAMP-Spalte mit CURRENT_TIMESTAMP als DEFAULT oder in einer ON-UPDATE-Klausel geben" + por "Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula" + spa "Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula" +ER_INVALID_ON_UPDATE + eng "Invalid ON UPDATE clause for '%-.64s' column" + ger "Ungültige ON-UPDATE-Klausel für Spalte '%-.64s'" + por "Inválida cláusula ON UPDATE para campo '%-.64s'" + spa "Inválido ON UPDATE cláusula para campo '%-.64s'" +ER_UNSUPPORTED_PS + eng "This command is not supported in the prepared statement protocol yet" + ger "Dieser Befehl wird im Protokoll für vorbereitete Anweisungen noch nicht unterstützt" +ER_GET_ERRMSG + dan "Modtog fejl %d '%-.100s' fra %s" + eng "Got error %d '%-.100s' from %s" + ger "Fehler %d '%-.100s' von %s" + nor "Mottok feil %d '%-.100s' fa %s" + norwegian-ny "Mottok feil %d '%-.100s' fra 
%s" +ER_GET_TEMPORARY_ERRMSG + dan "Modtog temporary fejl %d '%-.100s' fra %s" + eng "Got temporary error %d '%-.100s' from %s" + ger "Temporärer Fehler %d '%-.100s' von %s" + nor "Mottok temporary feil %d '%-.100s' fra %s" + norwegian-ny "Mottok temporary feil %d '%-.100s' fra %s" +ER_UNKNOWN_TIME_ZONE + eng "Unknown or incorrect time zone: '%-.64s'" + ger "Unbekannte oder falsche Zeitzone: '%-.64s'" +ER_WARN_INVALID_TIMESTAMP + eng "Invalid TIMESTAMP value in column '%s' at row %ld" + ger "Ungültiger TIMESTAMP-Wert in Feld '%s', Zeile %ld" +ER_INVALID_CHARACTER_STRING + eng "Invalid %s character string: '%.64s'" + ger "Ungültiger %s-Zeichen-String: '%.64s'" +ER_WARN_ALLOWED_PACKET_OVERFLOWED + eng "Result of %s() was larger than max_allowed_packet (%ld) - truncated" + ger "Ergebnis von %s() war größer als max_allowed_packet (%ld) Bytes und wurde deshalb gekürzt" +ER_CONFLICTING_DECLARATIONS + eng "Conflicting declarations: '%s%s' and '%s%s'" + ger "Widersprüchliche Deklarationen: '%s%s' und '%s%s'" +ER_SP_NO_RECURSIVE_CREATE 2F003 + eng "Can't create a %s from within another stored routine" + ger "Kann kein %s innerhalb einer anderen gespeicherten Routine erzeugen" +ER_SP_ALREADY_EXISTS 42000 + eng "%s %s already exists" + ger "%s %s existiert bereits" +ER_SP_DOES_NOT_EXIST 42000 + eng "%s %s does not exist" + ger "%s %s existiert nicht" +ER_SP_DROP_FAILED + eng "Failed to DROP %s %s" + ger "DROP %s %s ist fehlgeschlagen" +ER_SP_STORE_FAILED + eng "Failed to CREATE %s %s" + ger "CREATE %s %s ist fehlgeschlagen" +ER_SP_LILABEL_MISMATCH 42000 + eng "%s with no matching label: %s" + ger "%s ohne passende Marke: %s" +ER_SP_LABEL_REDEFINE 42000 + eng "Redefining label %s" + ger "Neudefinition der Marke %s" +ER_SP_LABEL_MISMATCH 42000 + eng "End-label %s without match" + ger "Ende-Marke %s ohne zugehörigen Anfang" +ER_SP_UNINIT_VAR 01000 + eng "Referring to uninitialized variable %s" + ger "Zugriff auf nichtinitialisierte Variable %s" +ER_SP_BADSELECT 0A000 + eng 
"PROCEDURE %s can't return a result set in the given context" + ger "PROCEDURE %s kann im gegebenen Kontext keine Ergebnismenge zurückgeben" +ER_SP_BADRETURN 42000 + eng "RETURN is only allowed in a FUNCTION" + ger "RETURN ist nur innerhalb einer FUNCTION erlaubt" +ER_SP_BADSTATEMENT 0A000 + eng "%s is not allowed in stored procedures" + ger "%s ist in gespeicherten Prozeduren nicht erlaubt" +ER_UPDATE_LOG_DEPRECATED_IGNORED 42000 + eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" + ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wird ignoriert" +ER_UPDATE_LOG_DEPRECATED_TRANSLATED 42000 + eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN" + ger "Das Update-Log ist veraltet und wurde durch das Binär-Log ersetzt. SET SQL_LOG_UPDATE wurde in SET SQL_LOG_BIN übersetzt" +ER_QUERY_INTERRUPTED 70100 + eng "Query execution was interrupted" + ger "Ausführung der Abfrage wurde unterbrochen" +ER_SP_WRONG_NO_OF_ARGS 42000 + eng "Incorrect number of arguments for %s %s; expected %u, got %u" + ger "Falsche Anzahl von Argumenten für %s %s; erwarte %u, erhalte %u" +ER_SP_COND_MISMATCH 42000 + eng "Undefined CONDITION: %s" + ger "Undefinierte CONDITION: %s" +ER_SP_NORETURN 42000 + eng "No RETURN found in FUNCTION %s" + ger "Kein RETURN in FUNCTION %s gefunden" +ER_SP_NORETURNEND 2F005 + eng "FUNCTION %s ended without RETURN" + ger "FUNCTION %s endete ohne RETURN" +ER_SP_BAD_CURSOR_QUERY 42000 + eng "Cursor statement must be a SELECT" + ger "Cursor-Anweisung muss ein SELECT sein" +ER_SP_BAD_CURSOR_SELECT 42000 + eng "Cursor SELECT must not have INTO" + ger "Cursor-SELECT darf kein INTO haben" +ER_SP_CURSOR_MISMATCH 42000 + eng "Undefined CURSOR: %s" + ger "Undefinierter CURSOR: %s" +ER_SP_CURSOR_ALREADY_OPEN 24000 + eng "Cursor is already open" + ger "Cursor ist schon geöffnet" +ER_SP_CURSOR_NOT_OPEN 24000 + eng 
"Cursor is not open" + ger "Cursor ist nicht geöffnet" +ER_SP_UNDECLARED_VAR 42000 + eng "Undeclared variable: %s" + ger "Nicht deklarierte Variable: %s" +ER_SP_WRONG_NO_OF_FETCH_ARGS + eng "Incorrect number of FETCH variables" + ger "Falsche Anzahl von FETCH-Variablen" +ER_SP_FETCH_NO_DATA 02000 + eng "No data - zero rows fetched, selected, or processed" + ger "Keine Daten - null Zeilen geholt (fetch), ausgewählt oder verarbeitet" +ER_SP_DUP_PARAM 42000 + eng "Duplicate parameter: %s" + ger "Doppelter Parameter: %s" +ER_SP_DUP_VAR 42000 + eng "Duplicate variable: %s" + ger "Doppelte Variable: %s" +ER_SP_DUP_COND 42000 + eng "Duplicate condition: %s" + ger "Doppelte Bedingung: %s" +ER_SP_DUP_CURS 42000 + eng "Duplicate cursor: %s" + ger "Doppelter Cursor: %s" +ER_SP_CANT_ALTER + eng "Failed to ALTER %s %s" + ger "ALTER %s %s fehlgeschlagen" +ER_SP_SUBSELECT_NYI 0A000 + eng "Subselect value not supported" + ger "Subselect-Wert wird nicht unterstützt" +ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG 0A000 + eng "%s is not allowed in stored function or trigger" + ger "%s ist in gespeicherten Funktionen und in Triggern nicht erlaubt" +ER_SP_VARCOND_AFTER_CURSHNDLR 42000 + eng "Variable or condition declaration after cursor or handler declaration" + ger "Deklaration einer Variablen oder einer Bedingung nach der Deklaration eines Cursors oder eines Handlers" +ER_SP_CURSOR_AFTER_HANDLER 42000 + eng "Cursor declaration after handler declaration" + ger "Deklaration eines Cursors nach der Deklaration eines Handlers" +ER_SP_CASE_NOT_FOUND 20000 + eng "Case not found for CASE statement" + ger "Fall für CASE-Anweisung nicht gefunden" +ER_FPARSER_TOO_BIG_FILE + eng "Configuration file '%-.64s' is too big" + ger "Konfigurationsdatei '%-.64s' ist zu groß" + rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ËÏÎÆÉÇÕÒÁÃÉÏÎÎÙÊ ÆÁÊÌ '%-.64s'" + ukr "úÁÎÁÄÔÏ ×ÅÌÉËÉÊ ËÏÎÆ¦ÇÕÒÁæÊÎÉÊ ÆÁÊÌ '%-.64s'" +ER_FPARSER_BAD_HEADER + eng "Malformed file type header in file '%-.64s'" + ger "Nicht wohlgeformter Dateityp-Header in Datei '%-.64s'" 
+ rus "îÅ×ÅÒÎÙÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÁ ÆÁÊÌÁ '%-.64s'" + ukr "îÅצÒÎÉÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÕ Õ ÆÁÊ̦ '%-.64s'" +ER_FPARSER_EOF_IN_COMMENT + eng "Unexpected end of file while parsing comment '%-.200s'" + ger "Unerwartetes Dateiende beim Parsen des Kommentars '%-.200s'" + rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ × ËÏÍÅÎÔÁÒÉÉ '%-.200s'" + ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ËÏÍÅÎÔÁÒ¦ '%-.200s'" +ER_FPARSER_ERROR_IN_PARAMETER + eng "Error while parsing parameter '%-.64s' (line: '%-.64s')" + ger "Fehler beim Parsen des Parameters '%-.64s' (Zeile: '%-.64s')" + rus "ïÛÉÂËÁ ÐÒÉ ÒÁÓÐÏÚÎÁ×ÁÎÉÉ ÐÁÒÁÍÅÔÒÁ '%-.64s' (ÓÔÒÏËÁ: '%-.64s')" + ukr "ðÏÍÉÌËÁ × ÒÏÓЦÚÎÁ×ÁÎΦ ÐÁÒÁÍÅÔÒÕ '%-.64s' (ÒÑÄÏË: '%-.64s')" +ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER + eng "Unexpected end of file while skipping unknown parameter '%-.64s'" + ger "Unerwartetes Dateiende beim Überspringen des unbekannten Parameters '%-.64s'" + rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ ÐÒÉ ÐÒÏÐÕÓËÅ ÎÅÉÚ×ÅÓÔÎÏÇÏ ÐÁÒÁÍÅÔÒÁ '%-.64s'" + ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ÓÐÒϦ ÐÒÏÍÉÎÕÔÉ ÎÅצÄÏÍÉÊ ÐÁÒÁÍÅÔÒ '%-.64s'" +ER_VIEW_NO_EXPLAIN + eng "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" + ger "EXPLAIN/SHOW kann nicht verlangt werden. 
Rechte für zugrunde liegende Tabelle fehlen" + rus "EXPLAIN/SHOW ÎÅ ÍÏÖÅÔ ÂÙÔØ ×ÙÐÏÌÎÅÎÎÏ; ÎÅÄÏÓÔÁÔÏÞÎÏ ÐÒÁ× ÎÁ ÔÁËÂÌÉÃÙ ÚÁÐÒÏÓÁ" + ukr "EXPLAIN/SHOW ÎÅ ÍÏÖÅ ÂÕÔÉ ×¦ËÏÎÁÎÏ; ÎÅÍÁ¤ ÐÒÁ× ÎÁ ÔÉÂÌÉæ ÚÁÐÉÔÕ" +ER_FRM_UNKNOWN_TYPE + eng "File '%-.64s' has unknown type '%-.64s' in its header" + ger "Datei '%-.64s' hat unbekannten Typ '%-.64s' im Header" + rus "æÁÊÌ '%-.64s' ÓÏÄÅÒÖÉÔ ÎÅÉÚ×ÅÓÔÎÙÊ ÔÉÐ '%-.64s' × ÚÁÇÏÌÏ×ËÅ" + ukr "æÁÊÌ '%-.64s' ÍÁ¤ ÎÅצÄÏÍÉÊ ÔÉÐ '%-.64s' Õ ÚÁÇÏÌÏ×ËÕ" +ER_WRONG_OBJECT + eng "'%-.64s.%-.64s' is not %s" + ger "'%-.64s.%-.64s' ist nicht %s" + rus "'%-.64s.%-.64s' - ÎÅ %s" + ukr "'%-.64s.%-.64s' ÎÅ ¤ %s" +ER_NONUPDATEABLE_COLUMN + eng "Column '%-.64s' is not updatable" + ger "Feld '%-.64s' ist nicht aktualisierbar" + rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÏÂÎÏ×ÌÑÅÍÙÊ" + ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÚÍÉÎÅÎÉÊ" +ER_VIEW_SELECT_DERIVED + eng "View's SELECT contains a subquery in the FROM clause" + ger "SELECT der View enthält eine Subquery in der FROM-Klausel" + rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÏÄÚÁÐÒÏÓ × ËÏÎÓÔÒÕËÃÉÉ FROM" + ukr "View SELECT ÍÁ¤ ЦÄÚÁÐÉÔ Õ ËÏÎÓÔÒÕËæ§ FROM" +ER_VIEW_SELECT_CLAUSE + eng "View's SELECT contains a '%s' clause" + ger "SELECT der View enthält eine '%s'-Klausel" + rus "View SELECT ÓÏÄÅÒÖÉÔ ËÏÎÓÔÒÕËÃÉÀ '%s'" + ukr "View SELECT ÍÁ¤ ËÏÎÓÔÒÕËæÀ '%s'" +ER_VIEW_SELECT_VARIABLE + eng "View's SELECT contains a variable or parameter" + ger "SELECT der View enthält eine Variable oder einen Parameter" + rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÅÒÅÍÅÎÎÕÀ ÉÌÉ ÐÁÒÁÍÅÔÒ" + ukr "View SELECT ÍÁ¤ ÚÍÉÎÎÕ ÁÂÏ ÐÁÒÁÍÅÔÅÒ" +ER_VIEW_SELECT_TMPTABLE + eng "View's SELECT refers to a temporary table '%-.64s'" + ger "SELECT der View verweist auf eine temporäre Tabelle '%-.64s'" + rus "View SELECT ÓÏÄÅÒÖÉÔ ÓÓÙÌËÕ ÎÁ ×ÒÅÍÅÎÎÕÀ ÔÁÂÌÉÃÕ '%-.64s'" + ukr "View SELECT ×ÉËÏÒÉÓÔÏ×Õ¤ ÔÉÍÞÁÓÏ×Õ ÔÁÂÌÉÃÀ '%-.64s'" +ER_VIEW_WRONG_LIST + eng "View's SELECT and view's field list have different column counts" + ger "SELECT- und Feldliste der Views haben unterschiedliche Anzahlen von Spalten" + rus 
"View SELECT É ÓÐÉÓÏË ÐÏÌÅÊ view ÉÍÅÀÔ ÒÁÚÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ×" + ukr "View SELECT ¦ ÐÅÒÅÌ¦Ë ÓÔÏ×ÂÃ¦× view ÍÁÀÔØ Ò¦ÚÎÕ Ë¦ÌØË¦ÓÔØ ÓËÏ×Âæ×" +ER_WARN_VIEW_MERGE + eng "View merge algorithm can't be used here for now (assumed undefined algorithm)" + ger "View-Merge-Algorithmus kann hier momentan nicht verwendet werden (undefinierter Algorithmus wird angenommen)" + rus "áÌÇÏÒÉÔÍ ÓÌÉÑÎÉÑ view ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ÓÅÊÞÁÓ (ÁÌÇÏÒÉÔÍ ÂÕÄÅÔ ÎÅÏÐÅÒÅÄÅÌÅÎÎÙÍ)" + ukr "áÌÇÏÒÉÔÍ ÚÌÉ×ÁÎÎÑ view ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ ÚÁÒÁÚ (ÁÌÇÏÒÉÔÍ ÂÕÄÅ ÎÅ×ÉÚÎÁÞÅÎÉÊ)" +ER_WARN_VIEW_WITHOUT_KEY + eng "View being updated does not have complete key of underlying table in it" + ger "Die aktualisierte View enthält nicht den vollständigen Schlüssel der zugrunde liegenden Tabelle" + rus "ïÂÎÏ×ÌÑÅÍÙÊ view ÎÅ ÓÏÄÅÒÖÉÔ ËÌÀÞÁ ÉÓÐÏÌØÚÏ×ÁÎÎÙÈ(ÏÊ) × ÎÅÍ ÔÁÂÌÉÃ(Ù)" + ukr "View, ÝÏ ÏÎÏ×ÌÀÅÔØÓÑ, ΊͦÓÔÉÔØ ÐÏ×ÎÏÇÏ ËÌÀÞÁ ÔÁÂÌÉæ(Ø), ÝÏ ×ÉËÏÒ¦ÓÔÁÎÁ × ÎØÀÏÍÕ" +ER_VIEW_INVALID + eng "View '%-.64s.%-.64s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them" +ER_SP_NO_DROP_SP + eng "Can't drop or alter a %s from within another stored routine" + ger "Kann eine %s nicht von innerhalb einer anderen gespeicherten Routine löschen oder ändern" +ER_SP_GOTO_IN_HNDLR + eng "GOTO is not allowed in a stored procedure handler" + ger "GOTO ist im Handler einer gespeicherten Prozedur nicht erlaubt" +ER_TRG_ALREADY_EXISTS + eng "Trigger already exists" + ger "Trigger existiert bereits" +ER_TRG_DOES_NOT_EXIST + eng "Trigger does not exist" + ger "Trigger existiert nicht" +ER_TRG_ON_VIEW_OR_TEMP_TABLE + eng "Trigger's '%-.64s' is view or temporary table" + ger "'%-.64s' des Triggers ist View oder temporäre Tabelle" +ER_TRG_CANT_CHANGE_ROW + eng "Updating of %s row is not allowed in %strigger" + ger "Aktualisieren einer %s-Zeile ist in einem %s-Trigger nicht erlaubt" +ER_TRG_NO_SUCH_ROW_IN_TRG + eng "There is no %s row in %s trigger" + ger "Es gibt keine %s-Zeile im %s-Trigger" 
+ER_NO_DEFAULT_FOR_FIELD + eng "Field '%-.64s' doesn't have a default value" + ger "Feld '%-.64s' hat keinen Vorgabewert" +ER_DIVISION_BY_ZERO 22012 + eng "Division by 0" + ger "Division durch 0" +ER_TRUNCATED_WRONG_VALUE_FOR_FIELD + eng "Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld" + ger "Falscher %-.32s-Wert: '%-.128s' für Feld '%.64s' in Zeile %ld" +ER_ILLEGAL_VALUE_FOR_TYPE 22007 + eng "Illegal %s '%-.64s' value found during parsing" + ger "Nicht zulässiger %s-Wert '%-.64s' beim Parsen gefunden" +ER_VIEW_NONUPD_CHECK + eng "CHECK OPTION on non-updatable view '%-.64s.%-.64s'" + ger "CHECK OPTION auf nicht-aktualisierbarem View '%-.64s.%-.64s'" + rus "CHECK OPTION ÄÌÑ ÎÅÏÂÎÏ×ÌÑÅÍÏÇÏ VIEW '%-.64s.%-.64s'" + ukr "CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÝÏ ÎÅ ÍÏÖÅ ÂÕÔÉ ÏÎÏ×ÌÅÎÎÉÍ" +ER_VIEW_CHECK_FAILED + eng "CHECK OPTION failed '%-.64s.%-.64s'" + ger "CHECK OPTION fehlgeschlagen: '%-.64s.%-.64s'" + rus "ÐÒÏ×ÅÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÐÒÏ×ÁÌÉÌÁÓØ" + ukr "ðÅÒÅצÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÎÅ ÐÒÏÊÛÌÁ" +ER_PROCACCESS_DENIED_ERROR 42000 + eng "%-.16s command denied to user '%-.32s'@'%-.64s' for routine '%-.64s'" + ger "Befehl %-.16s nicht zulässig für Benutzer '%-.32s'@'%-.64s' in Routine '%-.64s'" +ER_RELAY_LOG_FAIL + eng "Failed purging old relay logs: %s" + ger "Bereinigen alter Relais-Logs fehlgeschlagen: %s" +ER_PASSWD_LENGTH + eng "Password hash should be a %d-digit hexadecimal number" + ger "Passwort-Hash sollte eine Hexdaezimalzahl mit %d Stellen sein" +ER_UNKNOWN_TARGET_BINLOG + eng "Target log not found in binlog index" + ger "Ziel-Log im Binlog-Index nicht gefunden" +ER_IO_ERR_LOG_INDEX_READ + eng "I/O error reading log index file" + ger "Fehler beim Lesen der Log-Index-Datei" +ER_BINLOG_PURGE_PROHIBITED + eng "Server configuration does not permit binlog purge" + ger "Server-Konfiguration erlaubt keine Binlog-Bereinigung" +ER_FSEEK_FAIL + eng "Failed on fseek()" + ger "fseek() fehlgeschlagen" 
+ER_BINLOG_PURGE_FATAL_ERR + eng "Fatal error during log purge" + ger "Schwerwiegender Fehler bei der Log-Bereinigung" +ER_LOG_IN_USE + eng "A purgeable log is in use, will not purge" + ger "Ein zu bereinigendes Log wird gerade benutzt, daher keine Bereinigung" +ER_LOG_PURGE_UNKNOWN_ERR + eng "Unknown error during log purge" + ger "Unbekannter Fehler bei Log-Bereinigung" +ER_RELAY_LOG_INIT + eng "Failed initializing relay log position: %s" + ger "Initialisierung der Relais-Log-Position fehlgeschlagen: %s" +ER_NO_BINARY_LOGGING + eng "You are not using binary logging" + ger "Sie verwenden keine Binärlogs" +ER_RESERVED_SYNTAX + eng "The '%-.64s' syntax is reserved for purposes internal to the MySQL server" + ger "Die Schreibweise '%-.64s' ist für interne Zwecke des MySQL-Servers reserviert" +ER_WSAS_FAILED + eng "WSAStartup Failed" + ger "WSAStartup fehlgeschlagen" +ER_DIFF_GROUPS_PROC + eng "Can't handle procedures with different groups yet" + ger "Kann Prozeduren mit unterschiedlichen Gruppen noch nicht verarbeiten" +ER_NO_GROUP_FOR_PROC + eng "Select must have a group with this procedure" + ger "SELECT muss bei dieser Prozedur ein GROUP BY haben" +ER_ORDER_WITH_PROC + eng "Can't use ORDER clause with this procedure" + ger "Kann bei dieser Prozedur keine ORDER-BY-Klausel verwenden" +ER_LOGGING_PROHIBIT_CHANGING_OF + eng "Binary logging and replication forbid changing the global server %s" + ger "Binärlogs und Replikation verhindern Wechsel des globalen Servers %s" +ER_NO_FILE_MAPPING + eng "Can't map file: %-.200s, errno: %d" + ger "Kann Datei nicht abbilden: %-.200s, Fehler: %d" +ER_WRONG_MAGIC + eng "Wrong magic in %-.64s" + ger "Falsche magische Zahlen in %-.64s" +ER_PS_MANY_PARAM + eng "Prepared statement contains too many placeholders" + ger "Vorbereitete Anweisung enthält zu viele Platzhalter" +ER_KEY_PART_0 + eng "Key part '%-.64s' length cannot be 0" + ger "Länge des Schlüsselteils '%-.64s' kann nicht 0 sein" +ER_VIEW_CHECKSUM + eng "View text checksum 
failed" + ger "View-Text-Prüfsumme fehlgeschlagen" + rus "ðÒÏ×ÅÒËÁ ËÏÎÔÒÏÌØÎÏÊ ÓÕÍÍÙ ÔÅËÓÔÁ VIEW ÐÒÏ×ÁÌÉÌÁÓØ" + ukr "ðÅÒÅצÒËÁ ËÏÎÔÒÏÌØÎϧ ÓÕÍÉ ÔÅËÓÔÕ VIEW ÎÅ ÐÒÏÊÛÌÁ" +ER_VIEW_MULTIUPDATE + eng "Can not modify more than one base table through a join view '%-.64s.%-.64s'" + ger "Kann nicht mehr als eine Basistabelle über Join-View '%-.64s.%-.64s' ändern" + rus "îÅÌØÚÑ ÉÚÍÅÎÉÔØ ÂÏÌØÛÅ ÞÅÍ ÏÄÎÕ ÂÁÚÏ×ÕÀ ÔÁÂÌÉÃÕ ÉÓÐÏÌØÚÕÑ ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s'" + ukr "îÅÍÏÖÌÉ×Ï ÏÎÏ×ÉÔÉ Â¦ÌØÛ ÎÉÖ ÏÄÎÕ ÂÁÚÏ×Õ ÔÁÂÌÉÃÀ ×ÙËÏÒÉÓÔÏ×ÕÀÞÉ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔ¦ÔØ ÄÅË¦ÌØËÁ ÔÁÂÌÉÃØ" +ER_VIEW_NO_INSERT_FIELD_LIST + eng "Can not insert into join view '%-.64s.%-.64s' without fields list" + ger "Kann nicht ohne Feldliste in Join-View '%-.64s.%-.64s' einfügen" + rus "îÅÌØÚÑ ×ÓÔÁ×ÌÑÔØ ÚÁÐÉÓÉ × ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s' ÂÅÚ ÓÐÉÓËÁ ÐÏÌÅÊ" + ukr "îÅÍÏÖÌÉ×Ï ÕÓÔÁ×ÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅË¦ÌØËÁ ÔÁÂÌÉÃØ, ÂÅÚ ÓÐÉÓËÕ ÓÔÏ×Âæ×" +ER_VIEW_DELETE_MERGE_VIEW + eng "Can not delete from join view '%-.64s.%-.64s'" + ger "Kann nicht aus Join-View '%-.64s.%-.64s' löschen" + rus "îÅÌØÚÑ ÕÄÁÌÑÔØ ÉÚ ÍÎÏÇÏÔÁÂÌÉÞÎÏÇÏ VIEW '%-.64s.%-.64s'" + ukr "îÅÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅË¦ÌØËÁ ÔÁÂÌÉÃØ" +ER_CANNOT_USER + eng "Operation %s failed for %.256s" + ger "Operation %s schlug fehl für %.256s" + norwegian-ny "Operation %s failed for '%.256s'" +ER_XAER_NOTA XAE04 + eng "XAER_NOTA: Unknown XID" + ger "XAER_NOTA: Unbekannte XID" +ER_XAER_INVAL XAE05 + eng "XAER_INVAL: Invalid arguments (or unsupported command)" + ger "XAER_INVAL: Ungültige Argumente (oder nicht unterstützter Befehl)" +ER_XAER_RMFAIL XAE07 + eng "XAER_RMFAIL: The command cannot be executed when global transaction is in the %.64s state" + ger "XAER_RMFAIL: DEr Befehl kann nicht ausgeführt werden, wenn die globale Transaktion im Zustand %.64s ist" + rus "XAER_RMFAIL: ÜÔÕ ËÏÍÁÎÄÕ ÎÅÌØÚÑ ×ÙÐÏÌÎÑÔØ ËÏÇÄÁ ÇÌÏÂÁÌØÎÁÑ ÔÒÁÎÚÁËÃÉÑ ÎÁÈÏÄÉÔÓÑ × ÓÏÓÔÏÑÎÉÉ '%.64s'" +ER_XAER_OUTSIDE XAE09 + eng 
"XAER_OUTSIDE: Some work is done outside global transaction" + ger "XAER_OUTSIDE: Einige Arbeiten werden außerhalb der globalen Transaktion verrichtet" +ER_XAER_RMERR XAE03 + eng "XAER_RMERR: Fatal error occurred in the transaction branch - check your data for consistency" + ger "XAER_RMERR: Schwerwiegender Fehler im Transaktionszweig - prüfen Sie Ihre Daten auf Konsistenz" +ER_XA_RBROLLBACK XA100 + eng "XA_RBROLLBACK: Transaction branch was rolled back" + ger "XA_RBROLLBACK: Transaktionszweig wurde zurückgerollt" +ER_NONEXISTING_PROC_GRANT 42000 + eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on routine '%-.64s'" + ger "Es gibt diese Berechtigung für Benutzer '%-.32s' auf Host '%-.64s' für Routine '%-.64s' nicht" +ER_PROC_AUTO_GRANT_FAIL + eng "Failed to grant EXECUTE and ALTER ROUTINE privileges" + ger "Gewährung von EXECUTE- und ALTER-ROUTINE-Rechten fehlgeschlagen" +ER_PROC_AUTO_REVOKE_FAIL + eng "Failed to revoke all privileges to dropped routine" + ger "Rücknahme aller Rechte für die gelöschte Routine fehlgeschlagen" +ER_DATA_TOO_LONG 22001 + eng "Data too long for column '%s' at row %ld" + ger "Daten zu lang für Feld '%s' in Zeile %ld" +ER_SP_BAD_SQLSTATE 42000 + eng "Bad SQLSTATE: '%s'" + ger "Ungültiger SQLSTATE: '%s'" +ER_STARTUP + eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s" + ger "%s: bereit für Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d %s" +ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR + eng "Can't load value from file with fixed size rows to variable" + ger "Kann Wert aus Datei mit Zeilen fester Größe nicht in Variable laden" +ER_CANT_CREATE_USER_WITH_GRANT 42000 + eng "You are not allowed to create a user with GRANT" + ger "Sie dürfen keinen Benutzer mit GRANT anlegen" +ER_WRONG_VALUE_FOR_TYPE + eng "Incorrect %-.32s value: '%-.128s' for function %-.32s" + ger "Falscher %-.32s-Wert: '%-.128s' für Funktion %-.32s" +ER_TABLE_DEF_CHANGED + eng "Table definition has changed, please retry transaction" + 
ger "Tabellendefinition wurde geändert, bitte starten Sie die Transaktion neu" +ER_SP_DUP_HANDLER 42000 + eng "Duplicate handler declared in the same block" + ger "Doppelter Handler im selben Block deklariert" +ER_SP_NOT_VAR_ARG 42000 + eng "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger" + ger "OUT- oder INOUT-Argument %d für Routine %s ist keine Variable" +ER_SP_NO_RETSET 0A000 + eng "Not allowed to return a result set from a %s" + ger "Rückgabe einer Ergebnismenge aus einer %s ist nicht erlaubt" +ER_CANT_CREATE_GEOMETRY_OBJECT 22003 + eng "Cannot get geometry object from data you send to the GEOMETRY field" + ger "Kann kein Geometrieobjekt aus den Daten machen, die Sie dem GEOMETRY-Feld übergeben haben" +ER_FAILED_ROUTINE_BREAK_BINLOG + eng "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes" + ger "Eine Routine, die weder NO SQL noch READS SQL DATA in der Deklaration hat, schlug fehl und Binärlogging ist aktiv. 
Wenn Nicht-Transaktions-Tabellen aktualisiert wurden, enthält das Binärlog ihre Änderungen nicht" +ER_BINLOG_UNSAFE_ROUTINE + eng "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)" + ger "Diese Routine hat weder DETERMINISTIC, NO SQL noch READS SQL DATA in der Deklaration und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)" +ER_BINLOG_CREATE_ROUTINE_NEED_SUPER + eng "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)" + ger "Sie haben keine SUPER-Berechtigung und Binärlogging ist aktiv (*vielleicht* sollten Sie die weniger sichere Variable log_bin_trust_routine_creators verwenden)" +ER_EXEC_STMT_WITH_OPEN_CURSOR + eng "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it." + ger "Sie können keine vorbereitete Anweisung ausführen, die mit einem geöffneten Cursor verknüpft ist. Setzen Sie die Anweisung zurück, um sie neu auszuführen" +ER_STMT_HAS_NO_OPEN_CURSOR + eng "The statement (%lu) has no open cursor." + ger "Die Anweisung (%lu) hat keinen geöffneten Cursor" +ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG + eng "Explicit or implicit commit is not allowed in stored function or trigger." + ger "Explizites oder implizites Commit ist in gespeicherten Funktionen und in Triggern nicht erlaubt" +ER_NO_DEFAULT_FOR_VIEW_FIELD + eng "Field of view '%-.64s.%-.64s' underlying table doesn't have a default value" + ger "Ein Feld der dem View '%-.64s.%-.64s' zugrundeliegenden Tabelle hat keinen Vorgabewert" +ER_SP_NO_RECURSION + eng "Recursive stored functions and triggers are not allowed." 
+ ger "Rekursive gespeicherte Routinen und Triggers sind nicht erlaubt" +ER_TOO_BIG_SCALE 42000 S1009 + eng "Too big scale %d specified for column '%-.64s'. Maximum is %d." + ger "Zu großer Skalierungsfaktor %d für Feld '%-.64s' angegeben. Maximum ist %d" +ER_TOO_BIG_PRECISION 42000 S1009 + eng "Too big precision %d specified for column '%-.64s'. Maximum is %d." + ger "Zu große Genauigkeit %d für Feld '%-.64s' angegeben. Maximum ist %d" +ER_M_BIGGER_THAN_D 42000 S1009 + eng "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.64s')." + ger "Für FLOAT(M,D), DOUBLE(M,D) oder DECIMAL(M,D) muss M >= D sein (Feld '%-.64s')" +ER_WRONG_LOCK_OF_SYSTEM_TABLE + eng "You can't combine write-locking of system '%-.64s.%-.64s' table with other tables" + ger "Sie können Schreibsperren auf der Systemtabelle '%-.64s.%-.64s' nicht mit anderen Tabellen kombinieren" +ER_CONNECT_TO_FOREIGN_DATA_SOURCE + eng "Unable to connect to foreign data source: %.64s" + ger "Kann nicht mit Fremddatenquelle verbinden: %.64s" +ER_QUERY_ON_FOREIGN_DATA_SOURCE + eng "There was a problem processing the query on the foreign data source. Data source error: %-.64s" + ger "Bei der Verarbeitung der Abfrage ist in der Fremddatenquelle ein Problem aufgetreten. Datenquellenfehlermeldung: %-.64s" +ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST + eng "The foreign data source you are trying to reference does not exist. Data source error: %-.64s" + ger "Die Fremddatenquelle, auf die Sie zugreifen wollen, existiert nicht. Datenquellenfehlermeldung: %-.64s" +ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE + eng "Can't create federated table. The data source connection string '%-.64s' is not in the correct format" + ger "Kann föderierte Tabelle nicht erzeugen. 
Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format" +ER_FOREIGN_DATA_STRING_INVALID + eng "The data source connection string '%-.64s' is not in the correct format" + ger "Der Datenquellen-Verbindungsstring '%-.64s' hat kein korrektes Format" +ER_CANT_CREATE_FEDERATED_TABLE + eng "Can't create federated table. Foreign data src error: %-.64s" + ger "Kann föderierte Tabelle nicht erzeugen. Fremddatenquellenfehlermeldung: %-.64s" +ER_TRG_IN_WRONG_SCHEMA + eng "Trigger in wrong schema" + ger "Trigger im falschen Schema" +ER_STACK_OVERRUN_NEED_MORE + eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld -O thread_stack=#' to specify a bigger stack." + ger "Thread-Stack-Überlauf: %ld Bytes eines %ld-Byte-Stacks in Verwendung, und %ld Bytes benötigt. Verwenden Sie 'mysqld -O thread_stack=#', um einen größeren Stack anzugeben" +ER_TOO_LONG_BODY 42000 S1009 + eng "Routine body for '%-.100s' is too long" + ger "Routinen-Body für '%-.100s' ist zu lang" +ER_WARN_CANT_DROP_DEFAULT_KEYCACHE + eng "Cannot drop default keycache" + ger "Der vorgabemäßige Schlüssel-Cache kann nicht gelöscht werden" +ER_TOO_BIG_DISPLAYWIDTH 42000 S1009 + eng "Display width out of range for column '%-.64s' (max = %d)" + ger "Anzeigebreite außerhalb des zulässigen Bereichs für Spalte '%-.64s' (Maximum: %d)" +ER_XAER_DUPID XAE08 + eng "XAER_DUPID: The XID already exists" + ger "XAER_DUPID: Die XID existiert bereits" +ER_DATETIME_FUNCTION_OVERFLOW 22008 + eng "Datetime function: %-.32s field overflow" + ger "Datetime-Funktion: %-.32s Feldüberlauf" +ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG + eng "Can't update table '%-.64s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger." 
+ ger "Kann Tabelle '%-.64s' in gespeicherter Funktion oder Trigger nicht aktualisieren, weil sie bereits von der Anweisung verwendet wird, die diese gespeicherte Funktion oder den Trigger aufrief" +ER_VIEW_PREVENT_UPDATE + eng "The definition of table '%-.64s' prevents operation %.64s on table '%-.64s'." + ger "Die Definition der Tabelle '%-.64s' verhindert die Operation %.64s auf Tabelle '%-.64s'" +ER_PS_NO_RECURSION + eng "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner" + ger "Die vorbereitete Anweisung enthält einen Aufruf einer gespeicherten Routine, die auf eben dieselbe Anweisung verweist. Es ist nicht erlaubt, eine vorbereitete Anweisung in solch rekursiver Weise auszuführen" +ER_SP_CANT_SET_AUTOCOMMIT + eng "Not allowed to set autocommit from a stored function or trigger" + ger "Es ist nicht erlaubt, innerhalb einer gespeicherten Funktion oder eines Triggers AUTOCOMMIT zu setzen" +ER_MALFORMED_DEFINER + eng "Definer is not fully qualified" + ger "Definierer des View ist nicht vollständig spezifiziert" +ER_VIEW_FRM_NO_USER + eng "View '%-.64s'.'%-.64s' has no definer information (old table format). Current user is used as definer. Please recreate the view!" + ger "View '%-.64s'.'%-.64s' hat keine Definierer-Information (altes Tabellenformat). Der aktuelle Benutzer wird als Definierer verwendet. Bitte erstellen Sie den View neu" +ER_VIEW_OTHER_USER + eng "You need the SUPER privilege for creation view with '%-.64s'@'%-.64s' definer" + ger "Sie brauchen die SUPER-Berechtigung, um einen View mit dem Definierer '%-.64s'@'%-.64s' zu erzeugen" +ER_NO_SUCH_USER + eng "There is no '%-.64s'@'%-.64s' registered" + ger "'%-.64s'@'%-.64s' ist nicht registriert" +ER_FORBID_SCHEMA_CHANGE + eng "Changing schema from '%-.64s' to '%-.64s' is not allowed." 
+ ger "Wechsel des Schemas von '%-.64s' auf '%-.64s' ist nicht erlaubt" +ER_ROW_IS_REFERENCED_2 23000 + eng "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)" + ger "Kann Eltern-Zeile nicht löschen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)" +ER_NO_REFERENCED_ROW_2 23000 + eng "Cannot add or update a child row: a foreign key constraint fails (%.192s)" + ger "Kann Kind-Zeile nicht hinzufügen oder aktualisieren: eine Fremdschlüsselbedingung schlägt fehl (%.192s)" +ER_SP_BAD_VAR_SHADOW 42000 + eng "Variable '%-.64s' must be quoted with `...`, or renamed" + ger "Variable '%-.64s' muss mit `...` geschützt oder aber umbenannt werden" +ER_TRG_NO_DEFINER + eng "No definer attribute for trigger '%-.64s'.'%-.64s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger." + ger "Kein Definierer-Attribut für Trigger '%-.64s'.'%-.64s'. Der Trigger wird mit der Autorisierung des Aufrufers aktiviert, der möglicherweise keine zureichenden Berechtigungen hat. Bitte legen Sie den Trigger neu an." +ER_OLD_FILE_FORMAT + eng "'%-.64s' has an old format, you should re-create the '%s' object(s)" + ger "'%-.64s' hat altes Format, Sie sollten die '%s'-Objekt(e) neu erzeugen" +ER_SP_RECURSION_LIMIT + eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.64s" + ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.64s überschritten" +ER_SP_PROC_TABLE_CORRUPT + eng "Failed to load routine %s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)" +ER_SP_WRONG_NAME 42000 + eng "Incorrect routine name '%-.64s'" +ER_TABLE_NEEDS_UPGRADE + eng "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" to fix it!" 
+ER_SP_NO_AGGREGATE 42000 + eng "AGGREGATE is not supported for stored functions" +ER_MAX_PREPARED_STMT_COUNT_REACHED 42000 + eng "Can't create more than max_prepared_stmt_count statements (current value: %lu)" +ER_VIEW_RECURSIVE + eng "`%-.64s`.`%-.64s` contains view recursion" +ER_NON_GROUPING_FIELD_USED 42000 + eng "non-grouping field '%-.64s' is used in %-.64s clause" +ER_TABLE_CANT_HANDLE_SPKEYS + eng "The used table type doesn't support SPATIAL indexes" +ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA + eng "Triggers can not be created on system tables" +ER_REMOVED_SPACES + eng "Leading spaces are removed from name '%s'" +ER_AUTOINC_READ_FAILED + eng "Failed to read auto-increment value from storage engine" +ER_USERNAME + eng "user name" +ER_HOSTNAME + eng "host name" +ER_WRONG_STRING_LENGTH + eng "String '%-.70s' is too long for %s (should be no longer than %d)" +ER_NON_INSERTABLE_TABLE + eng "The target table %-.100s of the %s is not insertable-into" + diff --git a/sql/share/estonian/errmsg.txt b/sql/share/estonian/errmsg.txt deleted file mode 100644 index 8487e29d89d..00000000000 --- a/sql/share/estonian/errmsg.txt +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Esialgne tõlge: Tõnu Samuel (tonu@spam.ee) - Parandanud ja täiendanud: Indrek Siitan (tfr@mysql.com) -*/ - -character-set=latin7 - -"hashchk", -"isamchk", -"EI", -"JAH", -"Ei suuda luua faili '%-.64s' (veakood: %d)", -"Ei suuda luua tabelit '%-.64s' (veakood: %d)", -"Ei suuda luua andmebaasi '%-.64s' (veakood: %d)", -"Ei suuda luua andmebaasi '%-.64s': andmebaas juba eksisteerib", -"Ei suuda kustutada andmebaasi '%-.64s': andmebaasi ei eksisteeri", -"Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.64s', veakood: %d)", -"Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.64s', veakood: %d)", -"Viga '%-.64s' kustutamisel (veakood: %d)", -"Ei suuda lugeda kirjet süsteemsest tabelist", -"Ei suuda lugeda '%-.64s' olekut (veakood: %d)", -"Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)", -"Ei suuda lukustada faili (veakood: %d)", -"Ei suuda avada faili '%-.64s' (veakood: %d)", -"Ei suuda leida faili '%-.64s' (veakood: %d)", -"Ei suuda lugeda kataloogi '%-.64s' (veakood: %d)", -"Ei suuda siseneda kataloogi '%-.64s' (veakood: %d)", -"Kirje tabelis '%-.64s' on muutunud viimasest lugemisest saadik", -"Ketas täis (%s). 
Ootame kuni tekib vaba ruumi...", -"Ei saa kirjutada, korduv võti tabelis '%-.64s'", -"Viga faili '%-.64s' sulgemisel (veakood: %d)", -"Viga faili '%-.64s' lugemisel (veakood: %d)", -"Viga faili '%-.64s' ümbernimetamisel '%-.64s'-ks (veakood: %d)", -"Viga faili '%-.64s' kirjutamisel (veakood: %d)", -"'%-.64s' on lukustatud muudatuste vastu", -"Sorteerimine katkestatud", -"Vaade '%-.64s' ei eksisteeri '%-.64s' jaoks", -"Tabeli handler tagastas vea %d", -"Tabeli '%-.64s' handler ei toeta antud operatsiooni", -"Ei suuda leida kirjet '%-.64s'-s", -"Vigane informatsioon failis '%-.64s'", -"Tabeli '%-.64s' võtmefail on vigane; proovi seda parandada", -"Tabeli '%-.64s' võtmefail on aegunud; paranda see!", -"Tabel '%-.64s' on ainult lugemiseks", -"Mälu sai otsa. Proovi MySQL uuesti käivitada (puudu jäi %d baiti)", -"Mälu sai sorteerimisel otsa. Suurenda MySQL-i sorteerimispuhvrit", -"Ootamatu faililõpumärgend faili '%-.64s' lugemisel (veakood: %d)", -"Liiga palju samaaegseid ühendusi", -"Mälu sai otsa. 
Võimalik, et aitab swap-i lisamine või käsu 'ulimit' abil MySQL-le rohkema mälu kasutamise lubamine", -"Ei suuda lahendada IP aadressi masina nimeks", -"Väär handshake", -"Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' andmebaasile '%-.64s'", -"Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)", -"Andmebaasi ei ole valitud", -"Tundmatu käsk", -"Tulp '%-.64s' ei saa omada nullväärtust", -"Tundmatu andmebaas '%-.64s'", -"Tabel '%-.64s' juba eksisteerib", -"Tundmatu tabel '%-.64s'", -"Väli '%-.64s' %-.64s-s ei ole ühene", -"Serveri seiskamine käib", -"Tundmatu tulp '%-.64s' '%-.64s'-s", -"'%-.64s' puudub GROUP BY klauslis", -"Ei saa grupeerida '%-.64s' järgi", -"Lauses on korraga nii tulbad kui summeerimisfunktsioonid", -"Tulpade arv erineb väärtuste arvust", -"Identifikaatori '%-.100s' nimi on liiga pikk", -"Kattuv tulba nimi '%-.64s'", -"Kattuv võtme nimi '%-.64s'", -"Kattuv väärtus '%-.64s' võtmele %d", -"Vigane tulba kirjeldus tulbale '%-.64s'", -"%s '%-.80s' ligidal real %d", -"Tühi päring", -"Ei ole unikaalne tabel/alias '%-.64s'", -"Vigane vaikeväärtus '%-.64s' jaoks", -"Mitut primaarset võtit ei saa olla", -"Liiga palju võtmeid. Maksimaalselt võib olla %d võtit", -"Võti koosneb liiga paljudest osadest. Maksimaalselt võib olla %d osa", -"Võti on liiga pikk. Maksimaalne võtmepikkus on %d", -"Võtme tulp '%-.64s' puudub tabelis", -"BLOB-tüüpi tulpa '%-.64s' ei saa kasutada võtmena", -"Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB väljatüüpi", -"Vigane tabelikirjeldus; Tabelis tohib olla üks auto_increment tüüpi tulp ning see peab olema defineeritud võtmena", -"%s: ootab ühendusi", -"%s: MySQL lõpetas\n", -"%s: sain signaali %d. Lõpetan!\n", -"%s: Lõpp\n", -"%s: Sulgen jõuga lõime %ld kasutaja: '%-.32s'\n", -"Ei suuda luua IP socketit", -"Tabelil '%-.64s' puuduvad võtmed. Loo tabel uuesti", -"Väljade eraldaja erineb oodatust. 
Tutvu kasutajajuhendiga", -"BLOB-tüüpi väljade olemasolul ei saa kasutada fikseeritud väljapikkust. Vajalik 'fields terminated by' määrang.", -"Fail '%-.64s' peab asuma andmebaasi kataloogis või olema kõigile loetav", -"Fail '%-.80s' juba eksisteerib", -"Kirjeid: %ld Kustutatud: %ld Vahele jäetud: %ld Hoiatusi: %ld", -"Kirjeid: %ld Kattuvaid: %ld", -"Vigane võtme osa. Kasutatud võtmeosa ei ole string tüüpi, määratud pikkus on pikem kui võtmeosa või tabelihandler ei toeta seda tüüpi võtmeid", -"ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil", -"Ei suuda kustutada '%-.64s'. Kontrolli kas tulp/võti eksisteerib", -"Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Tundmatu lõim: %lu", -"Ei ole lõime %lu omanik", -"Ühtegi tabelit pole kasutusel", -"Liiga palju string tulbale %-.64s tüübile SET", -"Ei suuda luua unikaalset logifaili nime %-.64s.(1-999)\n", -"Tabel '%-.64s' on lukustatud READ lukuga ning ei ole muudetav", -"Tabel '%-.64s' ei ole lukustatud käsuga LOCK TABLES", -"BLOB-tüüpi tulp '%-.64s' ei saa omada vaikeväärtust", -"Vigane andmebaasi nimi '%-.100s'", -"Vigane tabeli nimi '%-.100s'", -"SELECT lause peab läbi vaatama suure hulga kirjeid ja võtaks tõenäoliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada käsku SET SQL_BIG_SELECTS=1", -"Tundmatu viga", -"Tundmatu protseduur '%-.64s'", -"Vale parameetrite hulk protseduurile '%-.64s'", -"Vigased parameetrid protseduurile '%-.64s'", -"Tundmatu tabel '%-.64s' %-.32s-s", -"Tulp '%-.64s' on määratletud topelt", -"Vigane grupeerimisfunktsiooni kasutus", -"Tabel '%-.64s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis", -"Tabelis peab olema vähemalt üks tulp", -"Tabel '%-.64s' on täis", -"Vigane kooditabel '%-.64s'", -"Liiga palju tabeleid. MySQL suudab JOINiga ühendada kuni %d tabelit", -"Liiga palju tulpasid", -"Liiga pikk kirje. 
Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %d. Muuda mõned väljad BLOB-tüüpi väljadeks", -"Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Ristsõltuvus OUTER JOIN klauslis. Kontrolli oma ON tingimusi", -"Tulp '%-.64s' on kasutusel indeksina, kuid ei ole määratletud kui NOT NULL", -"Ei suuda avada funktsiooni '%-.64s'", -"Ei suuda algväärtustada funktsiooni '%-.64s'; %-.80s", -"Teegi nimes ei tohi olla kataloogi", -"Funktsioon '%-.64s' juba eksisteerib", -"Ei suuda avada jagatud teeki '%-.64s' (veakood: %d %-.64s)", -"Ei leia funktsiooni '%-.64s' antud teegis", -"Funktsioon '%-.64s' ei ole defineeritud", -"Masin '%-.64s' on blokeeritud hulgaliste ühendusvigade tõttu. Blokeeringu saab tühistada 'mysqladmin flush-hosts' käsuga", -"Masinal '%-.64s' puudub ligipääs sellele MySQL serverile", -"Te kasutate MySQL-i anonüümse kasutajana, kelledel pole parooli muutmise õigust", -"Teiste paroolide muutmiseks on nõutav tabelite muutmisõigus 'mysql' andmebaasis", -"Ei leia vastavat kirjet kasutajate tabelis", -"Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld", -"Ei suuda luua uut lõime (veakood %d). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga", -"Tulpade hulk erineb väärtuste hulgast real %ld", -"Ei suuda taasavada tabelit '%-.64s'", -"NULL väärtuse väärkasutus", -"regexp tagastas vea '%-.64s'", -"GROUP tulpade (MIN(),MAX(),COUNT()...) kooskasutamine tavaliste tulpadega ilma GROUP BY klauslita ei ole lubatud", -"Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s'", -"%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tabelis '%-.64s'", -"%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tulbale '%-.64s' tabelis '%-.64s'", -"Vigane GRANT/REVOKE käsk. 
Tutvu kasutajajuhendiga", -"Masina või kasutaja nimi GRANT lauses on liiga pikk", -"Tabelit '%-.64s.%-.64s' ei eksisteeri", -"Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s' tabelile '%-.64s'", -"Antud käsk ei ole lubatud käesolevas MySQL versioonis", -"Viga SQL süntaksis", -"INSERT DELAYED lõim ei suutnud saada soovitud lukku tabelile %-.64s", -"Liiga palju DELAYED lõimesid kasutusel", -"Ühendus katkestatud %ld andmebaasile: '%-.64s' kasutajale: '%-.32s' (%-.64s)", -"Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga", -"Viga ühendustoru lugemisel", -"fcntl() tagastas vea", -"Paketid saabusid vales järjekorras", -"Viga andmepaketi lahtipakkimisel", -"Viga andmepaketi lugemisel", -"Kontrollaja ületamine andmepakettide lugemisel", -"Viga andmepaketi kirjutamisel", -"Kontrollaja ületamine andmepakettide kirjutamisel", -"Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga", -"Valitud tabelitüüp ei toeta BLOB/TEXT tüüpi välju", -"Valitud tabelitüüp ei toeta AUTO_INCREMENT tüüpi välju", -"INSERT DELAYED ei saa kasutada tabeli '%-.64s' peal, kuna see on lukustatud LOCK TABLES käsuga", -"Vigane tulba nimi '%-.100s'", -"Tabelihandler ei oska indekseerida tulpa '%-.64s'", -"Kõik tabelid MERGE tabeli määratluses ei ole identsed", -"Ei suuda kirjutada tabelisse '%-.64s', kuna see rikub ühesuse kitsendust", -"BLOB-tüüpi tulp '%-.64s' on kasutusel võtmes ilma pikkust määratlemata", -"Kõik PRIMARY KEY peavad olema määratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tüüpi võtit", -"Tulemis oli rohkem kui üks kirje", -"Antud tabelitüüp nõuab primaarset võtit", -"Antud MySQL versioon on kompileeritud ilma RAID toeta", -"Katse muuta tabelit turvalises rezhiimis ilma WHERE klauslita", -"Võti '%-.64s' ei eksisteeri tabelis '%-.64s'", -"Ei suuda avada tabelit", -"Antud tabelitüüp ei toeta %s käske", -"Seda käsku ei saa kasutada transaktsiooni sees", -"Viga %d käsu COMMIT täitmisel", -"Viga %d käsu ROLLBACK täitmisel", -"Viga %d käsu 
FLUSH_LOGS täitmisel", -"Viga %d käsu CHECKPOINT täitmisel", -"Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega", -"Ei suuda täita antud käsku kuna on aktiivseid lukke või käimasolev transaktsioon", -"Tundmatu süsteemne muutuja '%-.64s'", -"Tabel '%-.64s' on märgitud vigaseks ja tuleb parandada", -"Tabel '%-.64s' on märgitud vigaseks ja viimane (automaatne?) parandus ebaõnnestus", -"Hoiatus: mõnesid transaktsioone mittetoetavaid tabeleid ei suudetud tagasi kerida", -"Mitme lausendiga transaktsioon nõudis rohkem ruumi kui lubatud 'max_binlog_cache_size' muutujaga. Suurenda muutuja väärtust ja proovi uuesti", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"Kasutajal %-.64s on juba rohkem ühendusi kui lubatud 'max_user_connections' muutujaga", -"Ainult konstantsed suurused on lubatud SET klauslis", -"Kontrollaeg ületatud luku järel ootamisel; Proovi transaktsiooni otsast alata", -"Lukkude koguarv ületab lukutabeli suuruse", -"Uuenduslukke ei saa kasutada READ UNCOMMITTED transaktsiooni käigus", -"DROP DATABASE ei ole lubatud kui lõim omab globaalset READ lukku", -"CREATE DATABASE ei ole lubatud kui lõim omab globaalset READ lukku", -"Vigased parameetrid %s-le", -"Kasutajal '%-.32s'@'%-.64s' ei ole lubatud luua uusi kasutajaid", -"Vigane 
tabelimääratlus; kõik MERGE tabeli liikmed peavad asuma samas andmebaasis", -"Lukustamisel tekkis tupik (deadlock); alusta transaktsiooni otsast", -"Antud tabelitüüp ei toeta FULLTEXT indekseid", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Viga käsu %s täitmisel: %-.128s", -"Vigane %s ja %s kasutus", -"Tulpade arv kasutatud SELECT lausetes ei kattu", -"Ei suuda täita päringut konfliktse luku tõttu", -"Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud", -"Määrangut '%s' on lauses kasutatud topelt", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced 
during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing 
step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/french/errmsg.txt b/sql/share/french/errmsg.txt deleted file mode 100644 index ffd5e12c108..00000000000 --- a/sql/share/french/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NON", -"OUI", -"Ne peut créer le fichier '%-.64s' (Errcode: %d)", -"Ne peut créer la table '%-.64s' (Errcode: %d)", -"Ne peut créer la base '%-.64s' (Erreur %d)", -"Ne peut créer la base '%-.64s'; elle existe déjà", -"Ne peut effacer la base '%-.64s'; elle n'existe pas", -"Ne peut effacer la base '%-.64s' (erreur %d)", -"Erreur en effaçant la base (rmdir '%-.64s', erreur %d)", -"Erreur en effaçant '%-.64s' (Errcode: %d)", -"Ne peut lire un enregistrement de la table 'system'", -"Ne peut obtenir le status de '%-.64s' (Errcode: %d)", -"Ne peut obtenir le répertoire de travail (Errcode: %d)", -"Ne peut verrouiller le fichier (Errcode: %d)", -"Ne peut ouvrir le fichier: '%-.64s' (Errcode: %d)", -"Ne peut trouver le fichier: '%-.64s' (Errcode: %d)", -"Ne peut lire le répertoire de '%-.64s' (Errcode: %d)", -"Ne peut changer le répertoire pour '%-.64s' (Errcode: %d)", -"Enregistrement modifié depuis sa dernière lecture dans la table '%-.64s'", -"Disque plein (%s). 
J'attend que quelqu'un libère de l'espace...", -"Ecriture impossible, doublon dans une clé de la table '%-.64s'", -"Erreur a la fermeture de '%-.64s' (Errcode: %d)", -"Erreur en lecture du fichier '%-.64s' (Errcode: %d)", -"Erreur en renommant '%-.64s' en '%-.64s' (Errcode: %d)", -"Erreur d'écriture du fichier '%-.64s' (Errcode: %d)", -"'%-.64s' est verrouillé contre les modifications", -"Tri alphabétique abandonné", -"La vue (View) '%-.64s' n'existe pas pour '%-.64s'", -"Reçu l'erreur %d du handler de la table", -"Le handler de la table '%-.64s' n'a pas cette option", -"Ne peut trouver l'enregistrement dans '%-.64s'", -"Information erronnée dans le fichier: '%-.64s'", -"Index corrompu dans la table: '%-.64s'; essayez de le réparer", -"Vieux fichier d'index pour la table '%-.64s'; réparez le!", -"'%-.64s' est en lecture seulement", -"Manque de mémoire. Redémarrez le démon et ré-essayez (%d octets nécessaires)", -"Manque de mémoire pour le tri. Augmentez-la.", -"Fin de fichier inattendue en lisant '%-.64s' (Errcode: %d)", -"Trop de connections", -"Manque de 'threads'/mémoire", -"Ne peut obtenir de hostname pour votre adresse", -"Mauvais 'handshake'", -"Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s'. 
Base '%-.64s'", -"Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)", -"Aucune base n'a été sélectionnée", -"Commande inconnue", -"Le champ '%-.64s' ne peut être vide (null)", -"Base '%-.64s' inconnue", -"La table '%-.64s' existe déjà", -"Table '%-.64s' inconnue", -"Champ: '%-.64s' dans %s est ambigu", -"Arrêt du serveur en cours", -"Champ '%-.64s' inconnu dans %s", -"'%-.64s' n'est pas dans 'group by'", -"Ne peut regrouper '%-.64s'", -"Vous demandez la fonction sum() et des champs dans la même commande", -"Column count doesn't match value count", -"Le nom de l'identificateur '%-.64s' est trop long", -"Nom du champ '%-.64s' déjà utilisé", -"Nom de clef '%-.64s' déjà utilisé", -"Duplicata du champ '%-.64s' pour la clef %d", -"Mauvais paramètre de champ pour le champ '%-.64s'", -"%s près de '%-.64s' à la ligne %d", -"Query est vide", -"Table/alias: '%-.64s' non unique", -"Valeur par défaut invalide pour '%-.64s'", -"Plusieurs clefs primaires définies", -"Trop de clefs sont définies. Maximum de %d clefs alloué", -"Trop de parties specifiées dans la clef. Maximum de %d parties", -"La clé est trop longue. Longueur maximale: %d", -"La clé '%-.64s' n'existe pas dans la table", -"Champ BLOB '%-.64s' ne peut être utilisé dans une clé", -"Champ '%-.64s' trop long (max = %d). Utilisez un BLOB", -"Un seul champ automatique est permis et il doit être indexé", -"%s: Prêt pour des connections", -"%s: Arrêt normal du serveur\n", -"%s: Reçu le signal %d. Abandonne!\n", -"%s: Arrêt du serveur terminé\n", -"%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.64s'\n", -"Ne peut créer la connection IP (socket)", -"La table '%-.64s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table", -"Séparateur de champs inconnu. Vérifiez dans le manuel", -"Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. 
Utiliser 'fields terminated by'.", -"Le fichier '%-.64s' doit être dans le répertoire de la base et lisible par tous", -"Le fichier '%-.64s' existe déjà", -"Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld", -"Enregistrements: %ld Doublons: %ld", -"Mauvaise sous-clef. Ce n'est pas un 'string' ou la longueur dépasse celle définie dans la clef", -"Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE", -"Ne peut effacer (DROP) '%-.64s'. Vérifiez s'il existe", -"Enregistrements: %ld Doublons: %ld Avertissements: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Numéro de tâche inconnu: %lu", -"Vous n'êtes pas propriétaire de la tâche no: %lu", -"Aucune table utilisée", -"Trop de chaînes dans la colonne %s avec SET", -"Ne peut générer un unique nom de journal %s.(1-999)\n", -"Table '%-.64s' verrouillée lecture (READ): modification impossible", -"Table '%-.64s' non verrouillée: utilisez LOCK TABLES", -"BLOB '%-.64s' ne peut avoir de valeur par défaut", -"Nom de base de donnée illégal: '%-.64s'", -"Nom de table illégal: '%-.64s'", -"SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. Vérifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien", -"Erreur inconnue", -"Procédure %s inconnue", -"Mauvais nombre de paramètres pour la procedure %s", -"Paramètre erroné pour la procedure %s", -"Table inconnue '%-.64s' dans %s", -"Champ '%-.64s' spécifié deux fois", -"Utilisation invalide de la clause GROUP", -"Table '%-.64s' : utilise une extension invalide pour cette version de MySQL", -"Une table doit comporter au moins une colonne", -"La table '%-.64s' est pleine", -"Jeu de caractères inconnu: '%-.64s'", -"Trop de tables. MySQL ne peut utiliser que %d tables dans un JOIN", -"Trop de champs", -"Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %d. 
Changez le type de quelques colonnes en BLOB", -"Débordement de la pile des tâches (Thread stack). Utilisées: %ld pour une pile de %ld. Essayez 'mysqld -O thread_stack=#' pour indiquer une plus grande valeur", -"Dépendance croisée dans une clause OUTER JOIN. Vérifiez la condition ON", -"La colonne '%-.32s' fait partie d'un index UNIQUE ou INDEX mais n'est pas définie comme NOT NULL", -"Imposible de charger la fonction '%-.64s'", -"Impossible d'initialiser la fonction '%-.64s'; %-.80s", -"Chemin interdit pour les bibliothèques partagées", -"La fonction '%-.64s' existe déjà", -"Impossible d'ouvrir la bibliothèque partagée '%-.64s' (errno: %d %s)", -"Impossible de trouver la fonction '%-.64s' dans la bibliothèque'", -"La fonction '%-.64s' n'est pas définie", -"L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connection. Débloquer le par 'mysqladmin flush-hosts'", -"Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MySQL", -"Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe", -"Vous devez avoir le privilège update sur les tables de la base de donnée mysql pour pouvoir changer les mots de passe des autres", -"Impossible de trouver un enregistrement correspondant dans la table user", -"Enregistrements correspondants: %ld Modifiés: %ld Warnings: %ld", -"Impossible de créer une nouvelle tâche (errno %d). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS", -"Column count doesn't match value count at row %ld", -"Impossible de réouvrir la table: '%-.64s", -"Utilisation incorrecte de la valeur NULL", -"Erreur '%-.64s' provenant de regexp", -"Mélanger les colonnes GROUP (MIN(),MAX(),COUNT()...) 
avec des colonnes normales est interdit s'il n'y a pas de clause GROUP BY", -"Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s'", -"La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la table '%-.64s'", -"La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'", -"Commande GRANT/REVOKE incorrecte. Consultez le manuel.", -"L'hôte ou l'utilisateur donné en argument à GRANT est trop long", -"La table '%-.64s.%s' n'existe pas", -"Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s' sur la table '%-.64s'", -"Cette commande n'existe pas dans cette version de MySQL", -"Erreur de syntaxe", -"La tâche 'delayed insert' n'a pas pu obtenir le verrou démandé sur la table %-.64s", -"Trop de tâche 'delayed' en cours", -"Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.64s' (%s)", -"Paquet plus grand que 'max_allowed_packet' reçu", -"Erreur de lecture reçue du pipe de connection", -"Erreur reçue de fcntl() ", -"Paquets reçus dans le désordre", -"Impossible de décompresser le paquet reçu", -"Erreur de lecture des paquets reçus", -"Timeout en lecture des paquets reçus", -"Erreur d'écriture des paquets envoyés", -"Timeout d'écriture des paquets envoyés", -"La chaîne résultat est plus grande que 'max_allowed_packet'", -"Ce type de table ne supporte pas les colonnes BLOB/TEXT", -"Ce type de table ne supporte pas les colonnes AUTO_INCREMENT", -"INSERT DELAYED ne peut être utilisé avec la table '%-.64s', car elle est verrouée avec LOCK TABLES", -"Nom de colonne '%-.100s' incorrect", -"Le handler de la table ne peut indexé la colonne '%-.64s'", -"Toutes les tables de la table de type MERGE n'ont pas la même définition", -"Écriture impossible à cause d'un index UNIQUE sur la table '%-.64s'", -"La colonne '%-.64s' de type BLOB est utilisée dans une définition d'index sans longueur d'index", -"Toutes les parties d'un index PRIMARY KEY doivent 
être NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE", -"Le résultat contient plus d'un enregistrement", -"Ce type de table nécessite une clé primaire (PRIMARY KEY)", -"Cette version de MySQL n'est pas compilée avec le support RAID", -"Vous êtes en mode 'safe update' et vous essayez de faire un UPDATE sans clause WHERE utilisant un index", -"L'index '%-.64s' n'existe pas sur la table '%-.64s'", -"Impossible d'ouvrir la table", -"Ce type de table ne supporte pas les %s", -"Vous n'êtes pas autorisé à exécute cette commande dans une transaction", -"Erreur %d lors du COMMIT", -"Erreur %d lors du ROLLBACK", -"Erreur %d lors du FLUSH_LOGS", -"Erreur %d lors du CHECKPOINT", -"Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: `%-.64s' (%-.64s)", -"Ce type de table ne supporte pas les copies binaires", -"Le 'binlog' a été fermé pendant l'exécution du FLUSH MASTER", -"La reconstruction de l'index de la table copiée '%-.64s' a échoué", -"Erreur reçue du maître: '%-.64s'", -"Erreur de lecture réseau reçue du maître", -"Erreur d'écriture réseau reçue du maître", -"Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes", -"Impossible d'exécuter la commande car vous avez des tables verrouillées ou une transaction active", -"Variable système '%-.64s' inconnue", -"La table '%-.64s' est marquée 'crashed' et devrait être réparée", -"La table '%-.64s' est marquée 'crashed' et le dernier 'repair' a échoué", -"Attention: certaines tables ne supportant pas les transactions ont été changées et elles ne pourront pas être restituées", -"Cette transaction à commandes multiples nécessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et réessayez", -"Cette opération ne peut être réalisée avec un esclave actif, faites STOP SLAVE d'abord", -"Cette opération nécessite un esclave actif, configurez les esclaves et faites START SLAVE", -"Le server n'est pas configuré comme un 
esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO", -"Impossible d'initialiser les structures d'information de maître, vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MySQL", -"Impossible de créer une tâche esclave, vérifiez les ressources système", -"L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connections actives", -"Seules les expressions constantes sont autorisées avec SET", -"Timeout sur l'obtention du verrou", -"Le nombre total de verrou dépasse la taille de la table des verrous", -"Un verrou en update ne peut être acquit pendant une transaction READ UNCOMMITTED", -"DROP DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture", -"CREATE DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture", -"Mauvais arguments à %s", -"'%-.32s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs", -"Définition de table incorrecte; toutes les tables MERGE doivent être dans la même base de donnée", -"Deadlock découvert en essayant d'obtenir les verrous : essayez de redémarrer la transaction", -"Le type de table utilisé ne supporte pas les index FULLTEXT", -"Impossible d'ajouter des contraintes d'index externe", -"Impossible d'ajouter un enregistrement fils : une constrainte externe l'empèche", -"Impossible de supprimer un enregistrement père : une constrainte externe l'empèche", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable 
'%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data 
truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/german/errmsg.txt b/sql/share/german/errmsg.txt deleted file mode 100644 index a8b98164e72..00000000000 --- a/sql/share/german/errmsg.txt +++ /dev/null @@ -1,334 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Dirk Munzinger (dmun@4t2.com) - 2001-06-07 - - Georg Richter (georg@php.net) - fixed typos and translation - translated new error messages - 2002-12-11 - - Stefan Hinz (stefan@mysql.com) - 2003-10-01 -*/ - -character-set=latin1 - -"hashchk", -"isamchk", -"Nein", -"Ja", -"Kann Datei '%-.64s' nicht erzeugen (Fehler: %d)", -"Kann Tabelle '%-.64s' nicht erzeugen (Fehler: %d)", -"Kann Datenbank '%-.64s' nicht erzeugen (Fehler: %d)", -"Kann Datenbank '%-.64s' nicht erzeugen. Datenbank '%-.64s' existiert bereits", -"Kann Datenbank '%-.64s' nicht löschen. Keine Datenbank '%-.64s' vorhanden", -"Fehler beim Löschen der Datenbank ('%-.64s' kann nicht gelöscht werden, Fehlernuumer: %d)", -"Fehler beim Löschen der Datenbank (Verzeichnis '%-.64s' kann nicht gelöscht werden, Fehlernummer: %d)", -"Fehler beim Löschen von '%-.64s' (Fehler: %d)", -"Datensatz in der Systemtabelle nicht lesbar", -"Kann Status von '%-.64s' nicht ermitteln (Fehler: %d)", -"Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)", -"Datei kann nicht gesperrt werden (Fehler: %d)", -"Datei '%-.64s' nicht öffnen (Fehler: %d)", -"Kann Datei '%-.64s' nicht finden (Fehler: %d)", -"Verzeichnis von '%-.64s' nicht lesbar (Fehler: %d)", -"Kann nicht in das Verzeichnis '%-.64s' wechseln (Fehler: %d)", -"Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' geändert", -"Festplatte voll (%-.64s). 
Warte, bis jemand Platz schafft ...", -"Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.64s'", -"Fehler beim Schließen von '%-.64s' (Fehler: %d)", -"Fehler beim Lesen der Datei '%-.64s' (Fehler: %d)", -"Fehler beim Umbenennen von '%-.64s' in '%-.64s' (Fehler: %d)", -"Fehler beim Speichern der Datei '%-.64s' (Fehler: %d)", -"'%-.64s' ist für Änderungen gesperrt", -"Sortiervorgang abgebrochen", -"View '%-.64s' existiert für '%-.64s' nicht", -"Fehler %d (Tabellenhandler)", -"Diese Option gibt es nicht (Tabellenhandler)", -"Kann Datensatz nicht finden", -"Falsche Information in Datei '%-.64s'", -"Falsche Schlüssel-Datei für Tabelle '%-.64s'. versuche zu reparieren", -"Alte Schlüssel-Datei für Tabelle '%-.64s'. Bitte reparieren", -"'%-.64s' ist nur lesbar", -"Kein Speicher vorhanden (%d Bytes benötigt). Bitte Server neu starten", -"Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte erhöht werden", -"Unerwartetes Ende beim Lesen der Datei '%-.64s' (Fehler: %d)", -"Zu viele Verbindungen", -"Kein Speicher mehr vorhanden. Prüfen Sie, ob mysqld oder ein anderer Prozess allen Speicher verbraucht. 
Wenn nicht, sollten Sie mit 'ulimit' dafür sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten", -"Kann Hostnamen für diese Adresse nicht erhalten", -"Schlechter Handshake", -"Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.64s'", -"Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %-.64s)", -"Keine Datenbank ausgewählt", -"Unbekannter Befehl", -"Feld '%-.64s' darf nicht NULL sein", -"Unbekannte Datenbank '%-.64s'", -"Tabelle '%-.64s' bereits vorhanden", -"Unbekannte Tabelle '%-.64s'", -"Spalte '%-.64s' in %-.64s ist nicht eindeutig", -"Der Server wird heruntergefahren", -"Unbekanntes Tabellenfeld '%-.64s' in %-.64s", -"'%-.64s' ist nicht in GROUP BY vorhanden", -"Gruppierung über '%-.64s' nicht möglich", -"Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt", -"Die Anzahl der Spalten entspricht nicht der Anzahl der Werte", -"Name des Bezeichners '%-.64s' ist zu lang", -"Doppelter Spaltenname vorhanden: '%-.64s'", -"Doppelter Name für Schlüssel (Key) vorhanden: '%-.64s'", -"Doppelter Eintrag '%-.64s' für Schlüssel %d", -"Falsche Spaltenangaben für Spalte '%-.64s'", -"%s bei '%-.80s' in Zeile %d", -"Leere Abfrage", -"Tabellenname/Alias '%-.64s' nicht eindeutig", -"Fehlerhafter Vorgabewert (DEFAULT): '%-.64s'", -"Mehrfacher Primärschlüssel (PRIMARY KEY) definiert", -"Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt", -"Zu viele Teilschlüssel definiert. Maximal sind %d Teilschlüssel erlaubt", -"Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d", -"In der Tabelle gibt es keine Schlüsselspalte '%-.64s'", -"BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden", -"Feldlänge für Feld '%-.64s' zu groß (maximal %d). BLOB-Feld verwenden!", -"Falsche Tabellendefinition. 
Es darf nur ein Auto-Feld geben und dieses muss als Schlüssel definiert werden", -"%-.64s: Bereit für Verbindungen", -"%-.64s: Normal heruntergefahren\n", -"%-.64s: Signal %d erhalten. Abbruch!\n", -"%-.64s: Heruntergefahren (shutdown)\n", -"%s: Thread %ld zwangsweise beendet. Benutzer: '%-.32s'\n", -"Kann IP-Socket nicht erzeugen", -"Tabelle '%-.64s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Index neu anlegen", -"Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen", -"Eine feste Zeilenlänge kann für BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden", -"Datei '%-.64s' muss im Datenbank-Verzeichnis vorhanden und lesbar für alle sein", -"Datei '%-.64s' bereits vorhanden", -"Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld", -"Datensätze: %ld Duplikate: %ld", -"Falscher Unterteilschlüssel. Der verwendete Schlüsselteil ist entweder kein String, die verwendete Länge ist länger als der Teilschlüssel oder der Tabellenhandler unterstützt keine Unterteilschlüssel", -"Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden", -"Kann '%-.64s' nicht löschen. 
Existiert das Feld / der Schlüssel?", -"Datensätze: %ld Duplikate: %ld Warnungen: %ld", -"Die Verwendung der zu aktualisierenden Zieltabelle '%-.64s' ist in der FROM-Klausel nicht zulässig.", -"Unbekannte Thread-ID: %lu", -"Sie sind nicht Eigentümer von Thread %lu", -"Keine Tabellen verwendet", -"Zu viele Strings für SET-Spalte %-.64s angegeben", -"Kann keinen eindeutigen Dateinamen für die Logdatei %-.64s erzeugen (1-999)\n", -"Tabelle '%-.64s' ist mit Lesesperre versehen und kann nicht aktualisiert werden", -"Tabelle '%-.64s' wurde nicht mit LOCK TABLES gesperrt", -"BLOB-Feld '%-.64s' darf keinen Vorgabewert (DEFAULT) haben", -"Unerlaubter Datenbankname '%-.64s'", -"Unerlaubter Tabellenname '%-.64s'", -"Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen oder gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden", -"Unbekannter Fehler", -"Unbekannte Prozedur '%-.64s'", -"Falsche Parameterzahl für Prozedur '%-.64s'", -"Falsche Parameter für Prozedur '%-.64s'", -"Unbekannte Tabelle '%-.64s' in '%-.64s'", -"Feld '%-.64s' wurde zweimal angegeben", -"Falsche Verwendung einer Gruppierungsfunktion", -"Tabelle '%-.64s' verwendet eine Extension, die in dieser MySQL-Version nicht verfügbar ist", -"Eine Tabelle muß mindestens 1 Spalte besitzen", -"Tabelle '%-.64s' ist voll", -"Unbekannter Zeichensatz: '%-.64s'", -"Zu viele Tabellen. MySQL kann in einem Join maximal %d Tabellen verwenden", -"Zu viele Spalten", -"Zeilenlänge zu groß. Die maximale Spaltenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %d. Einige Felder müssen in BLOB oder TEXT umgewandelt werden", -"Thread-Stack-Überlauf. Benutzt: %ld von %ld Stack. 'mysqld -O thread_stack=#' verwenen, um notfalls einen größeren Stack anzulegen", -"OUTER JOIN enthält fehlerhafte Abhängigkeiten. 
In ON verwendete Bedingungen überprüfen", -"Spalte '%-.64s' wurde mit UNIQUE oder INDEX benutzt, ist aber nicht als NOT NULL definiert", -"Kann Funktion '%-.64s' nicht laden", -"Kann Funktion '%-.64s' nicht initialisieren: %-.80s", -"Keine Pfade gestattet für Shared Library", -"Funktion '%-.64s' existiert schon", -"Kann Shared Library '%-.64s' nicht öffnen (Fehler: %d %-.64s)", -"Kann Funktion '%-.64s' in der Library nicht finden", -"Funktion '%-.64s' ist nicht definiert", -"Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'", -"Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden", -"Sie benutzen MySQL als anonymer Benutzer und dürfen daher keine Passwörter ändern", -"Sie benötigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwörter anderer Benutzer ändern zu können", -"Kann keinen passenden Datensatz in Tabelle 'user' finden", -"Datensätze gefunden: %ld Geändert: %ld Warnungen: %ld", -"Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen", -"Anzahl der Spalten stimmt nicht mit der Anzahl der Werte in Zeile %ld überein", -"Kann Tabelle'%-.64s' nicht erneut öffnen", -"Unerlaubte Verwendung eines NULL-Werts", -"regexp lieferte Fehler '%-.64s'", -"Das Vermischen von GROUP-Spalten (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Spalten ist nicht zulässig, wenn keine GROUP BY-Klausel vorhanden ist", -"Für Benutzer '%-.32s' auf Host '%-.64s' gibt es keine solche Berechtigung", -"%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und für Tabelle '%-.64s'", -"%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und Spalte '%-.64s' in Tabelle '%-.64s'", -"Unzulässiger GRANT- oder REVOKE-Befehl. 
Verfügbare Berechtigungen sind im Handbuch aufgeführt", -"Das Host- oder User-Argument für GRANT ist zu lang", -"Tabelle '%-.64s.%-.64s' existiert nicht", -"Keine solche Berechtigung für User '%-.32s' auf Host '%-.64s' an Tabelle '%-.64s'", -"Der verwendete Befehl ist in dieser MySQL-Version nicht zulässig", -"Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen (diese kann für verschiedene Server-Versionen unterschiedlich sein)", -"Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.64s' nicht erhalten", -"Zu viele verzögerte (DELAYED) Threads in Verwendung", -"Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.64s' (%-.64s)", -"Empfangenes Paket ist größer als 'max_allowed_packet'", -"Lese-Fehler bei einer Kommunikations-Pipe", -"fcntl() lieferte einen Fehler", -"Pakete nicht in der richtigen Reihenfolge empfangen", -"Kommunikationspaket lässt sich nicht entpacken", -"Fehler beim Lesen eines Kommunikationspakets", -"Zeitüberschreitung beim Lesen eines Kommunikationspakets", -"Fehler beim Schreiben eines Kommunikationspakets", -"Zeitüberschreitung beim Schreiben eines Kommunikationspakets", -"Ergebnis ist länger als 'max_allowed_packet'", -"Der verwendete Tabellentyp unterstützt keine BLOB- und TEXT-Spalten", -"Der verwendete Tabellentyp unterstützt keine AUTO_INCREMENT-Spalten", -"INSERT DELAYED kann nicht auf Tabelle '%-.64s' angewendet werden, da diese mit LOCK TABLES gesperrt ist", -"Falscher Spaltenname '%-.100s'", -"Der verwendete Tabellen-Handler kann die Spalte '%-.64s' nicht indizieren", -"Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert", -"Schreiben in Tabelle '%-.64s' nicht möglich wegen einer eindeutigen Beschränkung (unique constraint)", -"BLOB- oder TEXT-Spalte '%-.64s' wird in der Schlüsseldefinition ohne Schlüssellängenangabe verwendet", -"Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. 
Wenn NULL in einem Schlüssel verwendet wird, muss ein UNIQUE-Schlüssel verwendet werden", -"Ergebnis besteht aus mehr als einer Zeile", -"Dieser Tabellentyp benötigt einen PRIMARY KEY", -"Diese MySQL-Version ist nicht mit RAID-Unterstützung kompiliert", -"MySQL läuft im sicheren Aktualisierungsmodus (safe update mode). Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel eine KEY-Spalte anzugeben", -"Schlüssel '%-.64s' existiert in der Tabelle '%-.64s' nicht", -"Kann Tabelle nicht öffnen", -"Die Speicher-Engine für diese Tabelle unterstützt kein %s", -"Sie dürfen diesen Befehl nicht in einer Transaktion ausführen", -"Fehler %d beim COMMIT", -"Fehler %d beim ROLLBACK", -"Fehler %d bei FLUSH_LOGS", -"Fehler %d bei CHECKPOINT", -"Verbindungsabbruch %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: `%-.64s' (%-.64s)", -"Die Speicher-Engine für die Tabelle unterstützt keinen binären Tabellen-Dump", -"Binlog geschlossen. Kann RESET MASTER nicht ausführen", -"Neuerstellung des Indizes der Dump-Tabelle '%-.64s' fehlgeschlagen", -"Fehler vom Master: '%-.64s'", -"Netzfehler beim Lesen vom Master", -"Netzfehler beim Schreiben zum Master", -"Kann keinen FULLTEXT-Index finden, der der Spaltenliste entspricht", -"Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausführen", -"Unbekannte Systemvariable '%-.64s'", -"Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden", -"Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl", -"Änderungen an einigen nicht transaktionalen Tabellen konnten nicht zurückgerollt werden", -"Transaktionen, die aus mehreren Befehlen bestehen, benötigen mehr als 'max_binlog_cache_size' Bytes an Speicher. Diese mysqld-Variable bitte vergrössern und erneut versuchen", -"Diese Operation kann nicht bei einem aktiven Slave durchgeführt werden. 
Bitte zuerst STOP SLAVE ausführen", -"Diese Operation benötigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren", -"Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Konnte keinen Slave-Thread starten. Bitte System-Ressourcen überprüfen", -"Benutzer '%-.64s' hat mehr als max_user_connections aktive Verbindungen", -"Bei SET dürfen nur konstante Ausdrücke verwendet werden", -"Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten", -"Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle", -"Während einer READ UNCOMMITED-Transaktion können keine UPDATE-Sperren angefordert werden", -"DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält", -"CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält", -"Falsche Argumente für %s", -"'%-.32s'@'%-.64s' is nicht berechtigt, neue Benutzer hinzuzufügen", -"Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden", -"Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. 
Versuchen Sie, die Transaktion erneut zu starten", -"Der verwendete Tabellentyp unterstützt keine FULLTEXT-Indizes", -"Fremdschlüssel-Beschränkung konnte nicht hinzugefügt werden", -"Hinzufügen eines Kind-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl", -"Löschen eines Eltern-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl", -"Fehler bei der Verbindung zum Master: %-.128s", -"Beim Ausführen einer Abfrage auf dem Master trat ein Fehler auf: %-.128s", -"Fehler beim Ausführen des Befehls %s: %-.128s", -"Falsche Verwendung von %s und %s", -"Die verwendeten SELECT-Befehle liefern eine unterschiedliche Anzahl von Spalten zurück", -"Augrund eines READ LOCK-Konflikts kann die Abfrage nicht ausgeführt werden", -"Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert", -"Option '%s' wird im Befehl zweimal verwendet", -"Benutzer '%-.64s' hat die Ressourcenbeschränkung '%s' überschritten (aktueller Wert: %ld)", -"Befehl nicht zulässig. 
Hierfür wird die Berechtigung %-.128s benötigt", -"Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verändert werden", -"Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verändert werden", -"Variable '%-.64s' hat keinen Vorgabewert", -"Variable '%-.64s' kann nicht auf '%-.64s' gesetzt werden", -"Falscher Argumenttyp für Variable '%-.64s'", -"Variable '%-.64s' kann nur verändert, nicht gelesen werden", -"Falsche Verwendung oder Platzierung von '%s'", -"Diese MySQL-Version unterstützt '%s' nicht", -"Schwerer Fehler %d: '%-.128s vom Master beim Lesen des binären Logs aufgetreten", -"Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert", -"Variable '%-.64s' is a %s variable", -"Falsche Fremdschlüssel-Definition für '%-64s': %s", -"Schlüssel- und Tabellenverweis passen nicht zusammen", -"Operand solle %d Spalte(n) enthalten", -"Unterabfrage lieferte mehr als einen Datensatz zurück", -"Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben", -"Die Hilfe-Datenbank ist beschädigt oder existiert nicht", -"Zyklischer Verweis in Unterabfragen", -"Spalte '%s' wird von %s nach %s umgewandelt", -"Verweis '%-.64s' wird nicht unterstützt (%s)", -"Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden", -"Select %u wurde während der Optimierung reduziert", -"Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden", -"Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client", -"Alle Teile eines SPATIAL KEY müssen als NOT NULL deklariert sein", -"COLLATION '%s' ist für CHARACTER SET '%s' ungültig", -"Slave läuft bereits", -"Slave wurde bereits angehalten", -"Unkomprimierte Daten sind zu groß. 
Die maximale Größe beträgt %d", -"ZLIB: Steht nicht genug Speicher zur Verfügung", -"ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)", -"ZLIB: Eingabedaten beschädigt", -"%d Zeile(n) durch GROUP_CONCAT() abgeschnitten", -"Anzahl der Datensätze in Zeile %ld geringer als Anzahl der Spalten", -"Anzahl der Datensätze in Zeile %ld größer als Anzahl der Spalten", -"Daten abgeschnitten, NULL für NOT NULL-Spalte '%s' in Zeile %ld angegeben", -"Daten abgeschnitten, außerhalb des Wertebereichs für Spalte '%s' in Zeile %ld", -"Daten abgeschnitten für Spalte '%s' in Zeile %ld", -"Für Tabelle '%s' wird Speicher-Engine %s benutzt", -"Unerlaubte Vermischung der Kollationen (%s,%s) und (%s,%s) für die Operation '%s'", -"Kann einen oder mehrere der angegebenen Benutzer nicht löschen", -"Kann nicht alle Berechtigungen widerrufen, grant for one or more of the requested users", -"Unerlaubte Vermischung der Kollationen (%s,%s), (%s,%s), (%s,%s) für die Operation '%s'", -"Unerlaubte Vermischung der Kollationen für die Operation '%s'", -"Variable '%-.64s' ist keine Variablen-Komponenten (kann nicht als XXXX.variablen_name verwendet werden)", -"Unbekannte Kollation: '%-.64s'", -"SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn der MySQL-Slave mit SSL gestartet wird", -"Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ändern", -"Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst", -"Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL", -"Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. 
Ansonsten gibt es Probleme, wenn der Slave-Server unerwartet neu startet", -"SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/greek/errmsg.txt b/sql/share/greek/errmsg.txt deleted file mode 100644 index 749b96e5d51..00000000000 --- a/sql/share/greek/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=greek - -"hashchk", -"isamchk", -"Ï×É", -"ÍÁÉ", -"Áäýíáôç ç äçìéïõñãßá ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç äçìéïõñãßá ôïõ ðßíáêá '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s'; Ç âÜóç äåäïìÝíùí õðÜñ÷åé Þäç", -"Áäýíáôç ç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí '%-.64s'. Ç âÜóç äåäïìÝíùí äåí õðÜñ÷åé", -"ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ '%-.64s', êùäéêüò ëÜèïõò: %d)", -"ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ ôïõ öáêÝëëïõ '%-.64s', êùäéêüò ëÜèïõò: %d)", -"ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç áíÜãíùóç åããñáöÞò áðü ðßíáêá ôïõ óõóôÞìáôïò", -"Áäýíáôç ç ëÞøç ðëçñïöïñéþí ãéá ôçí êáôÜóôáóç ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Ï öÜêåëëïò åñãáóßáò äåí âñÝèçêå (êùäéêüò ëÜèïõò: %d)", -"Ôï áñ÷åßï äåí ìðïñåß íá êëåéäùèåß (êùäéêüò ëÜèïõò: %d)", -"Äåí åßíáé äõíáôü íá áíïé÷ôåß ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Äåí âñÝèçêå ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Äåí åßíáé äõíáôü íá äéáâáóôåß ï öÜêåëëïò ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç áëëáãÞ ôïõ ôñÝ÷ïíôïò êáôáëüãïõ óå '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Ç åããñáöÞ Ý÷åé áëëÜîåé áðü ôçí ôåëåõôáßá öïñÜ ðïõ áíáóýñèçêå áðü ôïí ðßíáêá '%-.64s'", -"Äåí õðÜñ÷åé ÷þñïò óôï äßóêï (%s). 
Ðáñáêáëþ, ðåñéìÝíåôå íá åëåõèåñùèåß ÷þñïò...", -"Äåí åßíáé äõíáôÞ ç êáôá÷þñçóç, ç ôéìÞ õðÜñ÷åé Þäç óôïí ðßíáêá '%-.64s'", -"ÐáñïõóéÜóôçêå ðñüâëçìá êëåßíïíôáò ôï '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Ðñüâëçìá êáôÜ ôçí áíÜãíùóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Ðñüâëçìá êáôÜ ôçí ìåôïíïìáóßá ôïõ áñ÷åßïõ '%-.64s' to '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Ðñüâëçìá êáôÜ ôçí áðïèÞêåõóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"'%-.64s' äåí åðéôñÝðïíôáé áëëáãÝò", -"Ç äéáäéêáóßá ôáîéíüìéóçò áêõñþèçêå", -"Ôï View '%-.64s' äåí õðÜñ÷åé ãéá '%-.64s'", -"ÅëÞöèç ìÞíõìá ëÜèïõò %d áðü ôïí ÷åéñéóôÞ ðßíáêá (table handler)", -"Ï ÷åéñéóôÞò ðßíáêá (table handler) ãéá '%-.64s' äåí äéáèÝôåé áõôÞ ôçí åðéëïãÞ", -"Áäýíáôç ç áíåýñåóç åããñáöÞò óôï '%-.64s'", -"ËÜèïò ðëçñïöïñßåò óôï áñ÷åßï: '%-.64s'", -"ËÜèïò áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá: '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!", -"Ðáëáéü áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!", -"'%-.64s' åðéôñÝðåôáé ìüíï ç áíÜãíùóç", -"Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç. ÐñïóðáèÞóôå ðÜëé, åðáíåêéíþíôáò ôç äéáäéêáóßá (demon) (÷ñåéÜæïíôáé %d bytes)", -"Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç ãéá ôáîéíüìéóç. 
ÁõîÞóôå ôï sort buffer size ãéá ôç äéáäéêáóßá (demon)", -"ÊáôÜ ôç äéÜñêåéá ôçò áíÜãíùóçò, âñÝèçêå áðñïóäüêçôá ôï ôÝëïò ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"ÕðÜñ÷ïõí ðïëëÝò óõíäÝóåéò...", -"Ðñüâëçìá ìå ôç äéáèÝóéìç ìíÞìç (Out of thread space/memory)", -"Äåí Ýãéíå ãíùóôü ôï hostname ãéá ôçí address óáò", -"Ç áíáãíþñéóç (handshake) äåí Ýãéíå óùóôÜ", -"Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' óôç âÜóç äåäïìÝíùí '%-.64s'", -"Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' (÷ñÞóç password: %s)", -"Äåí åðéëÝ÷èçêå âÜóç äåäïìÝíùí", -"Áãíùóôç åíôïëÞ", -"Ôï ðåäßï '%-.64s' äåí ìðïñåß íá åßíáé êåíü (null)", -"Áãíùóôç âÜóç äåäïìÝíùí '%-.64s'", -"Ï ðßíáêáò '%-.64s' õðÜñ÷åé Þäç", -"Áãíùóôïò ðßíáêáò '%-.64s'", -"Ôï ðåäßï: '%-.64s' óå %-.64s äåí Ý÷åé êáèïñéóôåß", -"Åíáñîç äéáäéêáóßáò áðïóýíäåóçò ôïõ åîõðçñåôçôÞ (server shutdown)", -"Áãíùóôï ðåäßï '%-.64s' óå '%-.64s'", -"×ñçóéìïðïéÞèçêå '%-.64s' ðïõ äåí õðÞñ÷å óôï group by", -"Áäýíáôç ç ïìáäïðïßçóç (group on) '%-.64s'", -"Ç äéáôýðùóç ðåñéÝ÷åé sum functions êáé columns óôçí ßäéá äéáôýðùóç", -"Ôï Column count äåí ôáéñéÜæåé ìå ôï value count", -"Ôï identifier name '%-.100s' åßíáé ðïëý ìåãÜëï", -"ÅðáíÜëçøç column name '%-.64s'", -"ÅðáíÜëçøç key name '%-.64s'", -"ÄéðëÞ åããñáöÞ '%-.64s' ãéá ôï êëåéäß %d", -"ÅóöáëìÝíï column specifier ãéá ôï ðåäßï '%-.64s'", -"%s ðëçóßïí '%-.80s' óôç ãñáììÞ %d", -"Ôï åñþôçìá (query) ðïõ èÝóáôå Þôáí êåíü", -"Áäýíáôç ç áíåýñåóç unique table/alias: '%-.64s'", -"ÅóöáëìÝíç ðñïêáèïñéóìÝíç ôéìÞ (default value) ãéá '%-.64s'", -"Ðåñéóóüôåñá áðü Ýíá primary key ïñßóôçêáí", -"ÐÜñá ðïëëÜ key ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé", -"ÐÜñá ðïëëÜ key parts ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé", -"Ôï êëåéäß ðïõ ïñßóèçêå åßíáé ðïëý ìåãÜëï. Ôï ìÝãéóôï ìÞêïò åßíáé %d", -"Ôï ðåäßï êëåéäß '%-.64s' äåí õðÜñ÷åé óôïí ðßíáêá", -"Ðåäßï ôýðïõ Blob '%-.64s' äåí ìðïñåß íá ÷ñçóéìïðïéçèåß óôïí ïñéóìü åíüò êëåéäéïý (key specification)", -"Ðïëý ìåãÜëï ìÞêïò ãéá ôï ðåäßï '%-.64s' (max = %d). 
Ðáñáêáëþ ÷ñçóéìïðïéåßóôå ôïí ôýðï BLOB", -"Ìðïñåß íá õðÜñ÷åé ìüíï Ýíá auto field êáé ðñÝðåé íá Ý÷åé ïñéóèåß óáí key", -"%s: óå áíáìïíÞ óõíäÝóåùí", -"%s: ÖõóéïëïãéêÞ äéáäéêáóßá shutdown\n", -"%s: ÅëÞöèç ôï ìÞíõìá %d. Ç äéáäéêáóßá åãêáôáëåßðåôáé!\n", -"%s: Ç äéáäéêáóßá Shutdown ïëïêëçñþèçêå\n", -"%s: Ôï thread èá êëåßóåé %ld user: '%-.64s'\n", -"Äåí åßíáé äõíáôÞ ç äçìéïõñãßá IP socket", -"Ï ðßíáêáò '%-.64s' äåí Ý÷åé åõñåôÞñéï (index) óáí áõôü ðïõ ÷ñçóéìïðïéåßôå óôçí CREATE INDEX. Ðáñáêáëþ, îáíáäçìéïõñãÞóôå ôïí ðßíáêá", -"Ï äéá÷ùñéóôÞò ðåäßùí äåí åßíáé áõôüò ðïõ áíáìåíüôáí. Ðáñáêáëþ áíáôñÝîôå óôï manual", -"Äåí ìðïñåßôå íá ÷ñçóéìïðïéÞóåôå fixed rowlength óå BLOBs. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'fields terminated by'.", -"Ôï áñ÷åßï '%-.64s' ðñÝðåé íá õðÜñ÷åé óôï database directory Þ íá ìðïñåß íá äéáâáóôåß áðü üëïõò", -"Ôï áñ÷åßï '%-.64s' õðÜñ÷åé Þäç", -"ÅããñáöÝò: %ld ÄéáãñáöÝò: %ld ÐáñåêÜìöèçóáí: %ld ÐñïåéäïðïéÞóåéò: %ld", -"ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld", -"ÅóöáëìÝíï sub part key. Ôï ÷ñçóéìïðïéïýìåíï key part äåí åßíáé string Þ ôï ìÞêïò ôïõ åßíáé ìåãáëýôåñï", -"Äåí åßíáé äõíáôÞ ç äéáãñáöÞ üëùí ôùí ðåäßùí ìå ALTER TABLE. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå DROP TABLE", -"Áäýíáôç ç äéáãñáöÞ (DROP) '%-.64s'. Ðáñáêáëþ åëÝãîôå áí ôï ðåäßï/êëåéäß õðÜñ÷åé", -"ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld ÐñïåéäïðïéÞóåéò: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Áãíùóôï thread id: %lu", -"Äåí åßóèå owner ôïõ thread %lu", -"Äåí ÷ñçóéìïðïéÞèçêáí ðßíáêåò", -"ÐÜñá ðïëëÜ strings ãéá ôï ðåäßï %-.64s êáé SET", -"Áäýíáôç ç äçìéïõñãßá unique log-filename %-.64s.(1-999)\n", -"Ï ðßíáêáò '%-.64s' Ý÷åé êëåéäùèåß ìå READ lock êáé äåí åðéôñÝðïíôáé áëëáãÝò", -"Ï ðßíáêáò '%-.64s' äåí Ý÷åé êëåéäùèåß ìå LOCK TABLES", -"Ôá Blob ðåäßá '%-.64s' äåí ìðïñïýí íá Ý÷ïõí ðñïêáèïñéóìÝíåò ôéìÝò (default value)", -"ËÜèïò üíïìá âÜóçò äåäïìÝíùí '%-.100s'", -"ËÜèïò üíïìá ðßíáêá '%-.100s'", -"Ôï SELECT èá åîåôÜóåé ìåãÜëï áñéèìü åããñáöþí êáé ðéèáíþò èá êáèõóôåñÞóåé. 
Ðáñáêáëþ åîåôÜóôå ôéò ðáñáìÝôñïõò ôïõ WHERE êáé ÷ñçóéìïðïéåßóôå SET SQL_BIG_SELECTS=1 áí ôï SELECT åßíáé óùóôü", -"ÐñïÝêõøå Üãíùóôï ëÜèïò", -"Áãíùóôç äéáäéêáóßá '%-.64s'", -"ËÜèïò áñéèìüò ðáñáìÝôñùí óôç äéáäéêáóßá '%-.64s'", -"ËÜèïò ðáñÜìåôñïé óôçí äéáäéêáóßá '%-.64s'", -"Áãíùóôïò ðßíáêáò '%-.64s' óå %s", -"Ôï ðåäßï '%-.64s' Ý÷åé ïñéóèåß äýï öïñÝò", -"ÅóöáëìÝíç ÷ñÞóç ôçò group function", -"Ï ðßíáêò '%-.64s' ÷ñçóéìïðïéåß êÜðïéï extension ðïõ äåí õðÜñ÷åé óôçí Ýêäïóç áõôÞ ôçò MySQL", -"Åíáò ðßíáêáò ðñÝðåé íá Ý÷åé ôïõëÜ÷éóôïí Ýíá ðåäßï", -"Ï ðßíáêáò '%-.64s' åßíáé ãåìÜôïò", -"Áãíùóôï character set: '%-.64s'", -"Ðïëý ìåãÜëïò áñéèìüò ðéíÜêùí. Ç MySQL ìðïñåß íá ÷ñçóéìïðïéÞóåé %d ðßíáêåò óå äéáäéêáóßá join", -"Ðïëý ìåãÜëïò áñéèìüò ðåäßùí", -"Ðïëý ìåãÜëï ìÝãåèïò åããñáöÞò. Ôï ìÝãéóôï ìÝãåèïò åããñáöÞò, ÷ùñßò íá õðïëïãßæïíôáé ôá blobs, åßíáé %d. ÐñÝðåé íá ïñßóåôå êÜðïéá ðåäßá óáí blobs", -"Stack overrun óôï thread: Used: %ld of a %ld stack. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'mysqld -O thread_stack=#' ãéá íá ïñßóåôå Ýíá ìåãáëýôåñï stack áí ÷ñåéÜæåôáé", -"Cross dependency âñÝèçêå óå OUTER JOIN. Ðáñáêáëþ åîåôÜóôå ôéò óõíèÞêåò ðïõ èÝóáôå óôï ON", -"Ôï ðåäßï '%-.64s' ÷ñçóéìïðïéåßôáé óáí UNIQUE Þ INDEX áëëÜ äåí Ý÷åé ïñéóèåß óáí NOT NULL", -"Äåí åßíáé äõíáôÞ ç äéáäéêáóßá load ãéá ôç óõíÜñôçóç '%-.64s'", -"Äåí åßíáé äõíáôÞ ç Ýíáñîç ôçò óõíÜñôçóçò '%-.64s'; %-.80s", -"Äåí âñÝèçêáí paths ãéá ôçí shared library", -"Ç óõíÜñôçóç '%-.64s' õðÜñ÷åé Þäç", -"Äåí åßíáé äõíáôÞ ç áíÜãíùóç ôçò shared library '%-.64s' (êùäéêüò ëÜèïõò: %d %s)", -"Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò óõíÜñôçóçò '%-.64s' óôçí âéâëéïèÞêç'", -"Ç óõíÜñôçóç '%-.64s' äåí Ý÷åé ïñéóèåß", -"Ï õðïëïãéóôÞò Ý÷åé áðïêëåéóèåß ëüãù ðïëëáðëþí ëáèþí óýíäåóçò. 
ÐñïóðáèÞóôå íá äéïñþóåôå ìå 'mysqladmin flush-hosts'", -"Ï õðïëïãéóôÞò äåí Ý÷åé äéêáßùìá óýíäåóçò ìå ôïí MySQL server", -"×ñçóéìïðïéåßôå ôçí MySQL óáí anonymous user êáé Ýôóé äåí ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí", -"ÐñÝðåé íá Ý÷åôå äéêáßùìá äéüñèùóçò ðéíÜêùí (update) óôç âÜóç äåäïìÝíùí mysql ãéá íá ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí", -"Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò áíôßóôïé÷çò åããñáöÞò óôïí ðßíáêá ôùí ÷ñçóôþí", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s'", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%-.64s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"You have an error in your SQL syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an 
error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not identically defined", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/hungarian/errmsg.txt b/sql/share/hungarian/errmsg.txt deleted file mode 100644 index 9c7d495fcf1..00000000000 --- a/sql/share/hungarian/errmsg.txt +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Translated by Feher Peter. Forditotta Feher Peter (feherp@mail.matav.hu) 1998 - Updated May, 2000 -*/ - -character-set=latin2 - -"hashchk", -"isamchk", -"NEM", -"IGEN", -"A '%-.64s' file nem hozhato letre (hibakod: %d)", -"A '%-.64s' tabla nem hozhato letre (hibakod: %d)", -"Az '%-.64s' adatbazis nem hozhato letre (hibakod: %d)", -"Az '%-.64s' adatbazis nem hozhato letre Az adatbazis mar letezik", -"A(z) '%-.64s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik", -"Adatbazis megszuntetesi hiba ('%-.64s' nem torolheto, hibakod: %d)", -"Adatbazis megszuntetesi hiba ('%-.64s' nem szuntetheto meg, hibakod: %d)", -"Torlesi hiba: '%-.64s' (hibakod: %d)", -"Nem olvashato rekord a rendszertablaban", -"A(z) '%-.64s' statusza nem allapithato meg (hibakod: %d)", -"A munkakonyvtar nem allapithato meg (hibakod: %d)", -"A file nem zarolhato. (hibakod: %d)", -"A '%-.64s' file nem nyithato meg (hibakod: %d)", -"A(z) '%-.64s' file nem talalhato (hibakod: %d)", -"A(z) '%-.64s' konyvtar nem olvashato. (hibakod: %d)", -"Konyvtarvaltas nem lehetseges a(z) '%-.64s'-ba. (hibakod: %d)", -"A(z) '%-.64s' tablaban talalhato rekord megvaltozott az utolso olvasas ota", -"A lemez megtelt (%s).", -"Irasi hiba, duplikalt kulcs a '%-.64s' tablaban.", -"Hiba a(z) '%-.64s' zarasakor. (hibakod: %d)", -"Hiba a '%-.64s'file olvasasakor. (hibakod: %d)", -"Hiba a '%-.64s' file atnevezesekor. (hibakod: %d)", -"Hiba a '%-.64s' file irasakor. 
(hibakod: %d)", -"'%-.64s' a valtoztatas ellen zarolva", -"Sikertelen rendezes", -"A(z) '%-.64s' nezet nem letezik a(z) '%-.64s'-hoz", -"%d hibajelzes a tablakezelotol", -"A(z) '%-.64s' tablakezelonek nincs ilyen opcioja", -"Nem talalhato a rekord '%-.64s'-ben", -"Ervenytelen info a file-ban: '%-.64s'", -"Ervenytelen kulcsfile a tablahoz: '%-.64s'; probalja kijavitani!", -"Regi kulcsfile a '%-.64s'tablahoz; probalja kijavitani!", -"'%-.64s' irasvedett", -"Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)", -"Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet", -"Varatlan filevege-jel a '%-.64s'olvasasakor. (hibakod: %d)", -"Tul sok kapcsolat", -"Elfogyott a thread-memoria", -"A gepnev nem allapithato meg a cimbol", -"A kapcsolatfelvetel nem sikerult (Bad handshake)", -"A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz.", -"A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)", -"Nincs kivalasztott adatbazis", -"Ervenytelen parancs", -"A(z) '%-.64s' oszlop erteke nem lehet nulla", -"Ervenytelen adatbazis: '%-.64s'", -"A(z) '%-.64s' tabla mar letezik", -"Ervenytelen tabla: '%-.64s'", -"A(z) '%-.64s' oszlop %-.64s-ben ketertelmu", -"A szerver leallitasa folyamatban", -"A(z) '%-.64s' oszlop ervenytelen '%-.64s'-ben", -"Used '%-.64s' with wasn't in group by", -"A group nem hasznalhato: '%-.64s'", -"Statement has sum functions and columns in same statement", -"Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel", -"A(z) '%-.100s' azonositonev tul hosszu.", -"Duplikalt oszlopazonosito: '%-.64s'", -"Duplikalt kulcsazonosito: '%-.64s'", -"Duplikalt bejegyzes '%-.64s' a %d kulcs szerint.", -"Rossz oszlopazonosito: '%-.64s'", -"A %s a '%-.80s'-hez kozeli a %d sorban", -"Ures lekerdezes.", -"Nem egyedi tabla/alias: '%-.64s'", -"Ervenytelen ertek: '%-.64s'", -"Tobbszoros elsodleges kulcs definialas.", -"Tul sok kulcs. 
Maximum %d kulcs engedelyezett.", -"Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett", -"A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d", -"A(z) '%-.64s'kulcsoszlop nem letezik a tablaban", -"Blob objektum '%-.64s' nem hasznalhato kulcskent", -"A(z) '%-.64s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb.", -"Csak egy auto mezo lehetseges, es azt kulcskent kell definialni.", -"%s: kapcsolatra kesz", -"%s: Normal leallitas\n", -"%s: %d jelzes. Megszakitva!\n", -"%s: A leallitas kesz\n", -"%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.64s'\n", -"Az IP socket nem hozhato letre", -"A(z) '%-.64s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat", -"A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!", -"Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' .", -"A(z) '%-.64s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak", -"A '%-.64s' file mar letezik.", -"Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld", -"Rekordok: %ld Duplikalva: %ld", -"Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz", -"Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette", -"A DROP '%-.64s' nem lehetseges. 
Ellenorizze, hogy a mezo/kulcs letezik-e", -"Rekordok: %ld Duplikalva: %ld Warnings: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Ervenytelen szal (thread) id: %lu", -"A %lu thread-nek mas a tulajdonosa", -"Nincs hasznalt tabla", -"Tul sok karakter: %-.64s es SET", -"Egyedi log-filenev nem generalhato: %-.64s.(1-999)\n", -"A(z) '%-.64s' tabla zarolva lett (READ lock) es nem lehet frissiteni", -"A(z) '%-.64s' tabla nincs zarolva a LOCK TABLES-szel", -"A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke", -"Hibas adatbazisnev: '%-.100s'", -"Hibas tablanev: '%-.100s'", -"A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay", -"Ismeretlen hiba", -"Ismeretlen eljaras: '%-.64s'", -"Rossz parameter a(z) '%-.64s'eljaras szamitasanal", -"Rossz parameter a(z) '%-.64s' eljarasban", -"Ismeretlen tabla: '%-.64s' %s-ban", -"A(z) '%-.64s' mezot ketszer definialta", -"A group funkcio ervenytelen hasznalata", -"A(z) '%-.64s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban.", -"A tablanak legalabb egy oszlopot tartalmazni kell", -"A '%-.64s' tabla megtelt", -"Ervenytelen karakterkeszlet: '%-.64s'", -"Tul sok tabla. A MySQL csak %d tablat tud kezelni osszefuzeskor", -"Tul sok mezo", -"Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %d. Nehany mezot meg kell valtoztatnia", -"Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld -O thread_stack=#' nagyobb verem definialasahoz", -"Keresztfuggoseg van az OUTER JOIN-ban. 
Ellenorizze az ON felteteleket", -"A(z) '%-.64s' oszlop INDEX vagy UNIQUE (egyedi), de a definicioja szerint nem NOT NULL", -"A(z) '%-.64s' fuggveny nem toltheto be", -"A(z) '%-.64s' fuggveny nem inicializalhato; %-.80s", -"Nincs ut a megosztott konyvtarakhoz (shared library)", -"A '%-.64s' fuggveny mar letezik", -"A(z) '%-.64s' megosztott konyvtar nem hasznalhato (hibakod: %d %s)", -"A(z) '%-.64s' fuggveny nem talalhato a konyvtarban", -"A '%-.64s' fuggveny nem definialt", -"A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot", -"A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MySQL szerverhez", -"Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas", -"Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz", -"Nincs megegyezo sor a user tablaban", -"Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld", -"Uj thread letrehozasa nem lehetseges (Hibakod: %d). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet", -"Az oszlopban talalhato ertek nem egyezik meg a %ld sorban szamitott ertekkel", -"Nem lehet ujra-megnyitni a tablat: '%-.64s", -"A NULL ervenytelen hasznalata", -"'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)", -"A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul", -"A '%-.32s' felhasznalonak nincs ilyen joga a '%-.64s' host-on", -"%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban", -"%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban", -"Ervenytelen GRANT/REVOKE parancs. 
Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek", -"A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban", -"A '%-.64s.%s' tabla nem letezik", -"A '%-.32s' felhasznalo szamara a '%-.64s' host '%-.64s' tablajaban ez a parancs nem engedelyezett", -"A hasznalt parancs nem engedelyezett ebben a MySQL verzioban", -"Szintaktikai hiba", -"A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.64s tablahoz", -"Tul sok kesletetett thread (delayed)", -"Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.64s' (%s)", -"A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'", -"Olvasasi hiba a kapcsolat soran", -"Hiba a fcntl() fuggvenyben", -"Helytelen sorrendben erkezett adatcsomagok", -"A kommunikacios adatcsomagok nem tomorithetok ki", -"HIba a kommunikacios adatcsomagok olvasasa soran", -"Idotullepes a kommunikacios adatcsomagok olvasasa soran", -"Hiba a kommunikacios csomagok irasa soran", -"Idotullepes a kommunikacios csomagok irasa soran", -"Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'", -"A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket", -"A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket", -"Az INSERT DELAYED nem hasznalhato a '%-.64s' tablahoz, mert a tabla zarolt (LOCK TABLES)", -"Ervenytelen mezonev: '%-.100s'", -"A hasznalt tablakezelo nem tudja a '%-.64s' mezot indexelni", -"A MERGE tablaban talalhato tablak definicioja nem azonos", -"A '%-.64s' nem irhato, az egyedi mezok miatt", -"BLOB mezo '%-.64s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul", -"Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot", -"Az eredmeny tobb, mint egy sort tartalmaz", -"Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo", -"Ezen leforditott MySQL verzio nem tartalmaz RAID support-ot", -"On a biztonsagos update modot hasznalja, es WHERE 
that uses a KEY column", -"A '%-.64s' kulcs nem letezik a '%-.64s' tablaban", -"Nem tudom megnyitni a tablat", -"A tabla kezeloje (handler) nem tamogatja az %s", -"Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban", -"%d hiba a COMMIT vegrehajtasa soran", -"%d hiba a ROLLBACK vegrehajtasa soran", -"%d hiba a FLUSH_LOGS vegrehajtasa soran", -"%d hiba a CHECKPOINT vegrehajtasa soran", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/italian/errmsg.txt b/sql/share/italian/errmsg.txt deleted file mode 100644 index db4f297dca3..00000000000 --- a/sql/share/italian/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NO", -"SI", -"Impossibile creare il file '%-.64s' (errno: %d)", -"Impossibile creare la tabella '%-.64s' (errno: %d)", -"Impossibile creare il database '%-.64s' (errno: %d)", -"Impossibile creare il database '%-.64s'; il database esiste", -"Impossibile cancellare '%-.64s'; il database non esiste", -"Errore durante la cancellazione del database (impossibile cancellare '%-.64s', errno: %d)", -"Errore durante la cancellazione del database (impossibile rmdir '%-.64s', errno: %d)", -"Errore durante la cancellazione di '%-.64s' (errno: %d)", -"Impossibile leggere il record dalla tabella di sistema", -"Impossibile leggere lo stato di '%-.64s' (errno: %d)", -"Impossibile leggere la directory di lavoro (errno: %d)", -"Impossibile il locking il file (errno: %d)", -"Impossibile aprire il file: '%-.64s' (errno: %d)", -"Impossibile trovare il file: '%-.64s' (errno: %d)", -"Impossibile leggere la directory di '%-.64s' (errno: %d)", -"Impossibile cambiare la directory in '%-.64s' (errno: %d)", -"Il record e` cambiato dall'ultima lettura della tabella '%-.64s'", -"Disco pieno (%s). 
In attesa che qualcuno liberi un po' di spazio...", -"Scrittura impossibile: chiave duplicata nella tabella '%-.64s'", -"Errore durante la chiusura di '%-.64s' (errno: %d)", -"Errore durante la lettura del file '%-.64s' (errno: %d)", -"Errore durante la rinominazione da '%-.64s' a '%-.64s' (errno: %d)", -"Errore durante la scrittura del file '%-.64s' (errno: %d)", -"'%-.64s' e` soggetto a lock contro i cambiamenti", -"Operazione di ordinamento abbandonata", -"La view '%-.64s' non esiste per '%-.64s'", -"Rilevato l'errore %d dal gestore delle tabelle", -"Il gestore delle tabelle per '%-.64s' non ha questa opzione", -"Impossibile trovare il record in '%-.64s'", -"Informazione errata nel file: '%-.64s'", -"File chiave errato per la tabella : '%-.64s'; prova a riparalo", -"File chiave vecchio per la tabella '%-.64s'; riparalo!", -"'%-.64s' e` di sola lettura", -"Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)", -"Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone", -"Fine del file inaspettata durante la lettura del file '%-.64s' (errno: %d)", -"Troppe connessioni", -"Fine dello spazio/memoria per i thread", -"Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)", -"Negoziazione impossibile", -"Accesso non consentito per l'utente: '%-.32s'@'%-.64s' al database '%-.64s'", -"Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)", -"Nessun database selezionato", -"Comando sconosciuto", -"La colonna '%-.64s' non puo` essere nulla", -"Database '%-.64s' sconosciuto", -"La tabella '%-.64s' esiste gia`", -"Tabella '%-.64s' sconosciuta", -"Colonna: '%-.64s' di %-.64s e` ambigua", -"Shutdown del server in corso", -"Colonna sconosciuta '%-.64s' in '%-.64s'", -"Usato '%-.64s' che non e` nel GROUP BY", -"Impossibile raggruppare per '%-.64s'", -"Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY", -"Il numero delle colonne non e` uguale al numero dei valori", -"Il nome 
dell'identificatore '%-.100s' e` troppo lungo", -"Nome colonna duplicato '%-.64s'", -"Nome chiave duplicato '%-.64s'", -"Valore duplicato '%-.64s' per la chiave %d", -"Specifica errata per la colonna '%-.64s'", -"%s vicino a '%-.80s' linea %d", -"La query e` vuota", -"Tabella/alias non unico: '%-.64s'", -"Valore di default non valido per '%-.64s'", -"Definite piu` chiave primarie", -"Troppe chiavi. Sono ammesse max %d chiavi", -"Troppe parti di chiave specificate. Sono ammesse max %d parti", -"La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d", -"La colonna chiave '%-.64s' non esiste nella tabella", -"La colonna BLOB '%-.64s' non puo` essere usata nella specifica della chiave", -"La colonna '%-.64s' e` troppo grande (max=%d). Utilizza un BLOB.", -"Puo` esserci solo un campo AUTO e deve essere definito come chiave", -"%s: Pronto per le connessioni\n", -"%s: Shutdown normale\n", -"%s: Ricevuto segnale %d. Interruzione!\n", -"%s: Shutdown completato\n", -"%s: Forzata la chiusura del thread %ld utente: '%-.64s'\n", -"Impossibile creare il socket IP", -"La tabella '%-.64s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella", -"L'argomento 'Field separator' non e` quello atteso. Controlla il manuale", -"Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'.", -"Il file '%-.64s' deve essere nella directory del database e deve essere leggibile da tutti", -"Il file '%-.64s' esiste gia`", -"Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld", -"Records: %ld Duplicati: %ld", -"Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave.", -"Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE", -"Impossibile cancellare '%-.64s'. 
Controllare che il campo chiave esista", -"Records: %ld Duplicati: %ld Avvertimenti: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Thread id: %lu sconosciuto", -"Utente non proprietario del thread %lu", -"Nessuna tabella usata", -"Troppe stringhe per la colonna %-.64s e la SET", -"Impossibile generare un nome del file log unico %-.64s.(1-999)\n", -"La tabella '%-.64s' e` soggetta a lock in lettura e non puo` essere aggiornata", -"Non e` stato impostato il lock per la tabella '%-.64s' con LOCK TABLES", -"Il campo BLOB '%-.64s' non puo` avere un valore di default", -"Nome database errato '%-.100s'", -"Nome tabella errato '%-.100s'", -"La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto.", -"Errore sconosciuto", -"Procedura '%-.64s' sconosciuta", -"Numero di parametri errato per la procedura '%-.64s'", -"Parametri errati per la procedura '%-.64s'", -"Tabella '%-.64s' sconosciuta in %s", -"Campo '%-.64s' specificato 2 volte", -"Uso non valido di una funzione di raggruppamento", -"La tabella '%-.64s' usa un'estensione che non esiste in questa versione di MySQL", -"Una tabella deve avere almeno 1 colonna", -"La tabella '%-.64s' e` piena", -"Set di caratteri '%-.64s' sconosciuto", -"Troppe tabelle. MySQL puo` usare solo %d tabelle in una join", -"Troppi campi", -"Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %d. Devi cambiare alcuni campi in BLOB", -"Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld -O thread_stack=#' per specificare uno stack piu` grande.", -"Trovata una dipendenza incrociata nella OUTER JOIN. 
Controlla le condizioni ON", -"La colonna '%-.64s' e` usata con UNIQUE o INDEX ma non e` definita come NOT NULL", -"Impossibile caricare la funzione '%-.64s'", -"Impossibile inizializzare la funzione '%-.64s'; %-.80s", -"Non sono ammessi path per le librerie condivisa", -"La funzione '%-.64s' esiste gia`", -"Impossibile aprire la libreria condivisa '%-.64s' (errno: %d %s)", -"Impossibile trovare la funzione '%-.64s' nella libreria", -"La funzione '%-.64s' non e` definita", -"Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'", -"Al sistema '%-.64s' non e` consentita la connessione a questo server MySQL", -"Impossibile cambiare la password usando MySQL come utente anonimo", -"E` necessario il privilegio di update sulle tabelle del database mysql per cambiare le password per gli altri utenti", -"Impossibile trovare la riga corrispondente nella tabella user", -"Rows riconosciute: %ld Cambiate: %ld Warnings: %ld", -"Impossibile creare un nuovo thread (errno %d). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO", -"Il numero delle colonne non corrisponde al conteggio alla riga %ld", -"Impossibile riaprire la tabella: '%-.64s'", -"Uso scorretto del valore NULL", -"Errore '%-.64s' da regexp", -"Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY", -"GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s'", -"Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla tabella '%-.64s'", -"Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'", -"Comando GRANT/REVOKE illegale. 
Prego consultare il manuale per sapere quali privilegi possono essere usati.", -"L'argomento host o utente per la GRANT e` troppo lungo", -"La tabella '%-.64s.%s' non esiste", -"GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s' sulla tabella '%-.64s'", -"Il comando utilizzato non e` supportato in questa versione di MySQL", -"Errore di sintassi nella query SQL", -"Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.64s", -"Troppi threads ritardati in uso", -"Interrotta la connessione %ld al db: '%-.64s' utente: '%-.64s' (%s)", -"Ricevuto un pacchetto piu` grande di 'max_allowed_packet'", -"Rilevato un errore di lettura dalla pipe di connessione", -"Rilevato un errore da fcntl()", -"Ricevuti pacchetti non in ordine", -"Impossibile scompattare i pacchetti di comunicazione", -"Rilevato un errore ricevendo i pacchetti di comunicazione", -"Rilevato un timeout ricevendo i pacchetti di comunicazione", -"Rilevato un errore inviando i pacchetti di comunicazione", -"Rilevato un timeout inviando i pacchetti di comunicazione", -"La stringa di risposta e` piu` lunga di 'max_allowed_packet'", -"Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT", -"Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT", -"L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.64s', perche` soggetta a lock da 'LOCK TABLES'", -"Nome colonna '%-.100s' non corretto", -"Il gestore delle tabelle non puo` indicizzare la colonna '%-.64s'", -"Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica", -"Impossibile scrivere nella tabella '%-.64s' per limitazione di unicita`", -"La colonna '%-.64s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza", -"Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE", -"Il risultato consiste di piu` di una riga", -"Questo tipo di tabella richiede una 
chiave primaria", -"Questa versione di MYSQL non e` compilata con il supporto RAID", -"In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave", -"La chiave '%-.64s' non esiste nella tabella '%-.64s'", -"Impossibile aprire la tabella", -"Il gestore per la tabella non supporta il %s", -"Non puoi eseguire questo comando in una transazione", -"Rilevato l'errore %d durante il COMMIT", -"Rilevato l'errore %d durante il ROLLBACK", -"Rilevato l'errore %d durante il FLUSH_LOGS", -"Rilevato l'errore %d durante il CHECKPOINT", -"Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)", -"Il gestore per la tabella non supporta il dump binario", -"Binlog e` stato chiuso durante l'esecuzione del FLUSH MASTER", -"Fallita la ricostruzione dell'indice della tabella copiata '%-.64s'", -"Errore dal master: '%-.64s", -"Errore di rete durante la ricezione dal master", -"Errore di rete durante l'invio al master", -"Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne", -"Impossibile eseguire il comando richiesto: tabelle sotto lock o transazione in atto", -"Variabile di sistema '%-.64s' sconosciuta", -"La tabella '%-.64s' e` segnalata come corrotta e deve essere riparata", -"La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) 
e` fallita", -"Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)", -"La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare", -"Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE", -"Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE", -"Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Impossibile creare il thread 'slave', controllare le risorse di sistema", -"L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive", -"Si possono usare solo espressioni costanti con SET", -"E' scaduto il timeout per l'attesa del lock", -"Il numero totale di lock e' maggiore della grandezza della tabella di lock", -"I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'", -"DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura", -"CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura", -"Argomenti errati a %s", -"A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti", -"Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database", -"Trovato deadlock durante il lock; Provare a far ripartire la transazione", -"La tabella usata non supporta gli indici FULLTEXT", -"Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)", -"Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto", -"Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto", -"Errore durante la connessione al master: %-.128s", -"Errore eseguendo una query sul 
master: %-.128s", -"Errore durante l'esecuzione del comando %s: %-.128s", -"Uso errato di %s e %s", -"La SELECT utilizzata ha un numero di colonne differente", -"Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura", -"E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali", -"L'opzione '%s' e' stata usata due volte nel comando", -"L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)", -"Accesso non consentito. Serve il privilegio %-.128s per questa operazione", -"La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL", -"La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL", -"La variabile '%-.64s' non ha un valore di default", -"Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'", -"Tipo di valore errato per la variabile '%-.64s'", -"Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto", -"Uso/posizione di '%s' sbagliato", -"Questa versione di MySQL non supporta ancora '%s'", -"Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a 
SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name 
'%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/japanese-sjis/errmsg.txt b/sql/share/japanese-sjis/errmsg.txt deleted file mode 100644 index 91f9b1cab92..00000000000 --- a/sql/share/japanese-sjis/errmsg.txt +++ /dev/null @@ -1,325 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Shift-JIS Japanese -*/ - -character-set=sjis - -"hashchk", -"isamchk", -"NO", -"YES", -"'%-.64s' ƒtƒ@ƒCƒ‹‚ªì‚ê‚Ü‚¹‚ñ (errno: %d)", -"'%-.64s' ƒe[ƒuƒ‹‚ªì‚ê‚Ü‚¹‚ñ.(errno: %d)", -"'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ (errno: %d)", -"'%-.64s' ƒf[ƒ^ƒx[ƒX‚ªì‚ê‚Ü‚¹‚ñ.Šù‚É‚»‚̃f[ƒ^ƒx[ƒX‚ª‘¶Ý‚µ‚Ü‚·", -"'%-.64s' ƒf[ƒ^ƒx[ƒX‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ. ‚»‚̃f[ƒ^ƒx[ƒX‚ª‚È‚¢‚̂ł·.", -"ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð휂ł«‚Ü‚¹‚ñ, errno: %d)", -"ƒf[ƒ^ƒx[ƒX”jŠüƒGƒ‰[ ('%-.64s' ‚ð rmdir ‚Å‚«‚Ü‚¹‚ñ, errno: %d)", -"'%-.64s' ‚Ì휂ªƒGƒ‰[ (errno: %d)", -"system table ‚̃ŒƒR[ƒh‚ð“ǂގ–‚ª‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½", -"'%-.64s' ‚̃XƒeƒCƒ^ƒX‚ª“¾‚ç‚ê‚Ü‚¹‚ñ. (errno: %d)", -"working directory ‚𓾂鎖‚ª‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½ (errno: %d)", -"ƒtƒ@ƒCƒ‹‚ðƒƒbƒN‚Å‚«‚Ü‚¹‚ñ (errno: %d)", -"'%-.64s' ƒtƒ@ƒCƒ‹‚ðŠJ‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)", -"'%-.64s' ƒtƒ@ƒCƒ‹‚ðŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ.(errno: %d)", -"'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚ª“ǂ߂܂¹‚ñ.(errno: %d)", -"'%-.64s' ƒfƒBƒŒƒNƒgƒŠ‚É chdir ‚Å‚«‚Ü‚¹‚ñ.(errno: %d)", -"Record has changed since last read in table '%-.64s'", -"Disk full (%s). ’N‚©‚ª‰½‚©‚ðŒ¸‚ç‚·‚܂ł܂Á‚Ä‚‚¾‚³‚¢...", -"table '%-.64s' ‚É key ‚ªd•¡‚µ‚Ä‚¢‚Ä‘‚«‚±‚߂܂¹‚ñ", -"Error on close of '%-.64s' (errno: %d)", -"'%-.64s' ƒtƒ@ƒCƒ‹‚̓ǂݞ‚݃Gƒ‰[ (errno: %d)", -"'%-.64s' ‚ð '%-.64s' ‚É rename ‚Å‚«‚Ü‚¹‚ñ (errno: %d)", -"'%-.64s' ƒtƒ@ƒCƒ‹‚ð‘‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d)", -"'%-.64s' ‚̓ƒbƒN‚³‚ê‚Ä‚¢‚Ü‚·", -"Sort ’†’f", -"View '%-.64s' ‚ª '%-.64s' ‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", -"Got error %d from table handler", -"Table handler for '%-.64s' doesn't have this option", -"'%-.64s'‚̂Ȃ©‚ɃŒƒR[ƒh‚ªŒ©•t‚©‚è‚Ü‚¹‚ñ", -"ƒtƒ@ƒCƒ‹ '%-.64s' ‚Ì info ‚ªŠÔˆá‚Á‚Ä‚¢‚邿‚¤‚Å‚·", -"'%-.64s' ƒe[ƒuƒ‹‚Ì key file ‚ªŠÔˆá‚Á‚Ä‚¢‚邿‚¤‚Å‚·. 
C•œ‚ð‚µ‚Ä‚‚¾‚³‚¢", -"'%-.64s' ƒe[ƒuƒ‹‚͌¢Œ`Ž®‚Ì key file ‚̂悤‚Å‚·; C•œ‚ð‚µ‚Ä‚‚¾‚³‚¢", -"'%-.64s' ‚͓ǂݞ‚Ýê—p‚Å‚·", -"Out of memory. ƒf[ƒ‚ƒ“‚ðƒŠƒXƒ^[ƒg‚µ‚Ă݂Ă‚¾‚³‚¢ (%d bytes •K—v)", -"Out of sort memory. sort buffer size ‚ª‘«‚è‚È‚¢‚悤‚Å‚·.", -"'%-.64s' ƒtƒ@ƒCƒ‹‚ð“ǂݞ‚Ý’†‚É EOF ‚ª—\Šú‚¹‚ÊŠ‚ÅŒ»‚ê‚Ü‚µ‚½. (errno: %d)", -"Ú‘±‚ª‘½‚·‚¬‚Ü‚·", -"Out of memory; mysqld ‚©‚»‚Ì‘¼‚̃vƒƒZƒX‚ªƒƒ‚ƒŠ[‚ð‘S‚ÄŽg‚Á‚Ä‚¢‚é‚©Šm”F‚µ‚Ä‚‚¾‚³‚¢. ƒƒ‚ƒŠ[‚ðŽg‚¢Ø‚Á‚Ä‚¢‚È‚¢ê‡A'ulimit' ‚ðݒ肵‚Ä mysqld ‚̃ƒ‚ƒŠ[Žg—pŒÀŠE—ʂ𑽂‚·‚é‚©Aswap space ‚ð‘‚₵‚Ă݂Ă‚¾‚³‚¢", -"‚»‚Ì address ‚Ì hostname ‚ªˆø‚¯‚Ü‚¹‚ñ.", -"Bad handshake", -"ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚Ì '%-.64s' ƒf[ƒ^ƒx[ƒX‚ւ̃AƒNƒZƒX‚ð‹‘”Û‚µ‚Ü‚·", -"ƒ†[ƒU[ '%-.32s'@'%-.64s' ‚ð‹‘”Û‚µ‚Ü‚·.uUsing password: %s)", -"ƒf[ƒ^ƒx[ƒX‚ª‘I‘ð‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.", -"‚»‚̃Rƒ}ƒ“ƒh‚͉½H", -"Column '%-.64s' ‚Í null ‚ɂ͂ł«‚È‚¢‚̂ł·", -"'%-.64s' ‚È‚ñ‚ăf[ƒ^ƒx[ƒX‚Í’m‚è‚Ü‚¹‚ñ.", -"Table '%-.64s' ‚ÍŠù‚É‚ ‚è‚Ü‚·", -"table '%-.64s' ‚Í‚ ‚è‚Ü‚¹‚ñ.", -"Column: '%-.64s' in %-.64s is ambiguous", -"Server ‚ð shutdown ’†...", -"'%-.64s' column ‚Í '%-.64s' ‚ɂ͂ ‚è‚Ü‚¹‚ñ.", -"'%-.64s' isn't in GROUP BY", -"Can't group on '%-.64s'", -"Statement has sum functions and columns in same statement", -"Column count doesn't match value count", -"Identifier name '%-.100s' ‚Í’·‚·‚¬‚Ü‚·", -"'%-.64s' ‚Æ‚¢‚¤ column –¼‚Íd•¡‚µ‚Ă܂·", -"'%-.64s' ‚Æ‚¢‚¤ key ‚Ì–¼‘O‚Íd•¡‚µ‚Ä‚¢‚Ü‚·", -"'%-.64s' ‚Í key %d ‚É‚¨‚¢‚Äd•¡‚µ‚Ä‚¢‚Ü‚·", -"Incorrect column specifier for column '%-.64s'", -"%s : '%-.80s' •t‹ß : %d s–Ú", -"Query ‚ª‹ó‚Å‚·.", -"'%-.64s' ‚͈êˆÓ‚Ì table/alias –¼‚ł͂ ‚è‚Ü‚¹‚ñ", -"Invalid default value for '%-.64s'", -"•¡”‚Ì primary key ‚ª’è‹`‚³‚ê‚Ü‚µ‚½", -"key ‚ÌŽw’肪‘½‚·‚¬‚Ü‚·. key ‚ÍÅ‘å %d ‚܂łł·", -"Too many key parts specified; max %d parts allowed", -"key ‚ª’·‚·‚¬‚Ü‚·. key ‚Ì’·‚³‚ÍÅ‘å %d ‚Å‚·", -"Key column '%-.64s' ‚ªƒe[ƒuƒ‹‚É‚ ‚è‚Ü‚¹‚ñ.", -"BLOB column '%-.64s' can't be used in key specification with the used table type", -"column '%-.64s' ‚Í,Šm•Û‚·‚é column ‚̑傫‚³‚ª‘½‚·‚¬‚Ü‚·. (Å‘å %d ‚Ü‚Å). 
BLOB ‚ð‚©‚í‚è‚ÉŽg—p‚µ‚Ä‚‚¾‚³‚¢.", -"ƒe[ƒuƒ‹‚Ì’è‹`‚ªˆá‚¢‚Ü‚·; there can be only one auto column and it must be defined as a key", -"%s: €”õŠ®—¹", -"%s: Normal shutdown\n", -"%s: Got signal %d. ’†’f!\n", -"%s: Shutdown Š®—¹\n", -"%s: ƒXƒŒƒbƒh %ld ‹§I—¹ user: '%-.64s'\n", -"IP socket ‚ªì‚ê‚Ü‚¹‚ñ", -"Table '%-.64s' ‚Í‚»‚̂悤‚È index ‚ðŽ‚Á‚Ä‚¢‚Ü‚¹‚ñ(CREATE INDEX ŽÀsŽž‚ÉŽw’肳‚ê‚Ä‚¢‚Ü‚¹‚ñ). ƒe[ƒuƒ‹‚ðì‚è’¼‚µ‚Ä‚‚¾‚³‚¢", -"Field separator argument is not what is expected; check the manual", -"You can't use fixed rowlength with BLOBs; please use 'fields terminated by'.", -"ƒtƒ@ƒCƒ‹ '%-.64s' ‚Í databse ‚Ì directory ‚É‚ ‚é‚©‘S‚Ẵ†[ƒU[‚ª“Ç‚ß‚é‚æ‚¤‚É‹–‰Â‚³‚ê‚Ä‚¢‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.", -"File '%-.64s' ‚ÍŠù‚É‘¶Ý‚µ‚Ü‚·", -"ƒŒƒR[ƒh”: %ld íœ: %ld Skipped: %ld Warnings: %ld", -"ƒŒƒR[ƒh”: %ld d•¡: %ld", -"Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part", -"ALTER TABLE ‚Å‘S‚Ä‚Ì column ‚Í휂ł«‚Ü‚¹‚ñ. DROP TABLE ‚ðŽg—p‚µ‚Ä‚‚¾‚³‚¢", -"'%-.64s' ‚ð”jŠü‚Å‚«‚Ü‚¹‚ñ‚Å‚µ‚½; check that column/key exists", -"ƒŒƒR[ƒh”: %ld d•¡”: %ld Warnings: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"thread id: %lu ‚Í‚ ‚è‚Ü‚¹‚ñ", -"thread %lu ‚̃I[ƒi[‚ł͂ ‚è‚Ü‚¹‚ñ", -"No tables used", -"Too many strings for column %-.64s and SET", -"Can't generate a unique log-filename %-.64s.(1-999)\n", -"Table '%-.64s' ‚Í READ lock ‚ɂȂÁ‚Ä‚¢‚ÄAXV‚͂ł«‚Ü‚¹‚ñ", -"Table '%-.64s' ‚Í LOCK TABLES ‚É‚æ‚Á‚ăƒbƒN‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", -"BLOB column '%-.64s' can't have a default value", -"Žw’肵‚½ database –¼ '%-.100s' ‚ªŠÔˆá‚Á‚Ä‚¢‚Ü‚·", -"Žw’肵‚½ table –¼ '%-.100s' ‚͂܂¿‚ª‚Á‚Ä‚¢‚Ü‚·", -"The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay", -"Unknown error", -"Unknown procedure '%-.64s'", -"Incorrect parameter count to procedure '%-.64s'", -"Incorrect parameters to procedure '%-.64s'", -"Unknown table '%-.64s' in %s", -"Column '%-.64s' specified twice", -"Invalid use of group 
function", -"Table '%-.64s' uses an extension that doesn't exist in this MySQL version", -"ƒe[ƒuƒ‹‚ÍÅ’á 1 ŒÂ‚Ì column ‚ª•K—v‚Å‚·", -"table '%-.64s' ‚Í‚¢‚Á‚Ï‚¢‚Å‚·", -"character set '%-.64s' ‚̓Tƒ|[ƒg‚µ‚Ä‚¢‚Ü‚¹‚ñ", -"ƒe[ƒuƒ‹‚ª‘½‚·‚¬‚Ü‚·; MySQL can only use %d tables in a join", -"column ‚ª‘½‚·‚¬‚Ü‚·", -"row size ‚ª‘å‚«‚·‚¬‚Ü‚·. BLOB ‚ðŠÜ‚܂Ȃ¢ê‡‚Ì row size ‚ÌÅ‘å‚Í %d ‚Å‚·. ‚¢‚‚‚©‚Ì field ‚ð BLOB ‚ɕς¦‚Ä‚‚¾‚³‚¢.", -"Thread stack overrun: Used: %ld of a %ld stack. ƒXƒ^ƒbƒN—̈æ‚𑽂‚Ƃ肽‚¢ê‡A'mysqld -O thread_stack=#' ‚ÆŽw’肵‚Ä‚‚¾‚³‚¢", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.64s' ‚ª UNIQUE ‚© INDEX ‚ÅŽg—p‚³‚ê‚Ü‚µ‚½. ‚±‚̃Jƒ‰ƒ€‚Í NOT NULL ‚Æ’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ.", -"function '%-.64s' ‚ð ƒ[ƒh‚Å‚«‚Ü‚¹‚ñ", -"function '%-.64s' ‚ð‰Šú‰»‚Å‚«‚Ü‚¹‚ñ; %-.80s", -"shared library ‚ւ̃pƒX‚ª’Ê‚Á‚Ä‚¢‚Ü‚¹‚ñ", -"Function '%-.64s' ‚ÍŠù‚É’è‹`‚³‚ê‚Ä‚¢‚Ü‚·", -"shared library '%-.64s' ‚ðŠJ‚Ž–‚ª‚Å‚«‚Ü‚¹‚ñ (errno: %d %s)", -"function '%-.64s' ‚ðƒ‰ƒCƒuƒ‰ƒŠ[’†‚ÉŒ©•t‚¯‚鎖‚ª‚Å‚«‚Ü‚¹‚ñ", -"Function '%-.64s' ‚Í’è‹`‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", -"Host '%-.64s' ‚Í many connection error ‚Ì‚½‚ßA‹‘”Û‚³‚ê‚Ü‚µ‚½. 'mysqladmin flush-hosts' ‚ʼn𜂵‚Ä‚‚¾‚³‚¢", -"Host '%-.64s' ‚Í MySQL server ‚ÉÚ‘±‚ð‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", -"MySQL ‚ð anonymous users ‚ÅŽg—p‚µ‚Ä‚¢‚éó‘Ô‚Å‚ÍAƒpƒXƒ[ƒh‚Ì•ÏX‚͂ł«‚Ü‚¹‚ñ", -"‘¼‚̃†[ƒU[‚̃pƒXƒ[ƒh‚ð•ÏX‚·‚邽‚߂ɂÍ, mysql ƒf[ƒ^ƒx[ƒX‚ɑ΂µ‚Ä update ‚Ì‹–‰Â‚ª‚È‚¯‚ê‚΂Ȃè‚Ü‚¹‚ñ.", -"Can't find any matching row in the user table", -"ˆê’v”(Rows matched): %ld •ÏX: %ld Warnings: %ld", -"V‹K‚ɃXƒŒƒbƒh‚ªì‚ê‚Ü‚¹‚ñ‚Å‚µ‚½ (errno %d). ‚à‚µÅ‘åŽg—p‹–‰Âƒƒ‚ƒŠ[”‚ð‰z‚¦‚Ä‚¢‚È‚¢‚̂ɃGƒ‰[‚ª”¶‚µ‚Ä‚¢‚é‚È‚ç, ƒ}ƒjƒ…ƒAƒ‹‚Ì’†‚©‚ç 'possible OS-dependent bug' ‚Æ‚¢‚¤•¶Žš‚ð’T‚µ‚Ä‚‚݂Ă¾‚³‚¢.", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s'", -"NULL ’l‚ÌŽg—p•û–@‚ª•s“K؂ł·", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", -"ƒ†[ƒU[ '%-.32s' (ƒzƒXƒg '%-.64s' ‚̃†[ƒU[) ‚Í‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", -"ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s' ,ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", -"ƒRƒ}ƒ“ƒh %-.16s ‚Í ƒ†[ƒU[ '%-.32s'@'%-.64s'\n ƒJƒ‰ƒ€ '%-.64s' ƒe[ƒuƒ‹ '%-.64s' ‚ɑ΂µ‚Ä‹–‰Â‚³‚ê‚Ä‚¢‚Ü‚¹‚ñ", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary 
key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got NDB error %d '%-.100s'", -"Got temporary NDB error %d '%-.100s'", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/japanese/errmsg.txt b/sql/share/japanese/errmsg.txt deleted file mode 100644 index 08a5ec7ad26..00000000000 --- a/sql/share/japanese/errmsg.txt +++ /dev/null @@ -1,325 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - 3.22.10-beta euc-japanese (ujis) text -*/ - -character-set=ujis - -"hashchk", -"isamchk", -"NO", -"YES", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¥Æ¡¼¥Ö¥ë¤¬ºî¤ì¤Þ¤»¤ó.(errno: %d)", -"'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó.´û¤Ë¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬Â¸ºß¤·¤Þ¤¹", -"'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤òÇË´þ¤Ç¤¤Þ¤»¤ó. ¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬¤Ê¤¤¤Î¤Ç¤¹.", -"¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤òºï½ü¤Ç¤¤Þ¤»¤ó, errno: %d)", -"¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤ò rmdir ¤Ç¤¤Þ¤»¤ó, errno: %d)", -"'%-.64s' ¤Îºï½ü¤¬¥¨¥é¡¼ (errno: %d)", -"system table ¤Î¥ì¥³¡¼¥É¤òÆÉ¤à»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿", -"'%-.64s' ¤Î¥¹¥Æ¥¤¥¿¥¹¤¬ÆÀ¤é¤ì¤Þ¤»¤ó. (errno: %d)", -"working directory ¤òÆÀ¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿ (errno: %d)", -"¥Õ¥¡¥¤¥ë¤ò¥í¥Ã¥¯¤Ç¤¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤ò¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó.(errno: %d)", -"'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤¬ÆÉ¤á¤Þ¤»¤ó.(errno: %d)", -"'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤Ë chdir ¤Ç¤¤Þ¤»¤ó.(errno: %d)", -"Record has changed since last read in table '%-.64s'", -"Disk full (%s). 狼¤¬²¿¤«¤ò¸º¤é¤¹¤Þ¤Ç¤Þ¤Ã¤Æ¤¯¤À¤µ¤¤...", -"table '%-.64s' ¤Ë key ¤¬½ÅÊ£¤·¤Æ¤¤¤Æ½ñ¤¤³¤á¤Þ¤»¤ó", -"Error on close of '%-.64s' (errno: %d)", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤ÎÆÉ¤ß¹þ¤ß¥¨¥é¡¼ (errno: %d)", -"'%-.64s' ¤ò '%-.64s' ¤Ë rename ¤Ç¤¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤ò½ñ¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¤Ï¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤¹", -"Sort ÃæÃÇ", -"View '%-.64s' ¤¬ '%-.64s' ¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"Got error %d from table handler", -"Table handler for '%-.64s' doesn't have this option", -"'%-.64s'¤Î¤Ê¤«¤Ë¥ì¥³¡¼¥É¤¬¸«ÉÕ¤«¤ê¤Þ¤»¤ó", -"¥Õ¥¡¥¤¥ë '%-.64s' ¤Î info ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹", -"'%-.64s' ¥Æ¡¼¥Ö¥ë¤Î key file ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹. 
½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤", -"'%-.64s' ¥Æ¡¼¥Ö¥ë¤Ï¸Å¤¤·Á¼°¤Î key file ¤Î¤è¤¦¤Ç¤¹; ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤", -"'%-.64s' ¤ÏÆÉ¤ß¹þ¤ßÀìÍѤǤ¹", -"Out of memory. ¥Ç¡¼¥â¥ó¤ò¥ê¥¹¥¿¡¼¥È¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤ (%d bytes ɬÍ×)", -"Out of sort memory. sort buffer size ¤¬Â¤ê¤Ê¤¤¤è¤¦¤Ç¤¹.", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤òÆÉ¤ß¹þ¤ßÃæ¤Ë EOF ¤¬Í½´ü¤»¤Ì½ê¤Ç¸½¤ì¤Þ¤·¤¿. (errno: %d)", -"Àܳ¤¬Â¿¤¹¤®¤Þ¤¹", -"Out of memory; mysqld ¤«¤½¤Î¾¤Î¥×¥í¥»¥¹¤¬¥á¥â¥ê¡¼¤òÁ´¤Æ»È¤Ã¤Æ¤¤¤ë¤«³Îǧ¤·¤Æ¤¯¤À¤µ¤¤. ¥á¥â¥ê¡¼¤ò»È¤¤ÀڤäƤ¤¤Ê¤¤¾ì¹ç¡¢'ulimit' ¤òÀßÄꤷ¤Æ mysqld ¤Î¥á¥â¥ê¡¼»ÈÍѸ³¦Î̤ò¿¤¯¤¹¤ë¤«¡¢swap space ¤òÁý¤ä¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤", -"¤½¤Î address ¤Î hostname ¤¬°ú¤±¤Þ¤»¤ó.", -"Bad handshake", -"¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤Î '%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤Ø¤Î¥¢¥¯¥»¥¹¤òµñÈݤ·¤Þ¤¹", -"¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤òµñÈݤ·¤Þ¤¹.uUsing password: %s)", -"¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ÁªÂò¤µ¤ì¤Æ¤¤¤Þ¤»¤ó.", -"¤½¤Î¥³¥Þ¥ó¥É¤Ï²¿¡©", -"Column '%-.64s' ¤Ï null ¤Ë¤Ï¤Ç¤¤Ê¤¤¤Î¤Ç¤¹", -"'%-.64s' ¤Ê¤ó¤Æ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ÏÃΤê¤Þ¤»¤ó.", -"Table '%-.64s' ¤Ï´û¤Ë¤¢¤ê¤Þ¤¹", -"table '%-.64s' ¤Ï¤¢¤ê¤Þ¤»¤ó.", -"Column: '%-.64s' in %-.64s is ambiguous", -"Server ¤ò shutdown Ãæ...", -"'%-.64s' column ¤Ï '%-.64s' ¤Ë¤Ï¤¢¤ê¤Þ¤»¤ó.", -"'%-.64s' isn't in GROUP BY", -"Can't group on '%-.64s'", -"Statement has sum functions and columns in same statement", -"Column count doesn't match value count", -"Identifier name '%-.100s' ¤ÏŤ¹¤®¤Þ¤¹", -"'%-.64s' ¤È¤¤¤¦ column ̾¤Ï½ÅÊ£¤·¤Æ¤Þ¤¹", -"'%-.64s' ¤È¤¤¤¦ key ¤Î̾Á°¤Ï½ÅÊ£¤·¤Æ¤¤¤Þ¤¹", -"'%-.64s' ¤Ï key %d ¤Ë¤ª¤¤¤Æ½ÅÊ£¤·¤Æ¤¤¤Þ¤¹", -"Incorrect column specifier for column '%-.64s'", -"%s : '%-.80s' ÉÕ¶á : %d ¹ÔÌÜ", -"Query ¤¬¶õ¤Ç¤¹.", -"'%-.64s' ¤Ï°ì°Õ¤Î table/alias ̾¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó", -"Invalid default value for '%-.64s'", -"Ê£¿ô¤Î primary key ¤¬ÄêµÁ¤µ¤ì¤Þ¤·¤¿", -"key ¤Î»ØÄ꤬¿¤¹¤®¤Þ¤¹. key ¤ÏºÇÂç %d ¤Þ¤Ç¤Ç¤¹", -"Too many key parts specified; max %d parts allowed", -"key ¤¬Ä¹¤¹¤®¤Þ¤¹. 
key ¤ÎŤµ¤ÏºÇÂç %d ¤Ç¤¹", -"Key column '%-.64s' ¤¬¥Æ¡¼¥Ö¥ë¤Ë¤¢¤ê¤Þ¤»¤ó.", -"BLOB column '%-.64s' can't be used in key specification with the used table type", -"column '%-.64s' ¤Ï,³ÎÊݤ¹¤ë column ¤ÎÂ礤µ¤¬Â¿¤¹¤®¤Þ¤¹. (ºÇÂç %d ¤Þ¤Ç). BLOB ¤ò¤«¤ï¤ê¤Ë»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤.", -"¥Æ¡¼¥Ö¥ë¤ÎÄêµÁ¤¬°ã¤¤¤Þ¤¹; there can be only one auto column and it must be defined as a key", -"%s: ½àÈ÷´°Î»", -"%s: Normal shutdown\n", -"%s: Got signal %d. ÃæÃÇ!\n", -"%s: Shutdown ´°Î»\n", -"%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.64s'\n", -"IP socket ¤¬ºî¤ì¤Þ¤»¤ó", -"Table '%-.64s' ¤Ï¤½¤Î¤è¤¦¤Ê index ¤ò»ý¤Ã¤Æ¤¤¤Þ¤»¤ó(CREATE INDEX ¼Â¹Ô»þ¤Ë»ØÄꤵ¤ì¤Æ¤¤¤Þ¤»¤ó). ¥Æ¡¼¥Ö¥ë¤òºî¤êľ¤·¤Æ¤¯¤À¤µ¤¤", -"Field separator argument is not what is expected; check the manual", -"You can't use fixed rowlength with BLOBs; please use 'fields terminated by'.", -"¥Õ¥¡¥¤¥ë '%-.64s' ¤Ï databse ¤Î directory ¤Ë¤¢¤ë¤«Á´¤Æ¤Î¥æ¡¼¥¶¡¼¤¬ÆÉ¤á¤ë¤è¤¦¤Ëµö²Ä¤µ¤ì¤Æ¤¤¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó.", -"File '%-.64s' ¤Ï´û¤Ë¸ºß¤·¤Þ¤¹", -"¥ì¥³¡¼¥É¿ô: %ld ºï½ü: %ld Skipped: %ld Warnings: %ld", -"¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£: %ld", -"Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part", -"ALTER TABLE ¤ÇÁ´¤Æ¤Î column ¤Ïºï½ü¤Ç¤¤Þ¤»¤ó. 
DROP TABLE ¤ò»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤", -"'%-.64s' ¤òÇË´þ¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿; check that column/key exists", -"¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£¿ô: %ld Warnings: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"thread id: %lu ¤Ï¤¢¤ê¤Þ¤»¤ó", -"thread %lu ¤Î¥ª¡¼¥Ê¡¼¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó", -"No tables used", -"Too many strings for column %-.64s and SET", -"Can't generate a unique log-filename %-.64s.(1-999)\n", -"Table '%-.64s' ¤Ï READ lock ¤Ë¤Ê¤Ã¤Æ¤¤¤Æ¡¢¹¹¿·¤Ï¤Ç¤¤Þ¤»¤ó", -"Table '%-.64s' ¤Ï LOCK TABLES ¤Ë¤è¤Ã¤Æ¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"BLOB column '%-.64s' can't have a default value", -"»ØÄꤷ¤¿ database ̾ '%-.100s' ¤¬´Ö°ã¤Ã¤Æ¤¤¤Þ¤¹", -"»ØÄꤷ¤¿ table ̾ '%-.100s' ¤Ï¤Þ¤Á¤¬¤Ã¤Æ¤¤¤Þ¤¹", -"The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay", -"Unknown error", -"Unknown procedure '%-.64s'", -"Incorrect parameter count to procedure '%-.64s'", -"Incorrect parameters to procedure '%-.64s'", -"Unknown table '%-.64s' in %s", -"Column '%-.64s' specified twice", -"Invalid use of group function", -"Table '%-.64s' uses an extension that doesn't exist in this MySQL version", -"¥Æ¡¼¥Ö¥ë¤ÏºÇÄã 1 ¸Ä¤Î column ¤¬É¬ÍפǤ¹", -"table '%-.64s' ¤Ï¤¤¤Ã¤Ñ¤¤¤Ç¤¹", -"character set '%-.64s' ¤Ï¥µ¥Ý¡¼¥È¤·¤Æ¤¤¤Þ¤»¤ó", -"¥Æ¡¼¥Ö¥ë¤¬Â¿¤¹¤®¤Þ¤¹; MySQL can only use %d tables in a join", -"column ¤¬Â¿¤¹¤®¤Þ¤¹", -"row size ¤¬Â礤¹¤®¤Þ¤¹. BLOB ¤ò´Þ¤Þ¤Ê¤¤¾ì¹ç¤Î row size ¤ÎºÇÂç¤Ï %d ¤Ç¤¹. ¤¤¤¯¤Ä¤«¤Î field ¤ò BLOB ¤ËÊѤ¨¤Æ¤¯¤À¤µ¤¤.", -"Thread stack overrun: Used: %ld of a %ld stack. ¥¹¥¿¥Ã¥¯Îΰè¤ò¿¤¯¤È¤ê¤¿¤¤¾ì¹ç¡¢'mysqld -O thread_stack=#' ¤È»ØÄꤷ¤Æ¤¯¤À¤µ¤¤", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.64s' ¤¬ UNIQUE ¤« INDEX ¤Ç»ÈÍѤµ¤ì¤Þ¤·¤¿. 
¤³¤Î¥«¥é¥à¤Ï NOT NULL ¤ÈÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó.", -"function '%-.64s' ¤ò ¥í¡¼¥É¤Ç¤¤Þ¤»¤ó", -"function '%-.64s' ¤ò½é´ü²½¤Ç¤¤Þ¤»¤ó; %-.80s", -"shared library ¤Ø¤Î¥Ñ¥¹¤¬Ä̤äƤ¤¤Þ¤»¤ó", -"Function '%-.64s' ¤Ï´û¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤¹", -"shared library '%-.64s' ¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d %s)", -"function '%-.64s' ¤ò¥é¥¤¥Ö¥é¥ê¡¼Ãæ¤Ë¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó", -"Function '%-.64s' ¤ÏÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"Host '%-.64s' ¤Ï many connection error ¤Î¤¿¤á¡¢µñÈݤµ¤ì¤Þ¤·¤¿. 'mysqladmin flush-hosts' ¤Ç²ò½ü¤·¤Æ¤¯¤À¤µ¤¤", -"Host '%-.64s' ¤Ï MySQL server ¤ËÀܳ¤òµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"MySQL ¤ò anonymous users ¤Ç»ÈÍѤ·¤Æ¤¤¤ë¾õÂ֤Ǥϡ¢¥Ñ¥¹¥ï¡¼¥É¤ÎÊѹ¹¤Ï¤Ç¤¤Þ¤»¤ó", -"¾¤Î¥æ¡¼¥¶¡¼¤Î¥Ñ¥¹¥ï¡¼¥É¤òÊѹ¹¤¹¤ë¤¿¤á¤Ë¤Ï, mysql ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ËÂФ·¤Æ update ¤Îµö²Ä¤¬¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó.", -"Can't find any matching row in the user table", -"°ìÃ׿ô(Rows matched): %ld Êѹ¹: %ld Warnings: %ld", -"¿·µ¬¤Ë¥¹¥ì¥Ã¥É¤¬ºî¤ì¤Þ¤»¤ó¤Ç¤·¤¿ (errno %d). ¤â¤·ºÇÂç»ÈÍѵö²Ä¥á¥â¥ê¡¼¿ô¤ò±Û¤¨¤Æ¤¤¤Ê¤¤¤Î¤Ë¥¨¥é¡¼¤¬È¯À¸¤·¤Æ¤¤¤ë¤Ê¤é, ¥Þ¥Ë¥å¥¢¥ë¤ÎÃæ¤«¤é 'possible OS-dependent bug' ¤È¤¤¤¦Ê¸»ú¤òõ¤·¤Æ¤¯¤ß¤Æ¤À¤µ¤¤.", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s'", -"NULL ÃͤλÈÍÑÊýË¡¤¬ÉÔŬÀڤǤ¹", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", -"¥æ¡¼¥¶¡¼ '%-.32s' (¥Û¥¹¥È '%-.64s' ¤Î¥æ¡¼¥¶¡¼) ¤Ïµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ,¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s'\n ¥«¥é¥à '%-.64s' ¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type 
requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got NDB error %d '%-.100s'", -"Got temporary NDB error %d '%-.100s'", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/korean/errmsg.txt b/sql/share/korean/errmsg.txt deleted file mode 100644 index 326158d0116..00000000000 --- a/sql/share/korean/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=euckr - -"hashchk", -"isamchk", -"¾Æ´Ï¿À", -"¿¹", -"ÈÀÏ '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"Å×À̺í '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. (¿¡·¯¹øÈ£: %d)", -"µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÔ", -"µ¥ÀÌŸº£À̽º '%-.64s'¸¦ Á¦°ÅÇÏÁö ¸øÇß½À´Ï´Ù. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÏÁö ¾ÊÀ½ ", -"µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯('%-.64s'¸¦ »èÁ¦ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)", -"µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯(rmdir '%-.64s'¸¦ ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)", -"'%-.64s' »èÁ¦ Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)", -"system Å×ÀÌºí¿¡¼ ·¹Äڵ带 ÀÐÀ» ¼ö ¾ø½À´Ï´Ù.", -"'%-.64s'ÀÇ »óŸ¦ ¾òÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"¼öÇà µð·ºÅ丮¸¦ ãÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"ÈÀÏÀ» Àá±×Áö(lock) ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"ÈÀÏÀ» ¿Áö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)", -"ÈÀÏÀ» ãÁö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)", -"'%-.64s'µð·ºÅ丮¸¦ ÀÐÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"'%-.64s'µð·ºÅ丮·Î À̵¿ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"Å×À̺í '%-.64s'¿¡¼ ¸¶Áö¸·À¸·Î ÀÐÀº ÈÄ Record°¡ º¯°æµÇ¾ú½À´Ï´Ù.", -"Disk full (%s). ´Ù¸¥ »ç¶÷ÀÌ Áö¿ï¶§±îÁö ±â´Ù¸³´Ï´Ù...", -"±â·ÏÇÒ ¼ö ¾øÀ¾´Ï´Ù., Å×À̺í '%-.64s'¿¡¼ Áߺ¹ Ű", -"'%-.64s'´Ý´Â Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)", -"'%-.64s'ÈÀÏ Àб⠿¡·¯ (¿¡·¯¹øÈ£: %d)", -"'%-.64s'¸¦ '%-.64s'·Î À̸§ º¯°æÁß ¿¡·¯ (¿¡·¯¹øÈ£: %d)", -"'%-.64s'ÈÀÏ ±â·Ï Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)", -"'%-.64s'°¡ º¯°æÇÒ ¼ö ¾øµµ·Ï Àá°ÜÀÖÀ¾´Ï´Ù.", -"¼ÒÆ®°¡ ÁߴܵǾú½À´Ï´Ù.", -"ºä '%-.64s'°¡ '%-.64s'¿¡¼´Â Á¸ÀçÇÏÁö ¾ÊÀ¾´Ï´Ù.", -"Å×À̺í handler¿¡¼ %d ¿¡·¯°¡ ¹ß»ý ÇÏ¿´½À´Ï´Ù.", -"'%-.64s'ÀÇ Å×À̺í handler´Â ÀÌ·¯ÇÑ ¿É¼ÇÀ» Á¦°øÇÏÁö ¾ÊÀ¾´Ï´Ù.", -"'%-.64s'¿¡¼ ·¹Äڵ带 ãÀ» ¼ö ¾øÀ¾´Ï´Ù.", -"ÈÀÏÀÇ ºÎÁ¤È®ÇÑ Á¤º¸: '%-.64s'", -"'%-.64s' Å×À̺íÀÇ ºÎÁ¤È®ÇÑ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!", -"'%-.64s' Å×À̺íÀÇ ÀÌÀü¹öÁ¯ÀÇ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!", -"Å×À̺í '%-.64s'´Â ÀбâÀü¿ë ÀÔ´Ï´Ù.", -"Out of memory. 
µ¥¸óÀ» Àç ½ÇÇà ÈÄ ´Ù½Ã ½ÃÀÛÇϽÿÀ (needed %d bytes)", -"Out of sort memory. daemon sort bufferÀÇ Å©±â¸¦ Áõ°¡½ÃŰ¼¼¿ä", -"'%-.64s' ÈÀÏÀ» Àд µµÁß À߸øµÈ eofÀ» ¹ß°ß (¿¡·¯¹øÈ£: %d)", -"³Ê¹« ¸¹Àº ¿¬°á... max_connectionÀ» Áõ°¡ ½ÃŰ½Ã¿À...", -"Out of memory; mysqld³ª ¶Ç´Ù¸¥ ÇÁ·Î¼¼¼¿¡¼ »ç¿ë°¡´ÉÇÑ ¸Þ¸ð¸®¸¦ »ç¿ëÇÑÁö äũÇϽÿÀ. ¸¸¾à ±×·¸Áö ¾Ê´Ù¸é ulimit ¸í·ÉÀ» ÀÌ¿¿ëÇÏ¿© ´õ¸¹Àº ¸Þ¸ð¸®¸¦ »ç¿ëÇÒ ¼ö ÀÖµµ·Ï Çϰųª ½º¿Ò ½ºÆÐÀ̽º¸¦ Áõ°¡½ÃŰ½Ã¿À", -"´ç½ÅÀÇ ÄÄÇ»ÅÍÀÇ È£½ºÆ®À̸§À» ¾òÀ» ¼ö ¾øÀ¾´Ï´Ù.", -"Bad handshake", -"'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â '%-.64s' µ¥ÀÌŸº£À̽º¿¡ Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù.", -"'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù. (using password: %s)", -"¼±ÅÃµÈ µ¥ÀÌŸº£À̽º°¡ ¾ø½À´Ï´Ù.", -"¸í·É¾î°¡ ¹ºÁö ¸ð¸£°Ú¾î¿ä...", -"Ä®·³ '%-.64s'´Â ³Î(Null)ÀÌ µÇ¸é ¾ÈµË´Ï´Ù. ", -"µ¥ÀÌŸº£À̽º '%-.64s'´Â ¾Ë¼ö ¾øÀ½", -"Å×À̺í '%-.64s'´Â ÀÌ¹Ì Á¸ÀçÇÔ", -"Å×À̺í '%-.64s'´Â ¾Ë¼ö ¾øÀ½", -"Ä®·³: '%-.64s' in '%-.64s' ÀÌ ¸ðÈ£ÇÔ", -"Server°¡ ¼Ë´Ù¿î ÁßÀÔ´Ï´Ù.", -"Unknown Ä®·³ '%-.64s' in '%-.64s'", -"'%-.64s'Àº GROUP BY¼Ó¿¡ ¾øÀ½", -"'%-.64s'¸¦ ±×·ìÇÒ ¼ö ¾øÀ½", -"Statement °¡ sum±â´ÉÀ» µ¿ÀÛÁßÀ̰í Ä®·³µµ µ¿ÀÏÇÑ statementÀÔ´Ï´Ù.", -"Ä®·³ÀÇ Ä«¿îÆ®°¡ °ªÀÇ Ä«¿îÆ®¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù.", -"Identifier '%-.100s'´Â ³Ê¹« ±æ±º¿ä.", -"Áߺ¹µÈ Ä®·³ À̸§: '%-.64s'", -"Áߺ¹µÈ Ű À̸§ : '%-.64s'", -"Áߺ¹µÈ ÀÔ·Â °ª '%-.64s': key %d", -"Ä®·³ '%-.64s'ÀÇ ºÎÁ¤È®ÇÑ Ä®·³ Á¤ÀÇÀÚ", -"'%-.64s' ¿¡·¯ °°À¾´Ï´Ù. ('%-.80s' ¸í·É¾î ¶óÀÎ %d)", -"Äõ¸®°á°ú°¡ ¾ø½À´Ï´Ù.", -"Unique ÇÏÁö ¾ÊÀº Å×À̺í/alias: '%-.64s'", -"'%-.64s'ÀÇ À¯È¿ÇÏÁö ¸øÇÑ µðÆúÆ® °ªÀ» »ç¿ëÇϼ̽À´Ï´Ù.", -"Multiple primary key°¡ Á¤ÀǵǾî ÀÖ½¿", -"³Ê¹« ¸¹Àº ۰¡ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %dÀÇ Å°°¡ °¡´ÉÇÔ", -"³Ê¹« ¸¹Àº Ű ºÎºÐ(parts)µéÀÌ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %d ºÎºÐÀÌ °¡´ÉÇÔ", -"Á¤ÀÇµÈ Å°°¡ ³Ê¹« ±é´Ï´Ù. ÃÖ´ë ŰÀÇ ±æÀÌ´Â %dÀÔ´Ï´Ù.", -"Key Ä®·³ '%-.64s'´Â Å×ÀÌºí¿¡ Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù.", -"BLOB Ä®·³ '%-.64s'´Â Ű Á¤ÀÇ¿¡¼ »ç¿ëµÉ ¼ö ¾ø½À´Ï´Ù.", -"Ä®·³ '%-.64s'ÀÇ Ä®·³ ±æÀ̰¡ ³Ê¹« ±é´Ï´Ù (ÃÖ´ë = %d). 
´ë½Å¿¡ BLOB¸¦ »ç¿ëÇϼ¼¿ä.", -"ºÎÁ¤È®ÇÑ Å×À̺í Á¤ÀÇ; Å×À̺íÀº ÇϳªÀÇ auto Ä®·³ÀÌ Á¸ÀçÇϰí Ű·Î Á¤ÀǵǾîÁ®¾ß ÇÕ´Ï´Ù.", -"%s: ¿¬°á ÁغñÁßÀÔ´Ï´Ù", -"%s: Á¤»óÀûÀÎ shutdown\n", -"%s: %d ½ÅÈ£°¡ µé¾î¿ÔÀ½. ÁßÁö!\n", -"%s: Shutdown ÀÌ ¿Ï·áµÊ!\n", -"%s: thread %ldÀÇ °Á¦ Á¾·á user: '%-.64s'\n", -"IP ¼ÒÄÏÀ» ¸¸µéÁö ¸øÇß½À´Ï´Ù.", -"Å×À̺í '%-.64s'´Â À妽º¸¦ ¸¸µéÁö ¾Ê¾Ò½À´Ï´Ù. alter Å×À̺í¸í·ÉÀ» ÀÌ¿ëÇÏ¿© Å×À̺íÀ» ¼öÁ¤Çϼ¼¿ä...", -"ÇÊµå ±¸ºÐÀÚ ÀμöµéÀÌ ¿ÏÀüÇÏÁö ¾Ê½À´Ï´Ù. ¸Þ´º¾óÀ» ã¾Æ º¸¼¼¿ä.", -"BLOB·Î´Â °íÁ¤±æÀÌÀÇ lowlength¸¦ »ç¿ëÇÒ ¼ö ¾ø½À´Ï´Ù. 'fields terminated by'¸¦ »ç¿ëÇϼ¼¿ä.", -"'%-.64s' ÈÀÏ´Â µ¥ÀÌŸº£À̽º µð·ºÅ丮¿¡ Á¸ÀçÇϰųª ¸ðµÎ¿¡°Ô Àб⠰¡´ÉÇÏ¿©¾ß ÇÕ´Ï´Ù.", -"'%-.64s' ÈÀÏÀº ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù.", -"·¹ÄÚµå: %ld°³ »èÁ¦: %ld°³ ½ºÅµ: %ld°³ °æ°í: %ld°³", -"·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³", -"ºÎÁ¤È®ÇÑ ¼¹ö ÆÄÆ® Ű. »ç¿ëµÈ Ű ÆÄÆ®°¡ ½ºÆ®¸µÀÌ ¾Æ´Ï°Å³ª Ű ÆÄÆ®ÀÇ ±æÀ̰¡ ³Ê¹« ±é´Ï´Ù.", -"ALTER TABLE ¸í·ÉÀ¸·Î´Â ¸ðµç Ä®·³À» Áö¿ï ¼ö ¾ø½À´Ï´Ù. DROP TABLE ¸í·ÉÀ» ÀÌ¿ëÇϼ¼¿ä.", -"'%-.64s'¸¦ DROPÇÒ ¼ö ¾ø½À´Ï´Ù. Ä®·³À̳ª ۰¡ Á¸ÀçÇÏ´ÂÁö äũÇϼ¼¿ä.", -"·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³ °æ°í: %ld°³", -"You can't specify target table '%-.64s' for update in FROM clause", -"¾Ë¼ö ¾ø´Â ¾²·¹µå id: %lu", -"¾²·¹µå(Thread) %luÀÇ ¼ÒÀ¯ÀÚ°¡ ¾Æ´Õ´Ï´Ù.", -"¾î¶² Å×ÀÌºíµµ »ç¿ëµÇÁö ¾Ê¾Ò½À´Ï´Ù.", -"Ä®·³ %-.64s¿Í SET¿¡¼ ½ºÆ®¸µÀÌ ³Ê¹« ¸¹½À´Ï´Ù.", -"Unique ·Î±×ÈÀÏ '%-.64s'¸¦ ¸¸µé¼ö ¾ø½À´Ï´Ù.(1-999)\n", -"Å×À̺í '%-.64s'´Â READ ¶ôÀÌ Àá°ÜÀÖ¾î¼ °»½ÅÇÒ ¼ö ¾ø½À´Ï´Ù.", -"Å×À̺í '%-.64s'´Â LOCK TABLES ¸í·ÉÀ¸·Î Àá±âÁö ¾Ê¾Ò½À´Ï´Ù.", -"BLOB Ä®·³ '%-.64s' ´Â µðÆúÆ® °ªÀ» °¡Áú ¼ö ¾ø½À´Ï´Ù.", -"'%-.100s' µ¥ÀÌŸº£À̽ºÀÇ À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù.", -"'%-.100s' Å×À̺í À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù.", -"SELECT ¸í·É¿¡¼ ³Ê¹« ¸¹Àº ·¹Äڵ带 ã±â ¶§¹®¿¡ ¸¹Àº ½Ã°£ÀÌ ¼Ò¿äµË´Ï´Ù. 
µû¶ó¼ WHERE ¹®À» Á¡°ËÇϰųª, ¸¸¾à SELECT°¡ okµÇ¸é SET SQL_BIG_SELECTS=1 ¿É¼ÇÀ» »ç¿ëÇϼ¼¿ä.", -"¾Ë¼ö ¾ø´Â ¿¡·¯ÀÔ´Ï´Ù.", -"¾Ë¼ö ¾ø´Â ¼öÇ๮ : '%-.64s'", -"'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆÄ¶ó¸ÞÅÍ", -"'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆÄ¶ó¸ÞÅÍ", -"¾Ë¼ö ¾ø´Â Å×À̺í '%-.64s' (µ¥ÀÌŸº£À̽º %s)", -"Ä®·³ '%-.64s'´Â µÎ¹ø Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.", -"À߸øµÈ ±×·ì ÇÔ¼ö¸¦ »ç¿ëÇÏ¿´½À´Ï´Ù.", -"Å×À̺í '%-.64s'´Â È®Àå¸í·ÉÀ» ÀÌ¿ëÇÏÁö¸¸ ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù.", -"ÇϳªÀÇ Å×ÀÌºí¿¡¼´Â Àû¾îµµ ÇϳªÀÇ Ä®·³ÀÌ Á¸ÀçÇÏ¿©¾ß ÇÕ´Ï´Ù.", -"Å×À̺í '%-.64s'°¡ full³µ½À´Ï´Ù. ", -"¾Ë¼ö¾ø´Â ¾ð¾î Set: '%-.64s'", -"³Ê¹« ¸¹Àº Å×À̺íÀÌ JoinµÇ¾ú½À´Ï´Ù. MySQL¿¡¼´Â JOIN½Ã %d°³ÀÇ Å×ÀÌºí¸¸ »ç¿ëÇÒ ¼ö ÀÖ½À´Ï´Ù.", -"Ä®·³ÀÌ ³Ê¹« ¸¹½À´Ï´Ù.", -"³Ê¹« Å« row »çÀÌÁîÀÔ´Ï´Ù. BLOB¸¦ °è»êÇÏÁö ¾Ê°í ÃÖ´ë row »çÀÌÁî´Â %dÀÔ´Ï´Ù. ¾ó¸¶°£ÀÇ ÇʵåµéÀ» BLOB·Î ¹Ù²Ù¼Å¾ß °Ú±º¿ä..", -"¾²·¹µå ½ºÅÃÀÌ ³ÑÃÆ½À´Ï´Ù. »ç¿ë: %ld°³ ½ºÅÃ: %ld°³. ¸¸¾à ÇÊ¿ä½Ã ´õÅ« ½ºÅÃÀ» ¿øÇÒ¶§¿¡´Â 'mysqld -O thread_stack=#' ¸¦ Á¤ÀÇÇϼ¼¿ä", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"'%-.64s' Ä®·³ÀÌ UNIQUE³ª INDEX¸¦ »ç¿ëÇÏ¿´Áö¸¸ NOT NULLÀÌ Á¤ÀǵÇÁö ¾Ê¾Ò±º¿ä...", -"'%-.64s' ÇÔ¼ö¸¦ ·ÎµåÇÏÁö ¸øÇß½À´Ï´Ù.", -"'%-.64s' ÇÔ¼ö¸¦ ÃʱâÈ ÇÏÁö ¸øÇß½À´Ï´Ù.; %-.80s", -"°øÀ¯ ¶óÀ̹ö·¯¸®¸¦ À§ÇÑ ÆÐ½º°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù.", -"'%-.64s' ÇÔ¼ö´Â ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù.", -"'%-.64s' °øÀ¯ ¶óÀ̹ö·¯¸®¸¦ ¿¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£: %d %s)", -"¶óÀ̹ö·¯¸®¿¡¼ '%-.64s' ÇÔ¼ö¸¦ ãÀ» ¼ö ¾ø½À´Ï´Ù.", -"'%-.64s' ÇÔ¼ö°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù.", -"³Ê¹« ¸¹Àº ¿¬°á¿À·ù·Î ÀÎÇÏ¿© È£½ºÆ® '%-.64s'´Â ºí¶ôµÇ¾ú½À´Ï´Ù. 'mysqladmin flush-hosts'¸¦ ÀÌ¿ëÇÏ¿© ºí¶ôÀ» ÇØÁ¦Çϼ¼¿ä", -"'%-.64s' È£½ºÆ®´Â ÀÌ MySQL¼¹ö¿¡ Á¢¼ÓÇÒ Çã°¡¸¦ ¹ÞÁö ¸øÇß½À´Ï´Ù.", -"´ç½ÅÀº MySQL¼¹ö¿¡ À͸íÀÇ »ç¿ëÀÚ·Î Á¢¼ÓÀ» Çϼ̽À´Ï´Ù.À͸íÀÇ »ç¿ëÀÚ´Â ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ¾ø½À´Ï´Ù.", -"´ç½ÅÀº ´Ù¸¥»ç¿ëÀÚµéÀÇ ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ÀÖµµ·Ï µ¥ÀÌŸº£À̽º º¯°æ±ÇÇÑÀ» °¡Á®¾ß ÇÕ´Ï´Ù.", -"»ç¿ëÀÚ Å×ÀÌºí¿¡¼ ÀÏÄ¡ÇÏ´Â °ÍÀ» ãÀ» ¼ö ¾øÀ¾´Ï´Ù.", -"ÀÏÄ¡ÇÏ´Â Rows : %ld°³ º¯°æµÊ: %ld°³ °æ°í: %ld°³", -"»õ·Î¿î ¾²·¹µå¸¦ ¸¸µé ¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£ %d). 
¸¸¾à ¿©À¯¸Þ¸ð¸®°¡ ÀÖ´Ù¸é OS-dependent¹ö±× ÀÇ ¸Þ´º¾ó ºÎºÐÀ» ã¾Æº¸½Ã¿À.", -"Row %ld¿¡¼ Ä®·³ Ä«¿îÆ®¿Í value Ä«¿îÅÍ¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù.", -"Å×À̺íÀ» ´Ù½Ã ¿¼ö ¾ø±º¿ä: '%-.64s", -"NULL °ªÀ» À߸ø »ç¿ëÇϼ̱º¿ä...", -"regexp¿¡¼ '%-.64s'°¡ ³µ½À´Ï´Ù.", -"Mixing of GROUP Ä®·³s (MIN(),MAX(),COUNT(),...) with no GROUP Ä®·³s is illegal if there is no GROUP BY clause", -"»ç¿ëÀÚ '%-.32s' (È£½ºÆ® '%-.64s')¸¦ À§ÇÏ¿© Á¤ÀÇµÈ ±×·± ½ÂÀÎÀº ¾ø½À´Ï´Ù.", -"'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Å×À̺í '%-.64s'", -"'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Ä®·³ '%-.64s' in Å×À̺í '%-.64s'", -"À߸øµÈ GRANT/REVOKE ¸í·É. ¾î¶² ±Ç¸®¿Í ½ÂÀÎÀÌ »ç¿ëµÇ¾î Áú ¼ö ÀÖ´ÂÁö ¸Þ´º¾óÀ» º¸½Ã¿À.", -"½ÂÀÎ(GRANT)À» À§ÇÏ¿© »ç¿ëÇÑ »ç¿ëÀÚ³ª È£½ºÆ®ÀÇ °ªµéÀÌ ³Ê¹« ±é´Ï´Ù.", -"Å×À̺í '%-.64s.%s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù.", -"»ç¿ëÀÚ '%-.32s'(È£½ºÆ® '%-.64s')´Â Å×À̺í '%-.64s'¸¦ »ç¿ëÇϱâ À§ÇÏ¿© Á¤ÀÇµÈ ½ÂÀÎÀº ¾ø½À´Ï´Ù. ", -"»ç¿ëµÈ ¸í·ÉÀº ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼´Â ÀÌ¿ëµÇÁö ¾Ê½À´Ï´Ù.", -"SQL ±¸¹®¿¡ ¿À·ù°¡ ÀÖ½À´Ï´Ù.", -"Áö¿¬µÈ insert ¾²·¹µå°¡ Å×À̺í %-.64sÀÇ ¿ä±¸µÈ ¶ôÅ·À» ó¸®ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù.", -"³Ê¹« ¸¹Àº Áö¿¬ ¾²·¹µå¸¦ »ç¿ëÇϰí ÀÖ½À´Ï´Ù.", -"µ¥ÀÌŸº£À̽º Á¢¼ÓÀ» À§ÇÑ ¿¬°á %ld°¡ Áß´ÜµÊ : '%-.64s' »ç¿ëÀÚ: '%-.64s' (%s)", -"'max_allowed_packet'º¸´Ù ´õÅ« ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù.", -"¿¬°á ÆÄÀÌÇÁ·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"fcntl() ÇÔ¼ö·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"¼ø¼°¡ ¸ÂÁö¾Ê´Â ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù.", -"Åë½Å ÆÐŶÀÇ ¾ÐÃàÇØÁ¦¸¦ ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù.", -"Åë½Å ÆÐŶÀ» Àд Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"Åë½Å ÆÐŶÀ» Àд Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"Åë½Å ÆÐŶÀ» ±â·ÏÇÏ´Â Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"Åë½Å ÆÐÆÂÀ» ±â·ÏÇÏ´Â Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column 
'%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/norwegian-ny/errmsg.txt b/sql/share/norwegian-ny/errmsg.txt deleted file mode 100644 index 3112bb94041..00000000000 --- a/sql/share/norwegian-ny/errmsg.txt +++ /dev/null @@ -1,323 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* Roy-Magne Mo rmo@www.hivolda.no 97 */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NEI", -"JA", -"Kan ikkje opprette fila '%-.64s' (Feilkode: %d)", -"Kan ikkje opprette tabellen '%-.64s' (Feilkode: %d)", -"Kan ikkje opprette databasen '%-.64s' (Feilkode: %d)", -"Kan ikkje opprette databasen '%-.64s'; databasen eksisterer", -"Kan ikkje fjerne (drop) '%-.64s'; databasen eksisterer ikkje", -"Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.64s', feil %d)", -"Feil ved sletting av database (kan ikkje slette katalogen '%-.64s', feil %d)", -"Feil ved sletting av '%-.64s' (Feilkode: %d)", -"Kan ikkje lese posten i systemkatalogen", -"Kan ikkje lese statusen til '%-.64s' (Feilkode: %d)", -"Kan ikkje lese aktiv katalog(Feilkode: %d)", -"Kan ikkje låse fila (Feilkode: %d)", -"Kan ikkje åpne fila: '%-.64s' (Feilkode: %d)", -"Kan ikkje finne fila: '%-.64s' (Feilkode: %d)", -"Kan ikkje lese katalogen '%-.64s' (Feilkode: %d)", -"Kan ikkje skifte katalog til '%-.64s' (Feilkode: %d)", -"Posten har vorte endra sidan den sist vart lesen '%-.64s'", -"Ikkje meir diskplass (%s). 
Ventar på å få frigjort plass...", -"Kan ikkje skrive, flere like nyklar i tabellen '%-.64s'", -"Feil ved lukking av '%-.64s' (Feilkode: %d)", -"Feil ved lesing av '%-.64s' (Feilkode: %d)", -"Feil ved omdøyping av '%-.64s' til '%-.64s' (Feilkode: %d)", -"Feil ved skriving av fila '%-.64s' (Feilkode: %d)", -"'%-.64s' er låst mot oppdateringar", -"Sortering avbrote", -"View '%-.64s' eksisterar ikkje for '%-.64s'", -"Mottok feil %d fra tabell handterar", -"Tabell håndteraren for '%-.64s' har ikkje denne moglegheita", -"Kan ikkje finne posten i '%-.64s'", -"Feil informasjon i fila: '%-.64s'", -"Tabellen '%-.64s' har feil i nykkelfila; prøv å reparere den", -"Gammel nykkelfil for tabellen '%-.64s'; reparer den!", -"'%-.64s' er skrivetryggja", -"Ikkje meir minne. Start på nytt tenesten og prøv igjen (trengte %d bytar)", -"Ikkje meir sorteringsminne. Auk sorteringsminnet (sorteringsbffer storleik) for tenesten", -"Uventa slutt på fil (eof) ved lesing av fila '%-.64s' (Feilkode: %d)", -"For mange tilkoplingar (connections)", -"Tomt for tråd plass/minne", -"Kan ikkje få tak i vertsnavn for di adresse", -"Feil handtrykk (handshake)", -"Tilgang ikkje tillate for brukar: '%-.32s'@'%-.64s' til databasen '%-.64s' nekta", -"Tilgang ikke tillate for brukar: '%-.32s'@'%-.64s' (Brukar passord: %s)", -"Ingen database vald", -"Ukjent kommando", -"Kolonne '%-.64s' kan ikkje vere null", -"Ukjent database '%-.64s'", -"Tabellen '%-.64s' eksisterar allereide", -"Ukjent tabell '%-.64s'", -"Kolonne: '%-.64s' i tabell %s er ikkje eintydig", -"Tenar nedkopling er i gang", -"Ukjent felt '%-.64s' i tabell %s", -"Brukte '%-.64s' som ikkje var i group by", -"Kan ikkje gruppere på '%-.64s'", -"Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk", -"Kolonne telling stemmer verdi telling", -"Identifikator '%-.64s' er for lang", -"Feltnamnet '%-.64s' eksisterte frå før", -"Nøkkelnamnet '%-.64s' eksisterte frå før", -"Like verdiar '%-.64s' for nykkel %d", -"Feil kolonne spesifikator for 
kolonne '%-.64s'", -"%s attmed '%-.64s' på line %d", -"Førespurnad var tom", -"Ikkje unikt tabell/alias: '%-.64s'", -"Ugyldig standardverdi for '%-.64s'", -"Fleire primærnyklar spesifisert", -"For mange nykler spesifisert. Maks %d nyklar tillatt", -"For mange nykkeldelar spesifisert. Maks %d delar tillatt", -"Spesifisert nykkel var for lang. Maks nykkellengde er %d", -"Nykkel kolonne '%-.64s' eksiterar ikkje i tabellen", -"Blob kolonne '%-.64s' kan ikkje brukast ved spesifikasjon av nyklar", -"For stor nykkellengde for felt '%-.64s' (maks = %d). Bruk BLOB istadenfor", -"Bare eitt auto felt kan være definert som nøkkel.", -"%s: klar for tilkoblingar", -"%s: Normal nedkopling\n", -"%s: Oppdaga signal %d. Avsluttar!\n", -"%s: Nedkopling komplett\n", -"%s: Påtvinga avslutning av tråd %ld brukar: '%-.64s'\n", -"Kan ikkje opprette IP socket", -"Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen på nytt", -"Felt skiljer argumenta er ikkje som venta, sjå dokumentasjonen", -"Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'.", -"Filen '%-.64s' må være i database-katalogen for å være lesbar for alle", -"Filen '%-.64s' eksisterte allereide", -"Poster: %ld Fjerna: %ld Hoppa over: %ld Åtvaringar: %ld", -"Poster: %ld Like: %ld", -"Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden", -"Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor.", -"Kan ikkje DROP '%-.64s'. 
Undersøk om felt/nøkkel eksisterar.", -"Postar: %ld Like: %ld Åtvaringar: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Ukjent tråd id: %lu", -"Du er ikkje eigar av tråd %lu", -"Ingen tabellar i bruk", -"For mange tekststrengar felt %s og SET", -"Kan ikkje lage unikt loggfilnavn %s.(1-999)\n", -"Tabellen '%-.64s' var låst med READ lås og kan ikkje oppdaterast", -"Tabellen '%-.64s' var ikkje låst med LOCK TABLES", -"Blob feltet '%-.64s' kan ikkje ha ein standard verdi", -"Ugyldig database namn '%-.64s'", -"Ugyldig tabell namn '%-.64s'", -"SELECT ville undersøkje for mange postar og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt", -"Ukjend feil", -"Ukjend prosedyre %s", -"Feil parameter tal til prosedyra %s", -"Feil parameter til prosedyra %s", -"Ukjend tabell '%-.64s' i %s", -"Feltet '%-.64s' er spesifisert to gangar", -"Invalid use of group function", -"Table '%-.64s' uses a extension that doesn't exist in this MySQL version", -"A table must have at least 1 column", -"The table '%-.64s' is full", -"Unknown character set: '%-.64s'", -"Too many tables; MySQL can only use %d tables in a join", -"Too many columns", -"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", -"Thread stack overrun: Used: %ld of a %ld stack. 
Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL", -"Can't load function '%-.64s'", -"Can't initialize function '%-.64s'; %-.80s", -"No paths allowed for shared library", -"Function '%-.64s' already exists", -"Can't open shared library '%-.64s' (errno: %d %s)", -"Can't find function '%-.64s' in library'", -"Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", -"Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", -"You must have privileges to update tables in the mysql database to be able to change passwords for others", -"Can't find any matching row in the user table", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is 
not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Mottok feil %d '%-.100s' fra %s", -"Mottok temporary feil %d '%-.100s' fra %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/norwegian/errmsg.txt b/sql/share/norwegian/errmsg.txt deleted file mode 100644 index 9f0b0fb21a8..00000000000 --- a/sql/share/norwegian/errmsg.txt +++ /dev/null @@ -1,323 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* Roy-Magne Mo rmo@www.hivolda.no 97 */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NEI", -"JA", -"Kan ikke opprette fila '%-.64s' (Feilkode: %d)", -"Kan ikke opprette tabellen '%-.64s' (Feilkode: %d)", -"Kan ikke opprette databasen '%-.64s' (Feilkode: %d)", -"Kan ikke opprette databasen '%-.64s'; databasen eksisterer", -"Kan ikke fjerne (drop) '%-.64s'; databasen eksisterer ikke", -"Feil ved fjerning (drop) av databasen (kan ikke slette '%-.64s', feil %d)", -"Feil ved sletting av database (kan ikke slette katalogen '%-.64s', feil %d)", -"Feil ved sletting av '%-.64s' (Feilkode: %d)", -"Kan ikke lese posten i systemkatalogen", -"Kan ikke lese statusen til '%-.64s' (Feilkode: %d)", -"Kan ikke lese aktiv katalog(Feilkode: %d)", -"Kan ikke låse fila (Feilkode: %d)", -"Kan ikke åpne fila: '%-.64s' (Feilkode: %d)", -"Kan ikke finne fila: '%-.64s' (Feilkode: %d)", -"Kan ikke lese katalogen '%-.64s' (Feilkode: %d)", -"Kan ikke skifte katalog til '%-.64s' (Feilkode: %d)", -"Posten har blitt endret siden den ble lest '%-.64s'", -"Ikke mer diskplass (%s). 
Venter på å få frigjort plass...", -"Kan ikke skrive, flere like nøkler i tabellen '%-.64s'", -"Feil ved lukking av '%-.64s' (Feilkode: %d)", -"Feil ved lesing av '%-.64s' (Feilkode: %d)", -"Feil ved omdøping av '%-.64s' til '%-.64s' (Feilkode: %d)", -"Feil ved skriving av fila '%-.64s' (Feilkode: %d)", -"'%-.64s' er låst mot oppdateringer", -"Sortering avbrutt", -"View '%-.64s' eksisterer ikke for '%-.64s'", -"Mottok feil %d fra tabell håndterer", -"Tabell håndtereren for '%-.64s' har ikke denne muligheten", -"Kan ikke finne posten i '%-.64s'", -"Feil informasjon i filen: '%-.64s'", -"Tabellen '%-.64s' har feil i nøkkelfilen; forsøk å reparer den", -"Gammel nøkkelfil for tabellen '%-.64s'; reparer den!", -"'%-.64s' er skrivebeskyttet", -"Ikke mer minne. Star på nytt tjenesten og prøv igjen (trengte %d byter)", -"Ikke mer sorteringsminne. Øk sorteringsminnet (sort buffer size) for tjenesten", -"Uventet slutt på fil (eof) ved lesing av filen '%-.64s' (Feilkode: %d)", -"For mange tilkoblinger (connections)", -"Tomt for tråd plass/minne", -"Kan ikke få tak i vertsnavn for din adresse", -"Feil håndtrykk (handshake)", -"Tilgang nektet for bruker: '%-.32s'@'%-.64s' til databasen '%-.64s' nektet", -"Tilgang nektet for bruker: '%-.32s'@'%-.64s' (Bruker passord: %s)", -"Ingen database valgt", -"Ukjent kommando", -"Kolonne '%-.64s' kan ikke vere null", -"Ukjent database '%-.64s'", -"Tabellen '%-.64s' eksisterer allerede", -"Ukjent tabell '%-.64s'", -"Felt: '%-.64s' i tabell %s er ikke entydig", -"Database nedkobling er i gang", -"Ukjent kolonne '%-.64s' i tabell %s", -"Brukte '%-.64s' som ikke var i group by", -"Kan ikke gruppere på '%-.64s'", -"Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk", -"Felt telling stemmer verdi telling", -"Identifikator '%-.64s' er for lang", -"Feltnavnet '%-.64s' eksisterte fra før", -"Nøkkelnavnet '%-.64s' eksisterte fra før", -"Like verdier '%-.64s' for nøkkel %d", -"Feil kolonne spesifikator for felt '%-.64s'", -"%s nær 
'%-.64s' på linje %d", -"Forespørsel var tom", -"Ikke unikt tabell/alias: '%-.64s'", -"Ugyldig standardverdi for '%-.64s'", -"Fleire primærnøkle spesifisert", -"For mange nøkler spesifisert. Maks %d nøkler tillatt", -"For mange nøkkeldeler spesifisert. Maks %d deler tillatt", -"Spesifisert nøkkel var for lang. Maks nøkkellengde er is %d", -"Nøkkel felt '%-.64s' eksiterer ikke i tabellen", -"Blob felt '%-.64s' kan ikke brukes ved spesifikasjon av nøkler", -"For stor nøkkellengde for kolonne '%-.64s' (maks = %d). Bruk BLOB istedenfor", -"Bare ett auto felt kan være definert som nøkkel.", -"%s: klar for tilkoblinger", -"%s: Normal avslutning\n", -"%s: Oppdaget signal %d. Avslutter!\n", -"%s: Avslutning komplett\n", -"%s: Påtvinget avslutning av tråd %ld bruker: '%-.64s'\n", -"Kan ikke opprette IP socket", -"Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen", -"Felt skiller argumentene er ikke som forventet, se dokumentasjonen", -"En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'.", -"Filen '%-.64s' må være i database-katalogen for å være lesbar for alle", -"Filen '%-.64s' eksisterte allerede", -"Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld", -"Poster: %ld Like: %ld", -"Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden", -"En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden.", -"Kan ikke DROP '%-.64s'. 
Undersøk om felt/nøkkel eksisterer.", -"Poster: %ld Like: %ld Advarsler: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Ukjent tråd id: %lu", -"Du er ikke eier av tråden %lu", -"Ingen tabeller i bruk", -"For mange tekststrenger kolonne %s og SET", -"Kan ikke lage unikt loggfilnavn %s.(1-999)\n", -"Tabellen '%-.64s' var låst med READ lås og kan ikke oppdateres", -"Tabellen '%-.64s' var ikke låst med LOCK TABLES", -"Blob feltet '%-.64s' kan ikke ha en standard verdi", -"Ugyldig database navn '%-.64s'", -"Ugyldig tabell navn '%-.64s'", -"SELECT ville undersøke for mange poster og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt", -"Ukjent feil", -"Ukjent prosedyre %s", -"Feil parameter antall til prosedyren %s", -"Feil parametre til prosedyren %s", -"Ukjent tabell '%-.64s' i %s", -"Feltet '%-.64s' er spesifisert to ganger", -"Invalid use of group function", -"Table '%-.64s' uses a extension that doesn't exist in this MySQL version", -"A table must have at least 1 column", -"The table '%-.64s' is full", -"Unknown character set: '%-.64s'", -"Too many tables; MySQL can only use %d tables in a join", -"Too many columns", -"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", -"Thread stack overrun: Used: %ld of a %ld stack. 
Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL", -"Can't load function '%-.64s'", -"Can't initialize function '%-.64s'; %-.80s", -"No paths allowed for shared library", -"Function '%-.64s' already exists", -"Can't open shared library '%-.64s' (errno: %d %s)", -"Can't find function '%-.64s' in library'", -"Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", -"Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", -"You must have privileges to update tables in the mysql database to be able to change passwords for others", -"Can't find any matching row in the user table", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is 
not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Mottok feil %d '%-.100s' fa %s", -"Mottok temporary feil %d '%-.100s' fra %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/polish/errmsg.txt b/sql/share/polish/errmsg.txt deleted file mode 100644 index 4511a139554..00000000000 --- a/sql/share/polish/errmsg.txt +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Changed by Jaroslaw Lewandowski <jotel@itnet.com.pl> - Charset ISO-8859-2 -*/ - -character-set=latin2 - -"hashchk", -"isamchk", -"NIE", -"TAK", -"Nie mo¿na stworzyæ pliku '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na stworzyæ tabeli '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na stworzyæ bazy danych '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na stworzyæ bazy danych '%-.64s'; baza danych ju¿ istnieje", -"Nie mo¿na usun?æ bazy danych '%-.64s'; baza danych nie istnieje", -"B³?d podczas usuwania bazy danych (nie mo¿na usun?æ '%-.64s', b³?d %d)", -"B³?d podczas usuwania bazy danych (nie mo¿na wykonaæ rmdir '%-.64s', b³?d %d)", -"B³?d podczas usuwania '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na odczytaæ rekordu z tabeli systemowej", -"Nie mo¿na otrzymaæ statusu '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na rozpoznaæ aktualnego katalogu (Kod b³êdu: %d)", -"Nie mo¿na zablokowaæ pliku (Kod b³êdu: %d)", -"Nie mo¿na otworzyæ pliku: '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na znale¥æ pliku: '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na odczytaæ katalogu '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na zmieniæ katalogu na '%-.64s' (Kod b³êdu: %d)", -"Rekord zosta³ zmieniony od ostaniego odczytania z tabeli '%-.64s'", -"Dysk pe³ny (%s). 
Oczekiwanie na zwolnienie miejsca...", -"Nie mo¿na zapisaæ, powtórzone klucze w tabeli '%-.64s'", -"B³?d podczas zamykania '%-.64s' (Kod b³êdu: %d)", -"B³?d podczas odczytu pliku '%-.64s' (Kod b³êdu: %d)", -"B³?d podczas zmieniania nazwy '%-.64s' na '%-.64s' (Kod b³êdu: %d)", -"B³?d podczas zapisywania pliku '%-.64s' (Kod b³êdu: %d)", -"'%-.64s' jest zablokowany na wypadek zmian", -"Sortowanie przerwane", -"Widok '%-.64s' nie istnieje dla '%-.64s'", -"Otrzymano b³?d %d z obs³ugi tabeli", -"Obs³uga tabeli '%-.64s' nie posiada tej opcji", -"Nie mo¿na znale¥æ rekordu w '%-.64s'", -"Niew³a?ciwa informacja w pliku: '%-.64s'", -"Niew³a?ciwy plik kluczy dla tabeli: '%-.64s'; spróbuj go naprawiæ", -"Plik kluczy dla tabeli '%-.64s' jest starego typu; napraw go!", -"'%-.64s' jest tylko do odczytu", -"Zbyt ma³o pamiêci. Uruchom ponownie demona i spróbuj ponownie (potrzeba %d bajtów)", -"Zbyt ma³o pamiêci dla sortowania. Zwiêksz wielko?æ bufora demona dla sortowania", -"Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.64s' (Kod b³êdu: %d)", -"Zbyt wiele po³?czeñ", -"Zbyt ma³o miejsca/pamiêci dla w?tku", -"Nie mo¿na otrzymaæ nazwy hosta dla twojego adresu", -"Z³y uchwyt(handshake)", -"Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'", -"Access denied for user '%-.32s'@'%-.64s' (using password: %s)", -"Nie wybrano ¿adnej bazy danych", -"Nieznana komenda", -"Kolumna '%-.64s' nie mo¿e byæ null", -"Nieznana baza danych '%-.64s'", -"Tabela '%-.64s' ju¿ istnieje", -"Nieznana tabela '%-.64s'", -"Kolumna: '%-.64s' w %s jest dwuznaczna", -"Trwa koñczenie dzia³ania serwera", -"Nieznana kolumna '%-.64s' w %s", -"U¿yto '%-.64s' bez umieszczenia w group by", -"Nie mo¿na grupowaæ po '%-.64s'", -"Zapytanie ma funkcje sumuj?ce i kolumny w tym samym zapytaniu", -"Liczba kolumn nie odpowiada liczbie warto?ci", -"Nazwa identyfikatora '%-.64s' jest zbyt d³uga", -"Powtórzona nazwa kolumny '%-.64s'", -"Powtórzony nazwa klucza '%-.64s'", -"Powtórzone wyst?pienie '%-.64s' dla 
klucza %d", -"B³êdna specyfikacja kolumny dla kolumny '%-.64s'", -"%s obok '%-.64s' w linii %d", -"Zapytanie by³o puste", -"Tabela/alias nie s? unikalne: '%-.64s'", -"Niew³a?ciwa warto?æ domy?lna dla '%-.64s'", -"Zdefiniowano wiele kluczy podstawowych", -"Okre?lono zbyt wiele kluczy. Dostêpnych jest maksymalnie %d kluczy", -"Okre?lono zbyt wiele czê?ci klucza. Dostêpnych jest maksymalnie %d czê?ci", -"Zdefinowany klucz jest zbyt d³ugi. Maksymaln? d³ugo?ci? klucza jest %d", -"Kolumna '%-.64s' zdefiniowana w kluczu nie istnieje w tabeli", -"Kolumna typu Blob '%-.64s' nie mo¿e byæ u¿yta w specyfikacji klucza", -"Zbyt du¿a d³ugo?æ kolumny '%-.64s' (maks. = %d). W zamian u¿yj typu BLOB", -"W tabeli mo¿e byæ tylko jedno pole auto i musi ono byæ zdefiniowane jako klucz", -"%s: gotowe do po³?czenia", -"%s: Standardowe zakoñczenie dzia³ania\n", -"%s: Otrzymano sygna³ %d. Koñczenie dzia³ania!\n", -"%s: Zakoñczenie dzia³ania wykonane\n", -"%s: Wymuszenie zamkniêcia w?tku %ld u¿ytkownik: '%-.64s'\n", -"Nie mo¿na stworzyæ socket'u IP", -"Tabela '%-.64s' nie ma indeksu takiego jak w CREATE INDEX. Stwórz tabelê", -"Nie oczekiwano separatora. Sprawd¥ podrêcznik", -"Nie mo¿na u¿yæ sta³ej d³ugo?ci wiersza z polami typu BLOB. U¿yj 'fields terminated by'.", -"Plik '%-.64s' musi znajdowaæ sie w katalogu bazy danych lub mieæ prawa czytania przez wszystkich", -"Plik '%-.64s' ju¿ istnieje", -"Recordów: %ld Usuniêtych: %ld Pominiêtych: %ld Ostrze¿eñ: %ld", -"Rekordów: %ld Duplikatów: %ld", -"B³êdna podczê?æ klucza. U¿yta czê?æ klucza nie jest ³añcuchem lub u¿yta d³ugo?æ jest wiêksza ni¿ czê?æ klucza", -"Nie mo¿na usun?æ wszystkich pól wykorzystuj?c ALTER TABLE. W zamian u¿yj DROP TABLE", -"Nie mo¿na wykonaæ operacji DROP '%-.64s'. Sprawd¥, czy to pole/klucz istnieje", -"Rekordów: %ld Duplikatów: %ld Ostrze¿eñ: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Nieznany identyfikator w?tku: %lu", -"Nie jeste? 
w³a?cicielem w?tku %lu", -"Nie ma ¿adej u¿ytej tabeli", -"Zbyt wiele ³añcuchów dla kolumny %s i polecenia SET", -"Nie mo¿na stworzyæ unikalnej nazwy pliku z logiem %s.(1-999)\n", -"Tabela '%-.64s' zosta³a zablokowana przez READ i nie mo¿e zostaæ zaktualizowana", -"Tabela '%-.64s' nie zosta³a zablokowana poleceniem LOCK TABLES", -"Pole typu blob '%-.64s' nie mo¿e mieæ domy?lnej warto?ci", -"Niedozwolona nazwa bazy danych '%-.64s'", -"Niedozwolona nazwa tabeli '%-.64s'...", -"Operacja SELECT bêdzie dotyczy³a zbyt wielu rekordów i prawdopodobnie zajmie bardzo du¿o czasu. Sprawd¥ warunek WHERE i u¿yj SQL_OPTION BIG_SELECTS=1 je?li operacja SELECT jest poprawna", -"Unknown error", -"Unkown procedure %s", -"Incorrect parameter count to procedure %s", -"Incorrect parameters to procedure %s", -"Unknown table '%-.64s' in %s", -"Field '%-.64s' specified twice", -"Invalid use of group function", -"Table '%-.64s' uses a extension that doesn't exist in this MySQL version", -"A table must have at least 1 column", -"The table '%-.64s' is full", -"Unknown character set: '%-.64s'", -"Too many tables; MySQL can only use %d tables in a join", -"Too many columns", -"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", -"Thread stack overrun: Used: %ld of a %ld stack. 
Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL", -"Can't load function '%-.64s'", -"Can't initialize function '%-.64s'; %-.80s", -"No paths allowed for shared library", -"Function '%-.64s' already exists", -"Can't open shared library '%-.64s' (errno: %d %s)", -"Can't find function '%-.64s' in library'", -"Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", -"Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", -"You must have privileges to update tables in the mysql database to be able to change passwords for others", -"Can't find any matching row in the user table", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is 
not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/portuguese/errmsg.txt b/sql/share/portuguese/errmsg.txt deleted file mode 100644 index fa2fb693026..00000000000 --- a/sql/share/portuguese/errmsg.txt +++ /dev/null @@ -1,323 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* Updated by Thiago Delgado Pinto - thiagodp@ieg.com.br - 06.07.2002 */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NÃO", -"SIM", -"Não pode criar o arquivo '%-.64s' (erro no. %d)", -"Não pode criar a tabela '%-.64s' (erro no. %d)", -"Não pode criar o banco de dados '%-.64s' (erro no. %d)", -"Não pode criar o banco de dados '%-.64s'; este banco de dados já existe", -"Não pode eliminar o banco de dados '%-.64s'; este banco de dados não existe", -"Erro ao eliminar banco de dados (não pode eliminar '%-.64s' - erro no. %d)", -"Erro ao eliminar banco de dados (não pode remover diretório '%-.64s' - erro no. %d)", -"Erro na remoção de '%-.64s' (erro no. %d)", -"Não pode ler um registro numa tabela do sistema", -"Não pode obter o status de '%-.64s' (erro no. %d)", -"Não pode obter o diretório corrente (erro no. %d)", -"Não pode travar o arquivo (erro no. %d)", -"Não pode abrir o arquivo '%-.64s' (erro no. %d)", -"Não pode encontrar o arquivo '%-.64s' (erro no. %d)", -"Não pode ler o diretório de '%-.64s' (erro no. %d)", -"Não pode mudar para o diretório '%-.64s' (erro no. %d)", -"Registro alterado desde a última leitura da tabela '%-.64s'", -"Disco cheio (%s). Aguardando alguém liberar algum espaço...", -"Não pode gravar. Chave duplicada na tabela '%-.64s'", -"Erro ao fechar '%-.64s' (erro no. %d)", -"Erro ao ler arquivo '%-.64s' (erro no. %d)", -"Erro ao renomear '%-.64s' para '%-.64s' (erro no. %d)", -"Erro ao gravar arquivo '%-.64s' (erro no. 
%d)", -"'%-.64s' está com travamento contra alterações", -"Ordenação abortada", -"Visão '%-.64s' não existe para '%-.64s'", -"Obteve erro %d no manipulador de tabelas", -"Manipulador de tabela para '%-.64s' não tem esta opção", -"Não pode encontrar registro em '%-.64s'", -"Informação incorreta no arquivo '%-.64s'", -"Arquivo de índice incorreto para tabela '%-.64s'; tente repará-lo", -"Arquivo de índice desatualizado para tabela '%-.64s'; repare-o!", -"Tabela '%-.64s' é somente para leitura", -"Sem memória. Reinicie o programa e tente novamente (necessita de %d bytes)", -"Sem memória para ordenação. Aumente tamanho do 'buffer' de ordenação", -"Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)", -"Excesso de conexões", -"Sem memória. Verifique se o mysqld ou algum outro processo está usando toda memória disponível. Se não, você pode ter que usar 'ulimit' para permitir ao mysqld usar mais memória ou você pode adicionar mais área de 'swap'", -"Não pode obter nome do 'host' para seu endereço", -"Negociação de acesso falhou", -"Acesso negado para o usuário '%-.32s'@'%-.64s' ao banco de dados '%-.64s'", -"Acesso negado para o usuário '%-.32s'@'%-.64s' (senha usada: %s)", -"Nenhum banco de dados foi selecionado", -"Comando desconhecido", -"Coluna '%-.64s' não pode ser vazia", -"Banco de dados '%-.64s' desconhecido", -"Tabela '%-.64s' já existe", -"Tabela '%-.64s' desconhecida", -"Coluna '%-.64s' em '%-.64s' é ambígua", -"'Shutdown' do servidor em andamento", -"Coluna '%-.64s' desconhecida em '%-.64s'", -"'%-.64s' não está em 'GROUP BY'", -"Não pode agrupar em '%-.64s'", -"Cláusula contém funções de soma e colunas juntas", -"Contagem de colunas não confere com a contagem de valores", -"Nome identificador '%-.100s' é longo demais", -"Nome da coluna '%-.64s' duplicado", -"Nome da chave '%-.64s' duplicado", -"Entrada '%-.64s' duplicada para a chave %d", -"Especificador de coluna incorreto para a coluna '%-.64s'", -"%s próximo a '%-.80s' na linha %d", 
-"Consulta (query) estava vazia", -"Tabela/alias '%-.64s' não única", -"Valor padrão (default) inválido para '%-.64s'", -"Definida mais de uma chave primária", -"Especificadas chaves demais. O máximo permitido são %d chaves", -"Especificadas partes de chave demais. O máximo permitido são %d partes", -"Chave especificada longa demais. O comprimento de chave máximo permitido é %d", -"Coluna chave '%-.64s' não existe na tabela", -"Coluna BLOB '%-.64s' não pode ser utilizada na especificação de chave para o tipo de tabela usado", -"Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar", -"Definição incorreta de tabela. Somente é permitido um único campo auto-incrementado e ele tem que ser definido como chave", -"%s: Pronto para conexões", -"%s: 'Shutdown' normal\n", -"%s: Obteve sinal %d. Abortando!\n", -"%s: 'Shutdown' completo\n", -"%s: Forçando finalização da 'thread' %ld - usuário '%-.32s'\n", -"Não pode criar o soquete IP", -"Tabela '%-.64s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela", -"Argumento separador de campos não é o esperado. Cheque o manual", -"Você não pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado.", -"Arquivo '%-.64s' tem que estar no diretório do banco de dados ou ter leitura possível para todos", -"Arquivo '%-.80s' já existe", -"Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld", -"Registros: %ld - Duplicados: %ld", -"Sub parte da chave incorreta. A parte da chave usada não é uma 'string' ou o comprimento usado é maior que parte da chave ou o manipulador de tabelas não suporta sub chaves únicas", -"Você não pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar", -"Não se pode fazer DROP '%-.64s'. 
Confira se esta coluna/chave existe", -"Registros: %ld - Duplicados: %ld - Avisos: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"'Id' de 'thread' %lu desconhecido", -"Você não é proprietário da 'thread' %lu", -"Nenhuma tabela usada", -"'Strings' demais para coluna '%-.64s' e SET", -"Não pode gerar um nome de arquivo de 'log' único '%-.64s'.(1-999)\n", -"Tabela '%-.64s' foi travada com trava de leitura e não pode ser atualizada", -"Tabela '%-.64s' não foi travada com LOCK TABLES", -"Coluna BLOB '%-.64s' não pode ter um valor padrão (default)", -"Nome de banco de dados '%-.100s' incorreto", -"Nome de tabela '%-.100s' incorreto", -"O SELECT examinaria registros demais e provavelmente levaria muito tempo. Cheque sua cláusula WHERE e use SET SQL_BIG_SELECTS=1, se o SELECT estiver correto", -"Erro desconhecido", -"'Procedure' '%-.64s' desconhecida", -"Número de parâmetros incorreto para a 'procedure' '%-.64s'", -"Parâmetros incorretos para a 'procedure' '%-.64s'", -"Tabela '%-.64s' desconhecida em '%-.32s'", -"Coluna '%-.64s' especificada duas vezes", -"Uso inválido de função de agrupamento (GROUP)", -"Tabela '%-.64s' usa uma extensão que não existe nesta versão do MySQL", -"Uma tabela tem que ter pelo menos uma (1) coluna", -"Tabela '%-.64s' está cheia", -"Conjunto de caracteres '%-.64s' desconhecido", -"Tabelas demais. O MySQL pode usar somente %d tabelas em uma junção (JOIN)", -"Colunas demais", -"Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %d. Você tem que mudar alguns campos para BLOBs", -"Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. 
Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário", -"Dependência cruzada encontrada em junção externa (OUTER JOIN); examine as condições utilizadas nas cláusulas 'ON'", -"Coluna '%-.64s' é usada com única (UNIQUE) ou índice (INDEX), mas não está definida como não-nula (NOT NULL)", -"Não pode carregar a função '%-.64s'", -"Não pode inicializar a função '%-.64s' - '%-.80s'", -"Não há caminhos (paths) permitidos para biblioteca compartilhada", -"Função '%-.64s' já existe", -"Não pode abrir biblioteca compartilhada '%-.64s' (erro no. '%d' - '%-.64s')", -"Não pode encontrar a função '%-.64s' na biblioteca", -"Função '%-.64s' não está definida", -"'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'", -"'Host' '%-.64s' não tem permissão para se conectar com este servidor MySQL", -"Você está usando o MySQL como usuário anônimo e usuários anônimos não têm permissão para mudar senhas", -"Você deve ter privilégios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros", -"Não pode encontrar nenhuma linha que combine na tabela usuário (user table)", -"Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld", -"Não pode criar uma nova 'thread' (erro no. %d). Se você não estiver sem memória disponível, você pode consultar o manual sobre um possível 'bug' dependente do sistema operacional", -"Contagem de colunas não confere com a contagem de valores na linha %ld", -"Não pode reabrir a tabela '%-.64s", -"Uso inválido do valor NULL", -"Obteve erro '%-.64s' em regexp", -"Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) 
com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)", -"Não existe tal permissão (grant) definida para o usuário '%-.32s' no 'host' '%-.64s'", -"Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na tabela '%-.64s'", -"Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na coluna '%-.64s', na tabela '%-.64s'", -"Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados.", -"Argumento de 'host' ou de usuário para o GRANT é longo demais", -"Tabela '%-.64s.%-.64s' não existe", -"Não existe tal permissão (grant) definido para o usuário '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'", -"Comando usado não é permitido para esta versão do MySQL", -"Você tem um erro de sintaxe no seu SQL", -"'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.64s'", -"Excesso de 'threads' retardadas (atrasadas) em uso", -"Conexão %ld abortou para o banco de dados '%-.64s' - usuário '%-.32s' (%-.64s)", -"Obteve um pacote maior do que a taxa máxima de pacotes definida (max_allowed_packet)", -"Obteve um erro de leitura no 'pipe' da conexão", -"Obteve um erro em fcntl()", -"Obteve pacotes fora de ordem", -"Não conseguiu descomprimir pacote de comunicação", -"Obteve um erro na leitura de pacotes de comunicação", -"Obteve expiração de tempo (timeout) na leitura de pacotes de comunicação", -"Obteve um erro na escrita de pacotes de comunicação", -"Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação", -"'String' resultante é mais longa do que 'max_allowed_packet'", -"Tipo de tabela usado não permite colunas BLOB/TEXT", -"Tipo de tabela usado não permite colunas AUTO_INCREMENT", -"INSERT DELAYED não pode ser usado com a tabela '%-.64s', porque ela está travada com LOCK TABLES", -"Nome de coluna '%-.100s' incorreto", -"O manipulador de tabela usado não pode indexar a coluna '%-.64s'", -"Todas as tabelas contidas na tabela fundida (MERGE) 
não estão definidas identicamente", -"Não pode gravar, devido à restrição UNIQUE, na tabela '%-.64s'", -"Coluna BLOB '%-.64s' usada na especificação de chave sem o comprimento da chave", -"Todas as partes de uma chave primária devem ser não-nulas. Se você precisou usar um valor nulo (NULL) em uma chave, use a cláusula UNIQUE em seu lugar", -"O resultado consistiu em mais do que uma linha", -"Este tipo de tabela requer uma chave primária", -"Esta versão do MySQL não foi compilada com suporte a RAID", -"Você está usando modo de atualização seguro e tentou atualizar uma tabela sem uma cláusula WHERE que use uma coluna chave", -"Chave '%-.64s' não existe na tabela '%-.64s'", -"Não pode abrir a tabela", -"O manipulador de tabela não suporta %s", -"Não lhe é permitido executar este comando em uma transação", -"Obteve erro %d durante COMMIT", -"Obteve erro %d durante ROLLBACK", -"Obteve erro %d durante FLUSH_LOGS", -"Obteve erro %d durante CHECKPOINT", -"Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' `%-.64s' ('%-.64s')", -"O manipulador de tabela não suporta 'dump' binário de tabela", -"Binlog fechado. Não pode fazer RESET MASTER", -"Falhou na reconstrução do índice da tabela 'dumped' '%-.64s'", -"Erro no 'master' '%-.64s'", -"Erro de rede lendo do 'master'", -"Erro de rede gravando no 'master'", -"Não pode encontrar um índice para o texto todo que combine com a lista de colunas", -"Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa", -"Variável de sistema '%-.64s' desconhecida", -"Tabela '%-.64s' está marcada como danificada e deve ser reparada", -"Tabela '%-.64s' está marcada como danificada e a última reparação (automática?) falhou", -"Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas (rolled back)", -"Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. 
Aumente o valor desta variável do mysqld e tente novamente", -"Esta operação não pode ser realizada com um 'slave' em execução. Execute STOP SLAVE primeiro", -"Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE", -"O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema", -"Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas", -"Você pode usar apenas expressões constantes com SET", -"Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação.", -"O número total de travamentos excede o tamanho da tabela de travamentos", -"Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED", -"DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura", -"CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura", -"Argumentos errados para %s", -"Não é permitido a '%-.32s'@'%-.64s' criar novos usuários", -"Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados.", -"Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. 
Tente reiniciar a transação.", -"O tipo de tabela utilizado não suporta índices de texto completo (fulltext indexes)", -"Não pode acrescentar uma restrição de chave estrangeira", -"Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou", -"Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou", -"Erro conectando com o master: %-.128s", -"Erro rodando consulta no master: %-.128s", -"Erro quando executando comando %s: %-.128s", -"Uso errado de %s e %s", -"Os comandos SELECT usados têm diferente número de colunas", -"Não posso executar a consulta porque você tem um conflito de travamento de leitura", -"Mistura de tabelas transacional e não-transacional está desabilitada", -"Opção '%s' usada duas vezes no comando", -"Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)", -"Acesso negado. Você precisa o privilégio %-.128s para essa operação", -"Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL", -"Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL", -"Variável '%-.64s' não tem um valor padrão", -"Variável '%-.64s' não pode ser configurada para o valor de '%-.64s'", -"Tipo errado de argumento para variável '%-.64s'", -"Variável '%-.64s' somente pode ser configurada, não lida", -"Errado uso/colocação de '%s'", -"Esta versão de MySQL não suporta ainda '%s'", -"Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log", -"Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela", -"Variable '%-.64s' is a %s variable", -"Definição errada da chave estrangeira para '%-.64s': %s", -"Referência da chave e referência da tabela não coincidem", -"Operand should contain %d column(s)", -"Subconsulta retorna mais que 1 registro", -"Desconhecido manipulador de declaração preparado (%.*s) determinado para %s", -"Banco de dado de ajuda corrupto ou não existente", -"Referência cíclica em subconsultas", -"Convertendo coluna '%s' de %s para %s", 
-"Referência '%-.64s' não suportada (%s)", -"Cada tabela derivada deve ter seu próprio alias", -"Select %u foi reduzido durante otimização", -"Tabela '%-.64s' de um dos SELECTs não pode ser usada em %-.32s", -"Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MySQL", -"Todas as partes de uma SPATIAL KEY devem ser NOT NULL", -"COLLATION '%s' não é válida para CHARACTER SET '%s'", -"O slave já está rodando", -"O slave já está parado", -"Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)", -"ZLIB: Não suficiente memória disponível", -"ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)", -"ZLIB: Dados de entrada está corrupto", -"%d linha(s) foram cortada(s) por GROUP_CONCAT()", -"Conta de registro é menor que a conta de coluna na linha %ld", -"Conta de registro é maior que a conta de coluna na linha %ld", -"Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld", -"Dado truncado, fora de alcance para coluna '%s' na linha %ld", -"Dado truncado para coluna '%s' na linha %ld", -"Usando engine de armazenamento %s para tabela '%s'", -"Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'", -"Não pode remover um ou mais dos usuários pedidos", -"Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos", -"Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'", -"Ilegal combinação de collations para operação '%s'", -"Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)", -"Collation desconhecida: '%-.64s'", -"SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MySQL foi compilado sem o SSL suporte. 
Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado.", -"Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato", -"Campo ou referência '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d", -"Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL", -"É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo", -"Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas", -"Incorreto nome de índice '%-.100s'", -"Incorreto nome de catálogo '%-.100s'", -"Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu", -"Coluna '%-.64s' não pode ser parte de índice FULLTEXT", -"Key cache desconhecida '%-.100s'", -"MySQL foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar", -"Motor de tabela desconhecido '%s'", -"'%s' é desatualizado. 
Use '%s' em seu lugar", -"A tabela destino %-.100s do %s não é atualizável", -"O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando", -"O servidor MySQL está rodando com a opção %s razão pela qual não pode executar esse commando", -"Coluna '%-.100s' tem valor duplicado '%-.64s' em %s" -"Truncado errado %-.32s valor: '%-.128s'" -"Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula" -"Inválida cláusula ON UPDATE para campo '%-.64s'", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/romanian/errmsg.txt b/sql/share/romanian/errmsg.txt deleted file mode 100644 index da9135e7156..00000000000 --- a/sql/share/romanian/errmsg.txt +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Translated into Romanian by Stefan Saroiu - e-mail: tzoompy@cs.washington.edu -*/ - -character-set=latin2 - -"hashchk", -"isamchk", -"NU", -"DA", -"Nu pot sa creez fisierul '%-.64s' (Eroare: %d)", -"Nu pot sa creez tabla '%-.64s' (Eroare: %d)", -"Nu pot sa creez baza de date '%-.64s' (Eroare: %d)", -"Nu pot sa creez baza de date '%-.64s'; baza de date exista deja", -"Nu pot sa drop baza de date '%-.64s'; baza da date este inexistenta", -"Eroare dropuind baza de date (nu pot sa sterg '%-.64s', Eroare: %d)", -"Eroare dropuind baza de date (nu pot sa rmdir '%-.64s', Eroare: %d)", -"Eroare incercind sa delete '%-.64s' (Eroare: %d)", -"Nu pot sa citesc cimpurile in tabla de system (system table)", -"Nu pot sa obtin statusul lui '%-.64s' (Eroare: %d)", -"Nu pot sa obtin directorul current (working directory) (Eroare: %d)", -"Nu pot sa lock fisierul (Eroare: %d)", -"Nu pot sa deschid fisierul: '%-.64s' (Eroare: %d)", -"Nu pot sa gasesc fisierul: '%-.64s' (Eroare: %d)", -"Nu pot sa citesc directorul '%-.64s' (Eroare: %d)", -"Nu pot sa schimb directorul '%-.64s' (Eroare: %d)", -"Cimpul a fost schimbat de la ultima citire a tabelei '%-.64s'", -"Hard-disk-ul este plin (%s). 
Astept sa se elibereze ceva spatiu...", -"Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.64s'", -"Eroare inchizind '%-.64s' (errno: %d)", -"Eroare citind fisierul '%-.64s' (errno: %d)", -"Eroare incercind sa renumesc '%-.64s' in '%-.64s' (errno: %d)", -"Eroare scriind fisierul '%-.64s' (errno: %d)", -"'%-.64s' este blocat pentry schimbari (loccked against change)", -"Sortare intrerupta", -"View '%-.64s' nu exista pentru '%-.64s'", -"Eroarea %d obtinuta din handlerul tabelei", -"Handlerul tabelei pentru '%-.64s' nu are aceasta optiune", -"Nu pot sa gasesc recordul in '%-.64s'", -"Informatie incorecta in fisierul: '%-.64s'", -"Cheia fisierului incorecta pentru tabela: '%-.64s'; incearca s-o repari", -"Cheia fisierului e veche pentru tabela '%-.64s'; repar-o!", -"Tabela '%-.64s' e read-only", -"Out of memory. Porneste daemon-ul din nou si incearca inca o data (e nevoie de %d bytes)", -"Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)", -"Sfirsit de fisier neasteptat in citirea fisierului '%-.64s' (errno: %d)", -"Prea multe conectiuni", -"Out of memory; Verifica daca mysqld sau vreun alt proces foloseste toate memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui memoria disponbila. 
Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui mysqld sa foloseasca mai multa memorie ori adauga mai mult spatiu pentru swap (swap space)", -"Nu pot sa obtin hostname-ul adresei tale", -"Prost inceput de conectie (bad handshake)", -"Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' la baza de date '%-.64s'", -"Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' (Folosind parola: %s)", -"Nici o baza de data nu a fost selectata inca", -"Comanda invalida", -"Coloana '%-.64s' nu poate sa fie null", -"Baza de data invalida '%-.64s'", -"Tabela '%-.64s' exista deja", -"Tabela '%-.64s' este invalida", -"Coloana: '%-.64s' in %-.64s este ambigua", -"Terminarea serverului este in desfasurare", -"Coloana invalida '%-.64s' in '%-.64s'", -"'%-.64s' nu exista in clauza GROUP BY", -"Nu pot sa grupez pe (group on) '%-.64s'", -"Comanda are functii suma si coloane in aceeasi comanda", -"Numarul de coloane nu este acelasi cu numarul valoarei", -"Numele indentificatorului '%-.100s' este prea lung", -"Numele coloanei '%-.64s' e duplicat", -"Numele cheiei '%-.64s' e duplicat", -"Cimpul '%-.64s' e duplicat pentru cheia %d", -"Specificandul coloanei '%-.64s' este incorect", -"%s linga '%-.80s' pe linia %d", -"Query-ul a fost gol", -"Tabela/alias: '%-.64s' nu este unic", -"Valoarea de default este invalida pentru '%-.64s'", -"Chei primare definite de mai multe ori", -"Prea multe chei. Numarul de chei maxim este %d", -"Prea multe chei. Numarul de chei maxim este %d", -"Cheia specificata este prea lunga. Marimea maxima a unei chei este de %d", -"Coloana cheie '%-.64s' nu exista in tabela", -"Coloana de tip BLOB '%-.64s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit", -"Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). 
Foloseste BLOB mai bine", -"Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie", -"%s: sint gata pentru conectii", -"%s: Terminare normala\n", -"%s: Semnal %d obtinut. Aborting!\n", -"%s: Terminare completa\n", -"%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.32s'\n", -"Nu pot crea IP socket", -"Tabela '%-.64s' nu are un index ca acela folosit in CREATE INDEX. Re-creeaza tabela", -"Argumentul pentru separatorul de cimpuri este diferit de ce ma asteptam. Verifica manualul", -"Nu poti folosi lungime de cimp fix pentru BLOB-uri. Foloseste 'fields terminated by'.", -"Fisierul '%-.64s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)", -"Fisierul '%-.80s' exista deja", -"Recorduri: %ld Sterse: %ld Sarite (skipped): %ld Atentionari (warnings): %ld", -"Recorduri: %ld Duplicate: %ld", -"Componentul cheii este incorrect. Componentul folosit al cheii nu este un sir sau lungimea folosita este mai lunga decit lungimea cheii", -"Nu poti sterge toate coloanele cu ALTER TABLE. Foloseste DROP TABLE in schimb", -"Nu pot sa DROP '%-.64s'. 
Verifica daca coloana/cheia exista", -"Recorduri: %ld Duplicate: %ld Atentionari (warnings): %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Id-ul: %lu thread-ului este necunoscut", -"Nu sinteti proprietarul threadului %lu", -"Nici o tabela folosita", -"Prea multe siruri pentru coloana %-.64s si SET", -"Nu pot sa generez un nume de log unic %-.64s.(1-999)\n", -"Tabela '%-.64s' a fost locked cu un READ lock si nu poate fi actualizata", -"Tabela '%-.64s' nu a fost locked cu LOCK TABLES", -"Coloana BLOB '%-.64s' nu poate avea o valoare default", -"Numele bazei de date este incorect '%-.100s'", -"Numele tabelei este incorect '%-.100s'", -"SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp; verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e okay", -"Eroare unknown", -"Procedura unknown '%-.64s'", -"Procedura '%-.64s' are un numar incorect de parametri", -"Procedura '%-.64s' are parametrii incorecti", -"Tabla '%-.64s' invalida in %-.32s", -"Coloana '%-.64s' specificata de doua ori", -"Folosire incorecta a functiei group", -"Tabela '%-.64s' foloseste o extensire inexistenta in versiunea curenta de MySQL", -"O tabela trebuie sa aiba cel putin o coloana", -"Tabela '%-.64s' e plina", -"Set de caractere invalid: '%-.64s'", -"Prea multe tabele. MySQL nu poate folosi mai mult de %d tabele intr-un join", -"Prea multe coloane", -"Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %d. Trebuie sa schimbati unele cimpuri in BLOB-uri", -"Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld -O thread_stack=#' ca sa specifici un stack mai mare", -"Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. 
Examinati conditiile ON", -"Coloana '%-.64s' e folosita cu UNIQUE sau INDEX dar fara sa fie definita ca NOT NULL", -"Nu pot incarca functia '%-.64s'", -"Nu pot initializa functia '%-.64s'; %-.80s", -"Nici un paths nu e permis pentru o librarie shared", -"Functia '%-.64s' exista deja", -"Nu pot deschide libraria shared '%-.64s' (Eroare: %d %-.64s)", -"Nu pot gasi functia '%-.64s' in libraria", -"Functia '%-.64s' nu e definita", -"Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'", -"Host-ul '%-.64s' nu este permis a se conecta la aceste server MySQL", -"Dumneavoastra folositi MySQL ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele", -"Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora", -"Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului", -"Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld", -"Nu pot crea un thread nou (Eroare %d). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare", -"Numarul de coloane nu corespunde cu numarul de valori la linia %ld", -"Nu pot redeschide tabela: '%-.64s'", -"Folosirea unei value NULL e invalida", -"Eroarea '%-.64s' obtinuta din expresia regulara (regexp)", -"Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY", -"Nu exista un astfel de grant definit pentru utilzatorul '%-.32s' de pe host-ul '%-.64s'", -"Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru tabela '%-.64s'", -"Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'", -"Comanda GRANT/REVOKE ilegala. 
Consultati manualul in privinta privilegiilor ce pot fi folosite.", -"Argumentul host-ului sau utilizatorului pentru GRANT e prea lung", -"Tabela '%-.64s.%-.64s' nu exista", -"Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.32s' de pe host-ul '%-.64s' pentru tabela '%-.64s'", -"Comanda folosita nu este permisa pentru aceasta versiune de MySQL", -"Aveti o eroare in sintaxa RSQL", -"Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.64s", -"Prea multe threaduri aminate care sint in uz", -"Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)", -"Un packet mai mare decit 'max_allowed_packet' a fost primit", -"Eroare la citire din cauza lui 'connection pipe'", -"Eroare obtinuta de la fcntl()", -"Packets care nu sint ordonati au fost gasiti", -"Nu s-a putut decompresa pachetul de comunicatie (communication packet)", -"Eroare obtinuta citind pachetele de comunicatie (communication packets)", -"Timeout obtinut citind pachetele de comunicatie (communication packets)", -"Eroare in scrierea pachetelor de comunicatie (communication packets)", -"Timeout obtinut scriind pachetele de comunicatie (communication packets)", -"Sirul rezultat este mai lung decit 'max_allowed_packet'", -"Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT", -"Tipul de tabela folosit nu suporta coloane de tip AUTO_INCREMENT", -"INSERT DELAYED nu poate fi folosit cu tabela '%-.64s', deoarece este locked folosing LOCK TABLES", -"Nume increct de coloana '%-.100s'", -"Handler-ul tabelei folosite nu poate indexa coloana '%-.64s'", -"Toate tabelele din tabela MERGE nu sint definite identic", -"Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.64s'", -"Coloana BLOB '%-.64s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita", -"Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, 
folositi UNIQUE in schimb", -"Resultatul constista din mai multe linii", -"Aceast tip de tabela are nevoie de o cheie primara", -"Aceasta versiune de MySQL, nu a fost compilata cu suport pentru RAID", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/russian/errmsg.txt b/sql/share/russian/errmsg.txt deleted file mode 100644 index 5b55f818fcc..00000000000 --- a/sql/share/russian/errmsg.txt +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Translation done in 2003 by Egor Egorov; Ensita.NET, http://www.ensita.net/ -*/ -/* charset: KOI8-R */ - -character-set=koi8r - -"hashchk", -"isamchk", -"îåô", -"äá", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÔÁÂÌÉÃÕ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. âÁÚÁ ÄÁÎÎÙÈ ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ", -"îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. ôÁËÏÊ ÂÁÚÙ ÄÁÎÎÙÈ ÎÅÔ", -"ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ ÂÁÚÙ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ '%-.64s', ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ËÁÔÁÌÏÇ '%-.64s', ÏÛÉÂËÁ: %d)", -"ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ÚÁÐÉÓØ × ÓÉÓÔÅÍÎÏÊ ÔÁÂÌÉÃÅ", -"îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÓÔÁÔÕÓÎÕÀ ÉÎÆÏÒÍÁÃÉÀ Ï '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÏÐÒÅÄÅÌÉÔØ ÒÁÂÏÞÉÊ ËÁÔÁÌÏÇ (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÐÏÓÔÁ×ÉÔØ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÆÁÊÌÅ (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÐÅÒÅÊÔÉ × ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)", -"úÁÐÉÓØ ÉÚÍÅÎÉÌÁÓØ Ó ÍÏÍÅÎÔÁ ÐÏÓÌÅÄÎÅÊ ×ÙÂÏÒËÉ × ÔÁÂÌÉÃÅ '%-.64s'", -"äÉÓË ÚÁÐÏÌÎÅÎ. (%s). 
ïÖÉÄÁÅÍ, ÐÏËÁ ËÔÏ-ÔÏ ÎÅ ÕÂÅÒÅÔ ÐÏÓÌÅ ÓÅÂÑ ÍÕÓÏÒ...", -"îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÚÁÐÉÓØ, ÄÕÂÌÉÒÕÀÝÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉÃÅ '%-.64s'", -"ïÛÉÂËÁ ÐÒÉ ÚÁËÒÙÔÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)", -"ïÛÉÂËÁ ÞÔÅÎÉÑ ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)", -"ïÛÉÂËÁ ÐÒÉ ÐÅÒÅÉÍÅÎÏ×ÁÎÉÉ '%-.64s' × '%-.64s' (ÏÛÉÂËÁ: %d)", -"ïÛÉÂËÁ ÚÁÐÉÓÉ × ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)", -"'%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÄÌÑ ÉÚÍÅÎÅÎÉÊ", -"óÏÒÔÉÒÏ×ËÁ ÐÒÅÒ×ÁÎÁ", -"ðÒÅÄÓÔÁ×ÌÅÎÉÅ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ ÄÌÑ '%-.64s'", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d ÏÔ ÏÂÒÁÂÏÔÞÉËÁ ÔÁÂÌÉÃ", -"ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÕ ×ÏÚÍÏÖÎÏÓÔØ", -"îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÚÁÐÉÓØ × '%-.64s'", -"îÅËÏÒÒÅËÔÎÁÑ ÉÎÆÏÒÍÁÃÉÑ × ÆÁÊÌÅ '%-.64s'", -"îÅËÏÒÒÅËÔÎÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ: '%-.64s'. ðÏÐÒÏÂÕÊÔÅ ×ÏÓÓÔÁÎÏ×ÉÔØ ÅÇÏ", -"óÔÁÒÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'; ÏÔÒÅÍÏÎÔÉÒÕÊÔÅ ÅÇÏ!", -"ôÁÂÌÉÃÁ '%-.64s' ÐÒÅÄÎÁÚÎÁÞÅÎÁ ÔÏÌØËÏ ÄÌÑ ÞÔÅÎÉÑ", -"îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ. ðÅÒÅÚÁÐÕÓÔÉÔÅ ÓÅÒ×ÅÒ É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ (ÎÕÖÎÏ %d ÂÁÊÔ)", -"îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ ÄÌÑ ÓÏÒÔÉÒÏ×ËÉ. õ×ÅÌÉÞØÔÅ ÒÁÚÍÅÒ ÂÕÆÅÒÁ ÓÏÒÔÉÒÏ×ËÉ ÎÁ ÓÅÒ×ÅÒÅ", -"îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)", -"óÌÉÛËÏÍ ÍÎÏÇÏ ÓÏÅÄÉÎÅÎÉÊ", -"îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ; ÕÄÏÓÔÏ×ÅÒØÔÅÓØ, ÞÔÏ mysqld ÉÌÉ ËÁËÏÊ-ÌÉÂÏ ÄÒÕÇÏÊ ÐÒÏÃÅÓÓ ÎÅ ÚÁÎÉÍÁÅÔ ×ÓÀ ÄÏÓÔÕÐÎÕÀ ÐÁÍÑÔØ. 
åÓÌÉ ÎÅÔ, ÔÏ ×Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ ulimit, ÞÔÏÂÙ ×ÙÄÅÌÉÔØ ÄÌÑ mysqld ÂÏÌØÛÅ ÐÁÍÑÔÉ, ÉÌÉ Õ×ÅÌÉÞÉÔØ ÏÂßÅÍ ÆÁÊÌÁ ÐÏÄËÁÞËÉ", -"îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÉÍÑ ÈÏÓÔÁ ÄÌÑ ×ÁÛÅÇÏ ÁÄÒÅÓÁ", -"îÅËÏÒÒÅËÔÎÏÅ ÐÒÉ×ÅÔÓÔ×ÉÅ", -"äÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' ÄÏÓÔÕÐ Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÚÁËÒÙÔ", -"äÏÓÔÕÐ ÚÁËÒÙÔ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' (ÂÙÌ ÉÓÐÏÌØÚÏ×ÁÎ ÐÁÒÏÌØ: %s)", -"âÁÚÁ ÄÁÎÎÙÈ ÎÅ ×ÙÂÒÁÎÁ", -"îÅÉÚ×ÅÓÔÎÁÑ ËÏÍÁÎÄÁ ËÏÍÍÕÎÉËÁÃÉÏÎÎÏÇÏ ÐÒÏÔÏËÏÌÁ", -"óÔÏÌÂÅà '%-.64s' ÎÅ ÍÏÖÅÔ ÐÒÉÎÉÍÁÔØ ×ÅÌÉÞÉÎÕ NULL", -"îÅÉÚ×ÅÓÔÎÁÑ ÂÁÚÁ ÄÁÎÎÙÈ '%-.64s'", -"ôÁÂÌÉÃÁ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ", -"îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.175s'", -"óÔÏÌÂÅà '%-.64s' × %-.64s ÚÁÄÁÎ ÎÅÏÄÎÏÚÎÁÞÎÏ", -"óÅÒ×ÅÒ ÎÁÈÏÄÉÔÓÑ × ÐÒÏÃÅÓÓÅ ÏÓÔÁÎÏ×ËÉ", -"îÅÉÚ×ÅÓÔÎÙÊ ÓÔÏÌÂÅà '%-.64s' × '%-.64s'", -"'%-.64s' ÎÅ ÐÒÉÓÕÔÓÔ×ÕÅÔ × GROUP BY", -"îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÇÒÕÐÐÉÒÏ×ËÕ ÐÏ '%-.64s'", -"÷ÙÒÁÖÅÎÉÅ ÓÏÄÅÒÖÉÔ ÇÒÕÐÐÏ×ÙÅ ÆÕÎËÃÉÉ É ÓÔÏÌÂÃÙ, ÎÏ ÎÅ ×ËÌÀÞÁÅÔ GROUP BY. á ËÁË ×Ù ÕÍÕÄÒÉÌÉÓØ ÐÏÌÕÞÉÔØ ÜÔÏ ÓÏÏÂÝÅÎÉÅ Ï ÏÛÉÂËÅ?", -"ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ", -"óÌÉÛËÏÍ ÄÌÉÎÎÙÊ ÉÄÅÎÔÉÆÉËÁÔÏÒ '%-.100s'", -"äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ÓÔÏÌÂÃÁ '%-.64s'", -"äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ËÌÀÞÁ '%-.64s'", -"äÕÂÌÉÒÕÀÝÁÑÓÑ ÚÁÐÉÓØ '%-.64s' ÐÏ ËÌÀÞÕ %d", -"îÅËÏÒÒÅËÔÎÙÊ ÏÐÒÅÄÅÌÉÔÅÌØ ÓÔÏÌÂÃÁ ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s'", -"%s ÏËÏÌÏ '%-.80s' ÎÁ ÓÔÒÏËÅ %d", -"úÁÐÒÏÓ ÏËÁÚÁÌÓÑ ÐÕÓÔÙÍ", -"ðÏ×ÔÏÒÑÀÝÁÑÓÑ ÔÁÂÌÉÃÁ/ÐÓÅ×ÄÏÎÉÍ '%-.64s'", -"îÅËÏÒÒÅËÔÎÏÅ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ '%-.64s'", -"õËÁÚÁÎÏ ÎÅÓËÏÌØËÏ ÐÅÒ×ÉÞÎÙÈ ËÌÀÞÅÊ", -"õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ËÌÀÞÅÊ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ËÌÀÞÅÊ", -"õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÞÁÓÔÅÊ ÓÏÓÔÁ×ÎÏÇÏ ËÌÀÞÁ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ÞÁÓÔÅÊ", -"õËÁÚÁÎ ÓÌÉÛËÏÍ ÄÌÉÎÎÙÊ ËÌÀÞ. íÁËÓÉÍÁÌØÎÁÑ ÄÌÉÎÁ ËÌÀÞÁ ÓÏÓÔÁ×ÌÑÅÔ %d ÂÁÊÔ", -"ëÌÀÞÅ×ÏÊ ÓÔÏÌÂÅà '%-.64s' × ÔÁÂÌÉÃÅ ÎÅ ÓÕÝÅÓÔ×ÕÅÔ", -"óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ËÁË ÚÎÁÞÅÎÉÅ ËÌÀÞÁ × ÔÁÂÌÉÃÅ ÔÁËÏÇÏ ÔÉÐÁ", -"óÌÉÛËÏÍ ÂÏÌØÛÁÑ ÄÌÉÎÁ ÓÔÏÌÂÃÁ '%-.64s' (ÍÁËÓÉÍÕÍ = %d). 
éÓÐÏÌØÚÕÊÔÅ ÔÉÐ BLOB ×ÍÅÓÔÏ ÔÅËÕÝÅÇÏ", -"îÅËÏÒÒÅËÔÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ: ÍÏÖÅÔ ÓÕÝÅÓÔ×Ï×ÁÔØ ÔÏÌØËÏ ÏÄÉÎ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÊ ÓÔÏÌÂÅÃ, É ÏÎ ÄÏÌÖÅÎ ÂÙÔØ ÏÐÒÅÄÅÌÅÎ ËÁË ËÌÀÞ", -"%s: çÏÔÏ× ÐÒÉÎÉÍÁÔØ ÓÏÅÄÉÎÅÎÉÑ.\n÷ÅÒÓÉÑ: '%s' ÓÏËÅÔ: '%s' ÐÏÒÔ: %d", -"%s: ëÏÒÒÅËÔÎÁÑ ÏÓÔÁÎÏ×ËÁ\n", -"%s: ðÏÌÕÞÅÎ ÓÉÇÎÁÌ %d. ðÒÅËÒÁÝÁÅÍ!\n", -"%s: ïÓÔÁÎÏ×ËÁ ÚÁ×ÅÒÛÅÎÁ\n", -"%s: ðÒÉÎÕÄÉÔÅÌØÎÏ ÚÁËÒÙ×ÁÅÍ ÐÏÔÏË %ld ÐÏÌØÚÏ×ÁÔÅÌÑ: '%-.32s'\n", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ IP-ÓÏËÅÔ", -"÷ ÔÁÂÌÉÃÅ '%-.64s' ÎÅÔ ÔÁËÏÇÏ ÉÎÄÅËÓÁ, ËÁË × CREATE INDEX. óÏÚÄÁÊÔÅ ÔÁÂÌÉÃÕ ÚÁÎÏ×Ï", -"áÒÇÕÍÅÎÔ ÒÁÚÄÅÌÉÔÅÌÑ ÐÏÌÅÊ - ÎÅ ÔÏÔ, ËÏÔÏÒÙÊ ÏÖÉÄÁÌÓÑ. ïÂÒÁÝÁÊÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ", -"æÉËÓÉÒÏ×ÁÎÎÙÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ Ó ÐÏÌÑÍÉ ÔÉÐÁ BLOB ÉÓÐÏÌØÚÏ×ÁÔØ ÎÅÌØÚÑ, ÐÒÉÍÅÎÑÊÔÅ 'fields terminated by'", -"æÁÊÌ '%-.64s' ÄÏÌÖÅÎ ÎÁÈÏÄÉÔØÓÑ × ÔÏÍ ÖÅ ËÁÔÁÌÏÇÅ, ÞÔÏ É ÂÁÚÁ ÄÁÎÎÙÈ, ÉÌÉ ÂÙÔØ ÏÂÝÅÄÏÓÔÕÐÎÙÍ ÄÌÑ ÞÔÅÎÉÑ", -"æÁÊÌ '%-.80s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ", -"úÁÐÉÓÅÊ: %ld õÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld", -"úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld", -"îÅËÏÒÒÅËÔÎÁÑ ÞÁÓÔØ ËÌÀÞÁ. éÓÐÏÌØÚÕÅÍÁÑ ÞÁÓÔØ ËÌÀÞÁ ÎÅ Ñ×ÌÑÅÔÓÑ ÓÔÒÏËÏÊ, ÕËÁÚÁÎÎÁÑ ÄÌÉÎÁ ÂÏÌØÛÅ, ÞÅÍ ÄÌÉÎÁ ÞÁÓÔÉ ËÌÀÞÁ, ÉÌÉ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÕÎÉËÁÌØÎÙÅ ÞÁÓÔÉ ËÌÀÞÁ", -"îÅÌØÚÑ ÕÄÁÌÉÔØ ×ÓÅ ÓÔÏÌÂÃÙ Ó ÐÏÍÏÝØÀ ALTER TABLE. éÓÐÏÌØÚÕÊÔÅ DROP TABLE", -"îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ (DROP) '%-.64s'. 
õÂÅÄÉÔÅÓØ ÞÔÏ ÓÔÏÌÂÅÃ/ËÌÀÞ ÄÅÊÓÔ×ÉÔÅÌØÎÏ ÓÕÝÅÓÔ×ÕÅÔ", -"úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld", -"îÅ ÄÏÐÕÓËÁÅÔÓÑ ÕËÁÚÁÎÉÅ ÔÁÂÌÉÃÙ '%-.64s' × ÓÐÉÓËÅ ÔÁÂÌÉà FROM ÄÌÑ ×ÎÅÓÅÎÉÑ × ÎÅÅ ÉÚÍÅÎÅÎÉÊ", -"îÅÉÚ×ÅÓÔÎÙÊ ÎÏÍÅÒ ÐÏÔÏËÁ: %lu", -"÷Ù ÎÅ Ñ×ÌÑÅÔÅÓØ ×ÌÁÄÅÌØÃÅÍ ÐÏÔÏËÁ %lu", -"îÉËÁËÉÅ ÔÁÂÌÉÃÙ ÎÅ ÉÓÐÏÌØÚÏ×ÁÎÙ", -"óÌÉÛËÏÍ ÍÎÏÇÏ ÚÎÁÞÅÎÉÊ ÄÌÑ ÓÔÏÌÂÃÁ %-.64s × SET", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÕÎÉËÁÌØÎÏÅ ÉÍÑ ÆÁÊÌÁ ÖÕÒÎÁÌÁ %-.64s.(1-999)\n", -"ôÁÂÌÉÃÁ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎÁ ÕÒÏ×ÎÅÍ READ lock É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ", -"ôÁÂÌÉÃÁ '%-.64s' ÎÅ ÂÙÌÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES", -"îÅ×ÏÚÍÏÖÎÏ ÕËÁÚÙ×ÁÔØ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ ÓÔÏÌÂÃÁ BLOB '%-.64s'", -"îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÂÁÚÙ ÄÁÎÎÙÈ '%-.100s'", -"îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÔÁÂÌÉÃÙ '%-.100s'", -"äÌÑ ÔÁËÏÊ ×ÙÂÏÒËÉ SELECT ÄÏÌÖÅÎ ÂÕÄÅÔ ÐÒÏÓÍÏÔÒÅÔØ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÚÁÐÉÓÅÊ É, ×ÉÄÉÍÏ, ÜÔÏ ÚÁÊÍÅÔ ÏÞÅÎØ ÍÎÏÇÏ ×ÒÅÍÅÎÉ. ðÒÏ×ÅÒØÔÅ ×ÁÛÅ ÕËÁÚÁÎÉÅ WHERE, É, ÅÓÌÉ × ÎÅÍ ×ÓÅ × ÐÏÒÑÄËÅ, ÕËÁÖÉÔÅ SET SQL_BIG_SELECTS=1", -"îÅÉÚ×ÅÓÔÎÁÑ ÏÛÉÂËÁ", -"îÅÉÚ×ÅÓÔÎÁÑ ÐÒÏÃÅÄÕÒÁ '%-.64s'", -"îÅËÏÒÒÅËÔÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÐÁÒÁÍÅÔÒÏ× ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'", -"îÅËÏÒÒÅËÔÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'", -"îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.64s' × %-.32s", -"óÔÏÌÂÅà '%-.64s' ÕËÁÚÁÎ Ä×ÁÖÄÙ", -"îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÇÒÕÐÐÏ×ÙÈ ÆÕÎËÃÉÊ", -"÷ ÔÁÂÌÉÃÅ '%-.64s' ÉÓÐÏÌØÚÕÀÔÓÑ ×ÏÚÍÏÖÎÏÓÔÉ, ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÍÙÅ × ÜÔÏÊ ×ÅÒÓÉÉ MySQL", -"÷ ÔÁÂÌÉÃÅ ÄÏÌÖÅÎ ÂÙÔØ ËÁË ÍÉÎÉÍÕÍ ÏÄÉÎ ÓÔÏÌÂÅÃ", -"ôÁÂÌÉÃÁ '%-.64s' ÐÅÒÅÐÏÌÎÅÎÁ", -"îÅÉÚ×ÅÓÔÎÁÑ ËÏÄÉÒÏ×ËÁ '%-.64s'", -"óÌÉÛËÏÍ ÍÎÏÇÏ ÔÁÂÌÉÃ. MySQL ÍÏÖÅÔ ÉÓÐÏÌØÚÏ×ÁÔØ ÔÏÌØËÏ %d ÔÁÂÌÉÃ × ÓÏÅÄÉÎÅÎÉÉ", -"óÌÉÛËÏÍ ÍÎÏÇÏ ÓÔÏÌÂÃÏ×", -"óÌÉÛËÏÍ ÂÏÌØÛÏÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ. íÁËÓÉÍÁÌØÎÙÊ ÒÁÚÍÅÒ ÓÔÒÏËÉ, ÉÓËÌÀÞÁÑ ÐÏÌÑ BLOB, - %d. ÷ÏÚÍÏÖÎÏ, ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÍÅÎÉÔØ ÔÉÐ ÎÅËÏÔÏÒÙÈ ÐÏÌÅÊ ÎÁ BLOB", -"óÔÅË ÐÏÔÏËÏ× ÐÅÒÅÐÏÌÎÅÎ: ÉÓÐÏÌØÚÏ×ÁÎÏ: %ld ÉÚ %ld ÓÔÅËÁ. ðÒÉÍÅÎÑÊÔÅ 'mysqld -O thread_stack=#' ÄÌÑ ÕËÁÚÁÎÉÑ ÂÏÌØÛÅÇÏ ÒÁÚÍÅÒÁ ÓÔÅËÁ, ÅÓÌÉ ÎÅÏÂÈÏÄÉÍÏ", -"÷ OUTER JOIN ÏÂÎÁÒÕÖÅÎÁ ÐÅÒÅËÒÅÓÔÎÁÑ ÚÁ×ÉÓÉÍÏÓÔØ. 
÷ÎÉÍÁÔÅÌØÎÏ ÐÒÏÁÎÁÌÉÚÉÒÕÊÔÅ Ó×ÏÉ ÕÓÌÏ×ÉÑ ON", -"óÔÏÌÂÅà '%-.64s' ÉÓÐÏÌØÚÕÅÔÓÑ × UNIQUE ÉÌÉ × INDEX, ÎÏ ÎÅ ÏÐÒÅÄÅÌÅÎ ËÁË NOT NULL", -"îÅ×ÏÚÍÏÖÎÏ ÚÁÇÒÕÚÉÔØ ÆÕÎËÃÉÀ '%-.64s'", -"îÅ×ÏÚÍÏÖÎÏ ÉÎÉÃÉÁÌÉÚÉÒÏ×ÁÔØ ÆÕÎËÃÉÀ '%-.64s'; %-.80s", -"îÅÄÏÐÕÓÔÉÍÏ ÕËÁÚÙ×ÁÔØ ÐÕÔÉ ÄÌÑ ÄÉÎÁÍÉÞÅÓËÉÈ ÂÉÂÌÉÏÔÅË", -"æÕÎËÃÉÑ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ", -"îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÄÉÎÁÍÉÞÅÓËÕÀ ÂÉÂÌÉÏÔÅËÕ '%-.64s' (ÏÛÉÂËÁ: %d %-.64s)", -"îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÆÕÎËÃÉÀ '%-.64s' × ÂÉÂÌÉÏÔÅËÅ", -"æÕÎËÃÉÑ '%-.64s' ÎÅ ÏÐÒÅÄÅÌÅÎÁ", -"èÏÓÔ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÉÚ-ÚÁ ÓÌÉÛËÏÍ ÂÏÌØÛÏÇÏ ËÏÌÉÞÅÓÔ×Á ÏÛÉÂÏË ÓÏÅÄÉÎÅÎÉÑ. òÁÚÂÌÏËÉÒÏ×ÁÔØ ÅÇÏ ÍÏÖÎÏ Ó ÐÏÍÏÝØÀ 'mysqladmin flush-hosts'", -"èÏÓÔÕ '%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÐÏÄËÌÀÞÁÔØÓÑ Ë ÜÔÏÍÕ ÓÅÒ×ÅÒÕ MySQL", -"÷Ù ÉÓÐÏÌØÚÕÅÔÅ MySQL ÏÔ ÉÍÅÎÉ ÁÎÏÎÉÍÎÏÇÏ ÐÏÌØÚÏ×ÁÔÅÌÑ, Á ÁÎÏÎÉÍÎÙÍ ÐÏÌØÚÏ×ÁÔÅÌÑÍ ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÍÅÎÑÔØ ÐÁÒÏÌÉ", -"äÌÑ ÔÏÇÏ ÞÔÏÂÙ ÉÚÍÅÎÑÔØ ÐÁÒÏÌÉ ÄÒÕÇÉÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ, Õ ×ÁÓ ÄÏÌÖÎÙ ÂÙÔØ ÐÒÉ×ÉÌÅÇÉÉ ÎÁ ÉÚÍÅÎÅÎÉÅ ÔÁÂÌÉÃ × ÂÁÚÅ ÄÁÎÎÙÈ mysql", -"îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÄÈÏÄÑÝÕÀ ÚÁÐÉÓØ × ÔÁÂÌÉÃÅ ÐÏÌØÚÏ×ÁÔÅÌÅÊ", -"óÏ×ÐÁÌÏ ÚÁÐÉÓÅÊ: %ld éÚÍÅÎÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÎÏ×ÙÊ ÐÏÔÏË (ÏÛÉÂËÁ %d). åÓÌÉ ÜÔÏ ÎÅ ÓÉÔÕÁÃÉÑ, Ó×ÑÚÁÎÎÁÑ Ó ÎÅÈ×ÁÔËÏÊ ÐÁÍÑÔÉ, ÔÏ ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÕÞÉÔØ ÄÏËÕÍÅÎÔÁÃÉÀ ÎÁ ÐÒÅÄÍÅÔ ÏÐÉÓÁÎÉÑ ×ÏÚÍÏÖÎÏÊ ÏÛÉÂËÉ ÒÁÂÏÔÙ × ËÏÎËÒÅÔÎÏÊ ïó", -"ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ × ÚÁÐÉÓÉ %ld", -"îÅ×ÏÚÍÏÖÎÏ ÚÁÎÏ×Ï ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ '%-.64s'", -"îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ×ÅÌÉÞÉÎÙ NULL", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ '%-.64s' ÏÔ ÒÅÇÕÌÑÒÎÏÇÏ ×ÙÒÁÖÅÎÉÑ", -"ïÄÎÏ×ÒÅÍÅÎÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÈ (GROUP) ÓÔÏÌÂÃÏ× (MIN(),MAX(),COUNT(),...) Ó ÎÅÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÍÉ ÓÔÏÌÂÃÁÍÉ Ñ×ÌÑÅÔÓÑ ÎÅËÏÒÒÅËÔÎÙÍ, ÅÓÌÉ × ×ÙÒÁÖÅÎÉÉ ÅÓÔØ GROUP BY", -"ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ÈÏÓÔÅ '%-.64s'", -"ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'", -"ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s' × ÔÁÂÌÉÃÅ '%-.64s'", -"îÅ×ÅÒÎÁÑ ËÏÍÁÎÄÁ GRANT ÉÌÉ REVOKE. 
ïÂÒÁÔÉÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ, ÞÔÏÂÙ ×ÙÑÓÎÉÔØ, ËÁËÉÅ ÐÒÉ×ÉÌÅÇÉÉ ÍÏÖÎÏ ÉÓÐÏÌØÚÏ×ÁÔØ", -"óÌÉÛËÏÍ ÄÌÉÎÎÏÅ ÉÍÑ ÐÏÌØÚÏ×ÁÔÅÌÑ/ÈÏÓÔÁ ÄÌÑ GRANT", -"ôÁÂÌÉÃÁ '%-.64s.%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ", -"ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ËÏÍÐØÀÔÅÒÅ '%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'", -"üÔÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÐÕÓËÁÅÔÓÑ × ÄÁÎÎÏÊ ×ÅÒÓÉÉ MySQL", -"õ ×ÁÓ ÏÛÉÂËÁ × ÚÁÐÒÏÓÅ. éÚÕÞÉÔÅ ÄÏËÕÍÅÎÔÁÃÉÀ ÐÏ ÉÓÐÏÌØÚÕÅÍÏÊ ×ÅÒÓÉÉ MySQL ÎÁ ÐÒÅÄÍÅÔ ËÏÒÒÅËÔÎÏÇÏ ÓÉÎÔÁËÓÉÓÁ", -"ðÏÔÏË, ÏÂÓÌÕÖÉ×ÁÀÝÉÊ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert), ÎÅ ÓÍÏÇ ÐÏÌÕÞÉÔØ ÚÁÐÒÁÛÉ×ÁÅÍÕÀ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÔÁÂÌÉÃÕ %-.64s", -"óÌÉÛËÏÍ ÍÎÏÇÏ ÐÏÔÏËÏ×, ÏÂÓÌÕÖÉ×ÁÀÝÉÈ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert)", -"ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' (%-.64s)", -"ðÏÌÕÞÅÎÎÙÊ ÐÁËÅÔ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ ÏÔ ÐÏÔÏËÁ ÓÏÅÄÉÎÅÎÉÑ (connection pipe)", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÏÔ fcntl()", -"ðÁËÅÔÙ ÐÏÌÕÞÅÎÙ × ÎÅ×ÅÒÎÏÍ ÐÏÒÑÄËÅ", -"îÅ×ÏÚÍÏÖÎÏ ÒÁÓÐÁËÏ×ÁÔØ ÐÁËÅÔ, ÐÏÌÕÞÅÎÎÙÊ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ ", -"ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ ", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÐÒÉ ÐÅÒÅÄÁÞÅ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ ", -"ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ × ÐÒÏÃÅÓÓÅ ÐÅÒÅÄÁÞÉ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ ", -"òÅÚÕÌØÔÉÒÕÀÝÁÑ ÓÔÒÏËÁ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'", -"éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÔÉÐÙ BLOB/TEXT", -"éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÅ ÓÔÏÌÂÃÙ", -"îÅÌØÚÑ ÉÓÐÏÌØÚÏ×ÁÔØ INSERT DELAYED ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s', ÐÏÔÏÍÕ ÞÔÏ ÏÎÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES", -"îÅ×ÅÒÎÏÅ ÉÍÑ ÓÔÏÌÂÃÁ '%-.100s'", -"éÓÐÏÌØÚÏ×ÁÎÎÙÊ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÍÏÖÅÔ ÐÒÏÉÎÄÅËÓÉÒÏ×ÁÔØ ÓÔÏÌÂÅà '%-.64s'", -"îÅ ×ÓÅ ÔÁÂÌÉÃÙ × MERGE ÏÐÒÅÄÅÌÅÎÙ ÏÄÉÎÁËÏ×Ï", -"îÅ×ÏÚÍÏÖÎÏ ÚÁÐÉÓÁÔØ × ÔÁÂÌÉÃÕ '%-.64s' ÉÚ-ÚÁ ÏÇÒÁÎÉÞÅÎÉÊ ÕÎÉËÁÌØÎÏÇÏ ËÌÀÞÁ", -"óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÂÙÌ ÕËÁÚÁÎ × ÏÐÒÅÄÅÌÅÎÉÉ ËÌÀÞÁ ÂÅÚ ÕËÁÚÁÎÉÑ ÄÌÉÎÙ ËÌÀÞÁ", -"÷ÓÅ ÞÁÓÔÉ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ (PRIMARY KEY) ÄÏÌÖÎÙ ÂÙÔØ ÏÐÒÅÄÅÌÅÎÙ ËÁË NOT NULL; åÓÌÉ 
×ÁÍ ÎÕÖÎÁ ÐÏÄÄÅÒÖËÁ ×ÅÌÉÞÉÎ NULL × ËÌÀÞÅ, ×ÏÓÐÏÌØÚÕÊÔÅÓØ ÉÎÄÅËÓÏÍ UNIQUE", -"÷ ÒÅÚÕÌØÔÁÔÅ ×ÏÚ×ÒÁÝÅÎÁ ÂÏÌÅÅ ÞÅÍ ÏÄÎÁ ÓÔÒÏËÁ", -"üÔÏÔ ÔÉÐ ÔÁÂÌÉÃÙ ÔÒÅÂÕÅÔ ÏÐÒÅÄÅÌÅÎÉÑ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ", -"üÔÁ ×ÅÒÓÉÑ MySQL ÓËÏÍÐÉÌÉÒÏ×ÁÎÁ ÂÅÚ ÐÏÄÄÅÒÖËÉ RAID", -"÷Ù ÒÁÂÏÔÁÅÔÅ × ÒÅÖÉÍÅ ÂÅÚÏÐÁÓÎÙÈ ÏÂÎÏ×ÌÅÎÉÊ (safe update mode) É ÐÏÐÒÏÂÏ×ÁÌÉ ÉÚÍÅÎÉÔØ ÔÁÂÌÉÃÕ ÂÅÚ ÉÓÐÏÌØÚÏ×ÁÎÉÑ ËÌÀÞÅ×ÏÇÏ ÓÔÏÌÂÃÁ × ÞÁÓÔÉ WHERE", -"ëÌÀÞ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ × ÔÁÂÌÉÃÅ '%-.64s'", -"îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ", -"ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÏÇÏ: %s", -"÷ÁÍ ÎÅ ÒÁÚÒÅÛÅÎÏ ×ÙÐÏÌÎÑÔØ ÜÔÕ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËÃÉÉ", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ COMMIT", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ ROLLBACK", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ FLUSH_LOGS", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ CHECKPOINT", -"ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ `%-.64s' (%-.64s)", -"ïÂÒÁÂÏÔÞÉË ÜÔÏÊ ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Ä×ÏÉÞÎÏÇÏ ÓÏÈÒÁÎÅÎÉÑ ÏÂÒÁÚÁ ÔÁÂÌÉÃÙ (dump)", -"ä×ÏÉÞÎÙÊ ÖÕÒÎÁÌ ÏÂÎÏ×ÌÅÎÉÑ ÚÁËÒÙÔ, ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ RESET MASTER", -"ïÛÉÂËÁ ÐÅÒÅÓÔÒÏÊËÉ ÉÎÄÅËÓÁ ÓÏÈÒÁÎÅÎÎÏÊ ÔÁÂÌÉÃÙ '%-.64s'", -"ïÛÉÂËÁ ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ: '%-.64s'", -"÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ", -"÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÚÁÐÉÓÉ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ", -"îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÌÎÏÔÅËÓÔÏ×ÙÊ (FULLTEXT) ÉÎÄÅËÓ, ÓÏÏÔ×ÅÔÓÔ×ÕÀÝÉÊ ÓÐÉÓËÕ ÓÔÏÌÂÃÏ×", -"îÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ ÕËÁÚÁÎÎÕÀ ËÏÍÁÎÄÕ, ÐÏÓËÏÌØËÕ Õ ×ÁÓ ÐÒÉÓÕÔÓÔ×ÕÀÔ ÁËÔÉ×ÎÏ ÚÁÂÌÏËÉÒÏ×ÁÎÎÙÅ ÔÁÂÌÉÃÁ ÉÌÉ ÏÔËÒÙÔÁÑ ÔÒÁÎÚÁËÃÉÑ", -"îÅÉÚ×ÅÓÔÎÁÑ ÓÉÓÔÅÍÎÁÑ ÐÅÒÅÍÅÎÎÁÑ '%-.64s'", -"ôÁÂÌÉÃÁ '%-.64s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÄÏÌÖÎÁ ÐÒÏÊÔÉ ÐÒÏ×ÅÒËÕ É ÒÅÍÏÎÔ", -"ôÁÂÌÉÃÁ '%-.64s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÐÏÓÌÅÄÎÉÊ (Á×ÔÏÍÁÔÉÞÅÓËÉÊ?) ÒÅÍÏÎÔ ÎÅ ÂÙÌ ÕÓÐÅÛÎÙÍ", -"÷ÎÉÍÁÎÉÅ: ÐÏ ÎÅËÏÔÏÒÙÍ ÉÚÍÅÎÅÎÎÙÍ ÎÅÔÒÁÎÚÁËÃÉÏÎÎÙÍ ÔÁÂÌÉÃÁÍ ÎÅ×ÏÚÍÏÖÎÏ ÂÕÄÅÔ ÐÒÏÉÚ×ÅÓÔÉ ÏÔËÁÔ ÔÒÁÎÚÁËÃÉÉ", -"ôÒÁÎÚÁËÃÉÉ, ×ËÌÀÞÁÀÝÅÊ ÂÏÌØÛÏÅ ËÏÌÉÞÅÓÔ×Ï ËÏÍÁÎÄ, ÐÏÔÒÅÂÏ×ÁÌÏÓØ ÂÏÌÅÅ ÞÅÍ 'max_binlog_cache_size' ÂÁÊÔ. 
õ×ÅÌÉÞØÔÅ ÜÔÕ ÐÅÒÅÍÅÎÎÕÀ ÓÅÒ×ÅÒÁ mysqld É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ", -"üÔÕ ÏÐÅÒÁÃÉÀ ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ ÐÒÉ ÒÁÂÏÔÁÀÝÅÍ ÐÏÔÏËÅ ÐÏÄÞÉÎÅÎÎÏÇÏ ÓÅÒ×ÅÒÁ. óÎÁÞÁÌÁ ×ÙÐÏÌÎÉÔÅ STOP SLAVE", -"äÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ ÔÒÅÂÕÅÔÓÑ ÒÁÂÏÔÁÀÝÉÊ ÐÏÄÞÉÎÅÎÎÙÊ ÓÅÒ×ÅÒ. óÎÁÞÁÌÁ ×ÙÐÏÌÎÉÔÅ START SLAVE", -"üÔÏÔ ÓÅÒ×ÅÒ ÎÅ ÎÁÓÔÒÏÅÎ ËÁË ÐÏÄÞÉÎÅÎÎÙÊ. ÷ÎÅÓÉÔÅ ÉÓÐÒÁ×ÌÅÎÉÑ × ËÏÎÆÉÇÕÒÁÃÉÏÎÎÏÍ ÆÁÊÌÅ ÉÌÉ Ó ÐÏÍÏÝØÀ CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÐÏÔÏË ÐÏÄÞÉÎÅÎÎÏÇÏ ÓÅÒ×ÅÒÁ. ðÒÏ×ÅÒØÔÅ ÓÉÓÔÅÍÎÙÅ ÒÅÓÕÒÓÙ", -"õ ÐÏÌØÚÏ×ÁÔÅÌÑ %-.64s ÕÖÅ ÂÏÌØÛÅ ÞÅÍ 'max_user_connections' ÁËÔÉ×ÎÙÈ ÓÏÅÄÉÎÅÎÉÊ", -"÷Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ × SET ÔÏÌØËÏ ËÏÎÓÔÁÎÔÎÙÅ ×ÙÒÁÖÅÎÉÑ", -"ôÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÂÌÏËÉÒÏ×ËÉ ÉÓÔÅË; ÐÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ", -"ïÂÝÅÅ ËÏÌÉÞÅÓÔ×Ï ÂÌÏËÉÒÏ×ÏË ÐÒÅ×ÙÓÉÌÏ ÒÁÚÍÅÒÙ ÔÁÂÌÉÃÙ ÂÌÏËÉÒÏ×ÏË", -"âÌÏËÉÒÏ×ËÉ ÏÂÎÏ×ÌÅÎÉÊ ÎÅÌØÚÑ ÐÏÌÕÞÉÔØ × ÐÒÏÃÅÓÓÅ ÞÔÅÎÉÑ ÎÅ ÐÒÉÎÑÔÏÊ (× ÒÅÖÉÍÅ READ UNCOMMITTED) ÔÒÁÎÚÁËÃÉÉ", -"îÅ ÄÏÐÕÓËÁÅÔÓÑ DROP DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ", -"îÅ ÄÏÐÕÓËÁÅÔÓÑ CREATE DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ", -"îÅ×ÅÒÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ %s", -"'%-.32s'@'%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÓÏÚÄÁ×ÁÔØ ÎÏ×ÙÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ", -"îÅ×ÅÒÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ; ÷ÓÅ ÔÁÂÌÉÃÙ × MERGE ÄÏÌÖÎÙ ÐÒÉÎÁÄÌÅÖÁÔØ ÏÄÎÏÊ É ÔÏÊ ÖÅ ÂÁÚÅ ÄÁÎÎÙÈ", -"÷ÏÚÎÉËÌÁ ÔÕÐÉËÏ×ÁÑ ÓÉÔÕÁÃÉÑ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÂÌÏËÉÒÏ×ËÉ; ðÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ", -"éÓÐÏÌØÚÕÅÍÙÊ ÔÉÐ ÔÁÂÌÉà ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÐÏÌÎÏÔÅËÓÔÏ×ÙÈ ÉÎÄÅËÓÏ×", -"îÅ×ÏÚÍÏÖÎÏ ÄÏÂÁ×ÉÔØ ÏÇÒÁÎÉÞÅÎÉÑ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ", -"îÅ×ÏÚÍÏÖÎÏ ÄÏÂÁ×ÉÔØ ÉÌÉ ÏÂÎÏ×ÉÔØ ÄÏÞÅÒÎÀÀ ÓÔÒÏËÕ: ÐÒÏ×ÅÒËÁ ÏÇÒÁÎÉÞÅÎÉÊ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ ÎÅ ×ÙÐÏÌÎÑÅÔÓÑ", -"îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÉÌÉ ÏÂÎÏ×ÉÔØ ÒÏÄÉÔÅÌØÓËÕÀ ÓÔÒÏËÕ: ÐÒÏ×ÅÒËÁ ÏÇÒÁÎÉÞÅÎÉÊ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ ÎÅ ×ÙÐÏÌÎÑÅÔÓÑ", -"ïÛÉÂËÁ ÓÏÅÄÉÎÅÎÉÑ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ: %-.128s", -"ïÛÉÂËÁ ×ÙÐÏÌÎÅÎÉÑ ÚÁÐÒÏÓÁ ÎÁ ÇÏÌÏ×ÎÏÍ ÓÅÒ×ÅÒÅ: %-.128s", -"ïÛÉÂËÁ ÐÒÉ ×ÙÐÏÌÎÅÎÉÉ ËÏÍÁÎÄÙ %s: %-.128s", -"îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ %s É %s", -"éÓÐÏÌØÚÏ×ÁÎÎÙÅ ÏÐÅÒÁÔÏÒÙ ×ÙÂÏÒËÉ 
(SELECT) ÄÁÀÔ ÒÁÚÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ×", -"îÅ×ÏÚÍÏÖÎÏ ÉÓÐÏÌÎÉÔØ ÚÁÐÒÏÓ, ÐÏÓËÏÌØËÕ Õ ×ÁÓ ÕÓÔÁÎÏ×ÌÅÎÙ ËÏÎÆÌÉËÔÕÀÝÉÅ ÂÌÏËÉÒÏ×ËÉ ÞÔÅÎÉÑ", -"éÓÐÏÌØÚÏ×ÁÎÉÅ ÔÒÁÎÚÁËÃÉÏÎÎÙÈ ÔÁÂÌÉà ÎÁÒÑÄÕ Ó ÎÅÔÒÁÎÚÁËÃÉÏÎÎÙÍÉ ÚÁÐÒÅÝÅÎÏ", -"ïÐÃÉÑ '%s' Ä×ÁÖÄÙ ÉÓÐÏÌØÚÏ×ÁÎÁ × ×ÙÒÁÖÅÎÉÉ", -"ðÏÌØÚÏ×ÁÔÅÌØ '%-.64s' ÐÒÅ×ÙÓÉÌ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÒÅÓÕÒÓÁ '%s' (ÔÅËÕÝÅÅ ÚÎÁÞÅÎÉÅ: %ld)", -"÷ ÄÏÓÔÕÐÅ ÏÔËÁÚÁÎÏ. ÷ÁÍ ÎÕÖÎÙ ÐÒÉ×ÉÌÅÇÉÉ %-.128s ÄÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÐÏÔÏËÏ×ÏÊ (SESSION) ÐÅÒÅÍÅÎÎÏÊ É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ Ó ÐÏÍÏÝØÀ SET GLOBAL", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÇÌÏÂÁÌØÎÏÊ (GLOBAL) ÐÅÒÅÍÅÎÎÏÊ, É ÅÅ ÓÌÅÄÕÅÔ ÉÚÍÅÎÑÔØ Ó ÐÏÍÏÝØÀ SET GLOBAL", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÉÍÅÅÔ ÚÎÁÞÅÎÉÑ ÐÏ ÕÍÏÌÞÁÎÉÀ", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÕÓÔÁÎÏ×ÌÅÎÁ × ÚÎÁÞÅÎÉÅ '%-.64s'", -"îÅ×ÅÒÎÙÊ ÔÉÐ ÁÒÇÕÍÅÎÔÁ ÄÌÑ ÐÅÒÅÍÅÎÎÏÊ '%-.64s'", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÍÏÖÅÔ ÂÙÔØ ÔÏÌØËÏ ÕÓÔÁÎÏ×ÌÅÎÁ, ÎÏ ÎÅ ÓÞÉÔÁÎÁ", -"îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÉÌÉ × ÎÅ×ÅÒÎÏÍ ÍÅÓÔÅ ÕËÁÚÁÎ '%s'", -"üÔÁ ×ÅÒÓÉÑ MySQL ÐÏËÁ ÅÝÅ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ '%s'", -"ðÏÌÕÞÅÎÁ ÎÅÉÓÐÒÁ×ÉÍÁÑ ÏÛÉÂËÁ %d: '%-.128s' ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ × ÐÒÏÃÅÓÓÅ ×ÙÂÏÒËÉ ÄÁÎÎÙÈ ÉÚ Ä×ÏÉÞÎÏÇÏ ÖÕÒÎÁÌÁ", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"ïÐÅÒÁÎÄ ÄÏÌÖÅÎ ÓÏÄÅÒÖÁÔØ %d ËÏÌÏÎÏË", -"ðÏÄÚÁÐÒÏÓ ×ÏÚ×ÒÁÝÁÅÔ ÂÏÌÅÅ ÏÄÎÏÊ ÚÁÐÉÓÉ", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"ãÉËÌÉÞÅÓËÁÑ ÓÓÙÌËÁ ÎÁ ÐÏÄÚÁÐÒÏÓ", -"ðÒÅÏÂÒÁÚÏ×ÁÎÉÅ ÐÏÌÑ '%s' ÉÚ %s × %s", -"óÓÙÌËÁ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔÓÑ (%s)", -"Every derived table must have its own alias", -"Select %u ÂÙÌ ÕÐÒÁÚÄÎÅÎ × ÐÒÏÃÅÓÓÅ ÏÐÔÉÍÉÚÁÃÉÉ", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER 
SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"óÅÒ×ÅÒ ÚÁÐÕÝÅÎ × ÒÅÖÉÍÅ --secure-auth (ÂÅÚÏÐÁÓÎÏÊ Á×ÔÏÒÉÚÁÃÉÉ), ÎÏ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%s'@'%s' ÐÁÒÏÌØ ÓÏÈÒÁÎ£Î × ÓÔÁÒÏÍ ÆÏÒÍÁÔÅ; ÎÅÏÂÈÏÄÉÍÏ ÏÂÎÏ×ÉÔØ ÆÏÒÍÁÔ ÐÁÒÏÌÑ", -"ðÏÌÅ ÉÌÉ ÓÓÙÌËÁ '%-.64s%s%-.64s%s%-.64s' ÉÚ SELECTÁ #%d ÂÙÌÁ ÎÁÊÄÅÎÁ × SELECTÅ #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", 
-"ëÅÛ ÚÁÐÒÏÓÏ× ÎÅ ÍÏÖÅÔ ÕÓÔÁÎÏ×ÉÔØ ÒÁÚÍÅÒ %lu, ÎÏ×ÙÊ ÒÁÚÍÅÒ ËÅÛÁ ÚÐÒÏÓÏ× - %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"ôÁÂÌÉÃÁ %-.100s × %s ÎÅ ÍÏÖÅÔ ÉÚÍÅÎÑÔÓÑ", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/serbian/errmsg.txt b/sql/share/serbian/errmsg.txt deleted file mode 100644 index e6f9abc412f..00000000000 --- a/sql/share/serbian/errmsg.txt +++ /dev/null @@ -1,314 +0,0 @@ -/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB - This file is public domain and comes with NO WARRANTY of any kind */ - -/* Serbian Translation, version 1.0: - Copyright 2002 Vladimir Kraljevic, vladimir_kraljevic@yahoo.com - This file is public domain and comes with NO WARRANTY of any kind. 
- Charset: cp1250 -*/ - -character-set=cp1250 - -"hashchk", -"isamchk", -"NE", -"DA", -"Ne mogu da kreiram file '%-.64s' (errno: %d)", -"Ne mogu da kreiram tabelu '%-.64s' (errno: %d)", -"Ne mogu da kreiram bazu '%-.64s' (errno: %d)", -"Ne mogu da kreiram bazu '%-.64s'; baza veæ postoji.", -"Ne mogu da izbrišem bazu '%-.64s'; baza ne postoji.", -"Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.64s', errno: %d)", -"Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.64s', errno: %d)", -"Greška pri brisanju '%-.64s' (errno: %d)", -"Ne mogu da proèitam slog iz sistemske tabele", -"Ne mogu da dobijem stanje file-a '%-.64s' (errno: %d)", -"Ne mogu da dobijem trenutni direktorijum (errno: %d)", -"Ne mogu da zakljuèam file (errno: %d)", -"Ne mogu da otvorim file: '%-.64s' (errno: %d)", -"Ne mogu da pronaðem file: '%-.64s' (errno: %d)", -"Ne mogu da proèitam direktorijum '%-.64s' (errno: %d)", -"Ne mogu da promenim direktorijum na '%-.64s' (errno: %d)", -"Slog je promenjen od zadnjeg èitanja tabele '%-.64s'", -"Disk je pun (%s). Èekam nekoga da doðe i oslobodi nešto mesta...", -"Ne mogu da pišem pošto postoji duplirani kljuè u tabeli '%-.64s'", -"Greška pri zatvaranju '%-.64s' (errno: %d)", -"Greška pri èitanju file-a '%-.64s' (errno: %d)", -"Greška pri promeni imena '%-.64s' na '%-.64s' (errno: %d)", -"Greška pri upisu '%-.64s' (errno: %d)", -"'%-.64s' je zakljuèan za upis", -"Sortiranje je prekinuto", -"View '%-.64s' ne postoji za '%-.64s'", -"Handler tabela je vratio grešku %d", -"Handler tabela za '%-.64s' nema ovu opciju", -"Ne mogu da pronaðem slog u '%-.64s'", -"Pogrešna informacija u file-u: '%-.64s'", -"Pogrešan key file za tabelu: '%-.64s'; probajte da ga ispravite", -"Zastareo key file za tabelu '%-.64s'; ispravite ga", -"Tabelu '%-.64s' je dozvoljeno samo èitati", -"Nema memorije. Restartujte MySQL server i probajte ponovo (potrebno je %d byte-ova)", -"Nema memorije za sortiranje. 
Poveæajte velièinu sort buffer-a MySQL server-u", -"Neoèekivani kraj pri èitanju file-a '%-.64s' (errno: %d)", -"Previše konekcija", -"Nema memorije; Proverite da li MySQL server ili neki drugi proces koristi svu slobodnu memoriju. (UNIX: Ako ne, probajte da upotrebite 'ulimit' komandu da biste dozvolili daemon-u da koristi više memorije ili probajte da dodate više swap memorije)", -"Ne mogu da dobijem ime host-a za vašu IP adresu", -"Loš poèetak komunikacije (handshake)", -"Pristup je zabranjen korisniku '%-.32s'@'%-.64s' za bazu '%-.64s'", -"Pristup je zabranjen korisniku '%-.32s'@'%-.64s' (koristi lozinku: '%s')", -"Ni jedna baza nije selektovana", -"Nepoznata komanda", -"Kolona '%-.64s' ne može biti NULL", -"Nepoznata baza '%-.64s'", -"Tabela '%-.64s' veæ postoji", -"Nepoznata tabela '%-.64s'", -"Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu", -"Gašenje servera je u toku", -"Nepoznata kolona '%-.64s' u '%-.64s'", -"Entitet '%-.64s' nije naveden u komandi 'GROUP BY'", -"Ne mogu da grupišem po '%-.64s'", -"Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme", -"Broj kolona ne odgovara broju vrednosti", -"Ime '%-.100s' je predugaèko", -"Duplirano ime kolone '%-.64s'", -"Duplirano ime kljuèa '%-.64s'", -"Dupliran unos '%-.64s' za kljuè '%d'", -"Pogrešan naziv kolone za kolonu '%-.64s'", -"'%s' u iskazu '%-.80s' na liniji %d", -"Upit je bio prazan", -"Tabela ili alias nisu bili jedinstveni: '%-.64s'", -"Loša default vrednost za '%-.64s'", -"Definisani višestruki primarni kljuèevi", -"Navedeno je previše kljuèeva. Maksimum %d kljuèeva je dozvoljeno", -"Navedeno je previše delova kljuèa. Maksimum %d delova je dozvoljeno", -"Navedeni kljuè je predug. Maksimalna dužina kljuèa je %d", -"Kljuèna kolona '%-.64s' ne postoji u tabeli", -"BLOB kolona '%-.64s' ne može biti upotrebljena za navoðenje kljuèa sa tipom tabele koji se trenutno koristi", -"Previše podataka za kolonu '%-.64s' (maksimum je %d). 
Upotrebite BLOB polje", -"Pogrešna definicija tabele; U tabeli može postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona kljuèa", -"%s: Spreman za konekcije\n", -"%s: Normalno gašenje\n", -"%s: Dobio signal %d. Prekidam!\n", -"%s: Gašenje završeno\n", -"%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.32s'\n", -"Ne mogu da kreiram IP socket", -"Tabela '%-.64s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo", -"Argument separatora polja nije ono što se oèekivalo. Proverite uputstvo MySQL server-a", -"Ne možete koristiti fiksnu velièinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju.", -"File '%-.64s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuæa prava pristupa", -"File '%-.80s' veæ postoji", -"Slogova: %ld Izbrisano: %ld Preskoèeno: %ld Upozorenja: %ld", -"Slogova: %ld Duplikata: %ld", -"Pogrešan pod-kljuè dela kljuèa. Upotrebljeni deo kljuèa nije string, upotrebljena dužina je veæa od dela kljuèa ili handler tabela ne podržava jedinstvene pod-kljuèeve", -"Ne možete da izbrišete sve kolone pomoæu komande 'ALTER TABLE'. Upotrebite komandu 'DROP TABLE' ako želite to da uradite", -"Ne mogu da izvršim komandu drop 'DROP' na '%-.64s'. 
Proverite da li ta kolona (odnosno kljuè) postoji", -"Slogova: %ld Duplikata: %ld Upozorenja: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Nepoznat thread identifikator: %lu", -"Vi niste vlasnik thread-a %lu", -"Nema upotrebljenih tabela", -"Previše string-ova za kolonu '%-.64s' i komandu 'SET'", -"Ne mogu da generišem jedinstveno ime log-file-a: '%-.64s.(1-999)'\n", -"Tabela '%-.64s' je zakljuèana READ lock-om; iz nje se može samo èitati ali u nju se ne može pisati", -"Tabela '%-.64s' nije bila zakljuèana komandom 'LOCK TABLES'", -"BLOB kolona '%-.64s' ne može imati default vrednost", -"Pogrešno ime baze '%-.100s'", -"Pogrešno ime tabele '%-.100s'", -"Komanda 'SELECT' æe ispitati previše slogova i potrošiti previše vremena. Proverite vaš 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako želite baš ovakvu komandu", -"Nepoznata greška", -"Nepoznata procedura '%-.64s'", -"Pogrešan broj parametara za proceduru '%-.64s'", -"Pogrešni parametri prosleðeni proceduri '%-.64s'", -"Nepoznata tabela '%-.64s' u '%-.32s'", -"Kolona '%-.64s' je navedena dva puta", -"Pogrešna upotreba 'GROUP' funkcije", -"Tabela '%-.64s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a", -"Tabela mora imati najmanje jednu kolonu", -"Tabela '%-.64s' je popunjena do kraja", -"Nepoznati karakter-set: '%-.64s'", -"Previše tabela. MySQL može upotrebiti maksimum %d tabela pri 'JOIN' operaciji", -"Previše kolona", -"Prevelik slog. Maksimalna velièina sloga, ne raèunajuæi BLOB polja, je %d. Trebali bi da promenite tip nekih polja u BLOB", -"Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. Upotrebite 'mysqld -O thread_stack=#' da navedete veæi stack ako je potrebno", -"Unakrsna zavisnost pronaðena u komandi 'OUTER JOIN'. 
Istražite vaše 'ON' uslove", -"Kolona '%-.64s' je upotrebljena kao 'UNIQUE' ili 'INDEX' ali nije definisana kao 'NOT NULL'", -"Ne mogu da uèitam funkciju '%-.64s'", -"Ne mogu da inicijalizujem funkciju '%-.64s'; %-.80s", -"Ne postoje dozvoljene putanje do share-ovane biblioteke", -"Funkcija '%-.64s' veæ postoji", -"Ne mogu da otvorim share-ovanu biblioteku '%-.64s' (errno: %d %-.64s)", -"Ne mogu da pronadjem funkciju '%-.64s' u biblioteci", -"Funkcija '%-.64s' nije definisana", -"Host '%-.64s' je blokiran zbog previše grešaka u konekciji. Možete ga odblokirati pomoæu komande 'mysqladmin flush-hosts'", -"Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MySQL server", -"Vi koristite MySQL kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke", -"Morate imati privilegije da možete da update-ujete odreðene tabele ako želite da menjate lozinke za druge korisnike", -"Ne mogu da pronaðem odgovarajuæi slog u 'user' tabeli", -"Odgovarajuæih slogova: %ld Promenjeno: %ld Upozorenja: %ld", -"Ne mogu da kreiram novi thread (errno %d). Ako imate još slobodne memorije, trebali biste da pogledate u priruèniku da li je ovo specifièna greška vašeg operativnog sistema", -"Broj kolona ne odgovara broju vrednosti u slogu %ld", -"Ne mogu da ponovo otvorim tabelu '%-.64s'", -"Pogrešna upotreba vrednosti NULL", -"Funkcija regexp je vratila grešku '%-.64s'", -"Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrešna ako ne postoji 'GROUP BY' iskaz", -"Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s'", -"%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za tabelu '%-.64s'", -"%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za kolonu '%-.64s' iz tabele '%-.64s'", -"Pogrešna 'GRANT' odnosno 'REVOKE' komanda. 
Molim Vas pogledajte u priruèniku koje vrednosti mogu biti upotrebljene.", -"Argument 'host' ili 'korisnik' prosleðen komandi 'GRANT' je predugaèak", -"Tabela '%-.64s.%-.64s' ne postoji", -"Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s' tabeli '%-.64s'", -"Upotrebljena komanda nije dozvoljena sa ovom verzijom MySQL servera", -"Imate grešku u vašoj SQL sintaksi", -"Prolongirani 'INSERT' thread nije mogao da dobije traženo zakljuèavanje tabele '%-.64s'", -"Previše prolongiranih thread-ova je u upotrebi", -"Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)", -"Primio sam mrežni paket veæi od definisane vrednosti 'max_allowed_packet'", -"Greška pri èitanju podataka sa pipe-a", -"Greška pri izvršavanju funkcije fcntl()", -"Primio sam mrežne pakete van reda", -"Ne mogu da dekompresujem mrežne pakete", -"Greška pri primanju mrežnih paketa", -"Vremenski limit za èitanje mrežnih paketa je istekao", -"Greška pri slanju mrežnih paketa", -"Vremenski limit za slanje mrežnih paketa je istekao", -"Rezultujuèi string je duži nego što to dozvoljava parametar servera 'max_allowed_packet'", -"Iskorišteni tip tabele ne podržava kolone tipa 'BLOB' odnosno 'TEXT'", -"Iskorišteni tip tabele ne podržava kolone tipa 'AUTO_INCREMENT'", -"Komanda 'INSERT DELAYED' ne može biti iskorištena u tabeli '%-.64s', zbog toga što je zakljuèana komandom 'LOCK TABLES'", -"Pogrešno ime kolone '%-.100s'", -"Handler tabele ne može da indeksira kolonu '%-.64s'", -"Tabele iskorištene u 'MERGE' tabeli nisu definisane na isti naèin", -"Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu '%-.64s'", -"BLOB kolona '%-.64s' je upotrebljena u specifikaciji kljuèa bez navoðenja dužine kljuèa", -"Svi delovi primarnog kljuèa moraju biti razlièiti od NULL; Ako Vam ipak treba NULL vrednost u kljuèu, upotrebite 'UNIQUE'", -"Rezultat je saèinjen od više slogova", -"Ovaj tip tabele zahteva da imate definisan primarni kljuè", -"Ova verzija MySQL servera nije 
kompajlirana sa podrškom za RAID ureðaje", -"Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu kljuèa", -"Kljuè '%-.64s' ne postoji u tabeli '%-.64s'", -"Ne mogu da otvorim tabelu", -"Handler za ovu tabelu ne dozvoljava 'check' odnosno 'repair' komande", -"Nije Vam dozvoljeno da izvršite ovu komandu u transakciji", -"Greška %d za vreme izvršavanja komande 'COMMIT'", -"Greška %d za vreme izvršavanja komande 'ROLLBACK'", -"Greška %d za vreme izvršavanja komande 'FLUSH_LOGS'", -"Greška %d za vreme izvršavanja komande 'CHECKPOINT'", -"Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: `%-.64s' (%-.64s)", -"Handler tabele ne podržava binarni dump tabele", -"Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET MASTER'", -"Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela", -"Greška iz glavnog servera '%-.64s' u klasteru", -"Greška u primanju mrežnih paketa sa glavnog servera u klasteru", -"Greška u slanju mrežnih paketa na glavni server u klasteru", -"Ne mogu da pronaðem 'FULLTEXT' indeks koli odgovara listi kolona", -"Ne mogu da izvršim datu komandu zbog toga što su tabele zakljuèane ili je transakcija u toku", -"Nepoznata sistemska promenljiva '%-.64s'", -"Tabela '%-.64s' je markirana kao ošteæena i trebala bi biti popravljena", -"Tabela '%-.64s' je markirana kao ošteæena, a zadnja (automatska?) popravka je bila neuspela", -"Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'", -"Transakcija sa više stavki zahtevala je više od 'max_binlog_cache_size' bajtova skladišnog prostora. Uveæajte ovu promenljivu servera i pokušajte ponovo', -"Ova operacija ne može biti izvršena dok je aktivan podreðeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podreðeni server.", -"Ova operacija zahteva da je aktivan podreðeni server. 
Konfigurišite prvo podreðeni server i onda izvršite komandu 'START SLAVE'", -"Server nije konfigurisan kao podreðeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'", -"Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'", -"Nisam mogao da startujem thread za podreðeni server, proverite sistemske resurse", -"Korisnik %-.64s veæ ima više aktivnih konekcija nego što je to odreðeno 'max_user_connections' promenljivom", -"Možete upotrebiti samo konstantan iskaz sa komandom 'SET'", -"Vremenski limit za zakljuèavanje tabele je istekao; Probajte da ponovo startujete transakciju", -"Broj totalnih zakljuèavanja tabele premašuje velièinu tabele zakljuèavanja", -"Zakljuèavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija", -"Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka", -"Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka", -"Pogrešni argumenti prosleðeni na %s", -"Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike", -"Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka", -"Unakrsno zakljuèavanje pronaðeno kada sam pokušao da dobijem pravo na zakljuèavanje; Probajte da restartujete transakciju", -"Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse", -"Ne mogu da dodam proveru spoljnog kljuèa", -"Ne mogu da dodam slog: provera spoljnog kljuèa je neuspela", -"Ne mogu da izbrišem roditeljski slog: provera spoljnog kljuèa je neuspela", -"Greška pri povezivanju sa glavnim serverom u klasteru: %-.128s", -"Greška pri izvršavanju upita na glavnom serveru u klasteru: %-.128s", -"Greška pri izvršavanju komande %s: %-.128s", -"Pogrešna upotreba %s i %s", -"Upotrebljene 'SELECT' komande adresiraju razlièit broj kolona", -"Ne mogu da izvršim upit zbog toga što imate zakljuèavanja èitanja podataka 
u konfliktu", -"Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je iskljuèeno", -"Opcija '%s' je upotrebljena dva puta u istom iskazu", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length 
of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updatable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working" -"The MySQL server is running with the %s option so it cannot execute this statement" -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/slovak/errmsg.txt b/sql/share/slovak/errmsg.txt deleted file mode 100644 index f354aeab331..00000000000 --- a/sql/share/slovak/errmsg.txt +++ /dev/null @@ -1,329 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Translated from both E n g l i s h & C z e c h error messages - by steve: (billik@sun.uniag.sk). - Encoding: ISO LATIN-8852-2 - Server version: 3.21.25-gamma - Date: Streda 11. November 1998 20:58:15 -*/ - -character-set=latin2 - -"hashchk", -"isamchk", -"NIE", -"Áno", -"Nemô¾em vytvori» súbor '%-.64s' (chybový kód: %d)", -"Nemô¾em vytvori» tabuµku '%-.64s' (chybový kód: %d)", -"Nemô¾em vytvori» databázu '%-.64s' (chybový kód: %d)", -"Nemô¾em vytvori» databázu '%-.64s'; databáza existuje", -"Nemô¾em zmaza» databázu '%-.64s'; databáza neexistuje", -"Chyba pri mazaní databázy (nemô¾em zmaza» '%-.64s', chybový kód: %d)", -"Chyba pri mazaní databázy (nemô¾em vymaza» adresár '%-.64s', chybový kód: %d)", -"Chyba pri mazaní '%-.64s' (chybový kód: %d)", -"Nemô¾em èíta» záznam v systémovej tabuµke", -"Nemô¾em zisti» stav '%-.64s' (chybový kód: %d)", -"Nemô¾em zisti» pracovný adresár (chybový kód: %d)", -"Nemô¾em zamknú» súbor (chybový kód: %d)", -"Nemô¾em otvori» súbor: '%-.64s' (chybový kód: %d)", -"Nemô¾em nájs» súbor: '%-.64s' (chybový kód: %d)", -"Nemô¾em èíta» adresár '%-.64s' (chybový kód: %d)", -"Nemô¾em vojs» do adresára '%-.64s' (chybový kód: %d)", -"Záznam bol zmenený od posledného èítania v tabuµke '%-.64s'", -"Disk je plný (%s), èakám na uvoµnenie miesta...", -"Nemô¾em zapísa», duplikát kµúèa v tabuµke '%-.64s'", -"Chyba pri zatváraní '%-.64s' (chybový kód: %d)", -"Chyba pri èítaní súboru '%-.64s' (chybový kód: %d)", -"Chyba pri premenovávaní '%-.64s' na '%-.64s' (chybový kód: %d)", -"Chyba pri zápise do súboru '%-.64s' (chybový kód: %d)", -"'%-.64s' je zamknutý proti zmenám", -"Triedenie preru¹ené", -"Pohµad '%-.64s' neexistuje pre '%-.64s'", -"Obsluha tabuµky vrátila chybu %d", -"Obsluha tabuµky '%-.64s' nemá tento parameter", -"Nemô¾em nájs» 
záznam v '%-.64s'", -"Nesprávna informácia v súbore: '%-.64s'", -"Nesprávny kµúè pre tabuµku '%-.64s'; pokúste sa ho opravi»", -"Starý kµúèový súbor pre '%-.64s'; opravte ho!", -"'%-.64s' is èíta» only", -"Málo pamäti. Re¹tartujte daemona a skúste znova (je potrebných %d bytov)", -"Málo pamäti pre triedenie, zvý¹te veµkos» triediaceho bufferu", -"Neoèakávaný koniec súboru pri èítaní '%-.64s' (chybový kód: %d)", -"Príli¹ mnoho spojení", -"Málo miesta-pamäti pre vlákno", -"Nemô¾em zisti» meno hostiteµa pre va¹u adresu", -"Chyba pri nadväzovaní spojenia", -"Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' k databázi '%-.64s'", -"Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' (pou¾itie hesla: %s)", -"Nebola vybraná databáza", -"Neznámy príkaz", -"Pole '%-.64s' nemô¾e by» null", -"Neznáma databáza '%-.64s'", -"Tabuµka '%-.64s' u¾ existuje", -"Neznáma tabuµka '%-.64s'", -"Pole: '%-.64s' v %-.64s je nejasné", -"Prebieha ukonèovanie práce servera", -"Neznáme pole '%-.64s' v '%-.64s'", -"Pou¾ité '%-.64s' nebolo v 'group by'", -"Nemô¾em pou¾i» 'group' na '%-.64s'", -"Príkaz obsahuje zároveò funkciu 'sum' a poµa", -"Poèet polí nezodpovedá zadanej hodnote", -"Meno identifikátora '%-.100s' je príli¹ dlhé", -"Opakované meno poµa '%-.64s'", -"Opakované meno kµúèa '%-.64s'", -"Opakovaný kµúè '%-.64s' (èíslo kµúèa %d)", -"Chyba v ¹pecifikácii poµa '%-.64s'", -"%s blízko '%-.80s' na riadku %d", -"Výsledok po¾iadavky bol prázdny", -"Nie jednoznaèná tabuµka/alias: '%-.64s'", -"Chybná implicitná hodnota pre '%-.64s'", -"Zadefinovaných viac primárnych kµúèov", -"Zadaných ríli¹ veµa kµúèov. Najviac %d kµúèov je povolených", -"Zadaných ríli¹ veµa èastí kµúèov. Je povolených najviac %d èastí", -"Zadaný kµúè je príli¹ dlhý, najväè¹ia då¾ka kµúèa je %d", -"Kµúèový ståpec '%-.64s' v tabuµke neexistuje", -"Blob pole '%-.64s' nemô¾e by» pou¾ité ako kµúè", -"Príli¹ veµká då¾ka pre pole '%-.64s' (maximum = %d). 
Pou¾ite BLOB", -"Mô¾ete ma» iba jedno AUTO pole a to musí by» definované ako kµúè", -"%s: pripravený na spojenie", -"%s: normálne ukonèenie\n", -"%s: prijatý signál %d, ukonèenie (Abort)!\n", -"%s: práca ukonèená\n", -"%s: násilné ukonèenie vlákna %ld u¾ívateµa '%-.64s'\n", -"Nemô¾em vytvori» IP socket", -"Tabuµka '%-.64s' nemá index zodpovedajúci CREATE INDEX. Vytvorte tabulku znova", -"Argument oddeµovaè polí nezodpovedá po¾iadavkám. Skontrolujte v manuáli", -"Nie je mo¾né pou¾i» fixnú då¾ku s BLOBom. Pou¾ite 'fields terminated by'.", -"Súbor '%-.64s' musí by» v adresári databázy, alebo èitateµný pre v¹etkých", -"Súbor '%-.64s' u¾ existuje", -"Záznamov: %ld Zmazaných: %ld Preskoèených: %ld Varovania: %ld", -"Záznamov: %ld Opakovaných: %ld", -"Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part", -"One nemô¾em zmaza» all fields with ALTER TABLE; use DROP TABLE instead", -"Nemô¾em zru¹i» (DROP) '%-.64s'. Skontrolujte, èi neexistujú záznamy/kµúèe", -"Záznamov: %ld Opakovaných: %ld Varovania: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Neznáma identifikácia vlákna: %lu", -"Nie ste vlastníkom vlákna %lu", -"Nie je pou¾itá ¾iadna tabuµka", -"Príli¹ mnoho re»azcov pre pole %-.64s a SET", -"Nemô¾em vytvori» unikátne meno log-súboru %-.64s.(1-999)\n", -"Tabuµka '%-.64s' bola zamknutá s READ a nemô¾e by» zmenená", -"Tabuµka '%-.64s' nebola zamknutá s LOCK TABLES", -"Pole BLOB '%-.64s' nemô¾e ma» implicitnú hodnotu", -"Neprípustné meno databázy '%-.100s'", -"Neprípustné meno tabuµky '%-.100s'", -"Zadaná po¾iadavka SELECT by prechádzala príli¹ mnoho záznamov a trvala by príli¹ dlho. 
Skontrolujte tvar WHERE a ak je v poriadku, pou¾ite SET SQL_BIG_SELECTS=1", -"Neznámá chyba", -"Neznámá procedúra '%-.64s'", -"Chybný poèet parametrov procedúry '%-.64s'", -"Chybné parametre procedúry '%-.64s'", -"Neznáma tabuµka '%-.64s' v %s", -"Pole '%-.64s' je zadané dvakrát", -"Nesprávne pou¾itie funkcie GROUP", -"Tabuµka '%-.64s' pou¾íva roz¹írenie, ktoré v tejto verzii MySQL nie je", -"Tabuµka musí ma» aspoò 1 pole", -"Tabuµka '%-.64s' je plná", -"Neznáma znaková sada: '%-.64s'", -"Príli¹ mnoho tabuliek. MySQL mô¾e pou¾i» len %d v JOIN-e", -"Príli¹ mnoho polí", -"Riadok je príli¹ veµký. Maximálna veµkos» riadku, okrem 'BLOB', je %d. Musíte zmeni» niektoré polo¾ky na BLOB", -"Preteèenie zásobníku vlákna: pou¾ité: %ld z %ld. Pou¾ite 'mysqld -O thread_stack=#' k zadaniu väè¹ieho zásobníka", -"V OUTER JOIN bol nájdený krí¾ový odkaz. Skontrolujte podmienky ON", -"Pole '%-.64s' je pou¾ité s UNIQUE alebo INDEX, ale nie je zadefinované ako NOT NULL", -"Nemô¾em naèíta» funkciu '%-.64s'", -"Nemô¾em inicializova» funkciu '%-.64s'; %-.80s", -"Neprípustné ¾iadne cesty k zdieµanej kni¾nici", -"Funkcia '%-.64s' u¾ existuje", -"Nemô¾em otvori» zdieµanú kni¾nicu '%-.64s' (chybový kód: %d %s)", -"Nemô¾em nájs» funkciu '%-.64s' v kni¾nici'", -"Funkcia '%-.64s' nie je definovaná", -"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", -"Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", -"You must have privileges to update tables in the mysql database to be able to change passwords for others", -"Can't find any matching row in the user table", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s", 
-"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE 
instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run STOP SLAVE first", -"This operation requires a running slave, configure slave and do START SLAVE", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' 
resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data 
than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/spanish/errmsg.txt b/sql/share/spanish/errmsg.txt deleted file mode 100644 index 8588d6e1cd4..00000000000 --- a/sql/share/spanish/errmsg.txt +++ /dev/null @@ -1,325 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Traduccion por Miguel Angel Fernandez Roiz -- LoboCom Sistemas, s.l. - From June 28, 2001 translated by Miguel Solorzano miguel@mysql.com */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NO", -"SI", -"No puedo crear archivo '%-.64s' (Error: %d)", -"No puedo crear tabla '%-.64s' (Error: %d)", -"No puedo crear base de datos '%-.64s' (Error: %d)", -"No puedo crear base de datos '%-.64s'; la base de datos ya existe", -"No puedo eliminar base de datos '%-.64s'; la base de datos no existe", -"Error eliminando la base de datos(no puedo borrar '%-.64s', error %d)", -"Error eliminando la base de datos (No puedo borrar directorio '%-.64s', error %d)", -"Error en el borrado de '%-.64s' (Error: %d)", -"No puedo leer el registro en la tabla del sistema", -"No puedo obtener el estado de '%-.64s' (Error: %d)", -"No puedo acceder al directorio (Error: %d)", -"No puedo bloquear archivo: (Error: %d)", -"No puedo abrir archivo: '%-.64s' (Error: %d)", -"No puedo encontrar archivo: '%-.64s' (Error: %d)", -"No puedo leer el directorio de '%-.64s' (Error: %d)", -"No puedo cambiar al directorio de '%-.64s' (Error: %d)", -"El registro ha cambiado desde la ultima lectura de la tabla '%-.64s'", -"Disco lleno (%s). 
Esperando para que se libere algo de espacio...", -"No puedo escribir, clave duplicada en la tabla '%-.64s'", -"Error en el cierre de '%-.64s' (Error: %d)", -"Error leyendo el fichero '%-.64s' (Error: %d)", -"Error en el renombrado de '%-.64s' a '%-.64s' (Error: %d)", -"Error escribiendo el archivo '%-.64s' (Error: %d)", -"'%-.64s' esta bloqueado contra cambios", -"Ordeancion cancelada", -"La vista '%-.64s' no existe para '%-.64s'", -"Error %d desde el manejador de la tabla", -"El manejador de la tabla de '%-.64s' no tiene esta opcion", -"No puedo encontrar el registro en '%-.64s'", -"Informacion erronea en el archivo: '%-.64s'", -"Clave de archivo erronea para la tabla: '%-.64s'; intente repararlo", -"Clave de archivo antigua para la tabla '%-.64s'; reparelo!", -"'%-.64s' es de solo lectura", -"Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)", -"Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion", -"Inesperado fin de ficheroU mientras leiamos el archivo '%-.64s' (Error: %d)", -"Demasiadas conexiones", -"Memoria/espacio de tranpaso insuficiente", -"No puedo obtener el nombre de maquina de tu direccion", -"Protocolo erroneo", -"Acceso negado para usuario: '%-.32s'@'%-.64s' para la base de datos '%-.64s'", -"Acceso negado para usuario: '%-.32s'@'%-.64s' (Usando clave: %s)", -"Base de datos no seleccionada", -"Comando desconocido", -"La columna '%-.64s' no puede ser nula", -"Base de datos desconocida '%-.64s'", -"La tabla '%-.64s' ya existe", -"Tabla '%-.64s' desconocida", -"La columna: '%-.64s' en %s es ambigua", -"Desconexion de servidor en proceso", -"La columna '%-.64s' en %s es desconocida", -"Usado '%-.64s' el cual no esta group by", -"No puedo agrupar por '%-.64s'", -"El estamento tiene funciones de suma y columnas en el mismo estamento", -"La columna con count no tiene valores para contar", -"El nombre del identificador '%-.64s' es demasiado grande", -"Nombre de columna duplicado '%-.64s'", 
-"Nombre de clave duplicado '%-.64s'", -"Entrada duplicada '%-.64s' para la clave %d", -"Especificador de columna erroneo para la columna '%-.64s'", -"%s cerca '%-.64s' en la linea %d", -"La query estaba vacia", -"Tabla/alias: '%-.64s' es no unica", -"Valor por defecto invalido para '%-.64s'", -"Multiples claves primarias definidas", -"Demasiadas claves primarias declaradas. Un maximo de %d claves son permitidas", -"Demasiadas partes de clave declaradas. Un maximo de %d partes son permitidas", -"Declaracion de clave demasiado larga. La maxima longitud de clave es %d", -"La columna clave '%-.64s' no existe en la tabla", -"La columna Blob '%-.64s' no puede ser usada en una declaracion de clave", -"Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar", -"Puede ser solamente un campo automatico y este debe ser definido como una clave", -"%s: preparado para conexiones", -"%s: Apagado normal\n", -"%s: Recibiendo signal %d. Abortando!\n", -"%s: Apagado completado\n", -"%s: Forzando a cerrar el thread %ld usuario: '%-.64s'\n", -"No puedo crear IP socket", -"La tabla '%-.64s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla", -"Los separadores de argumentos del campo no son los especificados. Comprueba el manual", -"No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '.", -"El archivo '%-.64s' debe estar en el directorio de la base de datos o ser de lectura por todos", -"El archivo '%-.64s' ya existe", -"Registros: %ld Borrados: %ld Saltados: %ld Peligros: %ld", -"Registros: %ld Duplicados: %ld", -"Parte de la clave es erronea. Una parte de la clave no es una cadena o la longitud usada es tan grande como la parte de la clave", -"No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo", -"No puedo ELIMINAR '%-.64s'. 
compuebe que el campo/clave existe", -"Registros: %ld Duplicados: %ld Peligros: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Identificador del thread: %lu desconocido", -"Tu no eres el propietario del thread%lu", -"No ha tablas usadas", -"Muchas strings para columna %s y SET", -"No puede crear un unico archivo log %s.(1-999)\n", -"Tabla '%-.64s' fue trabada con un READ lock y no puede ser actualizada", -"Tabla '%-.64s' no fue trabada con LOCK TABLES", -"Campo Blob '%-.64s' no puede tener valores patron", -"Nombre de base de datos ilegal '%-.64s'", -"Nombre de tabla ilegal '%-.64s'", -"El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto", -"Error desconocido", -"Procedimiento desconocido %s", -"Equivocado parametro count para procedimiento %s", -"Equivocados parametros para procedimiento %s", -"Tabla desconocida '%-.64s' in %s", -"Campo '%-.64s' especificado dos veces", -"Invalido uso de función en grupo", -"Tabla '%-.64s' usa una extensión que no existe en esta MySQL versión", -"Una tabla debe tener al menos 1 columna", -"La tabla '%-.64s' está llena", -"Juego de caracteres desconocido: '%-.64s'", -"Muchas tablas. MySQL solamente puede usar %d tablas en un join", -"Muchos campos", -"Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %d. Tu tienes que cambiar algunos campos para blob", -"Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. 
Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario", -"Dependencia cruzada encontrada en OUTER JOIN; examine su condición ON", -"Columna '%-.32s' es usada con UNIQUE o INDEX pero no está definida como NOT NULL", -"No puedo cargar función '%-.64s'", -"No puedo inicializar función '%-.64s'; %-.80s", -"No pasos permitidos para librarias conjugadas", -"Función '%-.64s' ya existe", -"No puedo abrir libraria conjugada '%-.64s' (errno: %d %s)", -"No puedo encontrar función '%-.64s' en libraria'", -"Función '%-.64s' no está definida", -"Servidor '%-.64s' está bloqueado por muchos errores de conexión. Desbloquear con 'mysqladmin flush-hosts'", -"Servidor '%-.64s' no está permitido para conectar con este servidor MySQL", -"Tu estás usando MySQL como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves", -"Tu debes de tener permiso para actualizar tablas en la base de datos mysql para cambiar las claves para otros", -"No puedo encontrar una línea correponsdiente en la tabla user", -"Líneas correspondientes: %ld Cambiadas: %ld Avisos: %ld", -"No puedo crear un nuevo thread (errno %d). Si tu está con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO", -"El número de columnas no corresponde al número en la línea %ld", -"No puedo reabrir tabla: '%-.64s", -"Invalido uso de valor NULL", -"Obtenido error '%-.64s' de regexp", -"Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con no GROUP columnas es ilegal si no hat la clausula GROUP BY", -"No existe permiso definido para usuario '%-.32s' en el servidor '%-.64s'", -"%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para tabla '%-.64s'", -"%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para columna '%-.64s' en la tabla '%-.64s'", -"Ilegal comando GRANT/REVOKE. 
Por favor consulte el manual para cuales permisos pueden ser usados.", -"El argumento para servidor o usuario para GRANT es demasiado grande", -"Tabla '%-.64s.%s' no existe", -"No existe tal permiso definido para usuario '%-.32s' en el servidor '%-.64s' en la tabla '%-.64s'", -"El comando usado no es permitido con esta versión de MySQL", -"Algo está equivocado en su sintax", -"Thread de inserción retarda no pudiendo bloquear para la tabla %-.64s", -"Muchos threads retardados en uso", -"Conexión abortada %ld para db: '%-.64s' usuario: '%-.64s' (%s)", -"Obtenido un paquete mayor que 'max_allowed_packet'", -"Obtenido un error de lectura de la conexión pipe", -"Obtenido un error de fcntl()", -"Obtenido paquetes desordenados", -"No puedo descomprimir paquetes de comunicación", -"Obtenido un error leyendo paquetes de comunicación", -"Obtenido timeout leyendo paquetes de comunicación", -"Obtenido un error de escribiendo paquetes de comunicación", -"Obtenido timeout escribiendo paquetes de comunicación", -"La string resultante es mayor que 'max_allowed_packet'", -"El tipo de tabla usada no permite soporte para columnas BLOB/TEXT", -"El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT", -"INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES", -"Incorrecto nombre de columna '%-.100s'", -"El manipulador de tabla usado no puede indexar columna '%-.64s'", -"Todas las tablas en la MERGE tabla no estan definidas identicamente", -"No puedo escribir, debido al único constraint, para tabla '%-.64s'", -"Columna BLOB column '%-.64s' usada en especificación de clave sin tamaño de la clave", -"Todas las partes de un PRIMARY KEY deben ser NOT NULL; Si necesitas NULL en una clave, use UNIQUE", -"Resultado compuesto de mas que una línea", -"Este tipo de tabla necesita de una primary key", -"Esta versión de MySQL no es compilada con soporte RAID", -"Tu estás usando modo de actualización segura y tentado actualizar una tabla sin un 
WHERE que usa una KEY columna", -"Clave '%-.64s' no existe en la tabla '%-.64s'", -"No puedo abrir tabla", -"El manipulador de la tabla no permite soporte para %s", -"No tienes el permiso para ejecutar este comando en una transición", -"Obtenido error %d durante COMMIT", -"Obtenido error %d durante ROLLBACK", -"Obtenido error %d durante FLUSH_LOGS", -"Obtenido error %d durante CHECKPOINT", -"Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: `%-.64s' (%-.64s)", -"El manipulador de tabla no soporta dump para tabla binaria", -"Binlog cerrado mientras tentaba el FLUSH MASTER", -"Falla reconstruyendo el indice de la tabla dumped '%-.64s'", -"Error del master: '%-.64s'", -"Error de red leyendo del master", -"Error de red escribiendo para el master", -"No puedo encontrar índice FULLTEXT correspondiendo a la lista de columnas", -"No puedo ejecutar el comando dado porque tienes tablas bloqueadas o una transición activa", -"Desconocida variable de sistema '%-.64s'", -"Tabla '%-.64s' está marcada como crashed y debe ser reparada", -"Tabla '%-.64s' está marcada como crashed y la última reparación (automactica?) falló", -"Aviso: Algunas tablas no transancionales no pueden tener rolled back", -"Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. 
Aumente esta variable mysqld y tente de nuevo", -"Esta operación no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE", -"Esta operación necesita el esclavo funcionando, configure esclavo y haga el START SLAVE", -"El servidor no está configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"No puedo crear el thread esclavo, verifique recursos del sistema", -"Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas", -"Tu solo debes usar expresiones constantes con SET", -"Tiempo de bloqueo de espera excedido", -"El número total de bloqueos excede el tamaño de bloqueo de la tabla", -"Bloqueos de actualización no pueden ser adqueridos durante una transición READ UNCOMMITTED", -"DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global", -"CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global", -"Argumentos errados para %s", -"'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios", -"Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos", -"Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición", -"El tipo de tabla usada no soporta índices FULLTEXT", -"No puede adicionar clave extranjera constraint", -"No puede adicionar una línea hijo: falla de clave extranjera constraint", -"No puede deletar una línea padre: falla de clave extranjera constraint", -"Error de coneccion a master: %-.128s", -"Error executando el query en master: %-.128s", -"Error de %s: %-.128s", -"Equivocado uso de %s y %s", -"El comando SELECT usado tiene diferente número de columnas", -"No puedo ejecutar el query porque usted tiene conflicto de traba de lectura", -"Mezla de transancional y no-transancional tablas está deshabilitada", -"Opción '%s' usada dos veces en el comando", -"Usuario 
'%-.64s' ha excedido el recurso '%s' (actual valor: %ld)", -"Acceso negado. Usted necesita el privilegio %-.128s para esta operación", -"Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL", -"Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL", -"Variable '%-.64s' no tiene un valor patrón", -"Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'", -"Tipo de argumento equivocado para variable '%-.64s'", -"Variable '%-.64s' solamente puede ser configurada, no leída", -"Equivocado uso/colocación de '%s'", -"Esta versión de MySQL no soporta todavia '%s'", -"Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log", -"Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla", -"Variable '%-.64s' es una %s variable", -"Equivocada definición de llave extranjera para '%-.64s': %s", -"Referencia de llave y referencia de tabla no coinciden", -"Operando debe tener %d columna(s)", -"Subconsulta retorna mas que 1 línea", -"Desconocido preparado comando handler (%.*s) dado para %s", -"Base de datos Help está corrupto o no existe", -"Cíclica referencia en subconsultas", -"Convirtiendo columna '%s' de %s para %s", -"Referencia '%-.64s' no soportada (%s)", -"Cada tabla derivada debe tener su propio alias", -"Select %u fué reducido durante optimización", -"Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s", -"Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MySQL", -"Todas las partes de una SPATIAL index deben ser NOT NULL", -"COLLATION '%s' no es válido para CHARACTER SET '%s'", -"Slave ya está funcionando", -"Slave ya fué parado", -"Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. 
(probablemente, extensión de datos descomprimidos fué corrompida)", -"ZLIB: No suficiente memoria", -"ZLIB: No suficiente espacio en el búfer de salida (probablemente, extensión de datos descomprimidos fué corrompida)", -"ZLIB: Dato de entrada fué corrompido", -"%d línea(s) fueron cortadas por GROUP_CONCAT()", -"Línea %ld no contiene datos para todas las columnas", -"Línea %ld fué truncada; La misma contine mas datos que las que existen en las columnas de entrada", -"Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %ld", -"Datos truncados, fuera de gama para columna '%s' en la línea %ld", -"Datos truncados para columna '%s' en la línea %ld", -"Usando motor de almacenamiento %s para tabla '%s'", -"Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'", -"No puede remover uno o mas de los usuarios solicitados", -"No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados", -"Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'", -"Ilegal mezcla de collations para operación '%s'", -"Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)", -"Collation desconocida: '%-.64s'", -"Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado", -"Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato", -"Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d", -"Parametro equivocado o combinación de parametros para START SLAVE UNTIL", -"Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave", -"SQL thread no es inicializado tal que opciones UNTIL son ignoradas", -"Nombre de índice 
incorrecto '%-.100s'", -"Nombre de catalog incorrecto '%-.100s'", -"Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu", -"Columna '%-.64s' no puede ser parte de FULLTEXT index", -"Desconocida key cache '%-.100s'", -"MySQL esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opción para este derecho funcionar", -"Desconocido motor de tabla '%s'", -"'%s' está desaprobado, use '%s' en su lugar", -"La tabla destino %-.100s del %s no es actualizable", -"El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando", -"El servidor MySQL está rodando con la opción %s tal que no puede ejecutar este comando", -"Columna '%-.100s' tiene valor doblado '%-.64s' en %s" -"Equivocado truncado %-.32s valor: '%-.128s'" -"Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula" -"Inválido ON UPDATE cláusula para campo '%-.64s'", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt deleted file mode 100644 index b52ef77fbe9..00000000000 --- a/sql/share/swedish/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NO", -"YES", -"Kan inte skapa filen '%-.64s' (Felkod: %d)", -"Kan inte skapa tabellen '%-.64s' (Felkod: %d)", -"Kan inte skapa databasen '%-.64s' (Felkod: %d)", -"Databasen '%-.64s' existerar redan", -"Kan inte radera databasen '%-.64s'; databasen finns inte", -"Fel vid radering av databasen (Kan inte radera '%-.64s'. Felkod: %d)", -"Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. Felkod: %d)", -"Kan inte radera filen '%-.64s' (Felkod: %d)", -"Hittar inte posten i systemregistret", -"Kan inte läsa filinformationen (stat) från '%-.64s' (Felkod: %d)", -"Kan inte inte läsa aktivt bibliotek. (Felkod: %d)", -"Kan inte låsa filen. (Felkod: %d)", -"Kan inte använda '%-.64s' (Felkod: %d)", -"Hittar inte filen '%-.64s' (Felkod: %d)", -"Kan inte läsa från bibliotek '%-.64s' (Felkod: %d)", -"Kan inte byta till '%-.64s' (Felkod: %d)", -"Posten har förändrats sedan den lästes i register '%-.64s'", -"Disken är full (%s). 
Väntar tills det finns ledigt utrymme...", -"Kan inte skriva, dubbel söknyckel i register '%-.64s'", -"Fick fel vid stängning av '%-.64s' (Felkod: %d)", -"Fick fel vid läsning av '%-.64s' (Felkod %d)", -"Kan inte byta namn från '%-.64s' till '%-.64s' (Felkod: %d)", -"Fick fel vid skrivning till '%-.64s' (Felkod %d)", -"'%-.64s' är låst mot användning", -"Sorteringen avbruten", -"Formulär '%-.64s' finns inte i '%-.64s'", -"Fick felkod %d från databashanteraren", -"Registrets databas har inte denna facilitet", -"Hittar inte posten", -"Felaktig fil: '%-.64s'", -"Fatalt fel vid hantering av register '%-.64s'; kör en reparation", -"Gammal nyckelfil '%-.64s'; reparera registret", -"'%-.64s' är skyddad mot förändring", -"Oväntat slut på minnet, starta om programmet och försök på nytt (Behövde %d bytes)", -"Sorteringsbufferten räcker inte till. Kontrollera startparametrarna", -"Oväntat filslut vid läsning från '%-.64s' (Felkod: %d)", -"För många anslutningar", -"Fick slut på minnet. Kontrollera om mysqld eller någon annan process använder allt tillgängligt minne. 
Om inte, försök använda 'ulimit' eller allokera mera swap", -"Kan inte hitta 'hostname' för din adress", -"Fel vid initiering av kommunikationen med klienten", -"Användare '%-.32s'@'%-.64s' är ej berättigad att använda databasen %-.64s", -"Användare '%-.32s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)", -"Ingen databas i användning", -"Okänt commando", -"Kolumn '%-.64s' får inte vara NULL", -"Okänd databas: '%-.64s'", -"Tabellen '%-.64s' finns redan", -"Okänd tabell '%-.180s'", -"Kolumn '%-.64s' i %s är inte unik", -"Servern går nu ned", -"Okänd kolumn '%-.64s' i %s", -"'%-.64s' finns inte i GROUP BY", -"Kan inte använda GROUP BY med '%-.64s'", -"Kommandot har både sum functions och enkla funktioner", -"Antalet kolumner motsvarar inte antalet värden", -"Kolumnnamn '%-.64s' är för långt", -"Kolumnnamn '%-.64s finns flera gånger", -"Nyckelnamn '%-.64s' finns flera gånger", -"Dubbel nyckel '%-.64s' för nyckel %d", -"Felaktigt kolumntyp för kolumn '%-.64s'", -"%s nära '%-.64s' på rad %d", -"Frågan var tom", -"Icke unikt tabell/alias: '%-.64s'", -"Ogiltigt DEFAULT värde för '%-.64s'", -"Flera PRIMARY KEY använda", -"För många nycklar använda. Man får ha högst %d nycklar", -"För många nyckeldelar använda. Man får ha högst %d nyckeldelar", -"För lång nyckel. Högsta tillåtna nyckellängd är %d", -"Nyckelkolumn '%-.64s' finns inte", -"En BLOB '%-.64s' kan inte vara nyckel med den använda tabelltypen", -"För stor kolumnlängd angiven för '%-.64s' (max= %d). Använd en BLOB instället", -"Det får finnas endast ett AUTO_INCREMENT-fält och detta måste vara en nyckel", -"%s: klar att ta emot klienter", -"%s: Normal avslutning\n", -"%s: Fick signal %d. Avslutar!\n", -"%s: Avslutning klar\n", -"%s: Stänger av tråd %ld; användare: '%-.64s'\n", -"Kan inte skapa IP-socket", -"Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen", -"Fältseparatorerna är vad som förväntades. 
Kontrollera mot manualen", -"Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'", -"Textfilen '%.64s' måste finnas i databasbiblioteket eller vara läsbar för alla", -"Filen '%-.64s' existerar redan", -"Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld", -"Rader: %ld Dubletter: %ld", -"Felaktig delnyckel. Nyckeldelen är inte en sträng eller den angivna längden är längre än kolumnlängden", -"Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället", -"Kan inte ta bort '%-.64s'. Kontrollera att fältet/nyckel finns", -"Rader: %ld Dubletter: %ld Varningar: %ld", -"INSERT-table '%-.64s' får inte finnas i FROM tabell-listan", -"Finns ingen tråd med id %lu", -"Du är inte ägare till tråd %lu", -"Inga tabeller angivna", -"För många alternativ till kolumn %s för SET", -"Kan inte generera ett unikt filnamn %s.(1-999)\n", -"Tabell '%-.64s' kan inte uppdateras emedan den är låst för läsning", -"Tabell '%-.64s' är inte låst med LOCK TABLES", -"BLOB fält '%-.64s' kan inte ha ett DEFAULT-värde", -"Felaktigt databasnamn '%-.64s'", -"Felaktigt tabellnamn '%-.64s'", -"Den angivna frågan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins", -"Oidentifierat fel", -"Okänd procedur: %s", -"Felaktigt antal parametrar till procedur %s", -"Felaktiga parametrar till procedur %s", -"Okänd tabell '%-.64s' i '%-.64s'", -"Fält '%-.64s' är redan använt", -"Felaktig användning av SQL grupp function", -"Tabell '%-.64s' har en extension som inte finns i denna version av MySQL", -"Tabeller måste ha minst 1 kolumn", -"Tabellen '%-.64s' är full", -"Okänd teckenuppsättning: '%-.64s'", -"För många tabeller. MySQL can ha högst %d tabeller i en och samma join", -"För många fält", -"För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %d. Ändra några av dina fält till BLOB", -"Trådstacken tog slut: Har använt %ld av %ld bytes. 
Använd 'mysqld -O thread_stack=#' ifall du behöver en större stack", -"Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket", -"Kolumn '%-.32s' är använd med UNIQUE eller INDEX men är inte definerad med NOT NULL", -"Kan inte ladda funktionen '%-.64s'", -"Kan inte initialisera funktionen '%-.64s'; '%-.80s'", -"Man får inte ange sökväg för dynamiska bibliotek", -"Funktionen '%-.64s' finns redan", -"Kan inte öppna det dynamiska biblioteket '%-.64s' (Felkod: %d %s)", -"Hittar inte funktionen '%-.64s' in det dynamiska biblioteket", -"Funktionen '%-.64s' är inte definierad", -"Denna dator, '%-.64s', är blockerad pga många felaktig paket. Gör 'mysqladmin flush-hosts' för att ta bort alla blockeringarna", -"Denna dator, '%-.64s', har inte privileger att använda denna MySQL server", -"Du använder MySQL som en anonym användare och som sådan får du inte ändra ditt lösenord", -"För att ändra lösenord för andra måste du ha rättigheter att uppdatera mysql-databasen", -"Hittade inte användaren i 'user'-tabellen", -"Rader: %ld Uppdaterade: %ld Varningar: %ld", -"Kan inte skapa en ny tråd (errno %d)", -"Antalet kolumner motsvarar inte antalet värden på rad: %ld", -"Kunde inte stänga och öppna tabell '%-.64s", -"Felaktig använding av NULL", -"Fick fel '%-.64s' från REGEXP", -"Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) 
och fält i en fråga om man inte har en GROUP BY-del", -"Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s'", -"%-.16s ej tillåtet för '%-.32s'@'%-.64s' för tabell '%-.64s'", -"%-.16s ej tillåtet för '%-.32s'@'%-.64s' för kolumn '%-.64s' i tabell '%-.64s'", -"Felaktigt GRANT-privilegium använt", -"Felaktigt maskinnamn eller användarnamn använt med GRANT", -"Det finns ingen tabell som heter '%-.64s.%s'", -"Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s' för tabell '%-.64s'", -"Du kan inte använda detta kommando med denna MySQL version", -"Du har något fel i din syntax", -"DELAYED INSERT-tråden kunde inte låsa tabell '%-.64s'", -"Det finns redan 'max_delayed_threads' trådar i använding", -"Avbröt länken för tråd %ld till db '%-.64s', användare '%-.64s' (%s)", -"Kommunkationspaketet är större än 'max_allowed_packet'", -"Fick läsfel från klienten vid läsning från 'PIPE'", -"Fick fatalt fel från 'fcntl()'", -"Kommunikationspaketen kom i fel ordning", -"Kunde inte packa up kommunikationspaketet", -"Fick ett fel vid läsning från klienten", -"Fick 'timeout' vid läsning från klienten", -"Fick ett fel vid skrivning till klienten", -"Fick 'timeout' vid skrivning till klienten", -"Resultatsträngen är längre än 'max_allowed_packet'", -"Den använda tabelltypen kan inte hantera BLOB/TEXT-kolumner", -"Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner", -"INSERT DELAYED kan inte användas med tabell '%-.64s', emedan den är låst med LOCK TABLES", -"Felaktigt kolumnnamn '%-.100s'", -"Den använda tabelltypen kan inte indexera kolumn '%-.64s'", -"Tabellerna i MERGE-tabellen är inte identiskt definierade", -"Kan inte skriva till tabell '%-.64s'; UNIQUE-test", -"Du har inte angett någon nyckellängd för BLOB '%-.64s'", -"Alla delar av en PRIMARY KEY måste vara NOT NULL; Om du vill ha en nyckel med NULL, använd UNIQUE istället", -"Resultet bestod av mera än en rad", -"Denna tabelltyp kräver en PRIMARY KEY", -"Denna version av 
MySQL är inte kompilerad med RAID", -"Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel", -"Nyckel '%-.64s' finns inte in tabell '%-.64s'", -"Kan inte öppna tabellen", -"Tabellhanteraren för denna tabell kan inte göra %s", -"Du får inte utföra detta kommando i en transaktion", -"Fick fel %d vid COMMIT", -"Fick fel %d vid ROLLBACK", -"Fick fel %d vid FLUSH_LOGS", -"Fick fel %d vid CHECKPOINT", -"Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%-.64s)", -"Tabellhanteraren klarar inte en binär kopiering av tabellen", -"Binärloggen stängdes medan FLUSH MASTER utfördes", -"Failed rebuilding the index of dumped table '%-.64s'", -"Fick en master: '%-.64s'", -"Fick nätverksfel vid läsning från master", -"Fick nätverksfel vid skrivning till master", -"Hittar inte ett FULLTEXT-index i kolumnlistan", -"Kan inte utföra kommandot emedan du har en låst tabell eller an aktiv transaktion", -"Okänd systemvariabel: '%-.64s'", -"Tabell '%-.64s' är trasig och bör repareras med REPAIR TABLE", -"Tabell '%-.64s' är trasig och senast (automatiska?) reparation misslyckades", -"Warning: Några icke transaktionella tabeller kunde inte återställas vid ROLLBACK", -"Transaktionen krävde mera än 'max_binlog_cache_size' minne. Öka denna mysqld-variabel och försök på nytt", -"Denna operation kan inte göras under replikering; Gör STOP SLAVE först", -"Denna operation kan endast göras under replikering; Konfigurera slaven och gör START SLAVE", -"Servern är inte konfigurerade som en replikationsslav. Ändra konfigurationsfilen eller gör CHANGE MASTER TO", -"Kunde inte initialisera replikationsstrukturerna. 
See MySQL fel fil för mera information", -"Kunde inte starta en tråd för replikering", -"Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar", -"Man kan endast använda konstantuttryck med SET", -"Fick inte ett lås i tid ; Försök att starta om transaktionen", -"Antal lås överskrider antalet reserverade lås", -"Updateringslås kan inte göras när man använder READ UNCOMMITTED", -"DROP DATABASE är inte tillåtet när man har ett globalt läslås", -"CREATE DATABASE är inte tillåtet när man har ett globalt läslås", -"Felaktiga argument till %s", -"'%-.32s'@'%-.64s' har inte rättighet att skapa nya användare", -"Felaktig tabelldefinition; alla tabeller i en MERGE-tabell måste vara i samma databas", -"Fick 'DEADLOCK' vid låsförsök av block/rad. Försök att starta om transaktionen", -"Tabelltypen har inte hantering av FULLTEXT-index", -"Kan inte lägga till 'FOREIGN KEY constraint'", -"FOREIGN KEY-konflikt: Kan inte skriva barn", -"FOREIGN KEY-konflikt: Kan inte radera fader", -"Fick fel vid anslutning till master: %-.128s", -"Fick fel vid utförande av command på mastern: %-.128s", -"Fick fel vid utförande av %s: %-.128s", -"Felaktig använding av %s and %s", -"SELECT-kommandona har olika antal kolumner", -"Kan inte utföra kommandot emedan du har ett READ-lås", -"Blandning av transaktionella och icke-transaktionella tabeller är inaktiverat", -"Option '%s' användes två gånger", -"Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)", -"Du har inte privlegiet '%-.128s' som behövs för denna operation", -"Variabel '%-.64s' är en SESSION variabel och kan inte ändrad med SET GLOBAL", -"Variabel '%-.64s' är en GLOBAL variabel och bör sättas med SET GLOBAL", -"Variabel '%-.64s' har inte ett DEFAULT-värde", -"Variabel '%-.64s' kan inte sättas till '%-.64s'", -"Fel typ av argument till variabel '%-.64s'", -"Variabeln '%-.64s' kan endast sättas, inte läsas", -"Fel använding/placering av '%s'", -"Denna version av MySQL kan ännu inte utföra '%s'", -"Fick fatalt 
fel %d: '%-.128s' från master vid läsning av binärloggen", -"Slav SQL tråden ignorerade frågan pga en replicate-*-table regel", -"Variabel '%-.64s' är av typ %s", -"Felaktig FOREIGN KEY-definition för '%-.64s': %s", -"Nyckelreferensen och tabellreferensen stämmer inte överens", -"Operand should contain %d column(s)", -"Subquery returnerade mer än 1 rad", -"Okänd PREPARED STATEMENT id (%.*s) var given till %s", -"Hjälpdatabasen finns inte eller är skadad", -"Cyklisk referens i subqueries", -"Konvertar kolumn '%s' från %s till %s", -"Referens '%-.64s' stöds inte (%s)", -"Varje 'derived table' måste ha sitt eget alias", -"Select %u reducerades vid optimiering", -"Tabell '%-.64s' från en SELECT kan inte användas i %-.32s", -"Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet.", -"Alla delar av en SPATIAL index måste vara NOT NULL", -"COLLATION '%s' är inte tillåtet för CHARACTER SET '%s'", -"Slaven har redan startat", -"Slaven har redan stoppat", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d rad(er) kapades av GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Använder handler %s för tabell '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", 
-"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Felaktigt index namn '%-.100s'", -"Felaktigt katalog namn '%-.100s'", -"Storleken av "Query cache" kunde inte sättas till %lu, ny storlek är %lu", -"Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index", -"Okänd nyckel cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"Tabel %-.100s använd med '%s' är inte uppdateringsbar", -"'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad", -"MySQL är startad med --skip-grant-tables. 
Pga av detta kan du inte använda detta kommando", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Fick felkod %d '%-.100s' från %s", -"Fick tilfällig felkod %d '%-.100s' från %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/share/ukrainian/errmsg.txt b/sql/share/ukrainian/errmsg.txt deleted file mode 100644 index feb23ada3dd..00000000000 --- a/sql/share/ukrainian/errmsg.txt +++ /dev/null @@ -1,327 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - * Ukrainian translation by Roman Festchook <roma@orta.zt.ua> - * Encoding: KOI8-U - * Version: 13/09/2001 mysql-3.23.41 - */ - -character-set=koi8u - -"hashchk", -"isamchk", -"î¶", -"ôáë", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÔÁÂÌÉÃÀ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ¦ÓÎÕ¤", -"îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ÎÅ ¦ÓÎÕ¤", -"îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s', ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÔÅËÕ '%-.64s', ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÚÞÉÔÁÔÉ ÚÁÐÉÓ Ú ÓÉÓÔÅÍÎϧ ÔÁÂÌÉæ", -"îÅ ÍÏÖÕ ÏÔÒÉÍÁÔÉ ÓÔÁÔÕÓ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ÒÏÂÏÞÕ ÔÅËÕ (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÚÁÂÌÏËÕ×ÁÔÉ ÆÁÊÌ (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÐÅÒÅÊÔÉ Õ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"úÁÐÉÓ ÂÕÌÏ ÚͦÎÅÎÏ Ú ÞÁÓÕ ÏÓÔÁÎÎØÏÇÏ ÞÉÔÁÎÎÑ Ú ÔÁÂÌÉæ '%-.64s'", -"äÉÓË ÚÁÐÏ×ÎÅÎÉÊ (%s). 
÷ÉÞÉËÕÀ, ÄÏËÉ Ú×¦ÌØÎÉÔØÓÑ ÔÒÏÈÉ Í¦ÓÃÑ...", -"îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ, ÄÕÂÌÀÀÞÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉæ '%-.64s'", -"îÅ ÍÏÖÕ ÚÁËÒÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÐÅÒÅÊÍÅÎÕ×ÁÔÉ '%-.64s' Õ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"'%-.64s' ÚÁÂÌÏËÏ×ÁÎÉÊ ÎÁ ×ÎÅÓÅÎÎÑ ÚͦÎ", -"óÏÒÔÕ×ÁÎÎÑ ÐÅÒÅÒ×ÁÎÏ", -"÷ÉÇÌÑÄ '%-.64s' ÎÅ ¦ÓÎÕ¤ ÄÌÑ '%-.64s'", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d ×¦Ä ÄÅÓËÒÉÐÔÏÒÁ ÔÁÂÌÉæ", -"äÅÓËÒÉÐÔÏÒ ÔÁÂÌÉæ '%-.64s' ÎÅ ÍÁ¤ 椧 ×ÌÁÓÔÉ×ÏÓÔ¦", -"îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ Õ '%-.64s'", -"èÉÂÎÁ ¦ÎÆÏÒÍÁÃ¦Ñ Õ ÆÁÊ̦: '%-.64s'", -"èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.64s'; óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ", -"óÔÁÒÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ '%-.64s'; ÷¦ÄÎÏ×¦ÔØ ÊÏÇÏ!", -"ôÁÂÌÉÃÑ '%-.64s' Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ", -"âÒÁË ÐÁÍ'ÑÔ¦. òÅÓÔÁÒÔÕÊÔÅ ÓÅÒ×ÅÒ ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ (ÐÏÔÒ¦ÂÎÏ %d ÂÁÊÔ¦×)", -"âÒÁË ÐÁÍ'ÑÔ¦ ÄÌÑ ÓÏÒÔÕ×ÁÎÎÑ. ôÒÅÂÁ ÚÂ¦ÌØÛÉÔÉ ÒÏÚÍ¦Ò ÂÕÆÅÒÁ ÓÏÒÔÕ×ÁÎÎÑ Õ ÓÅÒ×ÅÒÁ", -"èÉÂÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"úÁÂÁÇÁÔÏ Ú'¤ÄÎÁÎØ", -"âÒÁË ÐÁÍ'ÑÔ¦; ðÅÒÅצÒÔÅ ÞÉ mysqld ÁÂÏ ÑË¦ÓØ ¦ÎÛ¦ ÐÒÏÃÅÓÉ ×ÉËÏÒÉÓÔÏ×ÕÀÔØ ÕÓÀ ÄÏÓÔÕÐÎÕ ÐÁÍ'ÑÔØ. 
ñË Î¦, ÔÏ ×É ÍÏÖÅÔÅ ÓËÏÒÉÓÔÁÔÉÓÑ 'ulimit', ÁÂÉ ÄÏÚ×ÏÌÉÔÉ mysqld ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ Â¦ÌØÛÅ ÐÁÍ'ÑÔ¦ ÁÂÏ ×É ÍÏÖÅÔÅ ÄÏÄÁÔÉ Â¦ÌØÛŠͦÓÃÑ Ð¦Ä Ó×ÁÐ", -"îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ¦Í'Ñ ÈÏÓÔÕ ÄÌÑ ×ÁÛϧ ÁÄÒÅÓÉ", -"îÅצÒÎÁ ÕÓÔÁÎÏ×ËÁ Ú×'ÑÚËÕ", -"äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ '%-.64s'", -"äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' (÷ÉËÏÒÉÓÔÁÎÏ ÐÁÒÏÌØ: %s)", -"âÁÚÕ ÄÁÎÎÉÈ ÎÅ ×ÉÂÒÁÎÏ", -"îÅצÄÏÍÁ ËÏÍÁÎÄÁ", -"óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÎÕÌØÏ×ÉÍ", -"îÅצÄÏÍÁ ÂÁÚÁ ÄÁÎÎÉÈ '%-.64s'", -"ôÁÂÌÉÃÑ '%-.64s' ×ÖÅ ¦ÓÎÕ¤", -"îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.180s'", -"óÔÏ×ÂÅÃØ '%-.64s' Õ %-.64s ×ÉÚÎÁÞÅÎÉÊ ÎÅÏÄÎÏÚÎÁÞÎÏ", -"úÁ×ÅÒÛÕ¤ÔØÓÑ ÒÁÂÏÔÁ ÓÅÒ×ÅÒÁ", -"îÅצÄÏÍÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' Õ '%-.64s'", -"'%-.64s' ÎÅ ¤ Õ GROUP BY", -"îÅ ÍÏÖÕ ÇÒÕÐÕ×ÁÔÉ ÐÏ '%-.64s'", -"õ ×ÉÒÁÚ¦ ×ÉËÏÒÉÓÔÁÎÏ Ð¦ÄÓÕÍÏ×ÕÀÞ¦ ÆÕÎËæ§ ÐÏÒÑÄ Ú ¦ÍÅÎÁÍÉ ÓÔÏ×Âæ×", -"ë¦ÌØË¦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØË¦ÓÔÀ ÚÎÁÞÅÎØ", -"¶Í'Ñ ¦ÄÅÎÔÉÆ¦ËÁÔÏÒÁ '%-.100s' ÚÁÄÏ×ÇÅ", -"äÕÂÌÀÀÞÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.64s'", -"äÕÂÌÀÀÞÅ ¦Í'Ñ ËÌÀÞÁ '%-.64s'", -"äÕÂÌÀÀÞÉÊ ÚÁÐÉÓ '%-.64s' ÄÌÑ ËÌÀÞÁ %d", -"îÅצÒÎÉÊ ÓÐÅÃÉÆ¦ËÁÔÏÒ ÓÔÏ×ÂÃÑ '%-.64s'", -"%s ¦ÌÑ '%-.80s' × ÓÔÒÏæ %d", -"ðÕÓÔÉÊ ÚÁÐÉÔ", -"îÅÕΦËÁÌØÎÁ ÔÁÂÌÉÃÑ/ÐÓÅ×ÄÏΦÍ: '%-.64s'", -"îÅצÒÎÅ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ ÄÌÑ '%-.64s'", -"ðÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ ×ÉÚÎÁÞÅÎÏ ÎÅÏÄÎÏÒÁÚÏ×Ï", -"úÁÂÁÇÁÔÏ ËÌÀÞ¦× ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎÅ Â¦ÌØÛÅ %d ËÌÀÞ¦×", -"úÁÂÁÇÁÔÏ ÞÁÓÔÉÎ ËÌÀÞÁ ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎÅ Â¦ÌØÛÅ %d ÞÁÓÔÉÎ", -"úÁÚÎÁÞÅÎÉÊ ËÌÀÞ ÚÁÄÏ×ÇÉÊ. îÁÊÂ¦ÌØÛÁ ÄÏ×ÖÉÎÁ ËÌÀÞÁ %d ÂÁÊÔ¦×", -"ëÌÀÞÏ×ÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ¦ÓÎÕ¤ Õ ÔÁÂÌÉæ", -"BLOB ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ × ÃØÏÍÕ ÔÉЦ ÔÁÂÌÉæ", -"úÁÄÏ×ÇÁ ÄÏ×ÖÉÎÁ ÓÔÏ×ÂÃÑ '%-.64s' (max = %d). ÷ÉËÏÒÉÓÔÁÊÔÅ ÔÉÐ BLOB", -"îÅצÒÎÅ ×ÉÚÎÁÞÅÎÎÑ ÔÁÂÌÉæ; íÏÖÅ ÂÕÔÉ ÌÉÛÅ ÏÄÉÎ Á×ÔÏÍÁÔÉÞÎÉÊ ÓÔÏ×ÂÅÃØ, ÝÏ ÐÏ×ÉÎÅÎ ÂÕÔÉ ×ÉÚÎÁÞÅÎÉÊ ÑË ËÌÀÞ", -"%s: çÏÔÏ×ÉÊ ÄÌÑ Ú'¤ÄÎÁÎØ!", -"%s: îÏÒÍÁÌØÎÅ ÚÁ×ÅÒÛÅÎÎÑ\n", -"%s: ïÔÒÉÍÁÎÏ ÓÉÇÎÁÌ %d. 
ðÅÒÅÒÉ×ÁÀÓØ!\n", -"%s: òÏÂÏÔÕ ÚÁ×ÅÒÛÅÎÏ\n", -"%s: ðÒÉÓËÏÒÀÀ ÚÁËÒÉÔÔÑ Ç¦ÌËÉ %ld ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'\n", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ IP ÒÏÚ'¤Í", -"ôÁÂÌÉÃÑ '%-.64s' ÍÁ¤ ¦ÎÄÅËÓ, ÝÏ ÎÅ ÓЦ×ÐÁÄÁ¤ Ú ×ËÁÚÁÎÎÉÍ Õ CREATE INDEX. óÔ×ÏÒ¦ÔØ ÔÁÂÌÉÃÀ ÚÎÏ×Õ", -"èÉÂÎÉÊ ÒÏÚĦÌÀ×ÁÞ ÐÏ̦×. ðÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ", -"îÅ ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÓÔÁÌÕ ÄÏ×ÖÉÎÕ ÓÔÒÏËÉ Ú BLOB. úËÏÒÉÓÔÁÊÔÅÓÑ 'fields terminated by'", -"æÁÊÌ '%-.64s' ÐÏ×ÉÎÅÎ ÂÕÔÉ Õ ÔÅæ ÂÁÚÉ ÄÁÎÎÉÈ ÁÂÏ ÍÁÔÉ ×ÓÔÁÎÏ×ÌÅÎÅ ÐÒÁ×Ï ÎÁ ÞÉÔÁÎÎÑ ÄÌÑ ÕÓ¦È", -"æÁÊÌ '%-.80s' ×ÖÅ ¦ÓÎÕ¤", -"úÁÐÉÓ¦×: %ld ÷ÉÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld", -"úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld", -"îÅצÒÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ. ÷ÉËÏÒÉÓÔÁÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ ÎÅ ¤ ÓÔÒÏËÏÀ, ÚÁÄÏ×ÇÁ ÁÂÏ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ÕΦËÁÌØÎÉÈ ÞÁÓÔÉÎ ËÌÀÞÅÊ", -"îÅ ÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ×Ó¦ ÓÔÏ×Âæ ÚÁ ÄÏÐÏÍÏÇÏÀ ALTER TABLE. äÌÑ ÃØÏÇÏ ÓËÏÒÉÓÔÁÊÔÅÓÑ DROP TABLE", -"îÅ ÍÏÖÕ DROP '%-.64s'. ðÅÒÅצÒÔÅ, ÞÉ ÃÅÊ ÓÔÏ×ÂÅÃØ/ËÌÀÞ ¦ÓÎÕ¤", -"úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld úÁÓÔÅÒÅÖÅÎØ: %ld", -"ôÁÂÌÉÃÑ '%-.64s' ÝÏ ÚͦÎÀ¤ÔØÓÑ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ ÐÅÒÅ̦ËÕ ÔÁÂÌÉÃØ FROM", -"îÅצÄÏÍÉÊ ¦ÄÅÎÔÉÆ¦ËÁÔÏÒ Ç¦ÌËÉ: %lu", -"÷É ÎÅ ×ÏÌÏÄÁÒ Ç¦ÌËÉ %lu", -"îÅ ×ÉËÏÒÉÓÔÁÎÏ ÔÁÂÌÉÃØ", -"úÁÂÁÇÁÔÏ ÓÔÒÏË ÄÌÑ ÓÔÏ×ÂÃÑ %-.64s ÔÁ SET", -"îÅ ÍÏÖÕ ÚÇÅÎÅÒÕ×ÁÔÉ ÕΦËÁÌØÎÅ ¦Í'Ñ log-ÆÁÊÌÕ %-.64s.(1-999)\n", -"ôÁÂÌÉÃÀ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ, ÔÏÍÕ §§ ÎÅ ÍÏÖÎÁ ÏÎÏ×ÉÔÉ", -"ôÁÂÌÉÃÀ '%-.64s' ÎÅ ÂÕÌÏ ÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES", -"óÔÏ×ÂÅÃØ BLOB '%-.64s' ÎÅ ÍÏÖÅ ÍÁÔÉ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ", -"îÅצÒÎÅ ¦Í'Ñ ÂÁÚÉ ÄÁÎÎÉÈ '%-.100s'", -"îÅצÒÎÅ ¦Í'Ñ ÔÁÂÌÉæ '%-.100s'", -"úÁÐÉÔÕ SELECT ÐÏÔÒ¦ÂÎÏ ÏÂÒÏÂÉÔÉ ÂÁÇÁÔÏ ÚÁÐÉÓ¦×, ÝÏ, ÐÅ×ÎÅ, ÚÁÊÍÅ ÄÕÖÅ ÂÁÇÁÔÏ ÞÁÓÕ. 
ðÅÒÅצÒÔÅ ×ÁÛÅ WHERE ÔÁ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ SET SQL_BIG_SELECTS=1, ÑËÝÏ ÃÅÊ ÚÁÐÉÔ SELECT ¤ צÒÎÉÍ", -"îÅצÄÏÍÁ ÐÏÍÉÌËÁ", -"îÅצÄÏÍÁ ÐÒÏÃÅÄÕÒÁ '%-.64s'", -"èÉÂÎÁ Ë¦ÌØË¦ÓÔØ ÐÁÒÁÍÅÔÒ¦× ÐÒÏÃÅÄÕÒÉ '%-.64s'", -"èÉÂÎÉÊ ÐÁÒÁÍÅÔÅÒ ÐÒÏÃÅÄÕÒÉ '%-.64s'", -"îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.64s' Õ %-.32s", -"óÔÏ×ÂÅÃØ '%-.64s' ÚÁÚÎÁÞÅÎÏ Äצަ", -"èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÆÕÎËæ§ ÇÒÕÐÕ×ÁÎÎÑ", -"ôÁÂÌÉÃÑ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ ÒÏÚÛÉÒÅÎÎÑ, ÝÏ ÎÅ ¦ÓÎÕ¤ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL", -"ôÁÂÌÉÃÑ ÐÏ×ÉÎÎÁ ÍÁÔÉ ÈÏÞÁ ÏÄÉÎ ÓÔÏ×ÂÅÃØ", -"ôÁÂÌÉÃÑ '%-.64s' ÚÁÐÏ×ÎÅÎÁ", -"îÅצÄÏÍÁ ËÏÄÏ×Á ÔÁÂÌÉÃÑ: '%-.64s'", -"úÁÂÁÇÁÔÏ ÔÁÂÌÉÃØ. MySQL ÍÏÖÅ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ %d ÔÁÂÌÉÃØ Õ ÏÂ'¤ÄÎÁÎΦ", -"úÁÂÁÇÁÔÏ ÓÔÏ×Âæ×", -"úÁÄÏ×ÇÁ ÓÔÒÏËÁ. îÁÊÂ¦ÌØÛÏÀ ÄÏ×ÖÉÎÏÀ ÓÔÒÏËÉ, ÎÅ ÒÁÈÕÀÞÉ BLOB, ¤ %d. ÷ÁÍ ÐÏÔÒ¦ÂÎÏ ÐÒÉ×ÅÓÔÉ ÄÅÑ˦ ÓÔÏ×Âæ ÄÏ ÔÉÐÕ BLOB", -"óÔÅË Ç¦ÌÏË ÐÅÒÅÐÏ×ÎÅÎÏ: ÷ÉËÏÒÉÓÔÁÎÏ: %ld Ú %ld. ÷ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqld -O thread_stack=#' ÁÂÉ ÚÁÚÎÁÞÉÔÉ Â¦ÌØÛÉÊ ÓÔÅË, ÑËÝÏ ÎÅÏÂȦÄÎÏ", -"ðÅÒÅÈÒÅÓÎÁ ÚÁÌÅÖΦÓÔØ Õ OUTER JOIN. ðÅÒÅצÒÔÅ ÕÍÏ×Õ ON", -"óÔÏ×ÂÅÃØ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ Ú UNIQUE ÁÂÏ INDEX, ÁÌÅ ÎÅ ×ÉÚÎÁÞÅÎÉÊ ÑË NOT NULL", -"îÅ ÍÏÖÕ ÚÁ×ÁÎÔÁÖÉÔÉ ÆÕÎËæÀ '%-.64s'", -"îÅ ÍÏÖÕ ¦Î¦Ã¦Á̦ÚÕ×ÁÔÉ ÆÕÎËæÀ '%-.64s'; %-.80s", -"îÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÐÕÔ¦ ÄÌÑ ÒÏÚĦÌÀ×ÁÎÉÈ Â¦Â̦ÏÔÅË", -"æÕÎËÃ¦Ñ '%-.64s' ×ÖÅ ¦ÓÎÕ¤", -"îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÒÏÚĦÌÀ×ÁÎÕ Â¦Â̦ÏÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d %-.64s)", -"îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÕÎËæÀ '%-.64s' Õ Â¦Â̦ÏÔÅæ'", -"æÕÎËæÀ '%-.64s' ÎÅ ×ÉÚÎÁÞÅÎÏ", -"èÏÓÔ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ú ÐÒÉÞÉÎÉ ×ÅÌÉËϧ Ë¦ÌØËÏÓÔ¦ ÐÏÍÉÌÏË Ú'¤ÄÎÁÎÎÑ. äÌÑ ÒÏÚÂÌÏËÕ×ÁÎÎÑ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqladmin flush-hosts'", -"èÏÓÔÕ '%-.64s' ÎÅ ÄÏ×ÏÌÅÎÏ Ú×'ÑÚÕ×ÁÔÉÓØ Ú ÃÉÍ ÓÅÒ×ÅÒÏÍ MySQL", -"÷É ×ÉËÏÒÉÓÔÏ×Õ¤ÔÅ MySQL ÑË ÁÎÏΦÍÎÉÊ ËÏÒÉÓÔÕ×ÁÞ, ÔÏÍÕ ×ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ÚͦÎÀ×ÁÔÉ ÐÁÒÏ̦", -"÷É ÐÏ×ÉΦ ÍÁÔÉ ÐÒÁ×Ï ÎÁ ÏÎÏ×ÌÅÎÎÑ ÔÁÂÌÉÃØ Õ ÂÁÚ¦ ÄÁÎÎÉÈ mysql, ÁÂÉ ÍÁÔÉ ÍÏÖÌÉצÓÔØ ÚͦÎÀ×ÁÔÉ ÐÁÒÏÌØ ¦ÎÛÉÍ", -"îÅ ÍÏÖÕ ÚÎÁÊÔÉ ×¦ÄÐÏצÄÎÉÈ ÚÁÐÉÓ¦× Õ ÔÁÂÌÉæ ËÏÒÉÓÔÕ×ÁÞÁ", -"úÁÐÉÓ¦× ×¦ÄÐÏצÄÁ¤: %ld úͦÎÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÎÏ×Õ Ç¦ÌËÕ (ÐÏÍÉÌËÁ %d). 
ñËÝÏ ×É ÎÅ ×ÉËÏÒÉÓÔÁÌÉ ÕÓÀ ÐÁÍ'ÑÔØ, ÔÏ ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÄÏ ×ÁÛϧ ïó - ÍÏÖÌÉ×Ï ÃÅ ÐÏÍÉÌËÁ ïó", -"ë¦ÌØË¦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØË¦ÓÔÀ ÚÎÁÞÅÎØ Õ ÓÔÒÏæ %ld", -"îÅ ÍÏÖÕ ÐÅÒÅצÄËÒÉÔÉ ÔÁÂÌÉÃÀ: '%-.64s'", -"èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÚÎÁÞÅÎÎÑ NULL", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ '%-.64s' ×¦Ä ÒÅÇÕÌÑÒÎÏÇÏ ×ÉÒÁÚÕ", -"úͦÛÕ×ÁÎÎÑ GROUP ÓÔÏ×ÂÃ¦× (MIN(),MAX(),COUNT()...) Ú ÎÅ GROUP ÓÔÏ×ÂÃÑÍÉ ¤ ÚÁÂÏÒÏÎÅÎÉÍ, ÑËÝÏ ÎÅ ÍÁ¤ GROUP BY", -"ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s'", -"%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' Õ ÔÁÂÌÉæ '%-.64s'", -"%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏ×ÂÃÑ '%-.64s' Õ ÔÁÂÌÉæ '%-.64s'", -"èÉÂÎÁ GRANT/REVOKE ËÏÍÁÎÄÁ; ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÓÔÏÓÏ×ÎÏ ÔÏÇÏ, Ñ˦ ÐÒÁ×Á ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ", -"áÒÇÕÍÅÎÔ host ÁÂÏ user ÄÌÑ GRANT ÚÁÄÏ×ÇÉÊ", -"ôÁÂÌÉÃÑ '%-.64s.%-.64s' ÎÅ ¦ÓÎÕ¤", -"ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s' ÄÌÑ ÔÁÂÌÉæ '%-.64s'", -"÷ÉËÏÒÉÓÔÏ×Õ×ÁÎÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL", -"õ ×ÁÓ ÐÏÍÉÌËÁ Õ ÓÉÎÔÁËÓÉÓ¦ SQL", -"ç¦ÌËÁ ÄÌÑ INSERT DELAYED ÎÅ ÍÏÖÅ ÏÔÒÉÍÁÔÉ ÂÌÏËÕ×ÁÎÎÑ ÄÌÑ ÔÁÂÌÉæ %-.64s", -"úÁÂÁÇÁÔÏ ÚÁÔÒÉÍÁÎÉÈ Ç¦ÌÏË ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ", -"ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s' (%-.64s)", -"ïÔÒÉÍÁÎÏ ÐÁËÅÔ Â¦ÌØÛÉÊ Î¦Ö 'max_allowed_packet'", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ Ú ËÏÍÕΦËÁæÊÎÏÇÏ ËÁÎÁÌÕ", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËËÕ ×¦Ä fcntl()", -"ïÔÒÉÍÁÎÏ ÐÁËÅÔÉ Õ ÎÅÎÁÌÅÖÎÏÍÕ ÐÏÒÑÄËÕ", -"îÅ ÍÏÖÕ ÄÅËÏÍÐÒÅÓÕ×ÁÔÉ ËÏÍÕΦËÁæÊÎÉÊ ÐÁËÅÔ", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", -"ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", -"ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", -"óÔÒÏËÁ ÒÅÚÕÌØÔÁÔÕ ÄÏ×ÛÁ Î¦Ö 'max_allowed_packet'", -"÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ BLOB/TEXT ÓÔÏ×Âæ", -"÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ AUTO_INCREMENT ÓÔÏ×Âæ", -"INSERT DELAYED ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÏ Ú ÔÁÂÌÉÃÅÀ '%-.64s', ÔÏÍÕ ÝÏ §§ ÚÁÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES", -"îÅצÒÎÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.100s'", -"÷ÉËÏÒÉÓÔÁÎÉÊ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ÎÅ 
ÍÏÖÅ ¦ÎÄÅËÓÕ×ÁÔÉ ÓÔÏ×ÂÅÃØ '%-.64s'", -"ôÁÂÌÉæ Õ MERGE TABLE ÍÁÀÔØ Ò¦ÚÎÕ ÓÔÒÕËÔÕÒÕ", -"îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÄÏ ÔÁÂÌÉæ '%-.64s', Ú ÐÒÉÞÉÎÉ ×ÉÍÏÇ ÕΦËÁÌØÎÏÓÔ¦", -"óÔÏ×ÂÅÃØ BLOB '%-.64s' ×ÉËÏÒÉÓÔÁÎÏ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ ÂÅÚ ×ËÁÚÁÎÎÑ ÄÏ×ÖÉÎÉ ËÌÀÞÁ", -"õÓ¦ ÞÁÓÔÉÎÉ PRIMARY KEY ÐÏ×ÉÎΦ ÂÕÔÉ NOT NULL; ñËÝÏ ×É ÐÏÔÒÅÂÕ¤ÔÅ NULL Õ ËÌÀÞ¦, ÓËÏÒÉÓÔÁÊÔÅÓÑ UNIQUE", -"òÅÚÕÌØÔÁÔ ÚÎÁÈÏÄÉÔØÓÑ Õ Â¦ÌØÛÅ Î¦Ö ÏÄÎ¦Ê ÓÔÒÏæ", -"ãÅÊ ÔÉÐ ÔÁÂÌÉæ ÐÏÔÒÅÂÕ¤ ÐÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ", -"ãÑ ×ÅÒÓ¦Ñ MySQL ÎÅ ÚËÏÍÐ¦ÌØÏ×ÁÎÁ Ú Ð¦ÄÔÒÉÍËÏÀ RAID", -"÷É Õ ÒÅÖÉͦ ÂÅÚÐÅÞÎÏÇÏ ÏÎÏ×ÌÅÎÎÑ ÔÁ ÎÁÍÁÇÁ¤ÔÅÓØ ÏÎÏ×ÉÔÉ ÔÁÂÌÉÃÀ ÂÅÚ ÏÐÅÒÁÔÏÒÁ WHERE, ÝÏ ×ÉËÏÒÉÓÔÏ×Õ¤ KEY ÓÔÏ×ÂÅÃØ", -"ëÌÀÞ '%-.64s' ÎÅ ¦ÓÎÕ¤ × ÔÁÂÌÉæ '%-.64s'", -"îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÔÁÂÌÉÃÀ", -"÷ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕÅ %s", -"÷ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÎÕ×ÁÔÉ ÃÀ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËæ§", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ COMMIT", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ ROLLBACK", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ FLUSH_LOGS", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ CHECKPOINT", -"ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: `%-.64s' (%-.64s)", -"ãÅÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ¦ÎÁÒÎÕ ÐÅÒÅÄÁÞÕ ÔÁÂÌÉæ", -"òÅÐ̦ËÁæÊÎÉÊ ÌÏÇ ÚÁËÒÉÔÏ, ÎÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ RESET MASTER", -"îÅ×ÄÁÌŠצÄÎÏ×ÌÅÎÎÑ ¦ÎÄÅËÓÁ ÐÅÒÅÄÁÎϧ ÔÁÂÌÉæ '%-.64s'", -"ðÏÍÉÌËÁ ×¦Ä ÇÏÌÏ×ÎÏÇÏ: '%-.64s'", -"íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÞÉÔÁÎÎÑ ×¦Ä ÇÏÌÏ×ÎÏÇÏ", -"íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÚÁÐÉÓÕ ÄÏ ÇÏÌÏ×ÎÏÇÏ", -"îÅ ÍÏÖÕ ÚÎÁÊÔÉ FULLTEXT ¦ÎÄÅËÓ, ÝÏ ×¦ÄÐÏצÄÁ¤ ÐÅÒÅ̦ËÕ ÓÔÏ×Âæ×", -"îÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ ÐÏÄÁÎÕ ËÏÍÁÎÄÕ ÔÏÍÕ, ÝÏ ÔÁÂÌÉÃÑ ÚÁÂÌÏËÏ×ÁÎÁ ÁÂÏ ×ÉËÏÎÕ¤ÔØÓÑ ÔÒÁÎÚÁËæÑ", -"îÅצÄÏÍÁ ÓÉÓÔÅÍÎÁ ÚͦÎÎÁ '%-.64s'", -"ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ §§ ÐÏÔÒ¦ÂÎÏ ×¦ÄÎÏ×ÉÔÉ", -"ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ ÏÓÔÁÎΤ (Á×ÔÏÍÁÔÉÞÎÅ?) צÄÎÏ×ÌÅÎÎÑ ÎÅ ×ÄÁÌÏÓÑ", -"úÁÓÔÅÒÅÖÅÎÎÑ: äÅÑ˦ ÎÅÔÒÁÎÚÁËæÊΦ ÚͦÎÉ ÔÁÂÌÉÃØ ÎÅ ÍÏÖÎÁ ÂÕÄÅ ÐÏ×ÅÒÎÕÔÉ", -"ôÒÁÎÚÁËÃ¦Ñ Ú ÂÁÇÁÔØÍÁ ×ÉÒÁÚÁÍÉ ×ÉÍÁÇÁ¤ Â¦ÌØÛÅ Î¦Ö 'max_binlog_cache_size' ÂÁÊÔ¦× ÄÌÑ ÚÂÅÒ¦ÇÁÎÎÑ. 
úÂ¦ÌØÛÔÅ ÃÀ ÚͦÎÎÕ mysqld ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ", -"ïÐÅÒÁÃ¦Ñ ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÎÁÎÁ Ú ÚÁÐÕÝÅÎÉÍ Ð¦ÄÌÅÇÌÉÍ, ÓÐÏÞÁÔËÕ ×ÉËÏÎÁÊÔÅ STOP SLAVE", -"ïÐÅÒÁÃ¦Ñ ×ÉÍÁÇÁ¤ ÚÁÐÕÝÅÎÏÇÏ Ð¦ÄÌÅÇÌÏÇÏ, ÚËÏÎÆ¦ÇÕÒÕÊÔŠЦÄÌÅÇÌÏÇÏ ÔÁ ×ÉËÏÎÁÊÔÅ START SLAVE", -"óÅÒ×ÅÒ ÎÅ ÚËÏÎÆ¦ÇÕÒÏ×ÁÎÏ ÑË Ð¦ÄÌÅÇÌÉÊ, ×ÉÐÒÁ×ÔÅ ÃÅ Õ ÆÁÊ̦ ËÏÎÆ¦ÇÕÒÁæ§ ÁÂÏ Ú CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ Ð¦ÄÌÅÇÌÕ Ç¦ÌËÕ, ÐÅÒÅצÒÔÅ ÓÉÓÔÅÍΦ ÒÅÓÕÒÓÉ", -"ëÏÒÉÓÔÕ×ÁÞ %-.64s ×ÖÅ ÍÁ¤ Â¦ÌØÛÅ Î¦Ö 'max_user_connections' ÁËÔÉ×ÎÉÈ Ú'¤ÄÎÁÎØ", -"íÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ ×ÉÒÁÚÉ Ú¦ ÓÔÁÌÉÍÉ Õ SET", -"úÁÔÒÉÍËÕ ÏÞ¦ËÕ×ÁÎÎÑ ÂÌÏËÕ×ÁÎÎÑ ×ÉÞÅÒÐÁÎÏ", -"úÁÇÁÌØÎÁ Ë¦ÌØË¦ÓÔØ ÂÌÏËÕ×ÁÎØ ÐÅÒÅ×ÉÝÉÌÁ ÒÏÚÍ¦Ò ÂÌÏËÕ×ÁÎØ ÄÌÑ ÔÁÂÌÉæ", -"ïÎÏ×ÉÔÉ ÂÌÏËÕ×ÁÎÎÑ ÎÅ ÍÏÖÌÉ×Ï ÎÁ ÐÒÏÔÑÚ¦ ÔÒÁÎÚÁËæ§ READ UNCOMMITTED", -"DROP DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ", -"CREATE DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ", -"èÉÂÎÉÊ ÁÒÇÕÍÅÎÔ ÄÌÑ %s", -"ëÏÒÉÓÔÕ×ÁÞÕ '%-.32s'@'%-.64s' ÎÅ ÄÏÚ×ÏÌÅÎÏ ÓÔ×ÏÒÀ×ÁÔÉ ÎÏ×ÉÈ ËÏÒÉÓÔÕ×ÁÞ¦×", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ FULLTEXT ¦ÎÄÅËÓ¦×", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this 
operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"ïÐÅÒÁÎÄ ÍÁ¤ ÓËÌÁÄÁÔÉÓÑ Ú %d ÓÔÏ×Âæ×", -"ð¦ÄÚÁÐÉÔ ÐÏ×ÅÒÔÁ¤ Â¦ÌØÛ ÎiÖ 1 ÚÁÐÉÓ", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"ãÉË̦ÞÎÅ ÐÏÓÉÌÁÎÎÑ ÎÁ ЦÄÚÁÐÉÔ", -"ðÅÒÅÔ×ÏÒÅÎÎÑ ÓÔÏ×ÂÃÁ '%s' Ú %s Õ %s", -"ðÏÓÉÌÁÎÎÑ '%-.64s' ÎÅ ÐiÄÔÒÉÍÕÅÔÓÑ (%s)", -"Every derived table must have its own alias", -"Select %u was ÓËÁÓÏ×ÁÎÏ ÐÒÉ ÏÐÔÉÍiÚÁÃii", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' 
at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"óÔÏ×ÂÅÃØ ÁÂÏ ÐÏÓÉÌÁÎÎÑ '%-.64s%s%-.64s%s%-.64s' ¦Ú SELECTÕ #%d ÂÕÌÏ ÚÎÁÊÄÅÎÅ Õ SELECT¦ #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"ëÅÛ ÚÁÐÉÔ¦× ÎÅÓÐÒÏÍÏÖÅÎ ×ÓÔÁÎÏ×ÉÔÉ ÒÏÚÍ¦Ò %lu, ÎÏ×ÉÊ ÒÏÚÍ¦Ò ËÅÛÁ ÚÁÐÉÔ¦× - %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"ôÁÂÌÉÃÑ %-.100s Õ %s ÎÅ ÍÏÖÅ ÏÎÏ×ÌÀ×ÁÔÉÓØ", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'", diff --git a/sql/slave.cc b/sql/slave.cc index 75b18f6f307..ba8c3ff902a 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1,15 +1,14 @@ /* Copyright (C) 2000-2003 MySQL AB - + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - + the Free Software Foundation; version 2 of the License. + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
- + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ @@ -26,6 +25,7 @@ #include <thr_alarm.h> #include <my_dir.h> #include <sql_common.h> +#include <errmsg.h> #define MAX_SLAVE_RETRY_PAUSE 5 bool use_slave_mask = 0; @@ -40,7 +40,8 @@ HASH replicate_do_table, replicate_ignore_table; DYNAMIC_ARRAY replicate_wild_do_table, replicate_wild_ignore_table; bool do_table_inited = 0, ignore_table_inited = 0; bool wild_do_table_inited = 0, wild_ignore_table_inited = 0; -bool table_rules_on= 0, replicate_same_server_id; +bool table_rules_on= 0; +my_bool replicate_same_server_id; ulonglong relay_log_space_limit = 0; /* @@ -52,7 +53,9 @@ ulonglong relay_log_space_limit = 0; int disconnect_slave_event_count = 0, abort_slave_event_count = 0; int events_till_abort = -1; +#ifndef DBUG_OFF static int events_till_disconnect = -1; +#endif typedef enum { SLAVE_THD_IO, SLAVE_THD_SQL} SLAVE_THD_TYPE; @@ -75,7 +78,6 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, const char* table_name, bool overwrite); static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi); - /* Find out which replications threads are running @@ -160,7 +162,7 @@ int init_slave() sql_print_error("Failed to allocate memory for the master info structure"); goto err; } - + if (init_master_info(active_mi,master_info_file,relay_log_info_file, !master_host, (SLAVE_IO | SLAVE_SQL))) { @@ -219,6 +221,13 @@ static byte* get_table_key(TABLE_RULE_ENT* e, uint* len, pos Position in relay log file need_data_lock Set to 1 if this functions should do mutex locks errmsg Store pointer to error message here + look_for_description_event + 1 if we should look for such an event. 
We only need + this when the SQL thread starts and opens an existing + relay log and has to execute it (possibly from an + offset >4); then we need to read the first event of + the relay log to be able to parse the events we have + to execute. DESCRIPTION - Close old open relay log files. @@ -236,15 +245,35 @@ static byte* get_table_key(TABLE_RULE_ENT* e, uint* len, int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, ulonglong pos, bool need_data_lock, - const char** errmsg) + const char** errmsg, + bool look_for_description_event) { DBUG_ENTER("init_relay_log_pos"); + DBUG_PRINT("info", ("pos: %lu", (long) pos)); *errmsg=0; pthread_mutex_t *log_lock=rli->relay_log.get_log_lock(); if (need_data_lock) pthread_mutex_lock(&rli->data_lock); + + /* + Slave threads are not the only users of init_relay_log_pos(). CHANGE MASTER + is, too, and init_slave() too; these 2 functions allocate a description + event in init_relay_log_pos, which is not freed by the terminating SQL slave + thread as that thread is not started by these functions. So we have to free + the description_event here, in case, so that there is no memory leak in + running, say, CHANGE MASTER. + */ + delete rli->relay_log.description_event_for_exec; + /* + By default the relay log is in binlog format 3 (4.0). + Even if format is 4, this will work enough to read the first event + (Format_desc) (remember that format 4 is just lenghtened compared to format + 3; format 3 is a prefix of format 4). + */ + rli->relay_log.description_event_for_exec= new + Format_description_log_event(3); pthread_mutex_lock(log_lock); @@ -284,9 +313,8 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, In this case, we will use the same IO_CACHE pointer to read data as the IO thread is using to write data. 
*/ - rli->cur_log= rli->relay_log.get_log_file(); - if (my_b_tell(rli->cur_log) == 0 && - check_binlog_magic(rli->cur_log, errmsg)) + my_b_seek((rli->cur_log=rli->relay_log.get_log_file()), (off_t)0); + if (check_binlog_magic(rli->cur_log,errmsg)) goto err; rli->cur_log_old_open_count=rli->relay_log.get_open_count(); } @@ -300,8 +328,85 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, goto err; rli->cur_log = &rli->cache_buf; } - if (pos >= BIN_LOG_HEADER_SIZE) + /* + In all cases, check_binlog_magic() has been called so we're at offset 4 for + sure. + */ + if (pos > BIN_LOG_HEADER_SIZE) /* If pos<=4, we stay at 4 */ + { + Log_event* ev; + while (look_for_description_event) + { + /* + Read the possible Format_description_log_event; if position + was 4, no need, it will be read naturally. + */ + DBUG_PRINT("info",("looking for a Format_description_log_event")); + + if (my_b_tell(rli->cur_log) >= pos) + break; + + /* + Because of we have rli->data_lock and log_lock, we can safely read an + event + */ + if (!(ev=Log_event::read_log_event(rli->cur_log,0, + rli->relay_log.description_event_for_exec))) + { + DBUG_PRINT("info",("could not read event, rli->cur_log->error=%d", + rli->cur_log->error)); + if (rli->cur_log->error) /* not EOF */ + { + *errmsg= "I/O error reading event at position 4"; + goto err; + } + break; + } + else if (ev->get_type_code() == FORMAT_DESCRIPTION_EVENT) + { + DBUG_PRINT("info",("found Format_description_log_event")); + delete rli->relay_log.description_event_for_exec; + rli->relay_log.description_event_for_exec= (Format_description_log_event*) ev; + /* + As ev was returned by read_log_event, it has passed is_valid(), so + my_malloc() in ctor worked, no need to check again. + */ + /* + Ok, we found a Format_description event. 
But it is not sure that this + describes the whole relay log; indeed, one can have this sequence + (starting from position 4): + Format_desc (of slave) + Rotate (of master) + Format_desc (of master) + So the Format_desc which really describes the rest of the relay log + is the 3rd event (it can't be further than that, because we rotate + the relay log when we queue a Rotate event from the master). + But what describes the Rotate is the first Format_desc. + So what we do is: + go on searching for Format_description events, until you exceed the + position (argument 'pos') or until you find another event than Rotate + or Format_desc. + */ + } + else + { + DBUG_PRINT("info",("found event of another type=%d", + ev->get_type_code())); + look_for_description_event= (ev->get_type_code() == ROTATE_EVENT); + delete ev; + } + } my_b_seek(rli->cur_log,(off_t)pos); +#ifndef DBUG_OFF + { + char llbuf1[22], llbuf2[22]; + DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", + llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(rli->event_relay_log_pos,llbuf2))); + } +#endif + + } err: /* @@ -316,13 +421,15 @@ err: if (need_data_lock) pthread_mutex_unlock(&rli->data_lock); + if (!rli->relay_log.description_event_for_exec->is_valid() && !*errmsg) + *errmsg= "Invalid Format_description log event; could be out of memory"; DBUG_RETURN ((*errmsg) ? 
1 : 0); } /* - Init functio to set up array for errors that should be skipped for slave + Init function to set up array for errors that should be skipped for slave SYNOPSIS init_slave_skip_errors() @@ -361,16 +468,15 @@ void init_slave_skip_errors(const char* arg) } -void st_relay_log_info::inc_group_relay_log_pos(ulonglong val, - ulonglong log_pos, - bool skip_lock) +void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos, + bool skip_lock) { if (!skip_lock) pthread_mutex_lock(&data_lock); - inc_event_relay_log_pos(val); + inc_event_relay_log_pos(); group_relay_log_pos= event_relay_log_pos; strmake(group_relay_log_name,event_relay_log_name, - sizeof(group_relay_log_name)-1); + sizeof(group_relay_log_name)-1); notify_group_relay_log_name_update(); @@ -384,24 +490,31 @@ void st_relay_log_info::inc_group_relay_log_pos(ulonglong val, not advance as it should on the non-transactional slave (it advances by big leaps, whereas it should advance by small leaps). */ + /* + In 4.x we used the event's len to compute the positions here. This is + wrong if the event was 3.23/4.0 and has been converted to 5.0, because + then the event's len is not what is was in the master's binlog, so this + will make a wrong group_master_log_pos (yes it's a bug in 3.23->4.0 + replication: Exec_master_log_pos is wrong). Only way to solve this is to + have the original offset of the end of the event the relay log. This is + what we do in 5.0: log_pos has become "end_log_pos" (because the real use + of log_pos in 4.0 was to compute the end_log_pos; so better to store + end_log_pos instead of begin_log_pos. + If we had not done this fix here, the problem would also have appeared + when the slave and master are 5.0 but with different event length (for + example the slave is more recent than the master and features the event + UID). 
It would give false MASTER_POS_WAIT, false Exec_master_log_pos in + SHOW SLAVE STATUS, and so the user would do some CHANGE MASTER using this + value which would lead to badly broken replication. + Even the relay_log_pos will be corrupted in this case, because the len is + the relay log is not "val". + With the end_log_pos solution, we avoid computations involving lengthes. + */ + DBUG_PRINT("info", ("log_pos: %lu group_master_log_pos: %lu", + (long) log_pos, (long) group_master_log_pos)); if (log_pos) // 3.23 binlogs don't have log_posx { -#if MYSQL_VERSION_ID < 50000 - /* - If the event was converted from a 3.23 format, get_event_len() has - grown by 6 bytes (at least for most events, except LOAD DATA INFILE - which is already a big problem for 3.23->4.0 replication); 6 bytes is - the difference between the header's size in 4.0 (LOG_EVENT_HEADER_LEN) - and the header's size in 3.23 (OLD_HEADER_LEN). Note that using - mi->old_format will not help if the I/O thread has not started yet. - Yes this is a hack but it's just to make 3.23->4.x replication work; - 3.23->5.0 replication is working much better. - */ - group_master_log_pos= log_pos + val - - (mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#else - group_master_log_pos= log_pos+ val; -#endif /* MYSQL_VERSION_ID < 5000 */ + group_master_log_pos= log_pos; } pthread_cond_broadcast(&data_cond); if (!skip_lock) @@ -442,9 +555,9 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, /* Even if rli->inited==0, we still try to empty rli->master_log_* variables. Indeed, rli->inited==0 does not imply that they already are empty. 
- It could be that slave's info initialization partly succeeded : + It could be that slave's info initialization partly succeeded : for example if relay-log.info existed but *relay-bin*.* - have been manually removed, init_relay_log_info reads the old + have been manually removed, init_relay_log_info reads the old relay-log.info and fills rli->master_log_*, then init_relay_log_info checks for the existence of the relay log, this fails and init_relay_log_info leaves rli->inited to 0. @@ -453,7 +566,7 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, MASTER, the callers of purge_relay_logs, will delete bogus *.info files or replace them with correct files), however if the user does SHOW SLAVE STATUS before START SLAVE, he will see old, confusing rli->master_log_*. - In other words, we reinit rli->master_log_* for SHOW SLAVE STATUS + In other words, we reinit rli->master_log_* for SHOW SLAVE STATUS to display fine in any case. */ @@ -471,6 +584,20 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, rli->slave_skip_counter=0; pthread_mutex_lock(&rli->data_lock); + + /* + we close the relay log fd possibly left open by the slave SQL thread, + to be able to delete it; the relay log fd possibly left open by the slave + I/O thread will be closed naturally in reset_logs() by the + close(LOG_CLOSE_TO_BE_OPENED) call + */ + if (rli->cur_log_fd >= 0) + { + end_io_cache(&rli->cache_buf); + my_close(rli->cur_log_fd, MYF(MY_WME)); + rli->cur_log_fd= -1; + } + if (rli->relay_log.reset_logs(thd)) { *errmsg = "Failed during log reset"; @@ -482,13 +609,16 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, sizeof(rli->group_relay_log_name)-1); strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(), sizeof(rli->event_relay_log_name)-1); - // Just first log with magic number and nothing else - rli->log_space_total= BIN_LOG_HEADER_SIZE; rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE; - 
rli->relay_log.reset_bytes_written(); + if (count_relay_log_space(rli)) + { + *errmsg= "Error counting relay log space"; + goto err; + } if (!just_reset) - error= init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos, - 0 /* do not need data lock */, errmsg); + error= init_relay_log_pos(rli, rli->group_relay_log_name, + rli->group_relay_log_pos, + 0 /* do not need data lock */, errmsg, 0); err: #ifndef DBUG_OFF @@ -548,24 +678,26 @@ int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock, pthread_cond_t* term_cond, volatile uint *slave_running) { + DBUG_ENTER("terminate_slave_thread"); if (term_lock) { pthread_mutex_lock(term_lock); if (!*slave_running) { pthread_mutex_unlock(term_lock); - return ER_SLAVE_NOT_RUNNING; + DBUG_RETURN(ER_SLAVE_NOT_RUNNING); } } DBUG_ASSERT(thd != 0); + THD_CHECK_SENTRY(thd); /* - Is is criticate to test if the slave is running. Otherwise, we might + Is is critical to test if the slave is running. Otherwise, we might be referening freed memory trying to kick it */ - THD_CHECK_SENTRY(thd); while (*slave_running) // Should always be true { + DBUG_PRINT("loop", ("killing slave thread")); KICK_SLAVE(thd); /* There is a small chance that slave thread might miss the first @@ -577,7 +709,7 @@ int terminate_slave_thread(THD* thd, pthread_mutex_t* term_lock, } if (term_lock) pthread_mutex_unlock(term_lock); - return 0; + DBUG_RETURN(0); } @@ -636,7 +768,7 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock, thd->exit_cond(old_msg); pthread_mutex_lock(cond_lock); // re-acquire it as exit_cond() released if (thd->killed) - DBUG_RETURN(ER_SERVER_SHUTDOWN); + DBUG_RETURN(thd->killed_errno()); } } if (start_lock) @@ -737,7 +869,7 @@ static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len) SYNOPSIS tables_ok() - thd thread (SQL slave thread normally) + thd thread (SQL slave thread normally). Mustn't be null. 
tables list of tables to check NOTES @@ -746,7 +878,11 @@ static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len) rules (see code below). For that reason, users should not set conflicting rules because they may get unpredicted results (precedence order is explained in the manual). - + + Thought which arose from a question of a big customer "I want to include + all tables like "abc.%" except the "%.EFG"". This can't be done now. If we + supported Perl regexps we could do it with this pattern: /^abc\.(?!EFG)/ + (I could not find an equivalent in the regex library MySQL uses). RETURN VALUES 0 should not be logged/replicated @@ -758,7 +894,24 @@ bool tables_ok(THD* thd, TABLE_LIST* tables) bool some_tables_updating= 0; DBUG_ENTER("tables_ok"); - for (; tables; tables = tables->next) + /* + In routine, can't reliably pick and choose substatements, so always + replicate. + We can't reliably know if one substatement should be executed or not: + consider the case of this substatement: a SELECT on a non-replicated + constant table; if we don't execute it maybe it was going to fill a + variable which was going to be used by the next substatement to update + a replicated table? If we execute it maybe the constant non-replicated + table does not exist (and so we'll fail) while there was no need to + execute this as this SELECT does not influence replicated tables in the + rest of the routine? In other words: users are used to replicate-*-table + specifying how to handle updates to tables, these options don't say + anything about reads to tables; we can't guess. + */ + if (thd->spcont) + DBUG_RETURN(1); + + for (; tables; tables= tables->next_global) { char hash_key[2*NAME_LEN+2]; char *end; @@ -769,7 +922,7 @@ bool tables_ok(THD* thd, TABLE_LIST* tables) some_tables_updating= 1; end= strmov(hash_key, tables->db ? 
tables->db : thd->db); *end++= '.'; - len= (uint) (strmov(end, tables->real_name) - hash_key); + len= (uint) (strmov(end, tables->table_name) - hash_key); if (do_table_inited) // if there are any do's { if (hash_search(&replicate_do_table, (byte*) hash_key, len)) @@ -1026,24 +1179,6 @@ bool net_request_file(NET* net, const char* fname) } -const char *rewrite_db(const char* db, uint32 *new_len) -{ - if (replicate_rewrite_db.is_empty() || !db) - return db; - I_List_iterator<i_string_pair> it(replicate_rewrite_db); - i_string_pair* tmp; - - while ((tmp=it++)) - { - if (!strcmp(tmp->key, db)) - { - *new_len= (uint32)strlen(tmp->val); - return tmp->val; - } - } - return db; -} - /* From other comments and tests in code, it looks like sometimes Query_log_event and Load_log_event can have db == 0 @@ -1157,29 +1292,86 @@ static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val) return 1; } +/* + Note that we rely on the master's version (3.23, 4.0.14 etc) instead of + relying on the binlog's version. This is not perfect: imagine an upgrade + of the master without waiting that all slaves are in sync with the master; + then a slave could be fooled about the binlog's format. This is what happens + when people upgrade a 3.23 master to 4.0 without doing RESET MASTER: 4.0 + slaves are fooled. So we do this only to distinguish between 3.23 and more + recent masters (it's too late to change things for 3.23). + + RETURNS + 0 ok + 1 error +*/ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) { const char* errmsg= 0; - + /* - Note the following switch will bug when we have MySQL branch 30 ;) + Free old description_event_for_queue (that is needed if we are in + a reconnection). */ - switch (*mysql->server_version) { - case '3': - mi->old_format = - (strncmp(mysql->server_version, "3.23.57", 7) < 0) /* < .57 */ ? 
- BINLOG_FORMAT_323_LESS_57 : - BINLOG_FORMAT_323_GEQ_57 ; - break; - case '4': - mi->old_format = BINLOG_FORMAT_CURRENT; - break; - default: - /* 5.0 is not supported */ - errmsg = "Master reported an unrecognized MySQL version. Note that 4.1 \ -slaves can't replicate a 5.0 or newer master."; - break; + delete mi->rli.relay_log.description_event_for_queue; + mi->rli.relay_log.description_event_for_queue= 0; + + if (!my_isdigit(&my_charset_bin,*mysql->server_version)) + errmsg = "Master reported unrecognized MySQL version"; + else + { + /* + Note the following switch will bug when we have MySQL branch 30 ;) + */ + switch (*mysql->server_version) + { + case '0': + case '1': + case '2': + errmsg = "Master reported unrecognized MySQL version"; + break; + case '3': + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(1, mysql->server_version); + break; + case '4': + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(3, mysql->server_version); + break; + default: + /* + Master is MySQL >=5.0. Give a default Format_desc event, so that we can + take the early steps (like tests for "is this a 3.23 master") which we + have to take before we receive the real master's Format_desc which will + override this one. Note that the Format_desc we create below is garbage + (it has the format of the *slave*); it's only good to help know if the + master is 3.23, 4.0, etc. + */ + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(4, mysql->server_version); + break; + } + } + + /* + This does not mean that a 5.0 slave will be able to read a 6.0 master; but + as we don't know yet, we don't want to forbid this for now. If a 5.0 slave + can't read a 6.0 master, this will show up when the slave can't read some + events sent by the master, and there will be error messages. 
+ */ + + if (errmsg) + { + sql_print_error(errmsg); + return 1; + } + + /* as we are here, we tried to allocate the event */ + if (!mi->rli.relay_log.description_event_for_queue) + { + sql_print_error("Slave I/O thread failed to create a default Format_description_log_event"); + return 1; } /* @@ -1189,7 +1381,7 @@ slaves can't replicate a 5.0 or newer master."; MYSQL_RES *master_res= 0; MYSQL_ROW master_row; - if (!mysql_real_query(mysql, "SELECT UNIX_TIMESTAMP()", 23) && + if (!mysql_real_query(mysql, STRING_WITH_LEN("SELECT UNIX_TIMESTAMP()")) && (master_res= mysql_store_result(mysql)) && (master_row= mysql_fetch_row(master_res))) { @@ -1215,7 +1407,8 @@ do not trust column Seconds_Behind_Master of SHOW SLAVE STATUS"); Note: we could have put a @@SERVER_ID in the previous SELECT UNIX_TIMESTAMP() instead, but this would not have worked on 3.23 masters. */ - if (!mysql_real_query(mysql, "SHOW VARIABLES LIKE 'SERVER_ID'", 31) && + if (!mysql_real_query(mysql, + STRING_WITH_LEN("SHOW VARIABLES LIKE 'SERVER_ID'")) && (master_res= mysql_store_result(mysql))) { if ((master_row= mysql_fetch_row(master_res)) && @@ -1237,12 +1430,21 @@ not always make sense; please check the manual before using it)."; values of these 2 are never used (new connections don't use them). We don't test equality of global collation_database either as it's is going to be deprecated (made read-only) in 4.1 very soon. - We don't do it for <3.23.57 because masters <3.23.50 hang on - SELECT @@unknown_var (BUG#7965 - see changelog of 3.23.50). + The test is only relevant if master < 5.0.3 (we'll test only if it's older + than the 5 branch; < 5.0.3 was alpha...), as >= 5.0.3 master stores + charset info in each binlog event. + We don't do it for 3.23 because masters <3.23.50 hang on + SELECT @@unknown_var (BUG#7965 - see changelog of 3.23.50). So finally we + test only if master is 4.x. 
*/ - if (mi->old_format == BINLOG_FORMAT_323_LESS_57) + + /* redundant with rest of code but safer against later additions */ + if (*mysql->server_version == '3') goto err; - if (!mysql_real_query(mysql, "SELECT @@GLOBAL.COLLATION_SERVER", 32) && + + if ((*mysql->server_version == '4') && + !mysql_real_query(mysql, + STRING_WITH_LEN("SELECT @@GLOBAL.COLLATION_SERVER")) && (master_res= mysql_store_result(mysql))) { if ((master_row= mysql_fetch_row(master_res)) && @@ -1265,8 +1467,11 @@ be equal for replication to work"; such check will broke everything for them. (And now everything will work for them because by default both their master and slave will have 'SYSTEM' time zone). + This check is only necessary for 4.x masters (and < 5.0.4 masters but + those were alpha). */ - if (!mysql_real_query(mysql, "SELECT @@GLOBAL.TIME_ZONE", 25) && + if ((*mysql->server_version == '4') && + !mysql_real_query(mysql, STRING_WITH_LEN("SELECT @@GLOBAL.TIME_ZONE")) && (master_res= mysql_store_result(mysql))) { if ((master_row= mysql_fetch_row(master_res)) && @@ -1310,14 +1515,14 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, TABLE_LIST tables; int error= 1; handler *file; - ulong save_options; + ulonglong save_options; NET *net= &mysql->net; DBUG_ENTER("create_table_from_dump"); packet_len= my_net_read(net); // read create table statement if (packet_len == packet_error) { - send_error(thd, ER_MASTER_NET_READ); + my_message(ER_MASTER_NET_READ, ER(ER_MASTER_NET_READ), MYF(0)); DBUG_RETURN(1); } if (net->read_pos[0] == 255) // error from master @@ -1326,7 +1531,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, err_msg= (char*) net->read_pos + ((mysql->server_capabilities & CLIENT_PROTOCOL_41) ? 
3+SQLSTATE_LENGTH+1 : 3); - net_printf(thd, ER_MASTER, err_msg); + my_error(ER_MASTER, MYF(0), err_msg); DBUG_RETURN(1); } thd->command = COM_TABLE_DUMP; @@ -1335,7 +1540,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, if (!(query = thd->strmake((char*) net->read_pos, packet_len))) { sql_print_error("create_table_from_dump: out of memory"); - net_printf(thd, ER_GET_ERRNO, "Out of memory"); + my_message(ER_GET_ERRNO, "Out of memory", MYF(0)); DBUG_RETURN(1); } thd->query= query; @@ -1344,12 +1549,11 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, bzero((char*) &tables,sizeof(tables)); tables.db = (char*)db; - tables.alias= tables.real_name= (char*)table_name; + tables.alias= tables.table_name= (char*)table_name; /* Drop the table if 'overwrite' is true */ if (overwrite && mysql_rm_table(thd,&tables,1,0)) /* drop if exists */ { - send_error(thd); sql_print_error("create_table_from_dump: failed to drop the table"); goto err; } @@ -1361,9 +1565,8 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, // save old db in case we are creating in a different database save_db = thd->db; save_db_length= thd->db_length; - thd->db = (char*)db; - DBUG_ASSERT(thd->db); - thd->db_length= strlen(thd->db); + DBUG_ASSERT(db != 0); + thd->reset_db((char*)db, strlen(db)); mysql_parse(thd, thd->query, packet_len); // run create table thd->db = save_db; // leave things the way the were before thd->db_length= save_db_length; @@ -1376,7 +1579,6 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, tables.lock_type = TL_WRITE; if (!open_ltable(thd, &tables, TL_WRITE)) { - send_error(thd,0,0); // Send error from open_ltable sql_print_error("create_table_from_dump: could not open created table"); goto err; } @@ -1386,7 +1588,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, /* Copy the data file */ if (file->net_read_dump(net)) { - net_printf(thd, 
ER_MASTER_NET_READ); + my_message(ER_MASTER_NET_READ, ER(ER_MASTER_NET_READ), MYF(0)); sql_print_error("create_table_from_dump: failed in\ handler::net_read_dump()"); goto err; @@ -1403,10 +1605,10 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, save_vio = thd->net.vio; thd->net.vio = 0; /* Rebuild the index file from the copied data file (with REPAIR) */ - error=file->repair(thd,&check_opt) != 0; + error=file->ha_repair(thd,&check_opt) != 0; thd->net.vio = save_vio; if (error) - net_printf(thd, ER_INDEX_REBUILD,tables.table->real_name); + my_error(ER_INDEX_REBUILD, MYF(0), tables.table->s->table_name); err: close_thread_tables(thd); @@ -1429,12 +1631,20 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name, { if (!(mysql = mysql_init(NULL))) { - send_error(thd); // EOM DBUG_RETURN(1); } if (connect_to_master(thd, mysql, mi)) { - net_printf(thd, ER_CONNECT_TO_MASTER, mysql_error(mysql)); + my_error(ER_CONNECT_TO_MASTER, MYF(0), mysql_error(mysql)); + /* + We need to clear the active VIO since, theoretically, somebody + might issue an awake() on this thread. If we are then in the + middle of closing and destroying the VIO inside the + mysql_close(), we will have a problem. 
+ */ +#ifdef SIGNAL_WITH_VIO_CLOSE + thd->clear_active_vio(); +#endif mysql_close(mysql); DBUG_RETURN(1); } @@ -1458,7 +1668,7 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name, if (!called_connected) mysql_close(mysql); if (errmsg && thd->vio_ok()) - send_error(thd, error, errmsg); + my_message(error, errmsg, MYF(0)); DBUG_RETURN(test(error)); // Return 1 on error } @@ -1482,15 +1692,16 @@ void end_master_info(MASTER_INFO* mi) } -int init_relay_log_info(RELAY_LOG_INFO* rli, const char* info_fname) +static int init_relay_log_info(RELAY_LOG_INFO* rli, + const char* info_fname) { char fname[FN_REFLEN+128]; int info_fd; const char* msg = 0; - int error = 0; + int error; DBUG_ENTER("init_relay_log_info"); - if (rli->inited) // Set if this function called + if (rli->inited) // Set if this function called DBUG_RETURN(0); fn_format(fname, info_fname, mysql_data_home, "", 4+32); pthread_mutex_lock(&rli->data_lock); @@ -1501,23 +1712,10 @@ int init_relay_log_info(RELAY_LOG_INFO* rli, const char* info_fname) rli->log_space_limit= relay_log_space_limit; rli->log_space_total= 0; - // TODO: make this work with multi-master - if (!opt_relay_logname) - { - char tmp[FN_REFLEN]; - /* - TODO: The following should be using fn_format(); We just need to - first change fn_format() to cut the file name if it's too long. - */ - strmake(tmp,glob_hostname,FN_REFLEN-5); - strmov(strcend(tmp,'.'),"-relay-bin"); - opt_relay_logname=my_strdup(tmp,MYF(MY_WME)); - } - /* The relay log will now be opened, as a SEQ_READ_APPEND IO_CACHE. - Note that the I/O thread flushes it to disk after writing every event, in - flush_master_info(mi, 1). + Note that the I/O thread flushes it to disk after writing every + event, in flush_master_info(mi, 1). */ /* @@ -1529,16 +1727,42 @@ int init_relay_log_info(RELAY_LOG_INFO* rli, const char* info_fname) switch to using max_binlog_size for the relay log) and update rli->relay_log.max_size (and mysql_bin_log.max_size). 
*/ - - if (open_log(&rli->relay_log, glob_hostname, opt_relay_logname, - "-relay-bin", opt_relaylog_index_name, - LOG_BIN, 1 /* read_append cache */, - 1 /* no auto events */, - max_relay_log_size ? max_relay_log_size : max_binlog_size)) { - pthread_mutex_unlock(&rli->data_lock); - sql_print_error("Failed in open_log() called from init_relay_log_info()"); - DBUG_RETURN(1); + char buf[FN_REFLEN]; + const char *ln; + static bool name_warning_sent= 0; + ln= rli->relay_log.generate_name(opt_relay_logname, "-relay-bin", + 1, buf); + /* We send the warning only at startup, not after every RESET SLAVE */ + if (!opt_relay_logname && !opt_relaylog_index_name && !name_warning_sent) + { + /* + User didn't give us info to name the relay log index file. + Picking `hostname`-relay-bin.index like we do, causes replication to + fail if this slave's hostname is changed later. So, we would like to + instead require a name. But as we don't want to break many existing + setups, we only give warning, not error. + */ + sql_print_warning("Neither --relay-log nor --relay-log-index were used;" + " so replication " + "may break when this MySQL server acts as a " + "slave and has his hostname changed!! Please " + "use '--relay-log=%s' to avoid this problem.", ln); + name_warning_sent= 1; + } + /* + note, that if open() fails, we'll still have index file open + but a destructor will take care of that + */ + if (rli->relay_log.open_index_file(opt_relaylog_index_name, ln) || + rli->relay_log.open(ln, LOG_BIN, 0, SEQ_READ_APPEND, 0, + (max_relay_log_size ? 
max_relay_log_size : + max_binlog_size), 1)) + { + pthread_mutex_unlock(&rli->data_lock); + sql_print_error("Failed in open_log() called from init_relay_log_info()"); + DBUG_RETURN(1); + } } /* if file does not exist */ @@ -1568,7 +1792,7 @@ file '%s', errno %d)", fname, my_errno); /* Init relay log with first entry in the relay index file */ if (init_relay_log_pos(rli,NullS,BIN_LOG_HEADER_SIZE,0 /* no data lock */, - &msg)) + &msg, 0)) { sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4)"); goto err; @@ -1579,11 +1803,11 @@ file '%s', errno %d)", fname, my_errno); } else // file exists { + error= 0; if (info_fd >= 0) reinit_io_cache(&rli->info_file, READ_CACHE, 0L,0,0); else { - int error=0; if ((info_fd = my_open(fname, O_RDWR|O_BINARY, MYF(MY_WME))) < 0) { sql_print_error("\ @@ -1633,7 +1857,7 @@ Failed to open the existing relay log info file '%s' (errno %d)", rli->group_relay_log_name, rli->group_relay_log_pos, 0 /* no data lock*/, - &msg)) + &msg, 0)) { char llbuf[22]; sql_print_error("Failed to open the relay log '%s' (relay_log_pos %s)", @@ -1642,8 +1866,18 @@ Failed to open the existing relay log info file '%s' (errno %d)", goto err; } } - DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); - DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); + +#ifndef DBUG_OFF + { + char llbuf1[22], llbuf2[22]; + DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", + llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(rli->event_relay_log_pos,llbuf2))); + DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); + DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); + } +#endif + /* Now change the cache from READ to WRITE - must do this before flush_relay_log_info @@ -1775,7 +2009,8 @@ static void write_ignored_events_info_to_relay_log(THD *thd, MASTER_INFO *mi) " to the relay log, " "SHOW SLAVE STATUS may be inaccurate"); rli->relay_log.harvest_bytes_written(&rli->log_space_total); - 
flush_master_info(mi, 1); + if (flush_master_info(mi, 1)) + sql_print_error("Failed to flush master info file"); delete ev; } else @@ -1840,9 +2075,9 @@ void clear_until_condition(RELAY_LOG_INFO* rli) int init_master_info(MASTER_INFO* mi, const char* master_info_fname, - const char* slave_info_fname, - bool abort_if_no_master_info_file, - int thread_mask) + const char* slave_info_fname, + bool abort_if_no_master_info_file, + int thread_mask) { int fd,error; char fname[FN_REFLEN+128]; @@ -1856,7 +2091,7 @@ int init_master_info(MASTER_INFO* mi, const char* master_info_fname, last time. If this case pos_in_file would be set and we would get a crash when trying to read the signature for the binary relay log. - + We only rewind the read position if we are starting the SQL thread. The handle_slave_sql thread assumes that the read position is at the beginning of the file, and will read the @@ -1882,7 +2117,7 @@ int init_master_info(MASTER_INFO* mi, const char* master_info_fname, fd = mi->fd; /* does master.info exist ? */ - + if (access(fname,F_OK)) { if (abort_if_no_master_info_file) @@ -1918,7 +2153,7 @@ file '%s')", fname); { if (fd >= 0) reinit_io_cache(&mi->file, READ_CACHE, 0L,0,0); - else + else { if ((fd = my_open(fname, O_RDWR|O_BINARY, MYF(MY_WME))) < 0 ) { @@ -1938,52 +2173,52 @@ file '%s')", fname); mi->fd = fd; int port, connect_retry, master_log_pos, ssl= 0, lines; char *first_non_digit; - + /* Starting from 4.1.x master.info has new format. Now its - first line contains number of lines in file. By reading this - number we will be always distinguish to which version our - master.info corresponds to. We can't simply count lines in + first line contains number of lines in file. By reading this + number we will be always distinguish to which version our + master.info corresponds to. We can't simply count lines in file since versions before 4.1.x could generate files with more lines than needed. 
- If first line doesn't contain a number or contain number less than + If first line doesn't contain a number or contain number less than 14 then such file is treated like file from pre 4.1.1 version. - There is no ambiguity when reading an old master.info, as before + There is no ambiguity when reading an old master.info, as before 4.1.1, the first line contained the binlog's name, which is either - empty or has an extension (contains a '.'), so can't be confused + empty or has an extension (contains a '.'), so can't be confused with an integer. - So we're just reading first line and trying to figure which version + So we're just reading first line and trying to figure which version is this. */ - - /* - The first row is temporarily stored in mi->master_log_name, - if it is line count and not binlog name (new format) it will be + + /* + The first row is temporarily stored in mi->master_log_name, + if it is line count and not binlog name (new format) it will be overwritten by the second row later. 
*/ if (init_strvar_from_file(mi->master_log_name, sizeof(mi->master_log_name), &mi->file, "")) goto errwithmsg; - + lines= strtoul(mi->master_log_name, &first_non_digit, 10); - if (mi->master_log_name[0]!='\0' && + if (mi->master_log_name[0]!='\0' && *first_non_digit=='\0' && lines >= LINES_IN_MASTER_INFO_WITH_SSL) { // Seems to be new format - if (init_strvar_from_file(mi->master_log_name, + if (init_strvar_from_file(mi->master_log_name, sizeof(mi->master_log_name), &mi->file, "")) goto errwithmsg; } else lines= 7; - + if (init_intvar_from_file(&master_log_pos, &mi->file, 4) || init_strvar_from_file(mi->host, sizeof(mi->host), &mi->file, master_host) || init_strvar_from_file(mi->user, sizeof(mi->user), &mi->file, - master_user) || + master_user) || init_strvar_from_file(mi->password, SCRAMBLED_PASSWORD_CHAR_LENGTH+1, &mi->file, master_password) || init_intvar_from_file(&port, &mi->file, master_port) || @@ -1991,17 +2226,17 @@ file '%s')", fname); master_connect_retry)) goto errwithmsg; - /* - If file has ssl part use it even if we have server without - SSL support. But these option will be ignored later when - slave will try connect to master, so in this case warning + /* + If file has ssl part use it even if we have server without + SSL support. But these option will be ignored later when + slave will try connect to master, so in this case warning is printed. 
*/ - if (lines >= LINES_IN_MASTER_INFO_WITH_SSL && + if (lines >= LINES_IN_MASTER_INFO_WITH_SSL && (init_intvar_from_file(&ssl, &mi->file, master_ssl) || - init_strvar_from_file(mi->ssl_ca, sizeof(mi->ssl_ca), + init_strvar_from_file(mi->ssl_ca, sizeof(mi->ssl_ca), &mi->file, master_ssl_ca) || - init_strvar_from_file(mi->ssl_capath, sizeof(mi->ssl_capath), + init_strvar_from_file(mi->ssl_capath, sizeof(mi->ssl_capath), &mi->file, master_ssl_capath) || init_strvar_from_file(mi->ssl_cert, sizeof(mi->ssl_cert), &mi->file, master_ssl_cert) || @@ -2012,11 +2247,11 @@ file '%s')", fname); goto errwithmsg; #ifndef HAVE_OPENSSL if (ssl) - sql_print_error("SSL information in the master info file " + sql_print_warning("SSL information in the master info file " "('%s') are ignored because this MySQL slave was compiled " "without SSL support.", fname); #endif /* HAVE_OPENSSL */ - + /* This has to be handled here as init_intvar_from_file can't handle my_off_t types @@ -2036,15 +2271,15 @@ file '%s')", fname); mi->inited = 1; // now change cache READ -> WRITE - must do this before flush_master_info - reinit_io_cache(&mi->file, WRITE_CACHE,0L,0,1); + reinit_io_cache(&mi->file, WRITE_CACHE, 0L, 0, 1); if ((error=test(flush_master_info(mi, 1)))) sql_print_error("Failed to flush master info file"); pthread_mutex_unlock(&mi->data_lock); DBUG_RETURN(error); - + errwithmsg: sql_print_error("Error reading master configuration"); - + err: if (fd >= 0) { @@ -2137,7 +2372,7 @@ void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a) } } -int show_master_info(THD* thd, MASTER_INFO* mi) +bool show_master_info(THD* thd, MASTER_INFO* mi) { // TODO: fix this for multi-master List<Item> field_list; @@ -2199,8 +2434,9 @@ int show_master_info(THD* thd, MASTER_INFO* mi) field_list.push_back(new Item_return_int("Seconds_Behind_Master", 10, MYSQL_TYPE_LONGLONG)); - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + 
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); if (mi->host[0]) { @@ -2281,12 +2517,12 @@ int show_master_info(THD* thd, MASTER_INFO* mi) if ((mi->slave_running == MYSQL_SLAVE_RUN_CONNECT) && mi->rli.slave_running) { - long tmp= (long)((time_t)time((time_t*) 0) - - mi->rli.last_master_timestamp) - - mi->clock_diff_with_master; + long time_diff= ((long)((time_t)time((time_t*) 0) + - mi->rli.last_master_timestamp) + - mi->clock_diff_with_master); /* - Apparently on some systems tmp can be <0. Here are possible reasons - related to MySQL: + Apparently on some systems time_diff can be <0. Here are possible + reasons related to MySQL: - the master is itself a slave of another master whose time is ahead. - somebody used an explicit SET TIMESTAMP on the master. Possible reason related to granularity-to-second of time functions @@ -2304,24 +2540,29 @@ int show_master_info(THD* thd, MASTER_INFO* mi) last_master_timestamp == 0 (an "impossible" timestamp 1970) is a special marker to say "consider we have caught up". */ - protocol->store((longlong)(mi->rli.last_master_timestamp ? max(0, tmp) - : 0)); + protocol->store((longlong)(mi->rli.last_master_timestamp ? 
+ max(0, time_diff) : 0)); } else protocol->store_null(); pthread_mutex_unlock(&mi->rli.data_lock); pthread_mutex_unlock(&mi->data_lock); - + if (my_net_write(&thd->net, (char*)thd->packet.ptr(), packet->length())) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } - -bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) +/* + RETURN + 2 - flush relay log failed + 1 - flush master info failed + 0 - all ok +*/ +int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) { IO_CACHE* file = &mi->file; char lbuf[22]; @@ -2340,8 +2581,9 @@ bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) When we come to this place in code, relay log may or not be initialized; the caller is responsible for setting 'flush_relay_log_cache' accordingly. */ - if (flush_relay_log_cache) - flush_io_cache(mi->rli.relay_log.get_log_file()); + if (flush_relay_log_cache && + flush_io_cache(mi->rli.relay_log.get_log_file())) + DBUG_RETURN(2); /* We flushed the relay log BEFORE the master.info file, because if we crash @@ -2353,13 +2595,13 @@ bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) */ /* - In certain cases this code may create master.info files that seems - corrupted, because of extra lines filled with garbage in the end - file (this happens if new contents take less space than previous - contents of file). But because of number of lines in the first line + In certain cases this code may create master.info files that seems + corrupted, because of extra lines filled with garbage in the end + file (this happens if new contents take less space than previous + contents of file). But because of number of lines in the first line of file we don't care about this garbage. 
*/ - + my_b_seek(file, 0L); my_b_printf(file, "%u\n%s\n%s\n%s\n%s\n%s\n%d\n%d\n%d\n%s\n%s\n%s\n%s\n%s\n", LINES_IN_MASTER_INFO_WITH_SSL, @@ -2368,8 +2610,7 @@ bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache) mi->password, mi->port, mi->connect_retry, (int)(mi->ssl), mi->ssl_ca, mi->ssl_capath, mi->ssl_cert, mi->ssl_cipher, mi->ssl_key); - flush_io_cache(file); - DBUG_RETURN(0); + DBUG_RETURN(-flush_io_cache(file)); } @@ -2387,6 +2628,7 @@ st_relay_log_info::st_relay_log_info() bzero((char*) &info_file, sizeof(info_file)); bzero((char*) &cache_buf, sizeof(cache_buf)); + cached_charset_invalidate(); pthread_mutex_init(&run_lock, MY_MUTEX_INIT_FAST); pthread_mutex_init(&data_lock, MY_MUTEX_INIT_FAST); pthread_mutex_init(&log_space_lock, MY_MUTEX_INIT_FAST); @@ -2407,6 +2649,7 @@ st_relay_log_info::~st_relay_log_info() pthread_cond_destroy(&start_cond); pthread_cond_destroy(&stop_cond); pthread_cond_destroy(&log_space_cond); + relay_log.cleanup(); } /* @@ -2444,17 +2687,16 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, ulong init_abort_pos_wait; int error=0; struct timespec abstime; // for timeout checking - set_timespec(abstime,timeout); - + const char *msg; DBUG_ENTER("wait_for_pos"); - DBUG_PRINT("enter",("group_master_log_name: '%s' pos: %lu timeout: %ld", - group_master_log_name, (ulong) group_master_log_pos, - (long) timeout)); + DBUG_PRINT("enter",("log_name: '%s' log_pos: %lu timeout: %lu", + log_name->c_ptr(), (ulong) log_pos, (ulong) timeout)); + set_timespec(abstime,timeout); pthread_mutex_lock(&data_lock); - const char *msg= thd->enter_cond(&data_cond, &data_lock, - "Waiting for the slave SQL thread to " - "advance position"); + msg= thd->enter_cond(&data_cond, &data_lock, + "Waiting for the slave SQL thread to " + "advance position"); /* This function will abort when it notices that some CHANGE MASTER or RESET MASTER has changed the master info. 
@@ -2511,6 +2753,12 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, bool pos_reached; int cmp_result= 0; + DBUG_PRINT("info", + ("init_abort_pos_wait: %ld abort_pos_wait: %ld", + init_abort_pos_wait, abort_pos_wait)); + DBUG_PRINT("info",("group_master_log_name: '%s' pos: %lu", + group_master_log_name, (ulong) group_master_log_pos)); + /* group_master_log_name can be "", if we are just after a fresh replication start or after a CHANGE MASTER TO MASTER_HOST/PORT @@ -2593,7 +2841,7 @@ err: thd->exit_cond(msg); DBUG_PRINT("exit",("killed: %d abort: %d slave_running: %d \ improper_arguments: %d timed_out: %d", - (int) thd->killed, + thd->killed_errno(), (int) (init_abort_pos_wait != abort_pos_wait), (int) slave_running, (int) (error == -2), @@ -2606,6 +2854,37 @@ improper_arguments: %d timed_out: %d", DBUG_RETURN( error ? error : event_count ); } +void set_slave_thread_options(THD* thd) +{ + /* + It's nonsense to constrain the slave threads with max_join_size; if a + query succeeded on master, we HAVE to execute it. So set + OPTION_BIG_SELECTS. Setting max_join_size to HA_POS_ERROR is not enough + (and it's not needed if we have OPTION_BIG_SELECTS) because an INSERT + SELECT examining more than 4 billion rows would still fail (yes, because + when max_join_size is 4G, OPTION_BIG_SELECTS is automatically set, but + only for client threads. 
+ */ + ulonglong options= thd->options | OPTION_BIG_SELECTS; + if (opt_log_slave_updates) + options|= OPTION_BIN_LOG; + else + options&= ~OPTION_BIN_LOG; + thd->options= options; + thd->variables.completion_type= 0; +} + +void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli) +{ + thd->variables.character_set_client= + global_system_variables.character_set_client; + thd->variables.collation_connection= + global_system_variables.collation_connection; + thd->variables.collation_server= + global_system_variables.collation_server; + thd->update_charset(); + rli->cached_charset_invalidate(); +} /* init_slave_thread() @@ -2616,7 +2895,7 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) DBUG_ENTER("init_slave_thread"); thd->system_thread = (thd_type == SLAVE_THD_SQL) ? SYSTEM_THREAD_SLAVE_SQL : SYSTEM_THREAD_SLAVE_IO; - thd->host_or_ip= ""; + thd->security_ctx->skip_grants(); my_net_init(&thd->net, 0); /* Adding MAX_LOG_EVENT_HEADER_LEN to the max_allowed_packet on all @@ -2625,20 +2904,8 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) */ thd->variables.max_allowed_packet= global_system_variables.max_allowed_packet + MAX_LOG_EVENT_HEADER; /* note, incr over the global not session var */ - thd->master_access= ~(ulong)0; - thd->priv_user = 0; thd->slave_thread = 1; - /* - It's nonsense to constrain the slave threads with max_join_size; if a - query succeeded on master, we HAVE to execute it. So set - OPTION_BIG_SELECTS. Setting max_join_size to HA_POS_ERROR is not enough - (and it's not needed if we have OPTION_BIG_SELECTS) because an INSERT - SELECT examining more than 4 billion rows would still fail (yes, because - when max_join_size is 4G, OPTION_BIG_SELECTS is automatically set, but - only for client threads. - */ - thd->options = ((opt_log_slave_updates) ? 
OPTION_BIN_LOG:0) | - OPTION_AUTO_IS_NULL | OPTION_BIG_SELECTS; + set_slave_thread_options(thd); thd->client_capabilities = CLIENT_LOCAL_FILES; thd->real_id=pthread_self(); pthread_mutex_lock(&LOCK_thread_count); @@ -2794,7 +3061,7 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) return packet_error; #endif - len = net_safe_read(mysql); + len = cli_safe_read(mysql); if (len == packet_error || (long) len < 1) { if (mysql_errno(mysql) == ER_NET_READ_INTERRUPTED) @@ -2807,8 +3074,7 @@ static ulong read_event(MYSQL* mysql, MASTER_INFO *mi, bool* suppress_warnings) *suppress_warnings= TRUE; } else - sql_print_error("Error reading packet from server: %s (\ -server_errno=%d)", + sql_print_error("Error reading packet from server: %s ( server_errno=%d)", mysql_error(mysql), mysql_errno(mysql)); return packet_error; } @@ -2816,13 +3082,13 @@ server_errno=%d)", /* Check if eof packet */ if (len < 8 && mysql->net.read_pos[0] == 254) { - sql_print_error("Slave: received end packet from server, apparent\ - master shutdown: %s", + sql_print_information("Slave: received end packet from server, apparent " + "master shutdown: %s", mysql_error(mysql)); return packet_error; } - DBUG_PRINT("info",( "len=%u, net->read_pos[4] = %d\n", + DBUG_PRINT("info",( "len: %lu net->read_pos[4]: %d\n", len, mysql->net.read_pos[4])); return len - 1; } @@ -2889,13 +3155,14 @@ bool st_relay_log_info::is_until_satisfied() if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN) { - /* - We have no cached comaprison results so we should compare log names - and cache result + /* + We have no cached comparison results so we should compare log names + and cache result. + If we are after RESET SLAVE, and the SQL slave thread has not processed + any event yet, it could be that group_master_log_name is "". In that case, + just wait for more events (as there is no sensible comparison to do). 
*/ - DBUG_ASSERT(*log_name || log_pos == 0); - if (*log_name) { const char *basename= log_name + dirname_length(log_name); @@ -2931,15 +3198,32 @@ bool st_relay_log_info::is_until_satisfied() } +void st_relay_log_info::cached_charset_invalidate() +{ + /* Full of zeroes means uninitialized. */ + bzero(cached_charset, sizeof(cached_charset)); +} + + +bool st_relay_log_info::cached_charset_compare(char *charset) +{ + if (bcmp(cached_charset, charset, sizeof(cached_charset))) + { + memcpy(cached_charset, charset, sizeof(cached_charset)); + return 1; + } + return 0; +} + + static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) { /* We acquire this mutex since we need it for all operations except - event execution. But we will release it in places where we will + event execution. But we will release it in places where we will wait for something for example inside of next_event(). */ pthread_mutex_lock(&rli->data_lock); - /* This tests if the position of the end of the last previous executed event hits the UNTIL barrier. @@ -2948,13 +3232,13 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) was an event ignored by the I/O thread just before (BUG#13861 to be fixed). */ - if (rli->until_condition!=RELAY_LOG_INFO::UNTIL_NONE && - rli->is_until_satisfied()) + if (rli->until_condition!=RELAY_LOG_INFO::UNTIL_NONE && + rli->is_until_satisfied()) { char buf[22]; sql_print_information("Slave SQL thread stopped because it reached its" " UNTIL position %s", llstr(rli->until_pos(), buf)); - /* + /* Setting abort_slave flag because we do not want additional message about error in query execution to be printed. 
*/ @@ -2962,11 +3246,11 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) pthread_mutex_unlock(&rli->data_lock); return 1; } - + Log_event * ev = next_event(rli); - + DBUG_ASSERT(rli->sql_thd==thd); - + if (sql_slave_killed(thd,rli)) { pthread_mutex_unlock(&rli->data_lock); @@ -2979,34 +3263,70 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) int exec_res; /* - Skip queries originating from this server or number of - queries specified by the user in slave_skip_counter - We can't however skip event's that has something to do with the + Queries originating from this server must be skipped. + Low-level events (Format_desc, Rotate, Stop) from this server + must also be skipped. But for those we don't want to modify + group_master_log_pos, because these events did not exist on the master. + Format_desc is not completely skipped. + Skip queries specified by the user in slave_skip_counter. + We can't however skip events that has something to do with the log files themselves. + Filtering on own server id is extremely important, to ignore execution of + events created by the creation/rotation of the relay log (remember that + now the relay log starts with its Format_desc, has a Rotate etc). */ - if ((ev->server_id == (uint32) ::server_id && !replicate_same_server_id) || - (rli->slave_skip_counter && type_code != ROTATE_EVENT)) + DBUG_PRINT("info",("type_code=%d, server_id=%d",type_code,ev->server_id)); + + if ((ev->server_id == (uint32) ::server_id && + !replicate_same_server_id && + type_code != FORMAT_DESCRIPTION_EVENT) || + (rli->slave_skip_counter && + type_code != ROTATE_EVENT && type_code != STOP_EVENT && + type_code != START_EVENT_V3 && type_code!= FORMAT_DESCRIPTION_EVENT)) { - rli->inc_group_relay_log_pos(ev->get_event_len(), - type_code != STOP_EVENT ? 
ev->log_pos : LL(0), - 1/* skip lock*/); - flush_relay_log_info(rli); + DBUG_PRINT("info", ("event skipped")); + if (thd->options & OPTION_BEGIN) + rli->inc_event_relay_log_pos(); + else + { + rli->inc_group_relay_log_pos((type_code == ROTATE_EVENT || + type_code == STOP_EVENT || + type_code == FORMAT_DESCRIPTION_EVENT) ? + LL(0) : ev->log_pos, + 1/* skip lock*/); + flush_relay_log_info(rli); + } /* - Protect against common user error of setting the counter to 1 - instead of 2 while recovering from an failed auto-increment insert + Protect against common user error of setting the counter to 1 + instead of 2 while recovering from an insert which used auto_increment, + rand or user var. */ - if (rli->slave_skip_counter && - !((type_code == INTVAR_EVENT || type_code == STOP_EVENT) && - rli->slave_skip_counter == 1)) + if (rli->slave_skip_counter && + !((type_code == INTVAR_EVENT || + type_code == RAND_EVENT || + type_code == USER_VAR_EVENT) && + rli->slave_skip_counter == 1) && + /* + The events from ourselves which have something to do with the relay + log itself must be skipped, true, but they mustn't decrement + rli->slave_skip_counter, because the user is supposed to not see + these events (they are not in the master's binlog) and if we + decremented, START SLAVE would for example decrement when it sees + the Rotate, so the event which the user probably wanted to skip + would not be skipped. 
+ */ + !(ev->server_id == (uint32) ::server_id && + (type_code == ROTATE_EVENT || type_code == STOP_EVENT || + type_code == START_EVENT_V3 || type_code == FORMAT_DESCRIPTION_EVENT))) --rli->slave_skip_counter; pthread_mutex_unlock(&rli->data_lock); - delete ev; - return 0; // avoid infinite update loops - } + delete ev; + return 0; // avoid infinite update loops + } pthread_mutex_unlock(&rli->data_lock); - + thd->server_id = ev->server_id; // use the original server id for logging thd->set_time(); // time the query thd->lex->current_select= 0; @@ -3015,7 +3335,16 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) ev->thd = thd; exec_res = ev->exec_event(rli); DBUG_ASSERT(rli->sql_thd==thd); - delete ev; + /* + Format_description_log_event should not be deleted because it will be + used to read info about the relay log's format; it will be deleted when + the SQL thread does not need it, i.e. when this thread terminates. + */ + if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT) + { + DBUG_PRINT("info", ("Deleting the event after it has been executed")); + delete ev; + } if (slave_trans_retries) { if (exec_res && @@ -3026,9 +3355,9 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) const char *errmsg; /* We were in a transaction which has been rolled back because of a - deadlock (currently, InnoDB deadlock detected by InnoDB) or lock - wait timeout (innodb_lock_wait_timeout exceeded); let's seek back to - BEGIN log event and retry it all again. + Sonera deadlock. if lock wait timeout (innodb_lock_wait_timeout exceeded) + there is no rollback since 5.0.13 (ref: manual). + let's seek back to BEGIN log event and retry it all again. 
We have to not only seek but also a) init_master_info(), to seek back to hot relay log's start for later (for when we will come back to this hot log after re-processing the @@ -3044,12 +3373,13 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) else if (init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos, - 1, &errmsg)) + 1, &errmsg, 1)) sql_print_error("Error initializing relay log position: %s", errmsg); else { exec_res= 0; + end_trans(thd, ROLLBACK); /* chance for concurrent connection to get more locks */ safe_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE), (CHECK_KILLED_FUNC)sql_slave_killed, (void*)rli); @@ -3067,9 +3397,17 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) "the slave_transaction_retries variable.", slave_trans_retries); } - if (!((thd->options & OPTION_BEGIN) && opt_using_transactions)) - rli->trans_retries= 0; // restart from fresh - } + else if (!((thd->options & OPTION_BEGIN) && opt_using_transactions)) + { + /* + Only reset the retry counter if the event succeeded or + failed with a non-transient error. On a successful event, + the execution will proceed as usual; in the case of a + non-transient error, the slave will stop with an error. 
+ */ + rli->trans_retries= 0; // restart from fresh + } + } return exec_res; } else @@ -3091,7 +3429,7 @@ on this slave.\ /* Slave I/O Thread entry point */ -extern "C" pthread_handler_decl(handle_slave_io,arg) +pthread_handler_t handle_slave_io(void *arg) { THD *thd; // needs to be first for thread_stack MYSQL *mysql; @@ -3099,14 +3437,14 @@ extern "C" pthread_handler_decl(handle_slave_io,arg) RELAY_LOG_INFO *rli= &mi->rli; char llbuff[22]; uint retry_count; - + // needs to call my_thread_init(), otherwise we get a coredump in DBUG_ stuff my_thread_init(); DBUG_ENTER("handle_slave_io"); #ifndef DBUG_OFF slave_begin: -#endif +#endif DBUG_ASSERT(mi->inited); mysql= NULL ; retry_count= 0; @@ -3115,14 +3453,15 @@ slave_begin: /* Inform waiting threads that slave has started */ mi->slave_run_id++; -#ifndef DBUG_OFF +#ifndef DBUG_OFF mi->events_till_abort = abort_slave_event_count; -#endif - +#endif + thd= new THD; // note that contructor of THD uses DBUG_ ! THD_CHECK_SENTRY(thd); pthread_detach_this_thread(); + thd->thread_stack= (char*) &thd; // remember where our stack is if (init_slave_thread(thd, SLAVE_THD_IO)) { pthread_cond_broadcast(&mi->start_cond); @@ -3131,7 +3470,6 @@ slave_begin: goto err; } mi->io_thd = thd; - thd->thread_stack = (char*)&thd; // remember where our stack is pthread_mutex_lock(&LOCK_thread_count); threads.append(thd); pthread_mutex_unlock(&LOCK_thread_count); @@ -3139,18 +3477,17 @@ slave_begin: mi->abort_slave = 0; pthread_mutex_unlock(&mi->run_lock); pthread_cond_broadcast(&mi->start_cond); - + DBUG_PRINT("master_info",("log_file_name: '%s' position: %s", mi->master_log_name, llstr(mi->master_log_pos,llbuff))); - + if (!(mi->mysql = mysql = mysql_init(NULL))) { sql_print_error("Slave I/O thread: error in mysql_init()"); goto err; } - thd->proc_info = "Connecting to master"; // we can get killed during safe_connect if (!safe_connect(thd, mysql, mi)) @@ -3169,7 +3506,7 @@ slave_begin: } else { - sql_print_error("Slave I/O thread killed while 
connecting to master"); + sql_print_information("Slave I/O thread killed while connecting to master"); goto err; } @@ -3181,7 +3518,8 @@ connected: thd->proc_info = "Checking master version"; if (get_master_version_and_clock(mysql, mi)) goto err; - if (!mi->old_format) + + if (mi->rli.relay_log.description_event_for_queue->binlog_version > 1) { /* Register ourselves with the master. @@ -3192,22 +3530,22 @@ connected: if (register_slave_on_master(mysql) || update_slave_list(mysql, mi)) goto err; } - + DBUG_PRINT("info",("Starting reading binary log from master")); while (!io_slave_killed(thd,mi)) { - bool suppress_warnings= 0; + bool suppress_warnings= 0; thd->proc_info = "Requesting binlog dump"; if (request_dump(mysql, mi, &suppress_warnings)) { sql_print_error("Failed on request_dump()"); if (io_slave_killed(thd,mi)) { - sql_print_error("Slave I/O thread killed while requesting master \ + sql_print_information("Slave I/O thread killed while requesting master \ dump"); goto err; } - + mi->slave_running= MYSQL_SLAVE_RUN_NOT_CONNECT; thd->proc_info= "Waiting to reconnect after a failed binlog dump request"; #ifdef SIGNAL_WITH_VIO_CLOSE @@ -3228,7 +3566,7 @@ dump"); } if (io_slave_killed(thd,mi)) { - sql_print_error("Slave I/O thread killed while retrying master \ + sql_print_information("Slave I/O thread killed while retrying master \ dump"); goto err; } @@ -3241,7 +3579,7 @@ reconnecting to try again, log '%s' at postion %s", IO_RPL_LOG_NAME, if (safe_reconnect(thd, mysql, mi, suppress_warnings) || io_slave_killed(thd,mi)) { - sql_print_error("Slave I/O thread killed during or \ + sql_print_information("Slave I/O thread killed during or \ after reconnect"); goto err; } @@ -3251,26 +3589,28 @@ after reconnect"); while (!io_slave_killed(thd,mi)) { - bool suppress_warnings= 0; - /* + ulong event_len; + + suppress_warnings= 0; + /* We say "waiting" because read_event() will wait if there's nothing to - read. But if there's something to read, it will not wait. 
The important - thing is to not confuse users by saying "reading" whereas we're in fact - receiving nothing. + read. But if there's something to read, it will not wait. The + important thing is to not confuse users by saying "reading" whereas + we're in fact receiving nothing. */ - thd->proc_info = "Waiting for master to send event"; - ulong event_len = read_event(mysql, mi, &suppress_warnings); + thd->proc_info= "Waiting for master to send event"; + event_len= read_event(mysql, mi, &suppress_warnings); if (io_slave_killed(thd,mi)) { if (global_system_variables.log_warnings) - sql_print_error("Slave I/O thread killed while reading event"); + sql_print_information("Slave I/O thread killed while reading event"); goto err; } - + if (event_len == packet_error) { uint mysql_error_number= mysql_errno(mysql); - if (mysql_error_number == ER_NET_PACKET_TOO_LARGE) + if (mysql_error_number == CR_NET_PACKET_TOO_LARGE) { sql_print_error("\ Log entry on master is longer than max_allowed_packet (%ld) on \ @@ -3297,30 +3637,30 @@ max_allowed_packet", goto err; // Don't retry forever safe_sleep(thd,mi->connect_retry,(CHECK_KILLED_FUNC)io_slave_killed, (void*) mi); - } + } if (io_slave_killed(thd,mi)) { if (global_system_variables.log_warnings) - sql_print_error("Slave I/O thread killed while waiting to \ + sql_print_information("Slave I/O thread killed while waiting to \ reconnect after a failed read"); goto err; } thd->proc_info = "Reconnecting after a failed master event read"; if (!suppress_warnings) - sql_print_error("Slave I/O thread: Failed reading log event, \ + sql_print_information("Slave I/O thread: Failed reading log event, \ reconnecting to retry, log '%s' position %s", IO_RPL_LOG_NAME, llstr(mi->master_log_pos, llbuff)); if (safe_reconnect(thd, mysql, mi, suppress_warnings) || io_slave_killed(thd,mi)) { if (global_system_variables.log_warnings) - sql_print_error("Slave I/O thread killed during or after a \ + sql_print_information("Slave I/O thread killed during or 
after a \ reconnect done to recover from failed read"); goto err; } goto connected; } // if (event_len == packet_error) - + retry_count=0; // ok event, reset retry counter thd->proc_info = "Queueing master event to the relay log"; if (queue_event(mi,(const char*)mysql->net.read_pos + 1, @@ -3329,7 +3669,11 @@ reconnect done to recover from failed read"); sql_print_error("Slave I/O thread could not queue event from master"); goto err; } - flush_master_info(mi, 1); /* sure that we can flush the relay log */ + if (flush_master_info(mi, 1)) + { + sql_print_error("Failed to flush master info file"); + goto err; + } /* See if the relay logs take too much space. We don't lock mi->rli.log_space_lock here; this dirty read saves time @@ -3376,25 +3720,38 @@ log space"); // error = 0; err: // print the current replication position - sql_print_error("Slave I/O thread exiting, read up to log '%s', position %s", + sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s", IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff)); VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query = thd->db = 0; // extra safety - thd->query_length= thd->db_length= 0; + thd->query= 0; // extra safety + thd->query_length= 0; + thd->reset_db(NULL, 0); VOID(pthread_mutex_unlock(&LOCK_thread_count)); if (mysql) { + /* + Here we need to clear the active VIO before closing the + connection with the master. The reason is that THD::awake() + might be called from terminate_slave_thread() because somebody + issued a STOP SLAVE. If that happends, the close_active_vio() + can be called in the middle of closing the VIO associated with + the 'mysql' object, causing a crash. 
+ */ +#ifdef SIGNAL_WITH_VIO_CLOSE + thd->clear_active_vio(); +#endif mysql_close(mysql); mi->mysql=0; } write_ignored_events_info_to_relay_log(thd, mi); thd->proc_info = "Waiting for slave mutex on exit"; pthread_mutex_lock(&mi->run_lock); - mi->slave_running = 0; - mi->io_thd = 0; + + /* Forget the relay log's format */ + delete mi->rli.relay_log.description_event_for_queue; + mi->rli.relay_log.description_event_for_queue= 0; // TODO: make rpl_status part of MASTER_INFO change_rpl_status(RPL_ACTIVE_SLAVE,RPL_IDLE_SLAVE); - mi->abort_slave = 0; // TODO: check if this is needed DBUG_ASSERT(thd->net.buff != 0); net_end(&thd->net); // destructor will not free it, because net.vio is 0 close_thread_tables(thd, 0); @@ -3402,12 +3759,20 @@ err: THD_CHECK_SENTRY(thd); delete thd; pthread_mutex_unlock(&LOCK_thread_count); - pthread_cond_broadcast(&mi->stop_cond); // tell the world we are done + mi->abort_slave= 0; + mi->slave_running= 0; + mi->io_thd= 0; + /* + Note: the order of the two following calls (first broadcast, then unlock) + is important. Otherwise a killer_thread can execute between the calls and + delete the mi structure leading to a crash! 
(see BUG#25306 for details) + */ + pthread_cond_broadcast(&mi->stop_cond); // tell the world we are done pthread_mutex_unlock(&mi->run_lock); #ifndef DBUG_OFF if (abort_slave_event_count && !events_till_abort) goto slave_begin; -#endif +#endif my_thread_end(); pthread_exit(0); DBUG_RETURN(0); // Can't return anything here @@ -3416,11 +3781,11 @@ err: /* Slave SQL Thread entry point */ -extern "C" pthread_handler_decl(handle_slave_sql,arg) +pthread_handler_t handle_slave_sql(void *arg) { THD *thd; /* needs to be first for thread_stack */ char llbuff[22],llbuff1[22]; - RELAY_LOG_INFO* rli = &((MASTER_INFO*)arg)->rli; + RELAY_LOG_INFO* rli = &((MASTER_INFO*)arg)->rli; const char *errmsg; // needs to call my_thread_init(), otherwise we get a coredump in DBUG_ stuff @@ -3428,7 +3793,7 @@ extern "C" pthread_handler_decl(handle_slave_sql,arg) DBUG_ENTER("handle_slave_sql"); #ifndef DBUG_OFF -slave_begin: +slave_begin: #endif DBUG_ASSERT(rli->inited); @@ -3497,15 +3862,38 @@ slave_begin: if (init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos, - 1 /*need data lock*/, &errmsg)) + 1 /*need data lock*/, &errmsg, + 1 /*look for a description_event*/)) { sql_print_error("Error initializing relay log position: %s", errmsg); goto err; } THD_CHECK_SENTRY(thd); - DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); - DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); +#ifndef DBUG_OFF + { + char llbuf1[22], llbuf2[22]; + DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", + llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(rli->event_relay_log_pos,llbuf2))); + DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); + /* + Wonder if this is correct. I (Guilhem) wonder if my_b_tell() returns the + correct position when it's called just after my_b_seek() (the questionable + stuff is those "seek is done on next read" comments in the my_b_seek() + source code). 
+ The crude reality is that this assertion randomly fails whereas + replication seems to work fine. And there is no easy explanation why it + fails (as we my_b_seek(rli->event_relay_log_pos) at the very end of + init_relay_log_pos() called above). Maybe the assertion would be + meaningful if we held rli->data_lock between the my_b_seek() and the + DBUG_ASSERT(). + */ +#ifdef SHOULD_BE_CHECKED + DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); +#endif + } +#endif DBUG_ASSERT(rli->sql_thd == thd); DBUG_PRINT("master_info",("log_file_name: %s position: %s", @@ -3549,13 +3937,21 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ } /* Thread stopped. Print the current replication position to the log */ - sql_print_information("Slave SQL thread exiting, replication stopped in log \ - '%s' at position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff)); + sql_print_information("Slave SQL thread exiting, replication stopped in log " + "'%s' at position %s", + RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff)); err: VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query = thd->db = 0; // extra safety - thd->query_length= thd->db_length= 0; + /* + Some extra safety, which should not been needed (normally, event deletion + should already have done these assignments (each event which sets these + variables is supposed to set them to 0 before terminating)). + */ + thd->catalog= 0; + thd->reset_db(NULL, 0); + thd->query= 0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->proc_info = "Waiting for slave mutex on exit"; pthread_mutex_lock(&rli->run_lock); @@ -3564,16 +3960,16 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ DBUG_ASSERT(rli->slave_running == 1); // tracking buffer overrun /* When master_pos_wait() wakes up it will check this and terminate */ rli->slave_running= 0; - /* - Going out of the transaction. 
Necessary to mark it, in case the user - restarts replication from a non-transactional statement (with CHANGE - MASTER). - */ + /* Forget the relay log's format */ + delete rli->relay_log.description_event_for_exec; + rli->relay_log.description_event_for_exec= 0; /* Wake up master_pos_wait() */ pthread_mutex_unlock(&rli->data_lock); DBUG_PRINT("info",("Signaling possibly waiting master_pos_wait() functions")); pthread_cond_broadcast(&rli->data_cond); rli->ignore_log_space_limit= 0; /* don't need any lock */ + /* we die so won't remember charset - re-update them on next thread start */ + rli->cached_charset_invalidate(); rli->save_temporary_tables = thd->temporary_tables; /* @@ -3590,11 +3986,24 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ THD_CHECK_SENTRY(thd); delete thd; pthread_mutex_unlock(&LOCK_thread_count); + + /* + Note: the order of the broadcast and unlock calls below (first broadcast, then unlock) + is important. Otherwise a killer_thread can execute between the calls and + delete the mi structure leading to a crash! 
(see BUG#25306 for details) + */ pthread_cond_broadcast(&rli->stop_cond); - // tell the world we are done - pthread_mutex_unlock(&rli->run_lock); +#ifndef DBUG_OFF + /* + Bug #19938 Valgrind error (race) in handle_slave_sql() + Read the value of rli->event_till_abort before releasing the mutex + */ + const int eta= rli->events_till_abort; +#endif + pthread_mutex_unlock(&rli->run_lock); // tell the world we are done + #ifndef DBUG_OFF // TODO: reconsider the code below - if (abort_slave_event_count && !rli->events_till_abort) + if (abort_slave_event_count && !eta) goto slave_begin; #endif my_thread_end(); @@ -3666,7 +4075,7 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev) if (unlikely(cev_not_written)) break; Execute_load_log_event xev(thd,0,0); - xev.log_pos = mi->master_log_pos; + xev.log_pos = cev->log_pos; if (unlikely(mi->rli.relay_log.append(&xev))) { sql_print_error("Slave I/O: error writing Exec_load event to \ @@ -3680,7 +4089,6 @@ relay log"); { cev->block = (char*)net->read_pos; cev->block_len = num_bytes; - cev->log_pos = mi->master_log_pos; if (unlikely(mi->rli.relay_log.append(cev))) { sql_print_error("Slave I/O: error writing Create_file event to \ @@ -3694,7 +4102,7 @@ relay log"); { aev.block = (char*)net->read_pos; aev.block_len = num_bytes; - aev.log_pos = mi->master_log_pos; + aev.log_pos = cev->log_pos; if (unlikely(mi->rli.relay_log.append(&aev))) { sql_print_error("Slave I/O: error writing Append_block event to \ @@ -3722,6 +4130,7 @@ err: DESCRIPTION Updates the master info with the place in the next binary log where we should start reading. + Rotate the relay log to avoid mixed-format relay logs. 
NOTES We assume we already locked mi->data_lock @@ -3743,7 +4152,7 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev) /* Safe copy as 'rev' has been "sanitized" in Rotate_log_event's ctor */ memcpy(mi->master_log_name, rev->new_log_ident, rev->ident_len+1); mi->master_log_pos= rev->pos; - DBUG_PRINT("info", ("master_log_pos: '%s' %d", + DBUG_PRINT("info", ("master_log_pos: '%s' %lu", mi->master_log_name, (ulong) mi->master_log_pos)); #ifndef DBUG_OFF /* @@ -3753,21 +4162,34 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev) if (disconnect_slave_event_count) events_till_disconnect++; #endif + + /* + If description_event_for_queue is format <4, there is conversion in the + relay log to the slave's format (4). And Rotate can mean upgrade or + nothing. If upgrade, it's to 5.0 or newer, so we will get a Format_desc, so + no need to reset description_event_for_queue now. And if it's nothing (same + master version as before), no need (still using the slave's format). + */ + if (mi->rli.relay_log.description_event_for_queue->binlog_version >= 4) + { + delete mi->rli.relay_log.description_event_for_queue; + /* start from format 3 (MySQL 4.0) again */ + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(3); + } + /* + Rotate the relay log makes binlog format detection easier (at next slave + start or mysqlbinlog) + */ + rotate_relay_log(mi); /* will take the right mutexes */ DBUG_RETURN(0); } - /* - queue_old_event() - - Writes a 3.23 event to the relay log. - - TODO: - Test this code before release - it has to be tested on a separate - setup with 3.23 master + Reads a 3.23 event and converts it to the slave's format. This code was + copied from MySQL 4.0. 
*/ - -static int queue_old_event(MASTER_INFO *mi, const char *buf, +static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, ulong event_len) { const char *errmsg = 0; @@ -3775,7 +4197,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, bool ignore_event= 0; char *tmp_buf = 0; RELAY_LOG_INFO *rli= &mi->rli; - DBUG_ENTER("queue_old_event"); + DBUG_ENTER("queue_binlog_ver_1_event"); /* If we get Load event, we need to pass a non-reusable buffer @@ -3807,7 +4229,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, connected to the master). */ Log_event *ev = Log_event::read_log_event(buf,event_len, &errmsg, - 1 /*old format*/ ); + mi->rli.relay_log.description_event_for_queue); if (unlikely(!ev)) { sql_print_error("Read invalid event from master: '%s',\ @@ -3817,7 +4239,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, DBUG_RETURN(1); } pthread_mutex_lock(&mi->data_lock); - ev->log_pos = mi->master_log_pos; + ev->log_pos= mi->master_log_pos; /* 3.23 events don't contain log_pos */ switch (ev->get_type_code()) { case STOP_EVENT: ignore_event= 1; @@ -3841,15 +4263,13 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, */ { /* We come here when and only when tmp_buf != 0 */ - DBUG_ASSERT(tmp_buf); + DBUG_ASSERT(tmp_buf != 0); + inc_pos=event_len; + ev->log_pos+= inc_pos; int error = process_io_create_file(mi,(Create_file_log_event*)ev); delete ev; - /* - We had incremented event_len, but now when it is used to calculate the - position in the master's log, we must use the original value. 
- */ - mi->master_log_pos += --event_len; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + mi->master_log_pos += inc_pos; + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); pthread_mutex_unlock(&mi->data_lock); my_free((char*)tmp_buf, MYF(0)); DBUG_RETURN(error); @@ -3860,6 +4280,12 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, } if (likely(!ignore_event)) { + if (ev->log_pos) + /* + Don't do it for fake Rotate events (see comment in + Log_event::Log_event(const char* buf...) in log_event.cc). + */ + ev->log_pos+= event_len; /* make log_pos be the pos of the end of the event */ if (unlikely(rli->relay_log.append(ev))) { delete ev; @@ -3870,15 +4296,103 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, } delete ev; mi->master_log_pos+= inc_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); pthread_mutex_unlock(&mi->data_lock); DBUG_RETURN(0); } +/* + Reads a 4.0 event and converts it to the slave's format. This code was copied + from queue_binlog_ver_1_event(), with some affordable simplifications. 
+*/ +static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf, + ulong event_len) +{ + const char *errmsg = 0; + ulong inc_pos; + char *tmp_buf = 0; + RELAY_LOG_INFO *rli= &mi->rli; + DBUG_ENTER("queue_binlog_ver_3_event"); + + /* read_log_event() will adjust log_pos to be end_log_pos */ + Log_event *ev = Log_event::read_log_event(buf,event_len, &errmsg, + mi->rli.relay_log.description_event_for_queue); + if (unlikely(!ev)) + { + sql_print_error("Read invalid event from master: '%s',\ + master could be corrupt but a more likely cause of this is a bug", + errmsg); + my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR)); + DBUG_RETURN(1); + } + pthread_mutex_lock(&mi->data_lock); + switch (ev->get_type_code()) { + case STOP_EVENT: + goto err; + case ROTATE_EVENT: + if (unlikely(process_io_rotate(mi,(Rotate_log_event*)ev))) + { + delete ev; + pthread_mutex_unlock(&mi->data_lock); + DBUG_RETURN(1); + } + inc_pos= 0; + break; + default: + inc_pos= event_len; + break; + } + if (unlikely(rli->relay_log.append(ev))) + { + delete ev; + pthread_mutex_unlock(&mi->data_lock); + DBUG_RETURN(1); + } + rli->relay_log.harvest_bytes_written(&rli->log_space_total); + delete ev; + mi->master_log_pos+= inc_pos; +err: + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); + pthread_mutex_unlock(&mi->data_lock); + DBUG_RETURN(0); +} + +/* + queue_old_event() + + Writes a 3.23 or 4.0 event to the relay log, after converting it to the 5.0 + (exactly, slave's) format. To do the conversion, we create a 5.0 event from + the 3.23/4.0 bytes, then write this event to the relay log. 
+ + TODO: + Test this code before release - it has to be tested on a separate + setup with 3.23 master or 4.0 master +*/ + +static int queue_old_event(MASTER_INFO *mi, const char *buf, + ulong event_len) +{ + switch (mi->rli.relay_log.description_event_for_queue->binlog_version) + { + case 1: + return queue_binlog_ver_1_event(mi,buf,event_len); + case 3: + return queue_binlog_ver_3_event(mi,buf,event_len); + default: /* unsupported format; eg version 2 */ + DBUG_PRINT("info",("unsupported binlog format %d in queue_old_event()", + mi->rli.relay_log.description_event_for_queue->binlog_version)); + return 1; + } +} /* queue_event() + If the event is 3.23/4.0, passes it to queue_old_event() which will convert + it. Otherwise, writes a 5.0 (or newer) event to the relay log. Then there is + no format conversion, it's pure read/write of bytes. + So a 5.0.0 slave's relay log can contain events in the slave's format or in + any >=5.0.0 format. */ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) @@ -3889,7 +4403,10 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) pthread_mutex_t *log_lock= rli->relay_log.get_log_lock(); DBUG_ENTER("queue_event"); - if (mi->old_format) + LINT_INIT(inc_pos); + + if (mi->rli.relay_log.description_event_for_queue->binlog_version<4 && + buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT /* a way to escape */) DBUG_RETURN(queue_old_event(mi,buf,event_len)); pthread_mutex_lock(&mi->data_lock); @@ -3901,7 +4418,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) master server shutdown. The only thing this does is cleaning. But cleaning is already done on a per-master-thread basis (as the master server is shutting down cleanly, it has written all DROP TEMPORARY TABLE - and DO RELEASE_LOCK; prepared statements' deletion are TODO). + prepared statements' deletion are TODO only when we binlog prep stmts). We don't even increment mi->master_log_pos, because we may be just after a Rotate event. 
Btw, in a few milliseconds we are going to have a Start @@ -3911,7 +4428,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) goto err; case ROTATE_EVENT: { - Rotate_log_event rev(buf,event_len,0); + Rotate_log_event rev(buf,event_len,mi->rli.relay_log.description_event_for_queue); if (unlikely(process_io_rotate(mi,&rev))) { error= 1; @@ -3924,6 +4441,42 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) inc_pos= 0; break; } + case FORMAT_DESCRIPTION_EVENT: + { + /* + Create an event, and save it (when we rotate the relay log, we will have + to write this event again). + */ + /* + We are the only thread which reads/writes description_event_for_queue. + The relay_log struct does not move (though some members of it can + change), so we needn't any lock (no rli->data_lock, no log lock). + */ + Format_description_log_event* tmp; + const char* errmsg; + if (!(tmp= (Format_description_log_event*) + Log_event::read_log_event(buf, event_len, &errmsg, + mi->rli.relay_log.description_event_for_queue))) + { + error= 2; + goto err; + } + delete mi->rli.relay_log.description_event_for_queue; + mi->rli.relay_log.description_event_for_queue= tmp; + /* + Though this does some conversion to the slave's format, this will + preserve the master's binlog format version, and number of event types. + */ + /* + If the event was not requested by the slave (the slave did not ask for + it), i.e. has end_log_pos=0, we do not increment mi->master_log_pos + */ + inc_pos= uint4korr(buf+LOG_POS_OFFSET) ? event_len : 0; + DBUG_PRINT("info",("binlog format is now %d", + mi->rli.relay_log.description_event_for_queue->binlog_version)); + + } + break; default: inc_pos= event_len; break; @@ -3957,29 +4510,42 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) the slave SQL thread, otherwise that thread may let rli->group_relay_log_pos stay too small if the last binlog's event is ignored. 
+ But events which were generated by this slave and which do not exist in + the master's binlog (i.e. Format_desc, Rotate & Stop) should not increment + mi->master_log_pos. */ - mi->master_log_pos+= inc_pos; - memcpy(rli->ign_master_log_name_end, mi->master_log_name, FN_REFLEN); - DBUG_ASSERT(rli->ign_master_log_name_end[0]); - rli->ign_master_log_pos_end= mi->master_log_pos; + if (buf[EVENT_TYPE_OFFSET]!=FORMAT_DESCRIPTION_EVENT && + buf[EVENT_TYPE_OFFSET]!=ROTATE_EVENT && + buf[EVENT_TYPE_OFFSET]!=STOP_EVENT) + { + mi->master_log_pos+= inc_pos; + memcpy(rli->ign_master_log_name_end, mi->master_log_name, FN_REFLEN); + DBUG_ASSERT(rli->ign_master_log_name_end[0]); + rli->ign_master_log_pos_end= mi->master_log_pos; + } rli->relay_log.signal_update(); // the slave SQL thread needs to re-check - DBUG_PRINT("info", ("master_log_pos: %d, event originating from the same server, ignored", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu event originating from the same server, ignored", + (ulong) mi->master_log_pos)); } else { /* write the event to the relay log */ - if (likely(!(error= rli->relay_log.appendv(buf,event_len,0)))) + if (likely(!(rli->relay_log.appendv(buf,event_len,0)))) { mi->master_log_pos+= inc_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); rli->relay_log.harvest_bytes_written(&rli->log_space_total); } + else + error= 3; rli->ign_master_log_name_end[0]= 0; // last event is not ignored } pthread_mutex_unlock(log_lock); + err: pthread_mutex_unlock(&mi->data_lock); + DBUG_PRINT("info", ("error: %d", error)); DBUG_RETURN(error); } @@ -4004,6 +4570,7 @@ void end_relay_log_info(RELAY_LOG_INFO* rli) } rli->inited = 0; rli->relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT); + rli->relay_log.harvest_bytes_written(&rli->log_space_total); /* Delete the slave's temporary tables from memory. 
In the future there will be other actions than this, to ensure persistance @@ -4132,6 +4699,7 @@ replication resumed in log '%s' at position %s", mi->user, thd->set_active_vio(mysql->net.vio); #endif } + mysql->reconnect= 1; DBUG_PRINT("exit",("slave_was_killed: %d", slave_was_killed)); DBUG_RETURN(slave_was_killed); } @@ -4225,6 +4793,7 @@ static IO_CACHE *reopen_relay_log(RELAY_LOG_INFO *rli, const char **errmsg) relay_log_pos Current log pos pending Number of bytes already processed from the event */ + rli->event_relay_log_pos= max(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE); my_b_seek(cur_log,rli->event_relay_log_pos); DBUG_RETURN(cur_log); } @@ -4292,26 +4861,37 @@ Log_event* next_event(RELAY_LOG_INFO* rli) goto err; #ifndef DBUG_OFF { + /* This is an assertion which sometimes fails, let's try to track it */ char llbuf1[22], llbuf2[22]; + DBUG_PRINT("info", ("my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", + llstr(my_b_tell(cur_log),llbuf1), + llstr(rli->event_relay_log_pos,llbuf2))); DBUG_ASSERT(my_b_tell(cur_log) >= BIN_LOG_HEADER_SIZE); - /* - The next assertion sometimes (very rarely) fails, let's try to track - it - */ - DBUG_PRINT("info", ("\ -Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", - llstr(my_b_tell(cur_log),llbuf1), - llstr(rli->group_relay_log_pos,llbuf2))); - DBUG_ASSERT(my_b_tell(cur_log) == rli->event_relay_log_pos); + DBUG_ASSERT(my_b_tell(cur_log) == rli->event_relay_log_pos); } #endif /* Relay log is always in new format - if the master is 3.23, the - I/O thread will convert the format for us + I/O thread will convert the format for us. + A problem: the description event may be in a previous relay log. So if + the slave has been shutdown meanwhile, we would have to look in old relay + logs, which may even have been deleted. So we need to write this + description event at the beginning of the relay log. 
+ When the relay log is created when the I/O thread starts, easy: the + master will send the description event and we will queue it. + But if the relay log is created by new_file(): then the solution is: + MYSQL_LOG::open() will write the buffered description event. */ - if ((ev=Log_event::read_log_event(cur_log,0,(bool)0 /* new format */))) + if ((ev=Log_event::read_log_event(cur_log,0, + rli->relay_log.description_event_for_exec))) + { DBUG_ASSERT(thd==rli->sql_thd); + /* + read it while we have a lock, to avoid a mutex lock in + inc_event_relay_log_pos() + */ + rli->future_event_relay_log_pos= my_b_tell(cur_log); if (hot_log) pthread_mutex_unlock(log_lock); DBUG_RETURN(ev); @@ -4359,8 +4939,7 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", DBUG_PRINT("info",("seeing an ignored end segment")); ev= new Rotate_log_event(thd, rli->ign_master_log_name_end, 0, rli->ign_master_log_pos_end, - Rotate_log_event::DUP_NAME | - Rotate_log_event::ZERO_LEN); + Rotate_log_event::DUP_NAME); rli->ign_master_log_name_end[0]= 0; pthread_mutex_unlock(log_lock); if (unlikely(!ev)) @@ -4490,8 +5069,8 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", { #ifdef EXTRA_DEBUG if (global_system_variables.log_warnings) - sql_print_error("next log '%s' is currently active", - rli->linfo.log_file_name); + sql_print_information("next log '%s' is currently active", + rli->linfo.log_file_name); #endif rli->cur_log= cur_log= rli->relay_log.get_log_file(); rli->cur_log_old_open_count= rli->relay_log.get_open_count(); @@ -4520,8 +5099,8 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", */ #ifdef EXTRA_DEBUG if (global_system_variables.log_warnings) - sql_print_error("next log '%s' is not active", - rli->linfo.log_file_name); + sql_print_information("next log '%s' is not active", + rli->linfo.log_file_name); #endif // open_binlog() will check the magic header if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name, @@ -4547,7 
+5126,11 @@ event(errno: %d cur_log->error: %d)", } } if (!errmsg && global_system_variables.log_warnings) - errmsg = "slave SQL thread was killed"; + { + sql_print_information("Error reading relay log event: %s", + "slave SQL thread was killed"); + DBUG_RETURN(0); + } err: if (errmsg) @@ -4567,9 +5150,9 @@ void rotate_relay_log(MASTER_INFO* mi) DBUG_ENTER("rotate_relay_log"); RELAY_LOG_INFO* rli= &mi->rli; - lock_slave_threads(mi); - pthread_mutex_lock(&mi->data_lock); - pthread_mutex_lock(&rli->data_lock); + /* We don't lock rli->run_lock. This would lead to deadlocks. */ + pthread_mutex_lock(&mi->run_lock); + /* We need to test inited because otherwise, new_file() will attempt to lock LOCK_log, which may not be inited (if we're not a slave). @@ -4598,14 +5181,76 @@ void rotate_relay_log(MASTER_INFO* mi) */ rli->relay_log.harvest_bytes_written(&rli->log_space_total); end: - pthread_mutex_unlock(&rli->data_lock); - pthread_mutex_unlock(&mi->data_lock); - unlock_slave_threads(mi); + pthread_mutex_unlock(&mi->run_lock); DBUG_VOID_RETURN; } -#ifdef __GNUC__ +/** + Detects, based on master's version (as found in the relay log), if master + has a certain bug. + @param rli RELAY_LOG_INFO which tells the master's version + @param bug_id Number of the bug as found in bugs.mysql.com + @return TRUE if master has the bug, FALSE if it does not. 
+*/ +bool rpl_master_has_bug(RELAY_LOG_INFO *rli, uint bug_id) +{ + struct st_version_range_for_one_bug { + uint bug_id; + const uchar introduced_in[3]; // first version with bug + const uchar fixed_in[3]; // first version with fix + }; + static struct st_version_range_for_one_bug versions_for_all_bugs[]= + { + {24432, { 5, 0, 24 }, { 5, 0, 38 } }, + {24432, { 5, 1, 12 }, { 5, 1, 17 } } + }; + const uchar *master_ver= + rli->relay_log.description_event_for_exec->server_version_split; + + DBUG_ASSERT(sizeof(rli->relay_log.description_event_for_exec->server_version_split) == 3); + + for (uint i= 0; + i < sizeof(versions_for_all_bugs)/sizeof(*versions_for_all_bugs);i++) + { + const uchar *introduced_in= versions_for_all_bugs[i].introduced_in, + *fixed_in= versions_for_all_bugs[i].fixed_in; + if ((versions_for_all_bugs[i].bug_id == bug_id) && + (memcmp(introduced_in, master_ver, 3) <= 0) && + (memcmp(fixed_in, master_ver, 3) > 0)) + { + // a verbose message for the error log + slave_print_error(rli, ER_UNKNOWN_ERROR, + "According to the master's version ('%s')," + " it is probable that master suffers from this bug:" + " http://bugs.mysql.com/bug.php?id=%u" + " and thus replicating the current binary log event" + " may make the slave's data become different from the" + " master's data." + " To take no risk, slave refuses to replicate" + " this event and stops." + " We recommend that all updates be stopped on the" + " master and slave, that the data of both be" + " manually synchronized," + " that master's binary logs be deleted," + " that master be upgraded to a version at least" + " equal to '%d.%d.%d'. 
Then replication can be" + " restarted.", + rli->relay_log.description_event_for_exec->server_version, + bug_id, + fixed_in[0], fixed_in[1], fixed_in[2]); + // a short message for SHOW SLAVE STATUS (message length constraints) + my_printf_error(ER_UNKNOWN_ERROR, "master may suffer from" + " http://bugs.mysql.com/bug.php?id=%u" + " so slave stops; check error log on slave" + " for more info", MYF(0), bug_id); + return TRUE; + } + } + return FALSE; +} + +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class I_List_iterator<i_string>; template class I_List_iterator<i_string_pair>; #endif diff --git a/sql/slave.h b/sql/slave.h index dccfcf01a8f..e7d4456ccd9 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -1,15 +1,14 @@ /* Copyright (C) 2000-2003 MySQL AB - + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - + the Free Software Foundation; version 2 of the License. + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ @@ -93,11 +92,6 @@ extern my_bool opt_log_slave_updates; extern ulonglong relay_log_space_limit; struct st_master_info; -enum enum_binlog_formats { - BINLOG_FORMAT_CURRENT=0, /* 0 is important for easy 'if (mi->old_format)' */ - BINLOG_FORMAT_323_LESS_57, - BINLOG_FORMAT_323_GEQ_57 }; - /* 3 possible values for MASTER_INFO::slave_running and RELAY_LOG_INFO::slave_running. 
@@ -214,6 +208,8 @@ typedef struct st_relay_log_info ulonglong group_relay_log_pos; char event_relay_log_name[FN_REFLEN]; ulonglong event_relay_log_pos; + ulonglong future_event_relay_log_pos; + /* Original log name and position of the group we're currently executing (whose coordinates are group_relay_log_name/pos in the relay log) @@ -295,6 +291,7 @@ typedef struct st_relay_log_info UNTIL_LOG_NAMES_CMP_EQUAL= 0, UNTIL_LOG_NAMES_CMP_GREATER= 1 } until_log_names_cmp_result; + char cached_charset[6]; /* trans_retries varies between 0 to slave_transaction_retries and counts how many times the slave has retried the present transaction; gets reset to 0 @@ -338,12 +335,14 @@ typedef struct st_relay_log_info until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_UNKNOWN; } - inline void inc_event_relay_log_pos(ulonglong val) + inline void inc_event_relay_log_pos() { - event_relay_log_pos+= val; + event_relay_log_pos= future_event_relay_log_pos; } - void inc_group_relay_log_pos(ulonglong val, ulonglong log_pos, bool skip_lock=0); + void inc_group_relay_log_pos(ulonglong log_pos, + bool skip_lock=0); + int wait_for_pos(THD* thd, String* log_name, longlong log_pos, longlong timeout); void close_temporary_tables(); @@ -355,6 +354,14 @@ typedef struct st_relay_log_info return ((until_condition == UNTIL_MASTER_POS) ? group_master_log_pos : group_relay_log_pos); } + /* + Last charset (6 bytes) seen by slave SQL thread is cached here; it helps + the thread save 3 get_charset() per Query_log_event if the charset is not + changing from event to event (common situation). + When the 6 bytes are equal to 0 is used to mean "cache is invalidated". 
+ */ + void cached_charset_invalidate(); + bool cached_charset_compare(char *charset); } RELAY_LOG_INFO; @@ -403,11 +410,11 @@ typedef struct st_master_info my_bool ssl; // enables use of SSL connection if true char ssl_ca[FN_REFLEN], ssl_capath[FN_REFLEN], ssl_cert[FN_REFLEN]; char ssl_cipher[FN_REFLEN], ssl_key[FN_REFLEN]; - + my_off_t master_log_pos; File fd; // we keep the file open, so we need to remember the file pointer IO_CACHE file; - + pthread_mutex_t data_lock,run_lock; pthread_cond_t data_cond,start_cond,stop_cond; THD *io_thd; @@ -420,11 +427,10 @@ typedef struct st_master_info int events_till_abort; #endif bool inited; - enum enum_binlog_formats old_format; volatile bool abort_slave; volatile uint slave_running; volatile ulong slave_run_id; - /* + /* The difference in seconds between the clock of the master and the clock of the slave (second - first). It must be signed as it may be <0 or >0. clock_diff_with_master is computed when the I/O thread starts; for this the @@ -433,16 +439,16 @@ typedef struct st_master_info clock_of_slave - last_timestamp_executed_by_SQL_thread - clock_diff_with_master */ - long clock_diff_with_master; - + long clock_diff_with_master; + st_master_info() - :ssl(0), fd(-1), io_thd(0), inited(0), old_format(BINLOG_FORMAT_CURRENT), + :ssl(0), fd(-1), io_thd(0), inited(0), abort_slave(0),slave_running(0), slave_run_id(0) { host[0] = 0; user[0] = 0; password[0] = 0; ssl_ca[0]= 0; ssl_capath[0]= 0; ssl_cert[0]= 0; ssl_cipher[0]= 0; ssl_key[0]= 0; - + bzero((char*) &file, sizeof(file)); pthread_mutex_init(&run_lock, MY_MUTEX_INIT_FAST); pthread_mutex_init(&data_lock, MY_MUTEX_INIT_FAST); @@ -493,7 +499,7 @@ typedef struct st_table_rule_ent int init_slave(); void init_slave_skip_errors(const char* arg); -bool flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache); +int flush_master_info(MASTER_INFO* mi, bool flush_relay_log_cache); bool flush_relay_log_info(RELAY_LOG_INFO* rli); int register_slave_on_master(MYSQL* mysql); int 
terminate_slave_threads(MASTER_INFO* mi, int thread_mask, @@ -519,18 +525,15 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t* start_lock, MASTER_INFO* mi, bool high_priority); -/* If fd is -1, dump to NET */ -int mysql_table_dump(THD* thd, const char* db, - const char* tbl_name, int fd = -1); - /* retrieve table from master and copy to slave*/ int fetch_master_table(THD* thd, const char* db_name, const char* table_name, MASTER_INFO* mi, MYSQL* mysql, bool overwrite); void table_rule_ent_hash_to_str(String* s, HASH* h); void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a); -int show_master_info(THD* thd, MASTER_INFO* mi); -int show_binlog_info(THD* thd); +bool show_master_info(THD* thd, MASTER_INFO* mi); +bool show_binlog_info(THD* thd); +bool rpl_master_has_bug(RELAY_LOG_INFO *rli, uint bug_id); /* See if the query uses any tables that should not be replicated */ bool tables_ok(THD* thd, TABLE_LIST* tables); @@ -547,7 +550,6 @@ int add_table_rule(HASH* h, const char* table_spec); int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec); void init_table_rule_hash(HASH* h, bool* h_inited); void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited); -const char *rewrite_db(const char* db, uint32 *new_db_len); const char *print_slave_db_safe(const char *db); int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code); void skip_load_data_infile(NET* net); @@ -563,20 +565,22 @@ int init_master_info(MASTER_INFO* mi, const char* master_info_fname, bool abort_if_no_master_info_file, int thread_mask); void end_master_info(MASTER_INFO* mi); -int init_relay_log_info(RELAY_LOG_INFO* rli, const char* info_fname); void end_relay_log_info(RELAY_LOG_INFO* rli); void lock_slave_threads(MASTER_INFO* mi); void unlock_slave_threads(MASTER_INFO* mi); void init_thread_mask(int* mask,MASTER_INFO* mi,bool inverse); int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,ulonglong pos, - bool need_data_lock, const char** errmsg); 
+ bool need_data_lock, const char** errmsg, + bool look_for_description_event); int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, const char** errmsg); +void set_slave_thread_options(THD* thd); +void set_slave_thread_default_charset(THD* thd, RELAY_LOG_INFO *rli); void rotate_relay_log(MASTER_INFO* mi); -extern "C" pthread_handler_decl(handle_slave_io,arg); -extern "C" pthread_handler_decl(handle_slave_sql,arg); +pthread_handler_t handle_slave_io(void *arg); +pthread_handler_t handle_slave_sql(void *arg); extern bool volatile abort_loop; extern MASTER_INFO main_mi, *active_mi; /* active_mi for multi-master */ extern LIST master_list; @@ -584,7 +588,8 @@ extern HASH replicate_do_table, replicate_ignore_table; extern DYNAMIC_ARRAY replicate_wild_do_table, replicate_wild_ignore_table; extern bool do_table_inited, ignore_table_inited, wild_do_table_inited, wild_ignore_table_inited; -extern bool table_rules_on, replicate_same_server_id; +extern bool table_rules_on; +extern my_bool replicate_same_server_id; extern int disconnect_slave_event_count, abort_slave_event_count ; diff --git a/sql/sp.cc b/sql/sp.cc new file mode 100644 index 00000000000..2bb13b02e14 --- /dev/null +++ b/sql/sp.cc @@ -0,0 +1,1904 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +#include "mysql_priv.h" +#include "sp.h" +#include "sp_head.h" +#include "sp_cache.h" +#include "sql_trigger.h" + +#include <my_user.h> + +static bool +create_string(THD *thd, String *buf, + int sp_type, + sp_name *name, + const char *params, ulong paramslen, + const char *returns, ulong returnslen, + const char *body, ulong bodylen, + st_sp_chistics *chistics, + const LEX_STRING *definer_user, + const LEX_STRING *definer_host); +static int +db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, + ulong sql_mode, const char *params, const char *returns, + const char *body, st_sp_chistics &chistics, + const char *definer, longlong created, longlong modified); + +/* + * + * DB storage of Stored PROCEDUREs and FUNCTIONs + * + */ + +enum +{ + MYSQL_PROC_FIELD_DB = 0, + MYSQL_PROC_FIELD_NAME, + MYSQL_PROC_FIELD_TYPE, + MYSQL_PROC_FIELD_SPECIFIC_NAME, + MYSQL_PROC_FIELD_LANGUAGE, + MYSQL_PROC_FIELD_ACCESS, + MYSQL_PROC_FIELD_DETERMINISTIC, + MYSQL_PROC_FIELD_SECURITY_TYPE, + MYSQL_PROC_FIELD_PARAM_LIST, + MYSQL_PROC_FIELD_RETURNS, + MYSQL_PROC_FIELD_BODY, + MYSQL_PROC_FIELD_DEFINER, + MYSQL_PROC_FIELD_CREATED, + MYSQL_PROC_FIELD_MODIFIED, + MYSQL_PROC_FIELD_SQL_MODE, + MYSQL_PROC_FIELD_COMMENT, + MYSQL_PROC_FIELD_COUNT +}; + +/* Tells what SP_DEFAULT_ACCESS should be mapped to */ +#define SP_DEFAULT_ACCESS_MAPPING SP_CONTAINS_SQL + + +/* + Close mysql.proc, opened with open_proc_table_for_read(). + + SYNOPSIS + close_proc_table() + thd Thread context + backup Pointer to Open_tables_state instance which holds + information about tables which were open before we + decided to access mysql.proc. 
+*/ + +void close_proc_table(THD *thd, Open_tables_state *backup) +{ + close_thread_tables(thd); + thd->restore_backup_open_tables_state(backup); +} + + +/* + Open the mysql.proc table for read. + + SYNOPSIS + open_proc_table_for_read() + thd Thread context + backup Pointer to Open_tables_state instance where information about + currently open tables will be saved, and from which will be + restored when we will end work with mysql.proc. + + NOTES + Thanks to restrictions which we put on opening and locking of + this table for writing, we can open and lock it for reading + even when we already have some other tables open and locked. + One must call close_proc_table() to close table opened with + this call. + + RETURN + 0 Error + # Pointer to TABLE object of mysql.proc +*/ + +TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup) +{ + TABLE_LIST tables; + TABLE *table; + bool not_used; + DBUG_ENTER("open_proc_table"); + + thd->reset_n_backup_open_tables_state(backup); + + bzero((char*) &tables, sizeof(tables)); + tables.db= (char*) "mysql"; + tables.table_name= tables.alias= (char*)"proc"; + if (!(table= open_table(thd, &tables, thd->mem_root, ¬_used, + MYSQL_LOCK_IGNORE_FLUSH))) + { + thd->restore_backup_open_tables_state(backup); + DBUG_RETURN(0); + } + + DBUG_ASSERT(table->s->system_table); + + table->reginfo.lock_type= TL_READ; + /* + We have to ensure we are not blocked by a flush tables, as this + could lead to a deadlock if we have other tables opened. + */ + if (!(thd->lock= mysql_lock_tables(thd, &table, 1, + MYSQL_LOCK_IGNORE_FLUSH, ¬_used))) + { + close_proc_table(thd, backup); + DBUG_RETURN(0); + } + DBUG_RETURN(table); +} + + +/* + Open the mysql.proc table for update. + + SYNOPSIS + open_proc_table_for_update() + thd Thread context + + NOTES + Table opened with this call should closed using close_thread_tables(). 
+ + RETURN + 0 Error + # Pointer to TABLE object of mysql.proc +*/ + +static TABLE *open_proc_table_for_update(THD *thd) +{ + TABLE_LIST tables; + TABLE *table; + DBUG_ENTER("open_proc_table"); + + bzero((char*) &tables, sizeof(tables)); + tables.db= (char*) "mysql"; + tables.table_name= tables.alias= (char*)"proc"; + tables.lock_type= TL_WRITE; + + table= open_ltable(thd, &tables, TL_WRITE); + + DBUG_RETURN(table); +} + + +/* + Find row in open mysql.proc table representing stored routine. + + SYNOPSIS + db_find_routine_aux() + thd Thread context + type Type of routine to find (function or procedure) + name Name of routine + table TABLE object for open mysql.proc table. + + RETURN VALUE + SP_OK - Routine found + SP_KEY_NOT_FOUND- No routine with given name +*/ + +static int +db_find_routine_aux(THD *thd, int type, sp_name *name, TABLE *table) +{ + byte key[MAX_KEY_LENGTH]; // db, name, optional key length type + DBUG_ENTER("db_find_routine_aux"); + DBUG_PRINT("enter", ("type: %d name: %.*s", + type, name->m_name.length, name->m_name.str)); + + /* + Create key to find row. We have to use field->store() to be able to + handle VARCHAR and CHAR fields. + Assumption here is that the three first fields in the table are + 'db', 'name' and 'type' and the first key is the primary key over the + same fields. + */ + if (name->m_name.length > table->field[1]->field_length) + DBUG_RETURN(SP_KEY_NOT_FOUND); + table->field[0]->store(name->m_db.str, name->m_db.length, &my_charset_bin); + table->field[1]->store(name->m_name.str, name->m_name.length, + &my_charset_bin); + table->field[2]->store((longlong) type, TRUE); + key_copy(key, table->record[0], table->key_info, + table->key_info->key_length); + + if (table->file->index_read_idx(table->record[0], 0, + key, table->key_info->key_length, + HA_READ_KEY_EXACT)) + DBUG_RETURN(SP_KEY_NOT_FOUND); + + DBUG_RETURN(SP_OK); +} + + +/* + Find routine definition in mysql.proc table and create corresponding + sp_head object for it. 
+ + SYNOPSIS + db_find_routine() + thd Thread context + type Type of routine (TYPE_ENUM_PROCEDURE/...) + name Name of routine + sphp Out parameter in which pointer to created sp_head + object is returned (0 in case of error). + + NOTE + This function may damage current LEX during execution, so it is good + idea to create temporary LEX and make it active before calling it. + + RETURN VALUE + 0 - Success + non-0 - Error (may be one of special codes like SP_KEY_NOT_FOUND) +*/ + +static int +db_find_routine(THD *thd, int type, sp_name *name, sp_head **sphp) +{ + TABLE *table; + const char *params, *returns, *body; + int ret; + const char *definer; + longlong created; + longlong modified; + st_sp_chistics chistics; + char *ptr; + uint length; + char buff[65]; + String str(buff, sizeof(buff), &my_charset_bin); + ulong sql_mode; + Open_tables_state open_tables_state_backup; + DBUG_ENTER("db_find_routine"); + DBUG_PRINT("enter", ("type: %d name: %.*s", + type, name->m_name.length, name->m_name.str)); + + *sphp= 0; // In case of errors + if (!(table= open_proc_table_for_read(thd, &open_tables_state_backup))) + DBUG_RETURN(SP_OPEN_TABLE_FAILED); + + if ((ret= db_find_routine_aux(thd, type, name, table)) != SP_OK) + goto done; + + if (table->s->fields != MYSQL_PROC_FIELD_COUNT) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + bzero((char *)&chistics, sizeof(chistics)); + if ((ptr= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_ACCESS])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + switch (ptr[0]) { + case 'N': + chistics.daccess= SP_NO_SQL; + break; + case 'C': + chistics.daccess= SP_CONTAINS_SQL; + break; + case 'R': + chistics.daccess= SP_READS_SQL_DATA; + break; + case 'M': + chistics.daccess= SP_MODIFIES_SQL_DATA; + break; + default: + chistics.daccess= SP_DEFAULT_ACCESS_MAPPING; + } + + if ((ptr= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_DETERMINISTIC])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + 
chistics.detistic= (ptr[0] == 'N' ? FALSE : TRUE); + + if ((ptr= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_SECURITY_TYPE])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + chistics.suid= (ptr[0] == 'I' ? SP_IS_NOT_SUID : SP_IS_SUID); + + if ((params= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_PARAM_LIST])) == NULL) + { + params= ""; + } + + if (type == TYPE_ENUM_PROCEDURE) + returns= ""; + else if ((returns= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_RETURNS])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + if ((body= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_BODY])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + // Get additional information + if ((definer= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_DEFINER])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + modified= table->field[MYSQL_PROC_FIELD_MODIFIED]->val_int(); + created= table->field[MYSQL_PROC_FIELD_CREATED]->val_int(); + + sql_mode= (ulong) table->field[MYSQL_PROC_FIELD_SQL_MODE]->val_int(); + + table->field[MYSQL_PROC_FIELD_COMMENT]->val_str(&str, &str); + + ptr= 0; + if ((length= str.length())) + ptr= thd->strmake(str.ptr(), length); + chistics.comment.str= ptr; + chistics.comment.length= length; + + close_proc_table(thd, &open_tables_state_backup); + table= 0; + + ret= db_load_routine(thd, type, name, sphp, + sql_mode, params, returns, body, chistics, + definer, created, modified); + + done: + if (table) + close_proc_table(thd, &open_tables_state_backup); + DBUG_RETURN(ret); +} + + +static int +db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, + ulong sql_mode, const char *params, const char *returns, + const char *body, st_sp_chistics &chistics, + const char *definer, longlong created, longlong modified) +{ + LEX *old_lex= thd->lex, newlex; + String defstr; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; + bool 
dbchanged; + ulong old_sql_mode= thd->variables.sql_mode; + ha_rows old_select_limit= thd->variables.select_limit; + sp_rcontext *old_spcont= thd->spcont; + + char definer_user_name_holder[USERNAME_LENGTH + 1]; + LEX_STRING_WITH_INIT definer_user_name(definer_user_name_holder, + USERNAME_LENGTH); + + char definer_host_name_holder[HOSTNAME_LENGTH + 1]; + LEX_STRING_WITH_INIT definer_host_name(definer_host_name_holder, + HOSTNAME_LENGTH); + + int ret; + + thd->variables.sql_mode= sql_mode; + thd->variables.select_limit= HA_POS_ERROR; + + thd->lex= &newlex; + newlex.current_select= NULL; + + parse_user(definer, strlen(definer), + definer_user_name.str, &definer_user_name.length, + definer_host_name.str, &definer_host_name.length); + + defstr.set_charset(system_charset_info); + + /* + We have to add DEFINER clause and provide proper routine characteristics in + routine definition statement that we build here to be able to use this + definition for SHOW CREATE PROCEDURE later. + */ + + if (!create_string(thd, &defstr, + type, + name, + params, strlen(params), + returns, strlen(returns), + body, strlen(body), + &chistics, &definer_user_name, &definer_host_name)) + { + ret= SP_INTERNAL_ERROR; + goto end; + } + + if ((ret= sp_use_new_db(thd, name->m_db, &old_db, 1, &dbchanged))) + goto end; + + lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length()); + + thd->spcont= 0; + if (MYSQLparse(thd) || thd->is_fatal_error || newlex.sphead == NULL) + { + sp_head *sp= newlex.sphead; + + if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1))) + goto end; + delete sp; + ret= SP_PARSE_ERROR; + } + else + { + if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1))) + goto end; + *sphp= newlex.sphead; + (*sphp)->set_definer(&definer_user_name, &definer_host_name); + (*sphp)->set_info(created, modified, &chistics, sql_mode); + (*sphp)->optimize(); + } +end: + lex_end(thd->lex); + thd->spcont= old_spcont; + thd->variables.sql_mode= old_sql_mode; + thd->variables.select_limit= 
old_select_limit; + thd->lex= old_lex; + return ret; +} + + +static void +sp_returns_type(THD *thd, String &result, sp_head *sp) +{ + TABLE table; + Field *field; + bzero(&table, sizeof(table)); + table.in_use= thd; + table.s = &table.share_not_to_be_used; + field= sp->create_result_field(0, 0, &table); + field->sql_type(result); + + if (field->has_charset()) + { + result.append(STRING_WITH_LEN(" CHARSET ")); + result.append(field->charset()->csname); + } + + delete field; +} + +static int +db_create_routine(THD *thd, int type, sp_head *sp) +{ + int ret; + TABLE *table; + char definer[USER_HOST_BUFF_SIZE]; + DBUG_ENTER("db_create_routine"); + DBUG_PRINT("enter", ("type: %d name: %.*s",type,sp->m_name.length, + sp->m_name.str)); + + if (!(table= open_proc_table_for_update(thd))) + ret= SP_OPEN_TABLE_FAILED; + else + { + restore_record(table, s->default_values); // Get default values for fields + + /* NOTE: all needed privilege checks have been already done. */ + strxmov(definer, thd->lex->definer->user.str, "@", + thd->lex->definer->host.str, NullS); + + if (table->s->fields != MYSQL_PROC_FIELD_COUNT) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + if (system_charset_info->cset->numchars(system_charset_info, + sp->m_name.str, + sp->m_name.str+sp->m_name.length) > + table->field[MYSQL_PROC_FIELD_NAME]->char_length()) + { + ret= SP_BAD_IDENTIFIER; + goto done; + } + if (sp->m_body.length > table->field[MYSQL_PROC_FIELD_BODY]->field_length) + { + ret= SP_BODY_TOO_LONG; + goto done; + } + table->field[MYSQL_PROC_FIELD_DB]-> + store(sp->m_db.str, sp->m_db.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_NAME]-> + store(sp->m_name.str, sp->m_name.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_TYPE]-> + store((longlong)type, 1); + table->field[MYSQL_PROC_FIELD_SPECIFIC_NAME]-> + store(sp->m_name.str, sp->m_name.length, system_charset_info); + if (sp->m_chistics->daccess != SP_DEFAULT_ACCESS) + table->field[MYSQL_PROC_FIELD_ACCESS]-> + 
store((longlong)sp->m_chistics->daccess, 1); + table->field[MYSQL_PROC_FIELD_DETERMINISTIC]-> + store((longlong)(sp->m_chistics->detistic ? 1 : 2), 1); + if (sp->m_chistics->suid != SP_IS_DEFAULT_SUID) + table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]-> + store((longlong)sp->m_chistics->suid, 1); + table->field[MYSQL_PROC_FIELD_PARAM_LIST]-> + store(sp->m_params.str, sp->m_params.length, system_charset_info); + if (sp->m_type == TYPE_ENUM_FUNCTION) + { + String retstr(64); + sp_returns_type(thd, retstr, sp); + table->field[MYSQL_PROC_FIELD_RETURNS]-> + store(retstr.ptr(), retstr.length(), system_charset_info); + } + table->field[MYSQL_PROC_FIELD_BODY]-> + store(sp->m_body.str, sp->m_body.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_DEFINER]-> + store(definer, (uint)strlen(definer), system_charset_info); + ((Field_timestamp *)table->field[MYSQL_PROC_FIELD_CREATED])->set_time(); + ((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time(); + table->field[MYSQL_PROC_FIELD_SQL_MODE]-> + store((longlong)thd->variables.sql_mode, 1); + if (sp->m_chistics->comment.str) + table->field[MYSQL_PROC_FIELD_COMMENT]-> + store(sp->m_chistics->comment.str, sp->m_chistics->comment.length, + system_charset_info); + + if ((sp->m_type == TYPE_ENUM_FUNCTION) && + !trust_function_creators && mysql_bin_log.is_open()) + { + if (!sp->m_chistics->detistic) + { + /* + Note that this test is not perfect; one could use + a non-deterministic read-only function in an update statement. + */ + enum enum_sp_data_access access= + (sp->m_chistics->daccess == SP_DEFAULT_ACCESS) ? 
+ SP_DEFAULT_ACCESS_MAPPING : sp->m_chistics->daccess; + if (access == SP_CONTAINS_SQL || + access == SP_MODIFIES_SQL_DATA) + { + my_message(ER_BINLOG_UNSAFE_ROUTINE, + ER(ER_BINLOG_UNSAFE_ROUTINE), MYF(0)); + ret= SP_INTERNAL_ERROR; + goto done; + } + } + if (!(thd->security_ctx->master_access & SUPER_ACL)) + { + my_message(ER_BINLOG_CREATE_ROUTINE_NEED_SUPER, + ER(ER_BINLOG_CREATE_ROUTINE_NEED_SUPER), MYF(0)); + ret= SP_INTERNAL_ERROR; + goto done; + } + } + + ret= SP_OK; + if (table->file->write_row(table->record[0])) + ret= SP_WRITE_ROW_FAILED; + else if (mysql_bin_log.is_open()) + { + thd->clear_error(); + + String log_query; + log_query.set_charset(system_charset_info); + log_query.append(STRING_WITH_LEN("CREATE ")); + append_definer(thd, &log_query, &thd->lex->definer->user, + &thd->lex->definer->host); + log_query.append(thd->lex->stmt_definition_begin, + (char *)sp->m_body_begin - + thd->lex->stmt_definition_begin + + sp->m_body.length); + + /* Such a statement can always go directly to binlog, no trans cache */ + Query_log_event qinfo(thd, log_query.c_ptr(), log_query.length(), 0, + FALSE); + mysql_bin_log.write(&qinfo); + } + + } + +done: + close_thread_tables(thd); + DBUG_RETURN(ret); +} + + +static int +db_drop_routine(THD *thd, int type, sp_name *name) +{ + TABLE *table; + int ret; + DBUG_ENTER("db_drop_routine"); + DBUG_PRINT("enter", ("type: %d name: %.*s", + type, name->m_name.length, name->m_name.str)); + + if (!(table= open_proc_table_for_update(thd))) + DBUG_RETURN(SP_OPEN_TABLE_FAILED); + if ((ret= db_find_routine_aux(thd, type, name, table)) == SP_OK) + { + if (table->file->delete_row(table->record[0])) + ret= SP_DELETE_ROW_FAILED; + } + + if (ret == SP_OK) + { + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + } + + close_thread_tables(thd); + DBUG_RETURN(ret); +} + + +static int +db_update_routine(THD *thd, int type, sp_name 
*name, st_sp_chistics *chistics) +{ + TABLE *table; + int ret; + DBUG_ENTER("db_update_routine"); + DBUG_PRINT("enter", ("type: %d name: %.*s", + type, name->m_name.length, name->m_name.str)); + + if (!(table= open_proc_table_for_update(thd))) + DBUG_RETURN(SP_OPEN_TABLE_FAILED); + if ((ret= db_find_routine_aux(thd, type, name, table)) == SP_OK) + { + store_record(table,record[1]); + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + ((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time(); + if (chistics->suid != SP_IS_DEFAULT_SUID) + table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]-> + store((longlong)chistics->suid, 1); + if (chistics->daccess != SP_DEFAULT_ACCESS) + table->field[MYSQL_PROC_FIELD_ACCESS]-> + store((longlong)chistics->daccess, 1); + if (chistics->comment.str) + table->field[MYSQL_PROC_FIELD_COMMENT]->store(chistics->comment.str, + chistics->comment.length, + system_charset_info); + if ((table->file->update_row(table->record[1],table->record[0]))) + ret= SP_WRITE_ROW_FAILED; + } + + if (ret == SP_OK) + { + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + } + + close_thread_tables(thd); + DBUG_RETURN(ret); +} + + +struct st_used_field +{ + const char *field_name; + uint field_length; + enum enum_field_types field_type; + Field *field; +}; + +static struct st_used_field init_fields[]= +{ + { "Db", NAME_LEN, MYSQL_TYPE_STRING, 0}, + { "Name", NAME_LEN, MYSQL_TYPE_STRING, 0}, + { "Type", 9, MYSQL_TYPE_STRING, 0}, + { "Definer", 77, MYSQL_TYPE_STRING, 0}, + { "Modified", 0, MYSQL_TYPE_TIMESTAMP, 0}, + { "Created", 0, MYSQL_TYPE_TIMESTAMP, 0}, + { "Security_type", 1, MYSQL_TYPE_STRING, 0}, + { "Comment", NAME_LEN, MYSQL_TYPE_STRING, 0}, + { 0, 0, MYSQL_TYPE_STRING, 0} +}; + + +static int +print_field_values(THD *thd, TABLE *table, + struct st_used_field *used_fields, + int type, const char *wild) +{ + Protocol *protocol= 
thd->protocol; + + if (table->field[MYSQL_PROC_FIELD_TYPE]->val_int() == type) + { + String db_string; + String name_string; + struct st_used_field *used_field= used_fields; + + if (get_field(thd->mem_root, used_field->field, &db_string)) + db_string.set_ascii("", 0); + used_field+= 1; + get_field(thd->mem_root, used_field->field, &name_string); + + if (!wild || !wild[0] || !wild_compare(name_string.ptr(), wild, 0)) + { + protocol->prepare_for_resend(); + protocol->store(&db_string); + protocol->store(&name_string); + for (used_field++; + used_field->field_name; + used_field++) + { + switch (used_field->field_type) { + case MYSQL_TYPE_TIMESTAMP: + { + TIME tmp_time; + + bzero((char *)&tmp_time, sizeof(tmp_time)); + ((Field_timestamp *) used_field->field)->get_time(&tmp_time); + protocol->store(&tmp_time); + } + break; + default: + { + String tmp_string; + + get_field(thd->mem_root, used_field->field, &tmp_string); + protocol->store(&tmp_string); + } + break; + } + } + if (protocol->write()) + return SP_INTERNAL_ERROR; + } + } + + return SP_OK; +} + + +static int +db_show_routine_status(THD *thd, int type, const char *wild) +{ + TABLE *table; + TABLE_LIST tables; + int res; + DBUG_ENTER("db_show_routine_status"); + + memset(&tables, 0, sizeof(tables)); + tables.db= (char*)"mysql"; + tables.table_name= tables.alias= (char*)"proc"; + + if (! 
(table= open_ltable(thd, &tables, TL_READ))) + { + res= SP_OPEN_TABLE_FAILED; + goto done; + } + else + { + Item *item; + List<Item> field_list; + struct st_used_field *used_field; + TABLE_LIST *leaves= 0; + st_used_field used_fields[array_elements(init_fields)]; + + memcpy((char*) used_fields, (char*) init_fields, sizeof(used_fields)); + /* Init header */ + for (used_field= &used_fields[0]; + used_field->field_name; + used_field++) + { + switch (used_field->field_type) { + case MYSQL_TYPE_TIMESTAMP: + field_list.push_back(item=new Item_datetime(used_field->field_name)); + break; + default: + field_list.push_back(item=new Item_empty_string(used_field->field_name, + used_field-> + field_length)); + break; + } + } + /* Print header */ + if (thd->protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) + { + res= SP_INTERNAL_ERROR; + goto err_case; + } + + /* + Init fields + + tables is not VIEW for sure => we can pass 0 as condition + */ + thd->lex->select_lex.context.resolve_in_table_list_only(&tables); + setup_tables(thd, &thd->lex->select_lex.context, + &thd->lex->select_lex.top_join_list, + &tables, 0, &leaves, FALSE); + for (used_field= &used_fields[0]; + used_field->field_name; + used_field++) + { + Item_field *field= new Item_field(&thd->lex->select_lex.context, + "mysql", "proc", + used_field->field_name); + if (!field || + !(used_field->field= find_field_in_tables(thd, field, &tables, NULL, + 0, REPORT_ALL_ERRORS, 1, + TRUE))) + { + res= SP_INTERNAL_ERROR; + goto err_case1; + } + } + + table->file->ha_index_init(0); + if ((res= table->file->index_first(table->record[0]))) + { + res= (res == HA_ERR_END_OF_FILE) ? 
0 : SP_INTERNAL_ERROR; + goto err_case1; + } + if ((res= print_field_values(thd, table, used_fields, type, wild))) + goto err_case1; + while (!table->file->index_next(table->record[0])) + { + if ((res= print_field_values(thd, table, used_fields, type, wild))) + goto err_case1; + } + res= SP_OK; + } + +err_case1: + send_eof(thd); +err_case: + table->file->ha_index_end(); + close_thread_tables(thd); +done: + DBUG_RETURN(res); +} + + +/* Drop all routines in database 'db' */ +int +sp_drop_db_routines(THD *thd, char *db) +{ + TABLE *table; + int ret; + uint key_len; + DBUG_ENTER("sp_drop_db_routines"); + DBUG_PRINT("enter", ("db: %s", db)); + + ret= SP_OPEN_TABLE_FAILED; + if (!(table= open_proc_table_for_update(thd))) + goto err; + + table->field[MYSQL_PROC_FIELD_DB]->store(db, strlen(db), system_charset_info); + key_len= table->key_info->key_part[0].store_length; + + ret= SP_OK; + table->file->ha_index_init(0); + if (! table->file->index_read(table->record[0], + (byte *)table->field[MYSQL_PROC_FIELD_DB]->ptr, + key_len, HA_READ_KEY_EXACT)) + { + int nxtres; + bool deleted= FALSE; + + do + { + if (! table->file->delete_row(table->record[0])) + deleted= TRUE; /* We deleted something */ + else + { + ret= SP_DELETE_ROW_FAILED; + nxtres= 0; + break; + } + } while (! (nxtres= table->file->index_next_same(table->record[0], + (byte *)table->field[MYSQL_PROC_FIELD_DB]->ptr, + key_len))); + if (nxtres != HA_ERR_END_OF_FILE) + ret= SP_KEY_NOT_FOUND; + if (deleted) + sp_cache_invalidate(); + } + table->file->ha_index_end(); + + close_thread_tables(thd); + +err: + DBUG_RETURN(ret); +} + + +/***************************************************************************** + PROCEDURE +******************************************************************************/ + +/* + Obtain object representing stored procedure/function by its name from + stored procedures cache and looking into mysql.proc if needed. 
+ + SYNOPSIS + sp_find_routine() + thd - thread context + type - type of object (TYPE_ENUM_FUNCTION or TYPE_ENUM_PROCEDURE) + name - name of procedure + cp - hash to look routine in + cache_only - if true perform cache-only lookup + (Don't look in mysql.proc). + + RETURN VALUE + Non-0 pointer to sp_head object for the procedure, or + 0 - in case of error. +*/ + +sp_head * +sp_find_routine(THD *thd, int type, sp_name *name, sp_cache **cp, + bool cache_only) +{ + sp_head *sp; + ulong depth= (type == TYPE_ENUM_PROCEDURE ? + thd->variables.max_sp_recursion_depth : + 0); + DBUG_ENTER("sp_find_routine"); + DBUG_PRINT("enter", ("name: %.*s.%.*s, type: %d, cache only %d", + name->m_db.length, name->m_db.str, + name->m_name.length, name->m_name.str, + type, cache_only)); + + if ((sp= sp_cache_lookup(cp, name))) + { + ulong level; + sp_head *new_sp; + const char *returns= ""; + char definer[USER_HOST_BUFF_SIZE]; + + /* + String buffer for RETURNS data type must have system charset; + 64 -- size of "returns" column of mysql.proc. + */ + String retstr(64); + + DBUG_PRINT("info", ("found: 0x%lx", (ulong)sp)); + if (sp->m_first_free_instance) + { + DBUG_PRINT("info", ("first free: 0x%lx, level: %lu, flags %x", + (ulong)sp->m_first_free_instance, + sp->m_first_free_instance->m_recursion_level, + sp->m_first_free_instance->m_flags)); + DBUG_ASSERT(!(sp->m_first_free_instance->m_flags & sp_head::IS_INVOKED)); + if (sp->m_first_free_instance->m_recursion_level > depth) + { + sp->recursion_level_error(thd); + DBUG_RETURN(0); + } + DBUG_RETURN(sp->m_first_free_instance); + } + /* + Actually depth could be +1 than the actual value in case a SP calls + SHOW CREATE PROCEDURE. Hence, the linked list could hold up to one more + instance. 
+ */ + + level= sp->m_last_cached_sp->m_recursion_level + 1; + if (level > depth) + { + sp->recursion_level_error(thd); + DBUG_RETURN(0); + } + + strxmov(definer, sp->m_definer_user.str, "@", + sp->m_definer_host.str, NullS); + if (type == TYPE_ENUM_FUNCTION) + { + sp_returns_type(thd, retstr, sp); + returns= retstr.ptr(); + } + if (db_load_routine(thd, type, name, &new_sp, + sp->m_sql_mode, sp->m_params.str, returns, + sp->m_body.str, *sp->m_chistics, definer, + sp->m_created, sp->m_modified) == SP_OK) + { + sp->m_last_cached_sp->m_next_cached_sp= new_sp; + new_sp->m_recursion_level= level; + new_sp->m_first_instance= sp; + sp->m_last_cached_sp= sp->m_first_free_instance= new_sp; + DBUG_PRINT("info", ("added level: 0x%lx, level: %lu, flags %x", + (ulong)new_sp, new_sp->m_recursion_level, + new_sp->m_flags)); + DBUG_RETURN(new_sp); + } + DBUG_RETURN(0); + } + if (!cache_only) + { + if (db_find_routine(thd, type, name, &sp) == SP_OK) + { + sp_cache_insert(cp, sp); + DBUG_PRINT("info", ("added new: 0x%lx, level: %lu, flags %x", + (ulong)sp, sp->m_recursion_level, + sp->m_flags)); + } + } + DBUG_RETURN(sp); +} + + +/* + This is used by sql_acl.cc:mysql_routine_grant() and is used to find + the routines in 'routines'. 
+*/ + +int +sp_exist_routines(THD *thd, TABLE_LIST *routines, bool any, bool no_error) +{ + TABLE_LIST *routine; + bool result= 0; + bool sp_object_found; + DBUG_ENTER("sp_exists_routine"); + for (routine= routines; routine; routine= routine->next_global) + { + sp_name *name; + LEX_STRING lex_db; + LEX_STRING lex_name; + lex_db.length= strlen(routine->db); + lex_name.length= strlen(routine->table_name); + lex_db.str= thd->strmake(routine->db, lex_db.length); + lex_name.str= thd->strmake(routine->table_name, lex_name.length); + name= new sp_name(lex_db, lex_name); + name->init_qname(thd); + sp_object_found= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, name, + &thd->sp_proc_cache, FALSE) != NULL || + sp_find_routine(thd, TYPE_ENUM_FUNCTION, name, + &thd->sp_func_cache, FALSE) != NULL; + mysql_reset_errors(thd, TRUE); + if (sp_object_found) + { + if (any) + DBUG_RETURN(1); + result= 1; + } + else if (!any) + { + if (!no_error) + { + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION or PROCEDURE", + routine->table_name); + DBUG_RETURN(-1); + } + DBUG_RETURN(0); + } + } + DBUG_RETURN(result); +} + + +/* + Check if a routine exists in the mysql.proc table, without actually + parsing the definition. 
(Used for dropping) + + SYNOPSIS + sp_routine_exists_in_table() + thd - thread context + name - name of procedure + + RETURN VALUE + 0 - Success + non-0 - Error; SP_OPEN_TABLE_FAILED or SP_KEY_NOT_FOUND +*/ + +int +sp_routine_exists_in_table(THD *thd, int type, sp_name *name) +{ + TABLE *table; + int ret; + Open_tables_state open_tables_state_backup; + + if (!(table= open_proc_table_for_read(thd, &open_tables_state_backup))) + ret= SP_OPEN_TABLE_FAILED; + else + { + if ((ret= db_find_routine_aux(thd, type, name, table)) != SP_OK) + ret= SP_KEY_NOT_FOUND; + close_proc_table(thd, &open_tables_state_backup); + } + return ret; +} + + +int +sp_create_procedure(THD *thd, sp_head *sp) +{ + int ret; + DBUG_ENTER("sp_create_procedure"); + DBUG_PRINT("enter", ("name: %.*s", sp->m_name.length, sp->m_name.str)); + + ret= db_create_routine(thd, TYPE_ENUM_PROCEDURE, sp); + DBUG_RETURN(ret); +} + + +int +sp_drop_procedure(THD *thd, sp_name *name) +{ + int ret; + DBUG_ENTER("sp_drop_procedure"); + DBUG_PRINT("enter", ("name: %.*s", name->m_name.length, name->m_name.str)); + + ret= db_drop_routine(thd, TYPE_ENUM_PROCEDURE, name); + if (!ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_update_procedure(THD *thd, sp_name *name, st_sp_chistics *chistics) +{ + int ret; + DBUG_ENTER("sp_update_procedure"); + DBUG_PRINT("enter", ("name: %.*s", name->m_name.length, name->m_name.str)); + + ret= db_update_routine(thd, TYPE_ENUM_PROCEDURE, name, chistics); + if (!ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_show_create_procedure(THD *thd, sp_name *name) +{ + int ret= SP_KEY_NOT_FOUND; + sp_head *sp; + DBUG_ENTER("sp_show_create_procedure"); + DBUG_PRINT("enter", ("name: %.*s", name->m_name.length, name->m_name.str)); + + /* + Increase the recursion limit for this statement. SHOW CREATE PROCEDURE + does not do actual recursion. 
+ */ + thd->variables.max_sp_recursion_depth++; + if ((sp= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, name, + &thd->sp_proc_cache, FALSE))) + ret= sp->show_create_procedure(thd); + + thd->variables.max_sp_recursion_depth--; + DBUG_RETURN(ret); +} + + +int +sp_show_status_procedure(THD *thd, const char *wild) +{ + int ret; + DBUG_ENTER("sp_show_status_procedure"); + + ret= db_show_routine_status(thd, TYPE_ENUM_PROCEDURE, wild); + DBUG_RETURN(ret); +} + + +/***************************************************************************** + FUNCTION +******************************************************************************/ + +int +sp_create_function(THD *thd, sp_head *sp) +{ + int ret; + DBUG_ENTER("sp_create_function"); + DBUG_PRINT("enter", ("name: %.*s", sp->m_name.length, sp->m_name.str)); + + ret= db_create_routine(thd, TYPE_ENUM_FUNCTION, sp); + DBUG_RETURN(ret); +} + + +int +sp_drop_function(THD *thd, sp_name *name) +{ + int ret; + DBUG_ENTER("sp_drop_function"); + DBUG_PRINT("enter", ("name: %.*s", name->m_name.length, name->m_name.str)); + + ret= db_drop_routine(thd, TYPE_ENUM_FUNCTION, name); + if (!ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_update_function(THD *thd, sp_name *name, st_sp_chistics *chistics) +{ + int ret; + DBUG_ENTER("sp_update_procedure"); + DBUG_PRINT("enter", ("name: %.*s", name->m_name.length, name->m_name.str)); + + ret= db_update_routine(thd, TYPE_ENUM_FUNCTION, name, chistics); + if (!ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_show_create_function(THD *thd, sp_name *name) +{ + sp_head *sp; + DBUG_ENTER("sp_show_create_function"); + DBUG_PRINT("enter", ("name: %.*s", name->m_name.length, name->m_name.str)); + + if ((sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, name, + &thd->sp_func_cache, FALSE))) + { + int ret= sp->show_create_function(thd); + + DBUG_RETURN(ret); + } + DBUG_RETURN(SP_KEY_NOT_FOUND); +} + + +int +sp_show_status_function(THD *thd, const char *wild) +{ + int ret; + 
DBUG_ENTER("sp_show_status_function"); + ret= db_show_routine_status(thd, TYPE_ENUM_FUNCTION, wild); + DBUG_RETURN(ret); +} + + +/* + Structure that represents element in the set of stored routines + used by statement or routine. +*/ +struct Sroutine_hash_entry; + +struct Sroutine_hash_entry +{ + /* Set key consisting of one-byte routine type and quoted routine name. */ + LEX_STRING key; + /* + Next element in list linking all routines in set. See also comments + for LEX::sroutine/sroutine_list and sp_head::m_sroutines. + */ + Sroutine_hash_entry *next; + /* + Uppermost view which directly or indirectly uses this routine. + 0 if routine is not used in view. Note that it also can be 0 if + statement uses routine both via view and directly. + */ + TABLE_LIST *belong_to_view; +}; + + +extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first) +{ + Sroutine_hash_entry *rn= (Sroutine_hash_entry *)ptr; + *plen= rn->key.length; + return (byte *)rn->key.str; +} + + +/* + Check if + - current statement (the one in thd->lex) needs table prelocking + - first routine in thd->lex->sroutines_list needs to execute its body in + prelocked mode. + + SYNOPSIS + sp_get_prelocking_info() + thd Current thread, thd->lex is the statement to be + checked. + need_prelocking OUT TRUE - prelocked mode should be activated + before executing the statement + FALSE - Don't activate prelocking + first_no_prelocking OUT TRUE - Tables used by first routine in + thd->lex->sroutines_list should be + prelocked. + FALSE - Otherwise. + NOTES + This function assumes that for any "CALL proc(...)" statement routines_list + will have 'proc' as first element (it may have several, consider e.g. + "proc(sp_func(...)))". This property is currently guaranted by the parser. 
+*/ + +void sp_get_prelocking_info(THD *thd, bool *need_prelocking, + bool *first_no_prelocking) +{ + Sroutine_hash_entry *routine; + routine= (Sroutine_hash_entry*)thd->lex->sroutines_list.first; + + DBUG_ASSERT(routine); + bool first_is_procedure= (routine->key.str[0] == TYPE_ENUM_PROCEDURE); + + *first_no_prelocking= first_is_procedure; + *need_prelocking= !first_is_procedure || test(routine->next); +} + + +/* + Auxiliary function that adds new element to the set of stored routines + used by statement. + + SYNOPSIS + add_used_routine() + lex LEX representing statement + arena Arena in which memory for new element will be allocated + key Key for the hash representing set + belong_to_view Uppermost view which uses this routine + (0 if routine is not used by view) + + NOTES + Will also add element to end of 'LEX::sroutines_list' list. + + In case when statement uses stored routines but does not need + prelocking (i.e. it does not use any tables) we will access the + elements of LEX::sroutines set on prepared statement re-execution. + Because of this we have to allocate memory for both hash element + and copy of its key in persistent arena. + + TODO + When we get rid of these accesses on re-execution we will be + able to allocate memory for hash elements in non-persistent arena + and directly use key values from sp_head::m_sroutines sets instead + of making their copies. + + RETURN VALUE + TRUE - new element was added. + FALSE - element was not added (because it is already present in the set). +*/ + +static bool add_used_routine(LEX *lex, Query_arena *arena, + const LEX_STRING *key, + TABLE_LIST *belong_to_view) +{ + hash_init_opt(&lex->sroutines, system_charset_info, + Query_tables_list::START_SROUTINES_HASH_SIZE, + 0, 0, sp_sroutine_key, 0, 0); + + if (!hash_search(&lex->sroutines, (byte *)key->str, key->length)) + { + Sroutine_hash_entry *rn= + (Sroutine_hash_entry *)arena->alloc(sizeof(Sroutine_hash_entry) + + key->length); + if (!rn) // OOM. 
Error will be reported using fatal_error(). + return FALSE; + rn->key.length= key->length; + rn->key.str= (char *)rn + sizeof(Sroutine_hash_entry); + memcpy(rn->key.str, key->str, key->length); + my_hash_insert(&lex->sroutines, (byte *)rn); + lex->sroutines_list.link_in_list((byte *)rn, (byte **)&rn->next); + rn->belong_to_view= belong_to_view; + return TRUE; + } + return FALSE; +} + + +/* + Add routine which is explicitly used by statement to the set of stored + routines used by this statement. + + SYNOPSIS + sp_add_used_routine() + lex - LEX representing statement + arena - arena in which memory for new element of the set + will be allocated + rt - routine name + rt_type - routine type (one of TYPE_ENUM_PROCEDURE/...) + + NOTES + Will also add element to end of 'LEX::sroutines_list' list (and will + take into account that this is explicitly used routine). + + To be friendly towards prepared statements one should pass + persistent arena as second argument. +*/ + +void sp_add_used_routine(LEX *lex, Query_arena *arena, + sp_name *rt, char rt_type) +{ + rt->set_routine_type(rt_type); + (void)add_used_routine(lex, arena, &rt->m_sroutines_key, 0); + lex->sroutines_list_own_last= lex->sroutines_list.next; + lex->sroutines_list_own_elements= lex->sroutines_list.elements; +} + + +/* + Remove routines which are only indirectly used by statement from + the set of routines used by this statement. + + SYNOPSIS + sp_remove_not_own_routines() + lex LEX representing statement +*/ + +void sp_remove_not_own_routines(LEX *lex) +{ + Sroutine_hash_entry *not_own_rt, *next_rt; + for (not_own_rt= *(Sroutine_hash_entry **)lex->sroutines_list_own_last; + not_own_rt; not_own_rt= next_rt) + { + /* + It is safe to obtain not_own_rt->next after calling hash_delete() now + but we want to be more future-proof. 
+ */ + next_rt= not_own_rt->next; + hash_delete(&lex->sroutines, (byte *)not_own_rt); + } + + *(Sroutine_hash_entry **)lex->sroutines_list_own_last= NULL; + lex->sroutines_list.next= lex->sroutines_list_own_last; + lex->sroutines_list.elements= lex->sroutines_list_own_elements; +} + + +/* + Merge contents of two hashes representing sets of routines used + by statements or by other routines. + + SYNOPSIS + sp_update_sp_used_routines() + dst - hash to which elements should be added + src - hash from which elements merged + + NOTE + This procedure won't create new Sroutine_hash_entry objects, + instead it will simply add elements from source to destination + hash. Thus the lifetime of elements in destination hash becomes + dependent on the lifetime of elements from source hash. It also + won't touch lists linking elements in source and destination + hashes. +*/ + +void sp_update_sp_used_routines(HASH *dst, HASH *src) +{ + for (uint i=0 ; i < src->records ; i++) + { + Sroutine_hash_entry *rt= (Sroutine_hash_entry *)hash_element(src, i); + if (!hash_search(dst, (byte *)rt->key.str, rt->key.length)) + my_hash_insert(dst, (byte *)rt); + } +} + + +/* + Add contents of hash representing set of routines to the set of + routines used by statement. + + SYNOPSIS + sp_update_stmt_used_routines() + thd Thread context + lex LEX representing statement + src Hash representing set from which routines will be added + belong_to_view Uppermost view which uses these routines, 0 if none + + NOTE + It will also add elements to end of 'LEX::sroutines_list' list. +*/ + +static void +sp_update_stmt_used_routines(THD *thd, LEX *lex, HASH *src, + TABLE_LIST *belong_to_view) +{ + for (uint i=0 ; i < src->records ; i++) + { + Sroutine_hash_entry *rt= (Sroutine_hash_entry *)hash_element(src, i); + (void)add_used_routine(lex, thd->stmt_arena, &rt->key, belong_to_view); + } +} + + +/* + Add contents of list representing set of routines to the set of + routines used by statement. 
+ + SYNOPSIS + sp_update_stmt_used_routines() + thd Thread context + lex LEX representing statement + src List representing set from which routines will be added + belong_to_view Uppermost view which uses these routines, 0 if none + + NOTE + It will also add elements to end of 'LEX::sroutines_list' list. +*/ + +static void sp_update_stmt_used_routines(THD *thd, LEX *lex, SQL_LIST *src, + TABLE_LIST *belong_to_view) +{ + for (Sroutine_hash_entry *rt= (Sroutine_hash_entry *)src->first; + rt; rt= rt->next) + (void)add_used_routine(lex, thd->stmt_arena, &rt->key, belong_to_view); +} + + +/* + Cache sub-set of routines used by statement, add tables used by these + routines to statement table list. Do the same for all routines used + by these routines. + + SYNOPSIS + sp_cache_routines_and_add_tables_aux() + thd - thread context + lex - LEX representing statement + start - first routine from the list of routines to be cached + (this list defines mentioned sub-set). + first_no_prelock - If true, don't add tables or cache routines used by + the body of the first routine (i.e. *start) + will be executed in non-prelocked mode. + tabs_changed - Set to TRUE some tables were added, FALSE otherwise + NOTE + If some function is missing this won't be reported here. + Instead this fact will be discovered during query execution. + + RETURN VALUE + 0 - success + non-0 - failure +*/ + +static int +sp_cache_routines_and_add_tables_aux(THD *thd, LEX *lex, + Sroutine_hash_entry *start, + bool first_no_prelock, bool *tabs_changed) +{ + int ret= 0; + bool tabschnd= 0; /* Set if tables changed */ + bool first= TRUE; + DBUG_ENTER("sp_cache_routines_and_add_tables_aux"); + + for (Sroutine_hash_entry *rt= start; rt; rt= rt->next) + { + sp_name name(rt->key.str, rt->key.length); + int type= rt->key.str[0]; + sp_head *sp; + + if (!(sp= sp_cache_lookup((type == TYPE_ENUM_FUNCTION ? 
+ &thd->sp_func_cache : &thd->sp_proc_cache), + &name))) + { + name.m_name.str= strchr(name.m_qname.str, '.'); + name.m_db.length= name.m_name.str - name.m_qname.str; + name.m_db.str= strmake_root(thd->mem_root, name.m_qname.str, + name.m_db.length); + name.m_name.str+= 1; + name.m_name.length= name.m_qname.length - name.m_db.length - 1; + + switch ((ret= db_find_routine(thd, type, &name, &sp))) + { + case SP_OK: + { + if (type == TYPE_ENUM_FUNCTION) + sp_cache_insert(&thd->sp_func_cache, sp); + else + sp_cache_insert(&thd->sp_proc_cache, sp); + } + break; + case SP_KEY_NOT_FOUND: + ret= SP_OK; + break; + default: + /* + Any error when loading an existing routine is either some problem + with the mysql.proc table, or a parse error because the contents + have been tampered with (in which case we clear that error). + */ + if (ret == SP_PARSE_ERROR) + thd->clear_error(); + /* + If we cleared the parse error, or when db_find_routine() flagged + an error with its return value without calling my_error(), we + set the generic "mysql.proc table corrupt" error here. + */ + if (!thd->net.report_error) + { + /* + SP allows full NAME_LEN chars thus we have to allocate enough + size in bytes. Otherwise a stack overrun could happen + if `name` is a multibyte sequence. `db` is still safe because the + rest of the server checks against NAME_LEN bytes and not chars. + Hence, the overrun happens only if the name is longer than 32 chars and + uses multibyte (cyrillic, greek, etc.) + + !! Change 3 with SYSTEM_CHARSET_MBMAXLEN when it's defined. 
+ */ + char n[NAME_LEN*3*2+2]; + + /* m_qname.str is not always \0 terminated */ + memcpy(n, name.m_qname.str, name.m_qname.length); + n[name.m_qname.length]= '\0'; + my_error(ER_SP_PROC_TABLE_CORRUPT, MYF(0), n, ret); + } + break; + } + } + if (sp) + { + if (!(first && first_no_prelock)) + { + sp_update_stmt_used_routines(thd, lex, &sp->m_sroutines, + rt->belong_to_view); + tabschnd|= + sp->add_used_tables_to_table_list(thd, &lex->query_tables_last, + rt->belong_to_view); + } + } + first= FALSE; + } + if (tabs_changed) /* it can be NULL */ + *tabs_changed= tabschnd; + DBUG_RETURN(ret); +} + + +/* + Cache all routines from the set of used by statement, add tables used + by those routines to statement table list. Do the same for all routines + used by those routines. + + SYNOPSIS + sp_cache_routines_and_add_tables() + thd - thread context + lex - LEX representing statement + first_no_prelock - If true, don't add tables or cache routines used by + the body of the first routine (i.e. *start) + tabs_changed - Set to TRUE some tables were added, FALSE otherwise + + RETURN VALUE + 0 - success + non-0 - failure +*/ + +int +sp_cache_routines_and_add_tables(THD *thd, LEX *lex, bool first_no_prelock, + bool *tabs_changed) +{ + return sp_cache_routines_and_add_tables_aux(thd, lex, + (Sroutine_hash_entry *)lex->sroutines_list.first, + first_no_prelock, tabs_changed); +} + + +/* + Add all routines used by view to the set of routines used by statement. + Add tables used by those routines to statement table list. Do the same + for all routines used by these routines. 
+ + SYNOPSIS + sp_cache_routines_and_add_tables_for_view() + thd Thread context + lex LEX representing statement + view Table list element representing view + + RETURN VALUE + 0 - success + non-0 - failure +*/ + +int +sp_cache_routines_and_add_tables_for_view(THD *thd, LEX *lex, TABLE_LIST *view) +{ + Sroutine_hash_entry **last_cached_routine_ptr= + (Sroutine_hash_entry **)lex->sroutines_list.next; + sp_update_stmt_used_routines(thd, lex, &view->view->sroutines_list, + view->top_table()); + return sp_cache_routines_and_add_tables_aux(thd, lex, + *last_cached_routine_ptr, FALSE, + NULL); +} + + +/* + Add triggers for table to the set of routines used by statement. + Add tables used by them to statement table list. Do the same for + all implicitly used routines. + + SYNOPSIS + sp_cache_routines_and_add_tables_for_triggers() + thd thread context + lex LEX respresenting statement + table Table list element for table with trigger + + RETURN VALUE + 0 - success + non-0 - failure +*/ + +int +sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex, + TABLE_LIST *table) +{ + int ret= 0; + Table_triggers_list *triggers= table->table->triggers; + if (add_used_routine(lex, thd->stmt_arena, &triggers->sroutines_key, + table->belong_to_view)) + { + Sroutine_hash_entry **last_cached_routine_ptr= + (Sroutine_hash_entry **)lex->sroutines_list.next; + for (int i= 0; i < (int)TRG_EVENT_MAX; i++) + { + for (int j= 0; j < (int)TRG_ACTION_MAX; j++) + { + if (triggers->bodies[i][j]) + { + (void)triggers->bodies[i][j]-> + add_used_tables_to_table_list(thd, &lex->query_tables_last, + table->belong_to_view); + sp_update_stmt_used_routines(thd, lex, + &triggers->bodies[i][j]->m_sroutines, + table->belong_to_view); + } + } + } + ret= sp_cache_routines_and_add_tables_aux(thd, lex, + *last_cached_routine_ptr, + FALSE, NULL); + } + return ret; +} + + +/* + * Generates the CREATE... string from the table information. + * Returns TRUE on success, FALSE on (alloc) failure. 
+ */ +static bool +create_string(THD *thd, String *buf, + int type, + sp_name *name, + const char *params, ulong paramslen, + const char *returns, ulong returnslen, + const char *body, ulong bodylen, + st_sp_chistics *chistics, + const LEX_STRING *definer_user, + const LEX_STRING *definer_host) +{ + /* Make some room to begin with */ + if (buf->alloc(100 + name->m_qname.length + paramslen + returnslen + bodylen + + chistics->comment.length + 10 /* length of " DEFINER= "*/ + + USER_HOST_BUFF_SIZE)) + return FALSE; + + buf->append(STRING_WITH_LEN("CREATE ")); + append_definer(thd, buf, definer_user, definer_host); + if (type == TYPE_ENUM_FUNCTION) + buf->append(STRING_WITH_LEN("FUNCTION ")); + else + buf->append(STRING_WITH_LEN("PROCEDURE ")); + append_identifier(thd, buf, name->m_name.str, name->m_name.length); + buf->append('('); + buf->append(params, paramslen); + buf->append(')'); + if (type == TYPE_ENUM_FUNCTION) + { + buf->append(STRING_WITH_LEN(" RETURNS ")); + buf->append(returns, returnslen); + } + buf->append('\n'); + switch (chistics->daccess) { + case SP_NO_SQL: + buf->append(STRING_WITH_LEN(" NO SQL\n")); + break; + case SP_READS_SQL_DATA: + buf->append(STRING_WITH_LEN(" READS SQL DATA\n")); + break; + case SP_MODIFIES_SQL_DATA: + buf->append(STRING_WITH_LEN(" MODIFIES SQL DATA\n")); + break; + case SP_DEFAULT_ACCESS: + case SP_CONTAINS_SQL: + /* Do nothing */ + break; + } + if (chistics->detistic) + buf->append(STRING_WITH_LEN(" DETERMINISTIC\n")); + if (chistics->suid == SP_IS_NOT_SUID) + buf->append(STRING_WITH_LEN(" SQL SECURITY INVOKER\n")); + if (chistics->comment.length) + { + buf->append(STRING_WITH_LEN(" COMMENT ")); + append_unescaped(buf, chistics->comment.str, chistics->comment.length); + buf->append('\n'); + } + buf->append(body, bodylen); + return TRUE; +} + + + +/* + Change the current database if needed. 
+ + SYNOPSIS + sp_use_new_db() + thd thread handle + new_db new database name (a string and its length) + old_db [IN] str points to a buffer where to store the old + database, length contains the size of the buffer + [OUT] if old db was not NULL, its name is copied + to the buffer pointed at by str and length is updated + accordingly. Otherwise str[0] is set to '\0' and length + is set to 0. The out parameter should be used only if + the database name has been changed (see dbchangedp). + dbchangedp [OUT] is set to TRUE if the current database is changed, + FALSE otherwise. A database is not changed if the old + name is the same as the new one, both names are empty, + or an error has occurred. + + RETURN VALUE + 0 success + 1 access denied or out of memory (the error message is + set in THD) +*/ + +int +sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db, + bool no_access_check, bool *dbchangedp) +{ + int ret; + DBUG_ENTER("sp_use_new_db"); + DBUG_PRINT("enter", ("newdb: %s", new_db.str)); + + /* + Set new_db to an empty string if it's NULL, because mysql_change_db + requires a non-NULL argument. + new_db.str can be NULL only if we're restoring the old database after + execution of a stored procedure and there were no current database + selected. The stored procedure itself must always have its database + initialized. + */ + if (new_db.str == NULL) + new_db.str= empty_c_string; + + if (thd->db) + { + old_db->length= (strmake(old_db->str, thd->db, old_db->length) - + old_db->str); + } + else + { + old_db->str[0]= '\0'; + old_db->length= 0; + } + + /* Don't change the database if the new name is the same as the old one. 
*/ + if (my_strcasecmp(system_charset_info, old_db->str, new_db.str) == 0) + { + *dbchangedp= FALSE; + DBUG_RETURN(0); + } + + ret= mysql_change_db(thd, new_db.str, no_access_check); + + *dbchangedp= ret == 0; + DBUG_RETURN(ret); +} + diff --git a/sql/sp.h b/sql/sp.h new file mode 100644 index 00000000000..38b7d43c08f --- /dev/null +++ b/sql/sp.h @@ -0,0 +1,117 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_H_ +#define _SP_H_ + +// Return codes from sp_create_*, sp_drop_*, and sp_show_*: +#define SP_OK 0 +#define SP_KEY_NOT_FOUND -1 +#define SP_OPEN_TABLE_FAILED -2 +#define SP_WRITE_ROW_FAILED -3 +#define SP_DELETE_ROW_FAILED -4 +#define SP_GET_FIELD_FAILED -5 +#define SP_PARSE_ERROR -6 +#define SP_INTERNAL_ERROR -7 +#define SP_NO_DB_ERROR -8 +#define SP_BAD_IDENTIFIER -9 +#define SP_BODY_TOO_LONG -10 + +/* Drop all routines in database 'db' */ +int +sp_drop_db_routines(THD *thd, char *db); + +sp_head * +sp_find_routine(THD *thd, int type, sp_name *name, + sp_cache **cp, bool cache_only); + +int +sp_exist_routines(THD *thd, TABLE_LIST *procs, bool any, bool no_error); + +int +sp_routine_exists_in_table(THD *thd, int type, sp_name *name); + +int +sp_create_procedure(THD *thd, sp_head *sp); + +int +sp_drop_procedure(THD *thd, sp_name *name); + + +int +sp_update_procedure(THD *thd, sp_name *name, 
st_sp_chistics *chistics); + +int +sp_show_create_procedure(THD *thd, sp_name *name); + +int +sp_show_status_procedure(THD *thd, const char *wild); + +int +sp_create_function(THD *thd, sp_head *sp); + +int +sp_drop_function(THD *thd, sp_name *name); + +int +sp_update_function(THD *thd, sp_name *name, st_sp_chistics *chistics); + +int +sp_show_create_function(THD *thd, sp_name *name); + +int +sp_show_status_function(THD *thd, const char *wild); + + +/* + Procedures for pre-caching of stored routines and building table list + for prelocking. +*/ +void sp_get_prelocking_info(THD *thd, bool *need_prelocking, + bool *first_no_prelocking); +void sp_add_used_routine(LEX *lex, Query_arena *arena, + sp_name *rt, char rt_type); +void sp_remove_not_own_routines(LEX *lex); +void sp_update_sp_used_routines(HASH *dst, HASH *src); +int sp_cache_routines_and_add_tables(THD *thd, LEX *lex, + bool first_no_prelock, + bool *tabs_changed); +int sp_cache_routines_and_add_tables_for_view(THD *thd, LEX *lex, + TABLE_LIST *view); +int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex, + TABLE_LIST *table); + +extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first); + +/* + Routines which allow open/lock and close mysql.proc table even when + we already have some tables open and locked. +*/ +TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup); +void close_proc_table(THD *thd, Open_tables_state *backup); + + +/* + Do a "use new_db". The current db is stored at old_db. If new_db is the + same as the current one, nothing is changed. dbchangedp is set to true if + the db was actually changed. 
+*/ + +int +sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db, + bool no_access_check, bool *dbchangedp); + +#endif /* _SP_H_ */ diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc new file mode 100644 index 00000000000..de4e1efd496 --- /dev/null +++ b/sql/sp_cache.cc @@ -0,0 +1,259 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "mysql_priv.h" +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation +#endif +#include "sp_cache.h" +#include "sp_head.h" + +static pthread_mutex_t Cversion_lock; +static ulong volatile Cversion= 0; + + +/* + Cache of stored routines. +*/ + +class sp_cache +{ +public: + ulong version; + + sp_cache(); + ~sp_cache(); + + inline void insert(sp_head *sp) + { + /* TODO: why don't we check return value? 
*/ + my_hash_insert(&m_hashtable, (const byte *)sp); + } + + inline sp_head *lookup(char *name, uint namelen) + { + return (sp_head *)hash_search(&m_hashtable, (const byte *)name, namelen); + } + +#ifdef NOT_USED + inline bool remove(char *name, uint namelen) + { + sp_head *sp= lookup(name, namelen); + if (sp) + { + hash_delete(&m_hashtable, (byte *)sp); + return TRUE; + } + return FALSE; + } +#endif + + inline void remove_all() + { + cleanup(); + init(); + } + +private: + void init(); + void cleanup(); + + /* All routines in this cache */ + HASH m_hashtable; +}; // class sp_cache + + +/* Initialize the SP caching once at startup */ + +void sp_cache_init() +{ + pthread_mutex_init(&Cversion_lock, MY_MUTEX_INIT_FAST); +} + + +/* + Clear the cache *cp and set *cp to NULL. + + SYNOPSIS + sp_cache_clear() + cp Pointer to cache to clear + + NOTE + This function doesn't invalidate other caches. +*/ + +void sp_cache_clear(sp_cache **cp) +{ + sp_cache *c= *cp; + + if (c) + { + delete c; + *cp= NULL; + } +} + + +/* + Insert a routine into the cache. + + SYNOPSIS + sp_cache_insert() + cp The cache to put routine into + sp Routine to insert. + + TODO: Perhaps it will be more straightforward if in case we returned an + error from this function when we couldn't allocate sp_cache. (right + now failure to put routine into cache will cause a 'SP not found' + error to be reported at some later time) +*/ + +void sp_cache_insert(sp_cache **cp, sp_head *sp) +{ + sp_cache *c; + + if (!(c= *cp)) + { + if (!(c= new sp_cache())) + return; // End of memory error + c->version= Cversion; // No need to lock when reading long variable + } + DBUG_PRINT("info",("sp_cache: inserting: %.*s", sp->m_qname.length, + sp->m_qname.str)); + c->insert(sp); + *cp= c; // Update *cp if it was NULL +} + + +/* + Look up a routine in the cache. 
+ SYNOPSIS + sp_cache_lookup() + cp Cache to look into + name Name of routine to find + + NOTE + An obsolete (but not more obsolete than since the last + sp_cache_flush_obsolete call) routine may be returned. + + RETURN + The routine or + NULL if the routine was not found. +*/ + +sp_head *sp_cache_lookup(sp_cache **cp, sp_name *name) +{ + sp_cache *c= *cp; + if (! c) + return NULL; + return c->lookup(name->m_qname.str, name->m_qname.length); +} + + +/* + Invalidate all routines in all caches. + + SYNOPSIS + sp_cache_invalidate() + + NOTE + This is called when a VIEW definition is modified. We can't destroy sp_head + objects here as one may modify VIEW definitions from prelocking-free SPs. +*/ + +void sp_cache_invalidate() +{ + DBUG_PRINT("info",("sp_cache: invalidating")); + thread_safe_increment(Cversion, &Cversion_lock); +} + + +/* + Remove out-of-date SPs from the cache. + + SYNOPSIS + sp_cache_flush_obsolete() + cp Cache to flush + + NOTE + This invalidates pointers to sp_head objects this thread uses. + In practice that means 'don't call this function when inside SP'. +*/ + +void sp_cache_flush_obsolete(sp_cache **cp) +{ + sp_cache *c= *cp; + if (c) + { + ulong v; + v= Cversion; // No need to lock when reading long variable + if (c->version < v) + { + DBUG_PRINT("info",("sp_cache: deleting all functions")); + /* We need to delete all elements. 
*/ + c->remove_all(); + c->version= v; + } + } +} + + +/************************************************************************* + Internal functions + *************************************************************************/ + +static byte *hash_get_key_for_sp_head(const byte *ptr, uint *plen, + my_bool first) +{ + sp_head *sp= (sp_head *)ptr; + *plen= sp->m_qname.length; + return (byte*) sp->m_qname.str; +} + + +static void +hash_free_sp_head(void *p) +{ + sp_head *sp= (sp_head *)p; + delete sp; +} + + +sp_cache::sp_cache() +{ + init(); +} + + +sp_cache::~sp_cache() +{ + hash_free(&m_hashtable); +} + + +void +sp_cache::init() +{ + hash_init(&m_hashtable, system_charset_info, 0, 0, 0, + hash_get_key_for_sp_head, hash_free_sp_head, 0); + version= 0; +} + + +void +sp_cache::cleanup() +{ + hash_free(&m_hashtable); +} diff --git a/sql/sp_cache.h b/sql/sp_cache.h new file mode 100644 index 00000000000..9d34c9a2fb5 --- /dev/null +++ b/sql/sp_cache.h @@ -0,0 +1,62 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_CACHE_H_ +#define _SP_CACHE_H_ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +/* + Stored procedures/functions cache. This is used as follows: + * Each thread has its own cache. 
+ * Each sp_head object is put into its thread cache before it is used, and + then remains in the cache until deleted. +*/ + +class sp_head; +class sp_cache; + +/* + Cache usage scenarios: + 1. Application-wide init: + sp_cache_init(); + + 2. SP execution in thread: + 2.1 While holding sp_head* pointers: + + // look up a routine in the cache (no checks if it is up to date or not) + sp_cache_lookup(); + + sp_cache_insert(); + sp_cache_invalidate(); + + 2.2 When not holding any sp_head* pointers: + sp_cache_flush_obsolete(); + + 3. Before thread exit: + sp_cache_clear(); +*/ + +void sp_cache_init(); +void sp_cache_clear(sp_cache **cp); +void sp_cache_insert(sp_cache **cp, sp_head *sp); +sp_head *sp_cache_lookup(sp_cache **cp, sp_name *name); +void sp_cache_invalidate(); +void sp_cache_flush_obsolete(sp_cache **cp); + +#endif /* _SP_CACHE_H_ */ diff --git a/sql/sp_head.cc b/sql/sp_head.cc new file mode 100644 index 00000000000..1f44fa6639c --- /dev/null +++ b/sql/sp_head.cc @@ -0,0 +1,3648 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "mysql_priv.h" +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation +#endif +#include "sp_head.h" +#include "sp.h" +#include "sp_pcontext.h" +#include "sp_rcontext.h" +#include "sp_cache.h" + +/* + Sufficient max length of printed destinations and frame offsets (all uints). 
+*/ +#define SP_INSTR_UINT_MAXLEN 8 +#define SP_STMT_PRINT_MAXLEN 40 + + +#include <my_user.h> + +Item_result +sp_map_result_type(enum enum_field_types type) +{ + switch (type) { + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_TINY: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_INT24: + return INT_RESULT; + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: + return DECIMAL_RESULT; + case MYSQL_TYPE_FLOAT: + case MYSQL_TYPE_DOUBLE: + return REAL_RESULT; + default: + return STRING_RESULT; + } +} + + +Item::Type +sp_map_item_type(enum enum_field_types type) +{ + switch (type) { + case MYSQL_TYPE_BIT: + case MYSQL_TYPE_TINY: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_INT24: + return Item::INT_ITEM; + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: + return Item::DECIMAL_ITEM; + case MYSQL_TYPE_FLOAT: + case MYSQL_TYPE_DOUBLE: + return Item::REAL_ITEM; + default: + return Item::STRING_ITEM; + } +} + + +/* + Return a string representation of the Item value. + + NOTE: If the item has a string result type, the string is escaped + according to its character set. + + SYNOPSIS + item a pointer to the Item + str string buffer for representation of the value + + RETURN + NULL on error + a pointer to valid a valid string on success +*/ + +static String * +sp_get_item_value(THD *thd, Item *item, String *str) +{ + switch (item->result_type()) { + case REAL_RESULT: + case INT_RESULT: + case DECIMAL_RESULT: + return item->val_str(str); + + case STRING_RESULT: + { + String *result= item->val_str(str); + + if (!result) + return NULL; + + { + char buf_holder[STRING_BUFFER_USUAL_SIZE]; + String buf(buf_holder, sizeof(buf_holder), result->charset()); + CHARSET_INFO *cs= thd->variables.character_set_client; + + /* We must reset length of the buffer, because of String specificity. 
*/ + buf.length(0); + + buf.append('_'); + buf.append(result->charset()->csname); + if (cs->escape_with_backslash_is_dangerous) + buf.append(' '); + append_query_string(cs, result, &buf); + str->copy(buf); + + return str; + } + } + + case ROW_RESULT: + default: + return NULL; + } +} + + +/* + SYNOPSIS + sp_get_flags_for_command() + + DESCRIPTION + Returns a combination of: + * sp_head::MULTI_RESULTS: added if the 'cmd' is a command that might + result in multiple result sets being sent back. + * sp_head::CONTAINS_DYNAMIC_SQL: added if 'cmd' is one of PREPARE, + EXECUTE, DEALLOCATE. +*/ + +uint +sp_get_flags_for_command(LEX *lex) +{ + uint flags; + + switch (lex->sql_command) { + case SQLCOM_SELECT: + if (lex->result) + { + flags= 0; /* This is a SELECT with INTO clause */ + break; + } + /* fallthrough */ + case SQLCOM_ANALYZE: + case SQLCOM_OPTIMIZE: + case SQLCOM_PRELOAD_KEYS: + case SQLCOM_ASSIGN_TO_KEYCACHE: + case SQLCOM_CHECKSUM: + case SQLCOM_CHECK: + case SQLCOM_HA_READ: + case SQLCOM_SHOW_BINLOGS: + case SQLCOM_SHOW_BINLOG_EVENTS: + case SQLCOM_SHOW_CHARSETS: + case SQLCOM_SHOW_COLLATIONS: + case SQLCOM_SHOW_COLUMN_TYPES: + case SQLCOM_SHOW_CREATE: + case SQLCOM_SHOW_CREATE_DB: + case SQLCOM_SHOW_CREATE_FUNC: + case SQLCOM_SHOW_CREATE_PROC: + case SQLCOM_SHOW_DATABASES: + case SQLCOM_SHOW_ERRORS: + case SQLCOM_SHOW_FIELDS: + case SQLCOM_SHOW_GRANTS: + case SQLCOM_SHOW_INNODB_STATUS: + case SQLCOM_SHOW_KEYS: + case SQLCOM_SHOW_LOGS: + case SQLCOM_SHOW_MASTER_STAT: + case SQLCOM_SHOW_MUTEX_STATUS: + case SQLCOM_SHOW_NEW_MASTER: + case SQLCOM_SHOW_OPEN_TABLES: + case SQLCOM_SHOW_PRIVILEGES: + case SQLCOM_SHOW_PROCESSLIST: + case SQLCOM_SHOW_SLAVE_HOSTS: + case SQLCOM_SHOW_SLAVE_STAT: + case SQLCOM_SHOW_STATUS: + case SQLCOM_SHOW_STATUS_FUNC: + case SQLCOM_SHOW_STATUS_PROC: + case SQLCOM_SHOW_STORAGE_ENGINES: + case SQLCOM_SHOW_TABLES: + case SQLCOM_SHOW_VARIABLES: + case SQLCOM_SHOW_WARNS: + case SQLCOM_SHOW_PROC_CODE: + case SQLCOM_SHOW_FUNC_CODE: + case 
SQLCOM_REPAIR: + case SQLCOM_BACKUP_TABLE: + case SQLCOM_RESTORE_TABLE: + flags= sp_head::MULTI_RESULTS; + break; + /* + EXECUTE statement may return a result set, but doesn't have to. + We can't, however, know it in advance, and therefore must add + this statement here. This is ok, as is equivalent to a result-set + statement within an IF condition. + */ + case SQLCOM_EXECUTE: + flags= sp_head::MULTI_RESULTS | sp_head::CONTAINS_DYNAMIC_SQL; + break; + case SQLCOM_PREPARE: + case SQLCOM_DEALLOCATE_PREPARE: + flags= sp_head::CONTAINS_DYNAMIC_SQL; + break; + case SQLCOM_CREATE_TABLE: + if (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) + flags= 0; + else + flags= sp_head::HAS_COMMIT_OR_ROLLBACK; + break; + case SQLCOM_DROP_TABLE: + if (lex->drop_temporary) + flags= 0; + else + flags= sp_head::HAS_COMMIT_OR_ROLLBACK; + break; + case SQLCOM_FLUSH: + flags= sp_head::HAS_SQLCOM_FLUSH; + break; + case SQLCOM_RESET: + flags= sp_head::HAS_SQLCOM_RESET; + break; + case SQLCOM_CREATE_INDEX: + case SQLCOM_CREATE_DB: + case SQLCOM_CREATE_VIEW: + case SQLCOM_CREATE_TRIGGER: + case SQLCOM_CREATE_USER: + case SQLCOM_ALTER_TABLE: + case SQLCOM_BEGIN: + case SQLCOM_RENAME_TABLE: + case SQLCOM_RENAME_USER: + case SQLCOM_DROP_INDEX: + case SQLCOM_DROP_DB: + case SQLCOM_DROP_USER: + case SQLCOM_DROP_VIEW: + case SQLCOM_DROP_TRIGGER: + case SQLCOM_TRUNCATE: + case SQLCOM_COMMIT: + case SQLCOM_ROLLBACK: + case SQLCOM_LOAD: + case SQLCOM_LOAD_MASTER_DATA: + case SQLCOM_LOCK_TABLES: + case SQLCOM_CREATE_PROCEDURE: + case SQLCOM_CREATE_SPFUNCTION: + case SQLCOM_ALTER_PROCEDURE: + case SQLCOM_ALTER_FUNCTION: + case SQLCOM_DROP_PROCEDURE: + case SQLCOM_DROP_FUNCTION: + flags= sp_head::HAS_COMMIT_OR_ROLLBACK; + break; + default: + flags= 0; + break; + } + return flags; +} + + +/* + Prepare an Item for evaluation (call of fix_fields). 
+ + SYNOPSIS + sp_prepare_func_item() + thd thread handler + it_addr pointer on item refernce + + RETURN + NULL error + prepared item +*/ + +Item * +sp_prepare_func_item(THD* thd, Item **it_addr) +{ + DBUG_ENTER("sp_prepare_func_item"); + it_addr= (*it_addr)->this_item_addr(thd, it_addr); + + if (!(*it_addr)->fixed && + ((*it_addr)->fix_fields(thd, it_addr) || + (*it_addr)->check_cols(1))) + { + DBUG_PRINT("info", ("fix_fields() failed")); + DBUG_RETURN(NULL); + } + DBUG_RETURN(*it_addr); +} + + +/* + Evaluate an expression and store the result in the field. + + SYNOPSIS + sp_eval_expr() + thd - current thread object + expr_item - the root item of the expression + result_field - the field to store the result + + RETURN VALUES + FALSE on success + TRUE on error +*/ + +bool +sp_eval_expr(THD *thd, Field *result_field, Item **expr_item_ptr) +{ + Item *expr_item; + + DBUG_ENTER("sp_eval_expr"); + + if (!(expr_item= sp_prepare_func_item(thd, expr_item_ptr))) + DBUG_RETURN(TRUE); + + bool err_status= FALSE; + + /* + Set THD flags to emit warnings/errors in case of overflow/type errors + during saving the item into the field. + + Save original values and restore them after save. + */ + + enum_check_fields save_count_cuted_fields= thd->count_cuted_fields; + bool save_abort_on_warning= thd->abort_on_warning; + bool save_no_trans_update= thd->no_trans_update; + + thd->count_cuted_fields= CHECK_FIELD_ERROR_FOR_NULL; + thd->abort_on_warning= + thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES); + thd->no_trans_update= 0; + + /* Save the value in the field. Convert the value if needed. */ + + expr_item->save_in_field(result_field, 0); + + thd->count_cuted_fields= save_count_cuted_fields; + thd->abort_on_warning= save_abort_on_warning; + thd->no_trans_update= save_no_trans_update; + + if (thd->net.report_error) + { + /* Return error status if something went wrong. 
*/ + err_status= TRUE; + } + + DBUG_RETURN(err_status); +} + + +/* + * + * sp_name + * + */ + +void +sp_name::init_qname(THD *thd) +{ + m_sroutines_key.length= m_db.length + m_name.length + 2; + if (!(m_sroutines_key.str= thd->alloc(m_sroutines_key.length + 1))) + return; + m_qname.length= m_sroutines_key.length - 1; + m_qname.str= m_sroutines_key.str + 1; + sprintf(m_qname.str, "%.*s.%.*s", + m_db.length, (m_db.length ? m_db.str : ""), + m_name.length, m_name.str); +} + + +/* + Check that the name 'ident' is ok. It's assumed to be an 'ident' + from the parser, so we only have to check length and trailing spaces. + The former is a standard requirement (and 'show status' assumes a + non-empty name), the latter is a mysql:ism as trailing spaces are + removed by get_field(). + + RETURN + TRUE - bad name + FALSE - name is ok +*/ + +bool +check_routine_name(LEX_STRING ident) +{ + return (!ident.str || !ident.str[0] || ident.str[ident.length-1] == ' '); +} + +/* ------------------------------------------------------------------ */ + + +/* + * + * sp_head + * + */ + +void * +sp_head::operator new(size_t size) +{ + DBUG_ENTER("sp_head::operator new"); + MEM_ROOT own_root; + sp_head *sp; + + init_alloc_root(&own_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC); + sp= (sp_head *) alloc_root(&own_root, size); + sp->main_mem_root= own_root; + DBUG_PRINT("info", ("mem_root 0x%lx", (ulong) &sp->mem_root)); + DBUG_RETURN(sp); +} + +void +sp_head::operator delete(void *ptr, size_t size) +{ + DBUG_ENTER("sp_head::operator delete"); + MEM_ROOT own_root; + sp_head *sp= (sp_head *) ptr; + + /* Make a copy of main_mem_root as free_root will free the sp */ + own_root= sp->main_mem_root; + DBUG_PRINT("info", ("mem_root 0x%lx moved to 0x%lx", + (ulong) &sp->mem_root, (ulong) &own_root)); + free_root(&own_root, MYF(0)); + + DBUG_VOID_RETURN; +} + + +sp_head::sp_head() + :Query_arena(&main_mem_root, INITIALIZED_FOR_SP), + m_flags(0), m_recursion_level(0), m_next_cached_sp(0), + m_cont_level(0) 
+{ + m_first_instance= this; + m_first_free_instance= this; + m_last_cached_sp= this; + + m_return_field_def.charset = NULL; + + extern byte * + sp_table_key(const byte *ptr, uint *plen, my_bool first); + DBUG_ENTER("sp_head::sp_head"); + + m_backpatch.empty(); + m_cont_backpatch.empty(); + m_lex.empty(); + hash_init(&m_sptabs, system_charset_info, 0, 0, 0, sp_table_key, 0, 0); + hash_init(&m_sroutines, system_charset_info, 0, 0, 0, sp_sroutine_key, 0, 0); + DBUG_VOID_RETURN; +} + + +void +sp_head::init(LEX *lex) +{ + DBUG_ENTER("sp_head::init"); + + lex->spcont= m_pcont= new sp_pcontext(); + + /* + Altough trg_table_fields list is used only in triggers we init for all + types of stored procedures to simplify reset_lex()/restore_lex() code. + */ + lex->trg_table_fields.empty(); + my_init_dynamic_array(&m_instr, sizeof(sp_instr *), 16, 8); + m_param_begin= m_param_end= m_body_begin= 0; + m_qname.str= m_db.str= m_name.str= m_params.str= + m_body.str= m_defstr.str= 0; + m_qname.length= m_db.length= m_name.length= m_params.length= + m_body.length= m_defstr.length= 0; + m_return_field_def.charset= NULL; + DBUG_VOID_RETURN; +} + + +void +sp_head::init_sp_name(THD *thd, sp_name *spname) +{ + DBUG_ENTER("sp_head::init_sp_name"); + + /* Must be initialized in the parser. */ + + DBUG_ASSERT(spname && spname->m_db.str && spname->m_db.length); + + /* We have to copy strings to get them into the right memroot. 
*/ + + m_db.length= spname->m_db.length; + m_db.str= strmake_root(thd->mem_root, spname->m_db.str, spname->m_db.length); + + m_name.length= spname->m_name.length; + m_name.str= strmake_root(thd->mem_root, spname->m_name.str, + spname->m_name.length); + + if (spname->m_qname.length == 0) + spname->init_qname(thd); + + m_qname.length= spname->m_qname.length; + m_qname.str= strmake_root(thd->mem_root, spname->m_qname.str, + m_qname.length); +} + + +void +sp_head::init_strings(THD *thd, LEX *lex) +{ + DBUG_ENTER("sp_head::init_strings"); + uchar *endp; /* Used to trim the end */ + /* During parsing, we must use thd->mem_root */ + MEM_ROOT *root= thd->mem_root; + + if (m_param_begin && m_param_end) + { + m_params.length= m_param_end - m_param_begin; + m_params.str= strmake_root(root, + (char *)m_param_begin, m_params.length); + } + + /* If ptr has overrun end_of_query then end_of_query is the end */ + endp= (lex->ptr > lex->end_of_query ? lex->end_of_query : lex->ptr); + /* + Trim "garbage" at the end. This is sometimes needed with the + "/ * ! VERSION... * /" wrapper in dump files. 
+ */ + endp= skip_rear_comments(m_body_begin, endp); + + m_body.length= endp - m_body_begin; + m_body.str= strmake_root(root, (char *)m_body_begin, m_body.length); + m_defstr.length= endp - lex->buf; + m_defstr.str= strmake_root(root, (char *)lex->buf, m_defstr.length); + DBUG_VOID_RETURN; +} + + +static TYPELIB * +create_typelib(MEM_ROOT *mem_root, create_field *field_def, List<String> *src) +{ + TYPELIB *result= NULL; + CHARSET_INFO *cs= field_def->charset; + DBUG_ENTER("create_typelib"); + if (src->elements) + { + result= (TYPELIB*) alloc_root(mem_root, sizeof(TYPELIB)); + result->count= src->elements; + result->name= ""; + if (!(result->type_names=(const char **) + alloc_root(mem_root,(sizeof(char *)+sizeof(int))*(result->count+1)))) + DBUG_RETURN(0); + result->type_lengths= (unsigned int *)(result->type_names + result->count+1); + List_iterator<String> it(*src); + String conv; + for (uint i=0; i < result->count; i++) + { + uint32 dummy; + uint length; + String *tmp= it++; + + if (String::needs_conversion(tmp->length(), tmp->charset(), + cs, &dummy)) + { + uint cnv_errs; + conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs); + + length= conv.length(); + result->type_names[i]= (char*) strmake_root(mem_root, conv.ptr(), + length); + } + else + { + length= tmp->length(); + result->type_names[i]= strmake_root(mem_root, tmp->ptr(), length); + } + + // Strip trailing spaces. 
+ length= cs->cset->lengthsp(cs, result->type_names[i], length); + result->type_lengths[i]= length; + ((uchar *)result->type_names[i])[length]= '\0'; + } + result->type_names[result->count]= 0; + result->type_lengths[result->count]= 0; + } + DBUG_RETURN(result); +} + + +int +sp_head::create(THD *thd) +{ + DBUG_ENTER("sp_head::create"); + int ret; + + DBUG_PRINT("info", ("type: %d name: %s params: %s body: %s", + m_type, m_name.str, m_params.str, m_body.str)); + + if (m_type == TYPE_ENUM_FUNCTION) + ret= sp_create_function(thd, this); + else + ret= sp_create_procedure(thd, this); + + DBUG_RETURN(ret); +} + +sp_head::~sp_head() +{ + destroy(); + delete m_next_cached_sp; + if (m_thd) + restore_thd_mem_root(m_thd); +} + +void +sp_head::destroy() +{ + sp_instr *i; + LEX *lex; + DBUG_ENTER("sp_head::destroy"); + DBUG_PRINT("info", ("name: %s", m_name.str)); + + for (uint ip = 0 ; (i = get_instr(ip)) ; ip++) + delete i; + delete_dynamic(&m_instr); + m_pcont->destroy(); + free_items(); + + /* + If we have non-empty LEX stack then we just came out of parser with + error. Now we should delete all auxilary LEXes and restore original + THD::lex (In this case sp_head::restore_thd_mem_root() was not called + too, so m_thd points to the current thread context). + It is safe to not update LEX::ptr because further query string parsing + and execution will be stopped anyway. + */ + DBUG_ASSERT(m_lex.is_empty() || m_thd); + while ((lex= (LEX *)m_lex.pop())) + { + lex_end(m_thd->lex); + delete m_thd->lex; + m_thd->lex= lex; + } + + hash_free(&m_sptabs); + hash_free(&m_sroutines); + DBUG_VOID_RETURN; +} + + +/* + This is only used for result fields from functions (both during + fix_length_and_dec() and evaluation). +*/ + +Field * +sp_head::create_result_field(uint field_max_length, const char *field_name, + TABLE *table) +{ + uint field_length; + Field *field; + + DBUG_ENTER("sp_head::create_result_field"); + + field_length= !m_return_field_def.length ? 
+ field_max_length : m_return_field_def.length; + + field= ::make_field((char*) 0, /* field ptr */ + field_length, /* field [max] length */ + (uchar*) "", /* null ptr */ + 0, /* null bit */ + m_return_field_def.pack_flag, + m_return_field_def.sql_type, + m_return_field_def.charset, + m_return_field_def.geom_type, + Field::NONE, /* unreg check */ + m_return_field_def.interval, + field_name ? field_name : (const char *) m_name.str, + table); + + DBUG_RETURN(field); +} + + +int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b) +{ + return (int)((*a)->pos_in_query - (*b)->pos_in_query); +} + + +/* + StoredRoutinesBinlogging + Top-down overview: + + 1. Statements + + Statements that have is_update_query(stmt) == TRUE are written into the + binary log verbatim. + Examples: + UPDATE tbl SET tbl.x = spfunc_w_side_effects() + UPDATE tbl SET tbl.x=1 WHERE spfunc_w_side_effect_that_returns_false(tbl.y) + + Statements that have is_update_query(stmt) == FALSE (e.g. SELECTs) are not + written into binary log. Instead we catch function calls the statement + makes and write it into binary log separately (see #3). + + 2. PROCEDURE calls + + CALL statements are not written into binary log. Instead + * Any FUNCTION invocation (in SET, IF, WHILE, OPEN CURSOR and other SP + instructions) is written into binlog separately. + + * Each statement executed in SP is binlogged separately, according to rules + in #1, with the exception that we modify query string: we replace uses + of SP local variables with NAME_CONST('spvar_name', <spvar-value>) calls. + This substitution is done in subst_spvars(). + + 3. FUNCTION calls + + In sp_head::execute_function(), we check + * If this function invocation is done from a statement that is written + into the binary log. 
+ * If there were any attempts to write events to the binary log during
+ function execution (grep for start_union_events and stop_union_events)
+
+ If the answers are No and Yes, we write the function call into the binary
+ log as "SELECT spfunc(<param1value>, <param2value>, ...)".
+
+
+ 4. Miscellaneous issues.
+
+ 4.1 User variables.
+
+ When we call mysql_bin_log.write() for an SP statement, thd->user_var_events
+ must hold set<{var_name, value}> pairs for all user variables used during
+ the statement execution.
+ This set is produced by tracking user variable reads during statement
+ execution.
+
+ For SPs, this has the following implications:
+ 1) thd->user_var_events may contain events from several SP statements and
+ needs to be valid after execution of these statements was finished. In
+ order to achieve that, we
+ * Allocate user_var_events array elements on appropriate mem_root (grep
+ for user_var_events_alloc).
+ * Use is_query_in_union() to determine if user_var_event is created.
+
+ 2) We need to empty thd->user_var_events after we have written a function
+ call. This is currently done by making
+ reset_dynamic(&thd->user_var_events);
+ calls in several different places. (TODO consider moving this into
+ mysql_bin_log.write() function)
+*/
+
+
+/*
+ Replace thd->query{_length} with a string that one can write to the binlog.
+
+ SYNOPSIS
+ subst_spvars()
+ thd Current thread.
+ instr Instruction (we look for Item_splocal instances in
+ instr->free_list)
+ query_str Original query string
+
+ DESCRIPTION
+
+ The binlog-suitable string is produced by replacing references to SP local
+ variables with NAME_CONST('sp_var_name', value) calls.
+
+ RETURN
+ FALSE on success
+ thd->query{_length} either has been appropriately replaced or there
+ is no need for replacements.
+ TRUE out of memory error.
+*/ + +static bool +subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str) +{ + DBUG_ENTER("subst_spvars"); + if (thd->prelocked_mode == NON_PRELOCKED && mysql_bin_log.is_open()) + { + Dynamic_array<Item_splocal*> sp_vars_uses; + char *pbuf, *cur, buffer[512]; + String qbuf(buffer, sizeof(buffer), &my_charset_bin); + int prev_pos, res; + + /* Find all instances of Item_splocal used in this statement */ + for (Item *item= instr->free_list; item; item= item->next) + { + if (item->is_splocal()) + { + Item_splocal *item_spl= (Item_splocal*)item; + if (item_spl->pos_in_query) + sp_vars_uses.append(item_spl); + } + } + if (!sp_vars_uses.elements()) + DBUG_RETURN(FALSE); + + /* Sort SP var refs by their occurences in the query */ + sp_vars_uses.sort(cmp_splocal_locations); + + /* + Construct a statement string where SP local var refs are replaced + with "NAME_CONST(name, value)" + */ + qbuf.length(0); + cur= query_str->str; + prev_pos= res= 0; + for (Item_splocal **splocal= sp_vars_uses.front(); + splocal < sp_vars_uses.back(); splocal++) + { + Item *val; + + char str_buffer[STRING_BUFFER_USUAL_SIZE]; + String str_value_holder(str_buffer, sizeof(str_buffer), + &my_charset_latin1); + String *str_value; + + /* append the text between sp ref occurences */ + res|= qbuf.append(cur + prev_pos, (*splocal)->pos_in_query - prev_pos); + prev_pos= (*splocal)->pos_in_query + (*splocal)->m_name.length; + + /* append the spvar substitute */ + res|= qbuf.append(STRING_WITH_LEN(" NAME_CONST('")); + res|= qbuf.append((*splocal)->m_name.str, (*splocal)->m_name.length); + res|= qbuf.append(STRING_WITH_LEN("',")); + res|= (*splocal)->fix_fields(thd, (Item **) splocal); + + if (res) + break; + + val= (*splocal)->this_item(); + DBUG_PRINT("info", ("print %p", val)); + str_value= sp_get_item_value(thd, val, &str_value_holder); + if (str_value) + res|= qbuf.append(*str_value); + else + res|= qbuf.append(STRING_WITH_LEN("NULL")); + res|= qbuf.append(')'); + if (res) + break; + } + res|= 
qbuf.append(cur + prev_pos, query_str->length - prev_pos); + if (res) + DBUG_RETURN(TRUE); + + if (!(pbuf= thd->strmake(qbuf.ptr(), qbuf.length()))) + DBUG_RETURN(TRUE); + + thd->query= pbuf; + thd->query_length= qbuf.length(); + } + DBUG_RETURN(FALSE); +} + + +/* + Return appropriate error about recursion limit reaching + + SYNOPSIS + sp_head::recursion_level_error() + thd Thread handle + + NOTE + For functions and triggers we return error about prohibited recursion. + For stored procedures we return about reaching recursion limit. +*/ + +void sp_head::recursion_level_error(THD *thd) +{ + if (m_type == TYPE_ENUM_PROCEDURE) + { + my_error(ER_SP_RECURSION_LIMIT, MYF(0), + thd->variables.max_sp_recursion_depth, + m_name.str); + } + else + my_error(ER_SP_NO_RECURSION, MYF(0)); +} + + +/* + Execute the routine. The main instruction jump loop is there + Assume the parameters already set. + + RETURN + FALSE on success + TRUE on error + +*/ + +bool +sp_head::execute(THD *thd) +{ + DBUG_ENTER("sp_head::execute"); + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; + bool dbchanged; + sp_rcontext *ctx; + bool err_status= FALSE; + uint ip= 0; + ulong save_sql_mode; + bool save_abort_on_warning; + Query_arena *old_arena; + /* per-instruction arena */ + MEM_ROOT execute_mem_root; + Query_arena execute_arena(&execute_mem_root, INITIALIZED_FOR_SP), + backup_arena; + query_id_t old_query_id; + TABLE *old_derived_tables; + LEX *old_lex; + Item_change_list old_change_list; + String old_packet; + + /* Use some extra margin for possible SP recursion and functions */ + if (check_stack_overrun(thd, 8 * STACK_MIN_SIZE, (char*)&old_packet)) + DBUG_RETURN(TRUE); + + /* init per-instruction memroot */ + init_alloc_root(&execute_mem_root, MEM_ROOT_BLOCK_SIZE, 0); + + DBUG_ASSERT(!(m_flags & IS_INVOKED)); + m_flags|= IS_INVOKED; + m_first_instance->m_first_free_instance= m_next_cached_sp; + if (m_next_cached_sp) + { + DBUG_PRINT("info", + ("first free for 
0x%lx ++: 0x%lx->0x%lx level: %lu flags %x", + (ulong)m_first_instance, (ulong) this, + (ulong) m_next_cached_sp, + m_next_cached_sp->m_recursion_level, + m_next_cached_sp->m_flags)); + } + /* + Check that if there are not any instances after this one then + pointer to the last instance points on this instance or if there are + some instances after this one then recursion level of next instance + greater then recursion level of current instance on 1 + */ + DBUG_ASSERT((m_next_cached_sp == 0 && + m_first_instance->m_last_cached_sp == this) || + (m_recursion_level + 1 == m_next_cached_sp->m_recursion_level)); + + if (m_db.length && + (err_status= sp_use_new_db(thd, m_db, &old_db, 0, &dbchanged))) + goto done; + + if ((ctx= thd->spcont)) + ctx->clear_handler(); + thd->query_error= 0; + old_arena= thd->stmt_arena; + + /* + We have to save/restore this info when we are changing call level to + be able properly do close_thread_tables() in instructions. + */ + old_query_id= thd->query_id; + old_derived_tables= thd->derived_tables; + thd->derived_tables= 0; + save_sql_mode= thd->variables.sql_mode; + thd->variables.sql_mode= m_sql_mode; + save_abort_on_warning= thd->abort_on_warning; + thd->abort_on_warning= 0; + + /* + It is also more efficient to save/restore current thd->lex once when + do it in each instruction + */ + old_lex= thd->lex; + /* + We should also save Item tree change list to avoid rollback something + too early in the calling query. + */ + old_change_list= thd->change_list; + thd->change_list.empty(); + /* + Cursors will use thd->packet, so they may corrupt data which was prepared + for sending by upper level. OTOH cursors in the same routine can share this + buffer safely so let use use routine-local packet instead of having own + packet buffer for each cursor. + + It is probably safe to use same thd->convert_buff everywhere. + */ + old_packet.swap(thd->packet); + + /* + Switch to per-instruction arena here. 
We can do it since we cleanup + arena after every instruction. + */ + thd->set_n_backup_active_arena(&execute_arena, &backup_arena); + + /* + Save callers arena in order to store instruction results and out + parameters in it later during sp_eval_func_item() + */ + thd->spcont->callers_arena= &backup_arena; + + do + { + sp_instr *i; + uint hip; // Handler ip + + i = get_instr(ip); // Returns NULL when we're done. + if (i == NULL) + break; + DBUG_PRINT("execute", ("Instruction %u", ip)); + /* Don't change NOW() in FUNCTION or TRIGGER */ + if (!thd->in_sub_stmt) + thd->set_time(); // Make current_time() et al work + + /* + We have to set thd->stmt_arena before executing the instruction + to store in the instruction free_list all new items, created + during the first execution (for example expanding of '*' or the + items made during other permanent subquery transformations). + */ + thd->stmt_arena= i; + + /* + Will write this SP statement into binlog separately + (TODO: consider changing the condition to "not inside event union") + */ + if (thd->prelocked_mode == NON_PRELOCKED) + thd->user_var_events_alloc= thd->mem_root; + + err_status= i->execute(thd, &ip); + + /* + If this SP instruction have sent eof, it has caused no_send_error to be + set. Clear it back to allow the next instruction to send error. (multi- + statement execution code clears no_send_error between statements too) + */ + thd->net.no_send_error= 0; + if (i->free_list) + cleanup_items(i->free_list); + + /* + If we've set thd->user_var_events_alloc to mem_root of this SP + statement, clean all the events allocated in it. 
+ */ + if (thd->prelocked_mode == NON_PRELOCKED) + { + reset_dynamic(&thd->user_var_events); + thd->user_var_events_alloc= NULL;//DEBUG + } + + /* we should cleanup free_list and memroot, used by instruction */ + thd->cleanup_after_query(); + free_root(&execute_mem_root, MYF(0)); + + /* + Check if an exception has occurred and a handler has been found + Note: We have to check even if err_status == FALSE, since warnings (and + some errors) don't return a non-zero value. We also have to check even + if thd->killed != 0, since some errors return with this even when a + handler has been found (e.g. "bad data"). + */ + if (ctx) + { + uint hf; + + switch (ctx->found_handler(&hip, &hf)) { + case SP_HANDLER_NONE: + break; + case SP_HANDLER_CONTINUE: + thd->restore_active_arena(&execute_arena, &backup_arena); + thd->set_n_backup_active_arena(&execute_arena, &backup_arena); + ctx->push_hstack(i->get_cont_dest()); + // Fall through + default: + ip= hip; + err_status= FALSE; + ctx->clear_handler(); + ctx->enter_handler(hip); + thd->clear_error(); + thd->killed= THD::NOT_KILLED; + thd->mysys_var->abort= 0; + continue; + } + } + } while (!err_status && !thd->killed); + + thd->restore_active_arena(&execute_arena, &backup_arena); + + thd->spcont->pop_all_cursors(); // To avoid memory leaks after an error + + /* Restore all saved */ + old_packet.swap(thd->packet); + DBUG_ASSERT(thd->change_list.is_empty()); + thd->change_list= old_change_list; + /* To avoid wiping out thd->change_list on old_change_list destruction */ + old_change_list.empty(); + thd->lex= old_lex; + thd->query_id= old_query_id; + DBUG_ASSERT(!thd->derived_tables); + thd->derived_tables= old_derived_tables; + thd->variables.sql_mode= save_sql_mode; + thd->abort_on_warning= save_abort_on_warning; + + thd->stmt_arena= old_arena; + state= EXECUTED; + + done: + DBUG_PRINT("info", ("err_status: %d killed: %d query_error: %d", + err_status, thd->killed, thd->query_error)); + + if (thd->killed) + err_status= TRUE; + /* + 
If the DB has changed, the pointer has changed too, but the + original thd->db will then have been freed + */ + if (dbchanged) + { + /* + No access check when changing back to where we came from. + (It would generate an error from mysql_change_db() when old_db=="") + */ + if (! thd->killed) + err_status|= mysql_change_db(thd, old_db.str, 1); + } + m_flags&= ~IS_INVOKED; + DBUG_PRINT("info", + ("first free for 0x%lx --: 0x%lx->0x%lx, level: %lu, flags %x", + (ulong) m_first_instance, + (ulong) m_first_instance->m_first_free_instance, + (ulong) this, m_recursion_level, m_flags)); + /* + Check that we have one of following: + + 1) there are not free instances which means that this instance is last + in the list of instances (pointer to the last instance point on it and + ther are not other instances after this one in the list) + + 2) There are some free instances which mean that first free instance + should go just after this one and recursion level of that free instance + should be on 1 more then recursion level of this instance. + */ + DBUG_ASSERT((m_first_instance->m_first_free_instance == 0 && + this == m_first_instance->m_last_cached_sp && + m_next_cached_sp == 0) || + (m_first_instance->m_first_free_instance != 0 && + m_first_instance->m_first_free_instance == m_next_cached_sp && + m_first_instance->m_first_free_instance->m_recursion_level == + m_recursion_level + 1)); + m_first_instance->m_first_free_instance= this; + + DBUG_RETURN(err_status); +} + + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +/* + set_routine_security_ctx() changes routine security context, and + checks if there is an EXECUTE privilege in new context. If there is + no EXECUTE privilege, it changes the context back and returns a + error. 
+ + SYNOPSIS + set_routine_security_ctx() + thd thread handle + sp stored routine to change the context for + is_proc TRUE is procedure, FALSE if function + save_ctx pointer to an old security context + + RETURN + TRUE if there was a error, and the context wasn't changed. + FALSE if the context was changed. +*/ + +bool +set_routine_security_ctx(THD *thd, sp_head *sp, bool is_proc, + Security_context **save_ctx) +{ + *save_ctx= 0; + if (sp_change_security_context(thd, sp, save_ctx)) + return TRUE; + + /* + If we changed context to run as another user, we need to check the + access right for the new context again as someone may have revoked + the right to use the procedure from this user. + + TODO: + Cache if the definer has the right to use the object on the + first usage and only reset the cache if someone does a GRANT + statement that 'may' affect this. + */ + if (*save_ctx && + check_routine_access(thd, EXECUTE_ACL, + sp->m_db.str, sp->m_name.str, is_proc, FALSE)) + { + sp_restore_security_context(thd, *save_ctx); + *save_ctx= 0; + return TRUE; + } + + return FALSE; +} +#endif // ! 
NO_EMBEDDED_ACCESS_CHECKS + + +/* + Execute a trigger: + - changes security context for triggers + - switch to new memroot + - call sp_head::execute + - restore old memroot + - restores security context + + SYNOPSIS + sp_head::execute_trigger() + thd Thread handle + db database name + table table name + grant_info GRANT_INFO structure to be filled with + information about definer's privileges + on subject table + + RETURN + FALSE on success + TRUE on error +*/ + +bool +sp_head::execute_trigger(THD *thd, const char *db, const char *table, + GRANT_INFO *grant_info) +{ + sp_rcontext *octx = thd->spcont; + sp_rcontext *nctx = NULL; + bool err_status= FALSE; + MEM_ROOT call_mem_root; + Query_arena call_arena(&call_mem_root, Query_arena::INITIALIZED_FOR_SP); + Query_arena backup_arena; + + DBUG_ENTER("sp_head::execute_trigger"); + DBUG_PRINT("info", ("trigger %s", m_name.str)); + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *save_ctx; + if (sp_change_security_context(thd, this, &save_ctx)) + DBUG_RETURN(TRUE); + + /* + NOTE: TRIGGER_ACL should be used here. + */ + if (check_global_access(thd, SUPER_ACL)) + { + sp_restore_security_context(thd, save_ctx); + DBUG_RETURN(TRUE); + } + + /* + Fetch information about table-level privileges to GRANT_INFO + structure for subject table. Check of privileges that will use it + and information about column-level privileges will happen in + Item_trigger_field::fix_fields(). + */ + fill_effective_table_privileges(thd, grant_info, db, table); +#endif // NO_EMBEDDED_ACCESS_CHECKS + + /* + Prepare arena and memroot for objects which lifetime is whole + duration of trigger call (sp_rcontext, it's tables and items, + sp_cursor and Item_cache holders for case expressions). We can't + use caller's arena/memroot for those objects because in this case + some fixed amount of memory will be consumed for each trigger + invocation and so statements which involve lot of them will hog + memory. 
+ + TODO: we should create sp_rcontext once per command and reuse it + on subsequent executions of a trigger. + */ + init_sql_alloc(&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0); + thd->set_n_backup_active_arena(&call_arena, &backup_arena); + + if (!(nctx= new sp_rcontext(m_pcont, 0, octx)) || + nctx->init(thd)) + { + err_status= TRUE; + goto err_with_cleanup; + } + +#ifndef DBUG_OFF + nctx->sp= this; +#endif + + thd->spcont= nctx; + + err_status= execute(thd); + +err_with_cleanup: + thd->restore_active_arena(&call_arena, &backup_arena); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + sp_restore_security_context(thd, save_ctx); +#endif // NO_EMBEDDED_ACCESS_CHECKS + delete nctx; + call_arena.free_items(); + free_root(&call_mem_root, MYF(0)); + thd->spcont= octx; + + DBUG_RETURN(err_status); +} + + +/* + Execute a function: + - evaluate parameters + - changes security context for SUID routines + - switch to new memroot + - call sp_head::execute + - restore old memroot + - evaluate the return value + - restores security context + + SYNOPSIS + sp_head::execute_function() + thd Thread handle + argp Passed arguments (these are items from containing + statement?) + argcount Number of passed arguments. We need to check if this is + correct. + return_value_fld Save result here. + + RETURN + FALSE on success + TRUE on error +*/ + +bool +sp_head::execute_function(THD *thd, Item **argp, uint argcount, + Field *return_value_fld) +{ + ulonglong binlog_save_options; + bool need_binlog_call; + uint arg_no; + sp_rcontext *octx = thd->spcont; + sp_rcontext *nctx = NULL; + char buf[STRING_BUFFER_USUAL_SIZE]; + String binlog_buf(buf, sizeof(buf), &my_charset_bin); + bool err_status= FALSE; + MEM_ROOT call_mem_root; + Query_arena call_arena(&call_mem_root, Query_arena::INITIALIZED_FOR_SP); + Query_arena backup_arena; + + DBUG_ENTER("sp_head::execute_function"); + DBUG_PRINT("info", ("function %s", m_name.str)); + + /* + Check that the function is called with all specified arguments. 
+ + If it is not, use my_error() to report an error, or it will not terminate + the invoking query properly. + */ + if (argcount != m_pcont->context_var_count()) + { + /* + Need to use my_error here, or it will not terminate the + invoking query properly. + */ + my_error(ER_SP_WRONG_NO_OF_ARGS, MYF(0), + "FUNCTION", m_qname.str, m_pcont->context_var_count(), argcount); + DBUG_RETURN(TRUE); + } + /* + Prepare arena and memroot for objects which lifetime is whole + duration of function call (sp_rcontext, it's tables and items, + sp_cursor and Item_cache holders for case expressions). + We can't use caller's arena/memroot for those objects because + in this case some fixed amount of memory will be consumed for + each function/trigger invocation and so statements which involve + lot of them will hog memory. + TODO: we should create sp_rcontext once per command and reuse + it on subsequent executions of a function/trigger. + */ + init_sql_alloc(&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0); + thd->set_n_backup_active_arena(&call_arena, &backup_arena); + + if (!(nctx= new sp_rcontext(m_pcont, return_value_fld, octx)) || + nctx->init(thd)) + { + thd->restore_active_arena(&call_arena, &backup_arena); + err_status= TRUE; + goto err_with_cleanup; + } + + /* + We have to switch temporarily back to callers arena/memroot. + Function arguments belong to the caller and so the may reference + memory which they will allocate during calculation long after + this function call will be finished (e.g. in Item::cleanup()). + */ + thd->restore_active_arena(&call_arena, &backup_arena); + +#ifndef DBUG_OFF + nctx->sp= this; +#endif + + /* Pass arguments. 
*/ + for (arg_no= 0; arg_no < argcount; arg_no++) + { + /* Arguments must be fixed in Item_func_sp::fix_fields */ + DBUG_ASSERT(argp[arg_no]->fixed); + + if ((err_status= nctx->set_variable(thd, arg_no, &(argp[arg_no])))) + goto err_with_cleanup; + } + + need_binlog_call= mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG); + + /* + Remember the original arguments for unrolled replication of functions + before they are changed by execution. + */ + if (need_binlog_call) + { + binlog_buf.length(0); + binlog_buf.append(STRING_WITH_LEN("SELECT ")); + append_identifier(thd, &binlog_buf, m_db.str, m_db.length); + binlog_buf.append('.'); + append_identifier(thd, &binlog_buf, m_name.str, m_name.length); + binlog_buf.append('('); + for (arg_no= 0; arg_no < argcount; arg_no++) + { + String str_value_holder; + String *str_value; + + if (arg_no) + binlog_buf.append(','); + + str_value= sp_get_item_value(thd, nctx->get_item(arg_no), + &str_value_holder); + + if (str_value) + binlog_buf.append(*str_value); + else + binlog_buf.append(STRING_WITH_LEN("NULL")); + } + binlog_buf.append(')'); + } + thd->spcont= nctx; + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *save_security_ctx; + if (set_routine_security_ctx(thd, this, FALSE, &save_security_ctx)) + { + err_status= TRUE; + goto err_with_cleanup; + } +#endif + + binlog_save_options= thd->options; + if (need_binlog_call) + { + query_id_t q; + reset_dynamic(&thd->user_var_events); + /* + In case of artificially constructed events for function calls + we have separate union for each such event and hence can't use + query_id of real calling statement as the start of all these + unions (this will break logic of replication of user-defined + variables). So we use artifical value which is guaranteed to + be greater than all query_id's of all statements belonging + to previous events/unions. 
+ Possible alternative to this is logging of all function invocations + as one select and not resetting THD::user_var_events before + each invocation. + */ + VOID(pthread_mutex_lock(&LOCK_thread_count)); + q= global_query_id; + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + mysql_bin_log.start_union_events(thd, q + 1); + } + + /* + Switch to call arena/mem_root so objects like sp_cursor or + Item_cache holders for case expressions can be allocated on it. + + TODO: In future we should associate call arena/mem_root with + sp_rcontext and allocate all these objects (and sp_rcontext + itself) on it directly rather than juggle with arenas. + */ + thd->set_n_backup_active_arena(&call_arena, &backup_arena); + + thd->options&= ~OPTION_BIN_LOG; + err_status= execute(thd); + thd->options= binlog_save_options; + + thd->restore_active_arena(&call_arena, &backup_arena); + + if (need_binlog_call) + mysql_bin_log.stop_union_events(thd); + + if (need_binlog_call && thd->binlog_evt_union.unioned_events) + { + Query_log_event qinfo(thd, binlog_buf.ptr(), binlog_buf.length(), + thd->binlog_evt_union.unioned_events_trans, FALSE); + if (mysql_bin_log.write(&qinfo) && + thd->binlog_evt_union.unioned_events_trans) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "Invoked ROUTINE modified a transactional table but MySQL " + "failed to reflect this change in the binary log"); + } + reset_dynamic(&thd->user_var_events); + } + + if (!err_status) + { + /* We need result only in function but not in trigger */ + + if (!nctx->is_return_value_set()) + { + my_error(ER_SP_NORETURNEND, MYF(0), m_name.str); + err_status= TRUE; + } + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + sp_restore_security_context(thd, save_security_ctx); +#endif + +err_with_cleanup: + delete nctx; + call_arena.free_items(); + free_root(&call_mem_root, MYF(0)); + thd->spcont= octx; + + DBUG_RETURN(err_status); +} + + +/* + Execute a procedure. 
+ SYNOPSIS + sp_head::execute_procedure() + thd Thread handle + args List of values passed as arguments. + + DESCRIPTION + + The function does the following steps: + - Set all parameters + - changes security context for SUID routines + - call sp_head::execute + - copy back values of INOUT and OUT parameters + - restores security context + + RETURN + FALSE on success + TRUE on error +*/ + +bool +sp_head::execute_procedure(THD *thd, List<Item> *args) +{ + bool err_status= FALSE; + uint params = m_pcont->context_var_count(); + sp_rcontext *save_spcont, *octx; + sp_rcontext *nctx = NULL; + DBUG_ENTER("sp_head::execute_procedure"); + DBUG_PRINT("info", ("procedure %s", m_name.str)); + + if (args->elements != params) + { + my_error(ER_SP_WRONG_NO_OF_ARGS, MYF(0), "PROCEDURE", + m_qname.str, params, args->elements); + DBUG_RETURN(TRUE); + } + + save_spcont= octx= thd->spcont; + if (! octx) + { // Create a temporary old context + if (!(octx= new sp_rcontext(m_pcont, NULL, octx)) || + octx->init(thd)) + { + delete octx; /* Delete octx if it was init() that failed. */ + DBUG_RETURN(TRUE); + } + +#ifndef DBUG_OFF + octx->sp= 0; +#endif + thd->spcont= octx; + + /* set callers_arena to thd, for upper-level function to work */ + thd->spcont->callers_arena= thd; + } + + if (!(nctx= new sp_rcontext(m_pcont, NULL, octx)) || + nctx->init(thd)) + { + delete nctx; /* Delete nctx if it was init() that failed. 
*/ + thd->spcont= save_spcont; + DBUG_RETURN(TRUE); + } +#ifndef DBUG_OFF + nctx->sp= this; +#endif + + if (params > 0) + { + List_iterator<Item> it_args(*args); + + DBUG_PRINT("info",(" %.*s: eval args", m_name.length, m_name.str)); + + for (uint i= 0 ; i < params ; i++) + { + Item *arg_item= it_args++; + + if (!arg_item) + break; + + sp_variable_t *spvar= m_pcont->find_variable(i); + + if (!spvar) + continue; + + if (spvar->mode != sp_param_in) + { + Settable_routine_parameter *srp= + arg_item->get_settable_routine_parameter(); + + if (!srp) + { + my_error(ER_SP_NOT_VAR_ARG, MYF(0), i+1, m_qname.str); + err_status= TRUE; + break; + } + + srp->set_required_privilege(spvar->mode == sp_param_inout); + } + + if (spvar->mode == sp_param_out) + { + Item_null *null_item= new Item_null(); + + if (!null_item || + nctx->set_variable(thd, i, (Item **)&null_item)) + { + err_status= TRUE; + break; + } + } + else + { + if (nctx->set_variable(thd, i, it_args.ref())) + { + err_status= TRUE; + break; + } + } + } + + /* + Okay, got values for all arguments. Close tables that might be used by + arguments evaluation. If arguments evaluation required prelocking mode, + we'll leave it here. + */ + if (!thd->in_sub_stmt) + close_thread_tables(thd, 0, 0); + + DBUG_PRINT("info",(" %.*s: eval args done", m_name.length, m_name.str)); + } + + thd->spcont= nctx; + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *save_security_ctx= 0; + if (!err_status) + err_status= set_routine_security_ctx(thd, this, TRUE, &save_security_ctx); +#endif + + if (!err_status) + err_status= execute(thd); + + /* + In the case when we weren't able to employ reuse mechanism for + OUT/INOUT paranmeters, we should reallocate memory. This + allocation should be done on the arena which will live through + all execution of calling routine. 
+ */ + thd->spcont->callers_arena= octx->callers_arena; + + if (!err_status && params > 0) + { + List_iterator<Item> it_args(*args); + + /* + Copy back all OUT or INOUT values to the previous frame, or + set global user variables + */ + for (uint i= 0 ; i < params ; i++) + { + Item *arg_item= it_args++; + + if (!arg_item) + break; + + sp_variable_t *spvar= m_pcont->find_variable(i); + + if (spvar->mode == sp_param_in) + continue; + + Settable_routine_parameter *srp= + arg_item->get_settable_routine_parameter(); + + DBUG_ASSERT(srp); + + if (srp->set_value(thd, octx, nctx->get_item_addr(i))) + { + err_status= TRUE; + break; + } + } + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (save_security_ctx) + sp_restore_security_context(thd, save_security_ctx); +#endif + + if (!save_spcont) + delete octx; + + delete nctx; + thd->spcont= save_spcont; + + DBUG_RETURN(err_status); +} + + +// Reset lex during parsing, before we parse a sub statement. +void +sp_head::reset_lex(THD *thd) +{ + DBUG_ENTER("sp_head::reset_lex"); + LEX *sublex; + LEX *oldlex= thd->lex; + my_lex_states org_next_state= oldlex->next_state; + + (void)m_lex.push_front(oldlex); + thd->lex= sublex= new st_lex; + + /* Reset most stuff. The length arguments doesn't matter here. */ + lex_start(thd, oldlex->buf, (ulong) (oldlex->end_of_query - oldlex->ptr)); + + /* + next_state is normally the same (0), but it happens that we swap lex in + "mid-sentence", so we must restore it. 
+ */ + sublex->next_state= org_next_state; + /* We must reset ptr and end_of_query again */ + sublex->ptr= oldlex->ptr; + sublex->end_of_query= oldlex->end_of_query; + sublex->tok_start= oldlex->tok_start; + sublex->yylineno= oldlex->yylineno; + /* And keep the SP stuff too */ + sublex->sphead= oldlex->sphead; + sublex->spcont= oldlex->spcont; + /* And trigger related stuff too */ + sublex->trg_chistics= oldlex->trg_chistics; + sublex->trg_table_fields.empty(); + sublex->sp_lex_in_use= FALSE; + + sublex->in_comment= oldlex->in_comment; + + /* Reset type info. */ + + sublex->charset= NULL; + sublex->length= NULL; + sublex->dec= NULL; + sublex->interval_list.empty(); + sublex->type= 0; + + DBUG_VOID_RETURN; +} + +// Restore lex during parsing, after we have parsed a sub statement. +void +sp_head::restore_lex(THD *thd) +{ + DBUG_ENTER("sp_head::restore_lex"); + LEX *sublex= thd->lex; + LEX *oldlex= (LEX *)m_lex.pop(); + + if (! oldlex) + return; // Nothing to restore + + // Update some state in the old one first + oldlex->ptr= sublex->ptr; + oldlex->next_state= sublex->next_state; + oldlex->trg_table_fields.push_back(&sublex->trg_table_fields); + + /* + Add routines which are used by statement to respective set for + this routine. + */ + sp_update_sp_used_routines(&m_sroutines, &sublex->sroutines); + /* + Merge tables used by this statement (but not by its functions or + procedures) to multiset of tables used by this routine. + */ + merge_table_list(thd, sublex->query_tables, sublex); + if (! 
sublex->sp_lex_in_use) + { + lex_end(sublex); + delete sublex; + } + thd->lex= oldlex; + DBUG_VOID_RETURN; +} + +void +sp_head::push_backpatch(sp_instr *i, sp_label_t *lab) +{ + bp_t *bp= (bp_t *)sql_alloc(sizeof(bp_t)); + + if (bp) + { + bp->lab= lab; + bp->instr= i; + (void)m_backpatch.push_front(bp); + } +} + +void +sp_head::backpatch(sp_label_t *lab) +{ + bp_t *bp; + uint dest= instructions(); + List_iterator_fast<bp_t> li(m_backpatch); + + while ((bp= li++)) + { + if (bp->lab == lab) + bp->instr->backpatch(dest, lab->ctx); + } +} + +/* + Prepare an instance of create_field for field creation (fill all necessary + attributes). + + SYNOPSIS + sp_head::fill_field_definition() + thd [IN] Thread handle + lex [IN] Yacc parsing context + field_type [IN] Field type + field_def [OUT] An instance of create_field to be filled + + RETURN + FALSE on success + TRUE on error +*/ + +bool +sp_head::fill_field_definition(THD *thd, LEX *lex, + enum enum_field_types field_type, + create_field *field_def) +{ + HA_CREATE_INFO sp_db_info; + LEX_STRING cmt = { 0, 0 }; + uint unused1= 0; + int unused2= 0; + + load_db_opt_by_name(thd, m_db.str, &sp_db_info); + + if (field_def->init(thd, (char*) "", field_type, lex->length, lex->dec, + lex->type, (Item*) 0, (Item*) 0, &cmt, 0, + &lex->interval_list, + (lex->charset ? lex->charset : + sp_db_info.default_table_charset), + lex->uint_geom_type)) + return TRUE; + + if (field_def->interval_list.elements) + field_def->interval= create_typelib(mem_root, field_def, + &field_def->interval_list); + + sp_prepare_create_field(thd, field_def); + + if (prepare_create_field(field_def, &unused1, &unused2, &unused2, + HA_CAN_GEOMETRY)) + { + return TRUE; + } + + return FALSE; +} + + +void +sp_head::new_cont_backpatch(sp_instr_opt_meta *i) +{ + m_cont_level+= 1; + if (i) + { + /* Use the cont. 
destination slot to store the level */ + i->m_cont_dest= m_cont_level; + (void)m_cont_backpatch.push_front(i); + } +} + +void +sp_head::add_cont_backpatch(sp_instr_opt_meta *i) +{ + i->m_cont_dest= m_cont_level; + (void)m_cont_backpatch.push_front(i); +} + +void +sp_head::do_cont_backpatch() +{ + uint dest= instructions(); + uint lev= m_cont_level--; + sp_instr_opt_meta *i; + + while ((i= m_cont_backpatch.head()) && i->m_cont_dest == lev) + { + i->m_cont_dest= dest; + (void)m_cont_backpatch.pop(); + } +} + +void +sp_head::set_info(longlong created, longlong modified, + st_sp_chistics *chistics, ulong sql_mode) +{ + m_created= created; + m_modified= modified; + m_chistics= (st_sp_chistics *) memdup_root(mem_root, (char*) chistics, + sizeof(*chistics)); + if (m_chistics->comment.length == 0) + m_chistics->comment.str= 0; + else + m_chistics->comment.str= strmake_root(mem_root, + m_chistics->comment.str, + m_chistics->comment.length); + m_sql_mode= sql_mode; +} + + +void +sp_head::set_definer(const char *definer, uint definerlen) +{ + char user_name_holder[USERNAME_LENGTH + 1]; + LEX_STRING_WITH_INIT user_name(user_name_holder, USERNAME_LENGTH); + + char host_name_holder[HOSTNAME_LENGTH + 1]; + LEX_STRING_WITH_INIT host_name(host_name_holder, HOSTNAME_LENGTH); + + parse_user(definer, definerlen, user_name.str, &user_name.length, + host_name.str, &host_name.length); + + set_definer(&user_name, &host_name); +} + + +void +sp_head::set_definer(const LEX_STRING *user_name, const LEX_STRING *host_name) +{ + m_definer_user.str= strmake_root(mem_root, user_name->str, user_name->length); + m_definer_user.length= user_name->length; + + m_definer_host.str= strmake_root(mem_root, host_name->str, host_name->length); + m_definer_host.length= host_name->length; +} + + +void +sp_head::reset_thd_mem_root(THD *thd) +{ + DBUG_ENTER("sp_head::reset_thd_mem_root"); + m_thd_root= thd->mem_root; + thd->mem_root= &main_mem_root; + DBUG_PRINT("info", ("mem_root 0x%lx moved to thd mem root 
0x%lx", + (ulong) &mem_root, (ulong) &thd->mem_root)); + free_list= thd->free_list; // Keep the old list + thd->free_list= NULL; // Start a new one + m_thd= thd; + DBUG_VOID_RETURN; +} + +void +sp_head::restore_thd_mem_root(THD *thd) +{ + DBUG_ENTER("sp_head::restore_thd_mem_root"); + Item *flist= free_list; // The old list + set_query_arena(thd); // Get new free_list and mem_root + state= INITIALIZED_FOR_SP; + + DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx", + (ulong) &mem_root, (ulong) &thd->mem_root)); + thd->free_list= flist; // Restore the old one + thd->mem_root= m_thd_root; + m_thd= NULL; + DBUG_VOID_RETURN; +} + + +/* + Check if a user has access right to a routine + + SYNOPSIS + check_show_routine_access() + thd Thread handler + sp SP + full_access Set to 1 if the user has SELECT right to the + 'mysql.proc' able or is the owner of the routine + RETURN + 0 ok + 1 error +*/ + +bool check_show_routine_access(THD *thd, sp_head *sp, bool *full_access) +{ + TABLE_LIST tables; + bzero((char*) &tables,sizeof(tables)); + tables.db= (char*) "mysql"; + tables.table_name= tables.alias= (char*) "proc"; + *full_access= (!check_table_access(thd, SELECT_ACL, &tables, 1) || + (!strcmp(sp->m_definer_user.str, + thd->security_ctx->priv_user) && + !strcmp(sp->m_definer_host.str, + thd->security_ctx->priv_host))); + if (!*full_access) + return check_some_routine_access(thd, sp->m_db.str, sp->m_name.str, + sp->m_type == TYPE_ENUM_PROCEDURE); + return 0; +} + + +int +sp_head::show_create_procedure(THD *thd) +{ + Protocol *protocol= thd->protocol; + char buff[2048]; + String buffer(buff, sizeof(buff), system_charset_info); + int res; + List<Item> field_list; + byte *sql_mode_str; + ulong sql_mode_len; + bool full_access; + DBUG_ENTER("sp_head::show_create_procedure"); + DBUG_PRINT("info", ("procedure %s", m_name.str)); + + LINT_INIT(sql_mode_str); + LINT_INIT(sql_mode_len); + + if (check_show_routine_access(thd, this, &full_access)) + DBUG_RETURN(1); + + 
sql_mode_str= + sys_var_thd_sql_mode::symbolic_mode_representation(thd, + m_sql_mode, + &sql_mode_len); + field_list.push_back(new Item_empty_string("Procedure", NAME_LEN)); + field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len)); + // 1024 is for not to confuse old clients + Item_empty_string *definition= + new Item_empty_string("Create Procedure", max(buffer.length(),1024)); + definition->maybe_null= TRUE; + field_list.push_back(definition); + + if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) + DBUG_RETURN(1); + protocol->prepare_for_resend(); + protocol->store(m_name.str, m_name.length, system_charset_info); + protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info); + if (full_access) + protocol->store(m_defstr.str, m_defstr.length, system_charset_info); + else + protocol->store_null(); + res= protocol->write(); + send_eof(thd); + + DBUG_RETURN(res); +} + + +/* + Add instruction to SP + + SYNOPSIS + sp_head::add_instr() + instr Instruction +*/ + +void sp_head::add_instr(sp_instr *instr) +{ + instr->free_list= m_thd->free_list; + m_thd->free_list= 0; + /* + Memory root of every instruction is designated for permanent + transformations (optimizations) made on the parsed tree during + the first execution. It points to the memory root of the + entire stored procedure, as their life span is equal. 
+ */ + instr->mem_root= &main_mem_root; + insert_dynamic(&m_instr, (gptr)&instr); +} + + +int +sp_head::show_create_function(THD *thd) +{ + Protocol *protocol= thd->protocol; + char buff[2048]; + String buffer(buff, sizeof(buff), system_charset_info); + int res; + List<Item> field_list; + byte *sql_mode_str; + ulong sql_mode_len; + bool full_access; + DBUG_ENTER("sp_head::show_create_function"); + DBUG_PRINT("info", ("procedure %s", m_name.str)); + LINT_INIT(sql_mode_str); + LINT_INIT(sql_mode_len); + + if (check_show_routine_access(thd, this, &full_access)) + DBUG_RETURN(1); + + sql_mode_str= + sys_var_thd_sql_mode::symbolic_mode_representation(thd, + m_sql_mode, + &sql_mode_len); + field_list.push_back(new Item_empty_string("Function",NAME_LEN)); + field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len)); + Item_empty_string *definition= + new Item_empty_string("Create Function", max(buffer.length(),1024)); + definition->maybe_null= TRUE; + field_list.push_back(definition); + + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(1); + protocol->prepare_for_resend(); + protocol->store(m_name.str, m_name.length, system_charset_info); + protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info); + if (full_access) + protocol->store(m_defstr.str, m_defstr.length, system_charset_info); + else + protocol->store_null(); + res= protocol->write(); + send_eof(thd); + + DBUG_RETURN(res); +} + + +/* + Do some minimal optimization of the code: + 1) Mark used instructions + 1.1) While doing this, shortcut jumps to jump instructions + 2) Compact the code, removing unused instructions + + This is the main mark and move loop; it relies on the following methods + in sp_instr and its subclasses: + + opt_mark() Mark instruction as reachable + opt_shortcut_jump() Shortcut jumps to the final destination; + used by opt_mark(). 
+ opt_move() Update moved instruction + set_destination() Set the new destination (jump instructions only) +*/ + +void sp_head::optimize() +{ + List<sp_instr> bp; + sp_instr *i; + uint src, dst; + + opt_mark(); + + bp.empty(); + src= dst= 0; + while ((i= get_instr(src))) + { + if (! i->marked) + { + delete i; + src+= 1; + } + else + { + if (src != dst) + { // Move the instruction and update prev. jumps + sp_instr *ibp; + List_iterator_fast<sp_instr> li(bp); + + set_dynamic(&m_instr, (gptr)&i, dst); + while ((ibp= li++)) + { + sp_instr_opt_meta *im= static_cast<sp_instr_opt_meta *>(ibp); + im->set_destination(src, dst); + } + } + i->opt_move(dst, &bp); + src+= 1; + dst+= 1; + } + } + m_instr.elements= dst; + bp.empty(); +} + +void sp_head::add_mark_lead(uint ip, List<sp_instr> *leads) +{ + sp_instr *i= get_instr(ip); + + if (i && ! i->marked) + leads->push_front(i); +} + +void +sp_head::opt_mark() +{ + uint ip; + sp_instr *i; + List<sp_instr> leads; + + /* + Forward flow analysis algorithm in the instruction graph: + - first, add the entry point in the graph (the first instruction) to the + 'leads' list of paths to explore. + - while there are still leads to explore: + - pick one lead, and follow the path forward. Mark instruction reached. + Stop only if the end of the routine is reached, or the path converge + to code already explored (marked). + - while following a path, collect in the 'leads' list any fork to + another path (caused by conditional jumps instructions), so that these + paths can be explored as well. + */ + + /* Add the entry point */ + i= get_instr(0); + leads.push_front(i); + + /* For each path of code ... */ + while (leads.elements != 0) + { + i= leads.pop(); + + /* Mark the entire path, collecting new leads. */ + while (i && ! i->marked) + { + ip= i->opt_mark(this, & leads); + i= get_instr(ip); + } + } +} + + +#ifndef DBUG_OFF +/* + Return the routine instructions as a result set. + Returns 0 if ok, !=0 on error. 
+*/ +int +sp_head::show_routine_code(THD *thd) +{ + Protocol *protocol= thd->protocol; + char buff[2048]; + String buffer(buff, sizeof(buff), system_charset_info); + List<Item> field_list; + sp_instr *i; + bool full_access; + int res= 0; + uint ip; + DBUG_ENTER("sp_head::show_routine_code"); + DBUG_PRINT("info", ("procedure: %s", m_name.str)); + + if (check_show_routine_access(thd, this, &full_access) || !full_access) + DBUG_RETURN(1); + + field_list.push_back(new Item_uint("Pos", 9)); + // 1024 is for not to confuse old clients + field_list.push_back(new Item_empty_string("Instruction", + max(buffer.length(), 1024))); + if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) + DBUG_RETURN(1); + + for (ip= 0; (i = get_instr(ip)) ; ip++) + { + /* + Consistency check. If these are different something went wrong + during optimization. + */ + if (ip != i->m_ip) + { + const char *format= "Instruction at position %u has m_ip=%u"; + char tmp[sizeof(format) + 2*SP_INSTR_UINT_MAXLEN + 1]; + + sprintf(tmp, format, ip, i->m_ip); + /* + Since this is for debugging purposes only, we don't bother to + introduce a special error code for it. + */ + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, tmp); + } + protocol->prepare_for_resend(); + protocol->store((longlong)ip); + + buffer.set("", 0, system_charset_info); + i->print(&buffer); + protocol->store(buffer.ptr(), buffer.length(), system_charset_info); + if ((res= protocol->write())) + break; + } + send_eof(thd); + + DBUG_RETURN(res); +} +#endif // ifndef DBUG_OFF + + +/* + Prepare LEX and thread for execution of instruction, if requested open + and lock LEX's tables, execute instruction's core function, perform + cleanup afterwards. 
+ + SYNOPSIS + reset_lex_and_exec_core() + thd - thread context + nextp - out - next instruction + open_tables - if TRUE then check read access to tables in LEX's table + list and open and lock them (used in instructions which + need to calculate some expression and don't execute + complete statement). + sp_instr - instruction for which we prepare context, and which core + function execute by calling its exec_core() method. + + NOTE + We are not saving/restoring some parts of THD which may need this because + we do this once for whole routine execution in sp_head::execute(). + + RETURN VALUE + 0/non-0 - Success/Failure +*/ + +int +sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, + bool open_tables, sp_instr* instr) +{ + int res= 0; + + DBUG_ASSERT(!thd->derived_tables); + DBUG_ASSERT(thd->change_list.is_empty()); + /* + Use our own lex. + We should not save old value since it is saved/restored in + sp_head::execute() when we are entering/leaving routine. + */ + thd->lex= m_lex; + + VOID(pthread_mutex_lock(&LOCK_thread_count)); + thd->query_id= next_query_id(); + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + + if (thd->prelocked_mode == NON_PRELOCKED) + { + /* + This statement will enter/leave prelocked mode on its own. + Entering prelocked mode changes table list and related members + of LEX, so we'll need to restore them. + */ + if (lex_query_tables_own_last) + { + /* + We've already entered/left prelocked mode with this statement. + Attach the list of tables that need to be prelocked and mark m_lex + as having such list attached. 
+ */ + *lex_query_tables_own_last= prelocking_tables; + m_lex->mark_as_requiring_prelocking(lex_query_tables_own_last); + } + } + + reinit_stmt_before_use(thd, m_lex); + + if (open_tables) + res= instr->exec_open_and_lock_tables(thd, m_lex->query_tables); + + if (!res) + res= instr->exec_core(thd, nextp); + + m_lex->unit.cleanup(); + + thd->proc_info="closing tables"; + close_thread_tables(thd); + thd->proc_info= 0; + + if (m_lex->query_tables_own_last) + { + /* + We've entered and left prelocking mode when executing statement + stored in m_lex. + m_lex->query_tables(->next_global)* list now has a 'tail' - a list + of tables that are added for prelocking. (If this is the first + execution, the 'tail' was added by open_tables(), otherwise we've + attached it above in this function). + Now we'll save the 'tail', and detach it. + */ + lex_query_tables_own_last= m_lex->query_tables_own_last; + prelocking_tables= *lex_query_tables_own_last; + *lex_query_tables_own_last= NULL; + m_lex->mark_as_requiring_prelocking(NULL); + } + thd->rollback_item_tree_changes(); + /* Update the state of the active arena. */ + thd->stmt_arena->state= Query_arena::EXECUTED; + + + /* + Unlike for PS we should not call Item's destructors for newly created + items after execution of each instruction in stored routine. This is + because SP often create Item (like Item_int, Item_string etc...) when + they want to store some value in local variable, pass return value and + etc... So their life time should be longer than one instruction. + + cleanup_items() is called in sp_head::execute() + */ + return res || thd->net.report_error; +} + + +/* + sp_instr class functions +*/ + +int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables) +{ + int result; + + /* + Check whenever we have access to tables for this statement + and open and lock them before executing instructions core function. 
+ */ + if (check_table_access(thd, SELECT_ACL, tables, 0) + || open_and_lock_tables(thd, tables)) + result= -1; + else + result= 0; + + return result; +} + +uint sp_instr::get_cont_dest() +{ + return (m_ip+1); +} + + +int sp_instr::exec_core(THD *thd, uint *nextp) +{ + DBUG_ASSERT(0); + return 0; +} + +/* + sp_instr_stmt class functions +*/ + +int +sp_instr_stmt::execute(THD *thd, uint *nextp) +{ + char *query; + uint32 query_length; + int res; + DBUG_ENTER("sp_instr_stmt::execute"); + DBUG_PRINT("info", ("command: %d", m_lex_keeper.sql_command())); + + query= thd->query; + query_length= thd->query_length; + if (!(res= alloc_query(thd, m_query.str, m_query.length+1)) && + !(res=subst_spvars(thd, this, &m_query))) + { + /* + (the order of query cache and subst_spvars calls is irrelevant because + queries with SP vars can't be cached) + */ + if (query_cache_send_result_to_client(thd, + thd->query, thd->query_length) <= 0) + { + res= m_lex_keeper.reset_lex_and_exec_core(thd, nextp, FALSE, this); + query_cache_end_of_result(thd); + } + else + *nextp= m_ip+1; + thd->query= query; + thd->query_length= query_length; + } + DBUG_RETURN(res); +} + + +void +sp_instr_stmt::print(String *str) +{ + uint i, len; + + /* stmt CMD "..." */ + if (str->reserve(SP_STMT_PRINT_MAXLEN+SP_INSTR_UINT_MAXLEN+8)) + return; + str->qs_append(STRING_WITH_LEN("stmt ")); + str->qs_append((uint)m_lex_keeper.sql_command()); + str->qs_append(STRING_WITH_LEN(" \"")); + len= m_query.length; + /* + Print the query string (but not too much of it), just to indicate which + statement it is. 
+ */ + if (len > SP_STMT_PRINT_MAXLEN) + len= SP_STMT_PRINT_MAXLEN-3; + /* Copy the query string and replace '\n' with ' ' in the process */ + for (i= 0 ; i < len ; i++) + { + char c= m_query.str[i]; + if (c == '\n') + c= ' '; + str->qs_append(c); + } + if (m_query.length > SP_STMT_PRINT_MAXLEN) + str->qs_append(STRING_WITH_LEN("...")); /* Indicate truncated string */ + str->qs_append('"'); +} + + +int +sp_instr_stmt::exec_core(THD *thd, uint *nextp) +{ + int res= mysql_execute_command(thd); + *nextp= m_ip+1; + return res; +} + + +/* + sp_instr_set class functions +*/ + +int +sp_instr_set::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_set::execute"); + DBUG_PRINT("info", ("offset: %u", m_offset)); + + DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this)); +} + + +int +sp_instr_set::exec_core(THD *thd, uint *nextp) +{ + int res= thd->spcont->set_variable(thd, m_offset, &m_value); + + if (res && thd->spcont->found_handler_here()) + { + /* + Failed to evaluate the value, and a handler has been found. Reset the + variable to NULL. + */ + + if (thd->spcont->set_variable(thd, m_offset, 0)) + { + /* If this also failed, let's abort. */ + + sp_rcontext *spcont= thd->spcont; + + thd->spcont= 0; /* Avoid handlers */ + my_error(ER_OUT_OF_RESOURCES, MYF(0)); + spcont->clear_handler(); + thd->spcont= spcont; + } + } + + *nextp = m_ip+1; + return res; +} + +void +sp_instr_set::print(String *str) +{ + /* set name@offset ... */ + int rsrv = SP_INSTR_UINT_MAXLEN+6; + sp_variable_t *var = m_ctx->find_variable(m_offset); + + /* 'var' should always be non-null, but just in case... 
*/ + if (var) + rsrv+= var->name.length; + if (str->reserve(rsrv)) + return; + str->qs_append(STRING_WITH_LEN("set ")); + if (var) + { + str->qs_append(var->name.str, var->name.length); + str->qs_append('@'); + } + str->qs_append(m_offset); + str->qs_append(' '); + m_value->print(str); +} + + +/* + sp_instr_set_trigger_field class functions +*/ + +int +sp_instr_set_trigger_field::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_set_trigger_field::execute"); + DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this)); +} + + +int +sp_instr_set_trigger_field::exec_core(THD *thd, uint *nextp) +{ + const int res= (trigger_field->set_value(thd, &value) ? -1 : 0); + *nextp = m_ip+1; + return res; +} + +void +sp_instr_set_trigger_field::print(String *str) +{ + str->append(STRING_WITH_LEN("set_trigger_field ")); + trigger_field->print(str); + str->append(STRING_WITH_LEN(":=")); + value->print(str); +} + +/* + sp_instr_opt_meta +*/ + +uint sp_instr_opt_meta::get_cont_dest() +{ + return m_cont_dest; +} + + +/* + sp_instr_jump class functions +*/ + +int +sp_instr_jump::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_jump::execute"); + DBUG_PRINT("info", ("destination: %u", m_dest)); + + *nextp= m_dest; + DBUG_RETURN(0); +} + +void +sp_instr_jump::print(String *str) +{ + /* jump dest */ + if (str->reserve(SP_INSTR_UINT_MAXLEN+5)) + return; + str->qs_append(STRING_WITH_LEN("jump ")); + str->qs_append(m_dest); +} + +uint +sp_instr_jump::opt_mark(sp_head *sp, List<sp_instr> *leads) +{ + m_dest= opt_shortcut_jump(sp, this); + if (m_dest != m_ip+1) /* Jumping to following instruction? 
*/ + marked= 1; + m_optdest= sp->get_instr(m_dest); + return m_dest; +} + +uint +sp_instr_jump::opt_shortcut_jump(sp_head *sp, sp_instr *start) +{ + uint dest= m_dest; + sp_instr *i; + + while ((i= sp->get_instr(dest))) + { + uint ndest; + + if (start == i || this == i) + break; + ndest= i->opt_shortcut_jump(sp, start); + if (ndest == dest) + break; + dest= ndest; + } + return dest; +} + +void +sp_instr_jump::opt_move(uint dst, List<sp_instr> *bp) +{ + if (m_dest > m_ip) + bp->push_back(this); // Forward + else if (m_optdest) + m_dest= m_optdest->m_ip; // Backward + m_ip= dst; +} + + +/* + sp_instr_jump_if_not class functions +*/ + +int +sp_instr_jump_if_not::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_jump_if_not::execute"); + DBUG_PRINT("info", ("destination: %u", m_dest)); + DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this)); +} + + +int +sp_instr_jump_if_not::exec_core(THD *thd, uint *nextp) +{ + Item *it; + int res; + + it= sp_prepare_func_item(thd, &m_expr); + if (! it) + { + res= -1; + } + else + { + res= 0; + if (! it->val_bool()) + *nextp = m_dest; + else + *nextp = m_ip+1; + } + + return res; +} + + +void +sp_instr_jump_if_not::print(String *str) +{ + /* jump_if_not dest(cont) ... */ + if (str->reserve(2*SP_INSTR_UINT_MAXLEN+14+32)) // Add some for the expr. 
too + return; + str->qs_append(STRING_WITH_LEN("jump_if_not ")); + str->qs_append(m_dest); + str->qs_append('('); + str->qs_append(m_cont_dest); + str->qs_append(STRING_WITH_LEN(") ")); + m_expr->print(str); +} + + +uint +sp_instr_jump_if_not::opt_mark(sp_head *sp, List<sp_instr> *leads) +{ + sp_instr *i; + + marked= 1; + if ((i= sp->get_instr(m_dest))) + { + m_dest= i->opt_shortcut_jump(sp, this); + m_optdest= sp->get_instr(m_dest); + } + sp->add_mark_lead(m_dest, leads); + if ((i= sp->get_instr(m_cont_dest))) + { + m_cont_dest= i->opt_shortcut_jump(sp, this); + m_cont_optdest= sp->get_instr(m_cont_dest); + } + sp->add_mark_lead(m_cont_dest, leads); + return m_ip+1; +} + +void +sp_instr_jump_if_not::opt_move(uint dst, List<sp_instr> *bp) +{ + /* + cont. destinations may point backwards after shortcutting jumps + during the mark phase. If it's still pointing forwards, only + push this for backpatching if sp_instr_jump::opt_move() will not + do it (i.e. if the m_dest points backwards). + */ + if (m_cont_dest > m_ip) + { // Forward + if (m_dest < m_ip) + bp->push_back(this); + } + else if (m_cont_optdest) + m_cont_dest= m_cont_optdest->m_ip; // Backward + /* This will take care of m_dest and m_ip */ + sp_instr_jump::opt_move(dst, bp); +} + + +/* + sp_instr_freturn class functions +*/ + +int +sp_instr_freturn::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_freturn::execute"); + DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this)); +} + + +int +sp_instr_freturn::exec_core(THD *thd, uint *nextp) +{ + /* + Change <next instruction pointer>, so that this will be the last + instruction in the stored function. + */ + + *nextp= UINT_MAX; + + /* + Evaluate the value of return expression and store it in current runtime + context. + + NOTE: It's necessary to evaluate result item right here, because we must + do it in scope of execution the current context/block. 
+ */ + + return thd->spcont->set_return_value(thd, &m_value); +} + +void +sp_instr_freturn::print(String *str) +{ + /* freturn type expr... */ + if (str->reserve(1024+8+32)) // Add some for the expr. too + return; + str->qs_append(STRING_WITH_LEN("freturn ")); + str->qs_append((uint)m_type); + str->qs_append(' '); + m_value->print(str); +} + +/* + sp_instr_hpush_jump class functions +*/ + +int +sp_instr_hpush_jump::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_hpush_jump::execute"); + List_iterator_fast<sp_cond_type_t> li(m_cond); + sp_cond_type_t *p; + + while ((p= li++)) + thd->spcont->push_handler(p, m_ip+1, m_type, m_frame); + + *nextp= m_dest; + DBUG_RETURN(0); +} + + +void +sp_instr_hpush_jump::print(String *str) +{ + /* hpush_jump dest fsize type */ + if (str->reserve(SP_INSTR_UINT_MAXLEN*2 + 21)) + return; + str->qs_append(STRING_WITH_LEN("hpush_jump ")); + str->qs_append(m_dest); + str->qs_append(' '); + str->qs_append(m_frame); + switch (m_type) { + case SP_HANDLER_NONE: + str->qs_append(STRING_WITH_LEN(" NONE")); // This would be a bug + break; + case SP_HANDLER_EXIT: + str->qs_append(STRING_WITH_LEN(" EXIT")); + break; + case SP_HANDLER_CONTINUE: + str->qs_append(STRING_WITH_LEN(" CONTINUE")); + break; + case SP_HANDLER_UNDO: + str->qs_append(STRING_WITH_LEN(" UNDO")); + break; + default: + // This would be a bug as well + str->qs_append(STRING_WITH_LEN(" UNKNOWN:")); + str->qs_append(m_type); + } +} + + +uint +sp_instr_hpush_jump::opt_mark(sp_head *sp, List<sp_instr> *leads) +{ + sp_instr *i; + + marked= 1; + if ((i= sp->get_instr(m_dest))) + { + m_dest= i->opt_shortcut_jump(sp, this); + m_optdest= sp->get_instr(m_dest); + } + sp->add_mark_lead(m_dest, leads); + return m_ip+1; +} + + +/* + sp_instr_hpop class functions +*/ + +int +sp_instr_hpop::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_hpop::execute"); + thd->spcont->pop_handlers(m_count); + *nextp= m_ip+1; + DBUG_RETURN(0); +} + +void +sp_instr_hpop::print(String *str) +{ + 
/* hpop count */ + if (str->reserve(SP_INSTR_UINT_MAXLEN+5)) + return; + str->qs_append(STRING_WITH_LEN("hpop ")); + str->qs_append(m_count); +} + + +/* + sp_instr_hreturn class functions +*/ + +int +sp_instr_hreturn::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_hreturn::execute"); + if (m_dest) + *nextp= m_dest; + else + { + *nextp= thd->spcont->pop_hstack(); + } + thd->spcont->exit_handler(); + DBUG_RETURN(0); +} + + +void +sp_instr_hreturn::print(String *str) +{ + /* hreturn framesize dest */ + if (str->reserve(SP_INSTR_UINT_MAXLEN*2 + 9)) + return; + str->qs_append(STRING_WITH_LEN("hreturn ")); + str->qs_append(m_frame); + if (m_dest) + { + str->qs_append(' '); + str->qs_append(m_dest); + } +} + + +uint +sp_instr_hreturn::opt_mark(sp_head *sp, List<sp_instr> *leads) +{ + if (m_dest) + return sp_instr_jump::opt_mark(sp, leads); + + marked= 1; + return UINT_MAX; +} + + +/* + sp_instr_cpush class functions +*/ + +int +sp_instr_cpush::execute(THD *thd, uint *nextp) +{ + Query_arena backup_arena; + DBUG_ENTER("sp_instr_cpush::execute"); + + /* + We should create cursors in the callers arena, as + it could be (and usually is) used in several instructions. 
+ */ + thd->set_n_backup_active_arena(thd->spcont->callers_arena, &backup_arena); + + thd->spcont->push_cursor(&m_lex_keeper, this); + + thd->restore_active_arena(thd->spcont->callers_arena, &backup_arena); + + *nextp= m_ip+1; + + DBUG_RETURN(0); +} + + +void +sp_instr_cpush::print(String *str) +{ + LEX_STRING n; + my_bool found= m_ctx->find_cursor(m_cursor, &n); + /* cpush name@offset */ + uint rsrv= SP_INSTR_UINT_MAXLEN+7; + + if (found) + rsrv+= n.length; + if (str->reserve(rsrv)) + return; + str->qs_append(STRING_WITH_LEN("cpush ")); + if (found) + { + str->qs_append(n.str, n.length); + str->qs_append('@'); + } + str->qs_append(m_cursor); +} + + +/* + sp_instr_cpop class functions +*/ + +int +sp_instr_cpop::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_cpop::execute"); + thd->spcont->pop_cursors(m_count); + *nextp= m_ip+1; + DBUG_RETURN(0); +} + + +void +sp_instr_cpop::print(String *str) +{ + /* cpop count */ + if (str->reserve(SP_INSTR_UINT_MAXLEN+5)) + return; + str->qs_append(STRING_WITH_LEN("cpop ")); + str->qs_append(m_count); +} + + +/* + sp_instr_copen class functions +*/ + +int +sp_instr_copen::execute(THD *thd, uint *nextp) +{ + /* + We don't store a pointer to the cursor in the instruction to be + able to reuse the same instruction among different threads in future. + */ + sp_cursor *c= thd->spcont->get_cursor(m_cursor); + int res; + DBUG_ENTER("sp_instr_copen::execute"); + + if (! c) + res= -1; + else + { + sp_lex_keeper *lex_keeper= c->get_lex_keeper(); + Query_arena *old_arena= thd->stmt_arena; + + /* + Get the Query_arena from the cpush instruction, which contains + the free_list of the query, so new items (if any) are stored in + the right free_list, and we can cleanup after each open. 
+ */ + thd->stmt_arena= c->get_instr(); + res= lex_keeper->reset_lex_and_exec_core(thd, nextp, FALSE, this); + /* Cleanup the query's items */ + if (thd->stmt_arena->free_list) + cleanup_items(thd->stmt_arena->free_list); + thd->stmt_arena= old_arena; + /* + Work around the fact that errors in selects are not returned properly + (but instead converted into a warning), so if a condition handler + caught, we have lost the result code. + */ + if (!res) + { + uint dummy1, dummy2; + + if (thd->spcont->found_handler(&dummy1, &dummy2)) + res= -1; + } + /* TODO: Assert here that we either have an error or a cursor */ + } + DBUG_RETURN(res); +} + + +int +sp_instr_copen::exec_core(THD *thd, uint *nextp) +{ + sp_cursor *c= thd->spcont->get_cursor(m_cursor); + int res= c->open(thd); + *nextp= m_ip+1; + return res; +} + +void +sp_instr_copen::print(String *str) +{ + LEX_STRING n; + my_bool found= m_ctx->find_cursor(m_cursor, &n); + /* copen name@offset */ + uint rsrv= SP_INSTR_UINT_MAXLEN+7; + + if (found) + rsrv+= n.length; + if (str->reserve(rsrv)) + return; + str->qs_append(STRING_WITH_LEN("copen ")); + if (found) + { + str->qs_append(n.str, n.length); + str->qs_append('@'); + } + str->qs_append(m_cursor); +} + + +/* + sp_instr_cclose class functions +*/ + +int +sp_instr_cclose::execute(THD *thd, uint *nextp) +{ + sp_cursor *c= thd->spcont->get_cursor(m_cursor); + int res; + DBUG_ENTER("sp_instr_cclose::execute"); + + if (! 
c) + res= -1; + else + res= c->close(thd); + *nextp= m_ip+1; + DBUG_RETURN(res); +} + + +void +sp_instr_cclose::print(String *str) +{ + LEX_STRING n; + my_bool found= m_ctx->find_cursor(m_cursor, &n); + /* cclose name@offset */ + uint rsrv= SP_INSTR_UINT_MAXLEN+8; + + if (found) + rsrv+= n.length; + if (str->reserve(rsrv)) + return; + str->qs_append(STRING_WITH_LEN("cclose ")); + if (found) + { + str->qs_append(n.str, n.length); + str->qs_append('@'); + } + str->qs_append(m_cursor); +} + + +/* + sp_instr_cfetch class functions +*/ + +int +sp_instr_cfetch::execute(THD *thd, uint *nextp) +{ + sp_cursor *c= thd->spcont->get_cursor(m_cursor); + int res; + Query_arena backup_arena; + DBUG_ENTER("sp_instr_cfetch::execute"); + + res= c ? c->fetch(thd, &m_varlist) : -1; + + *nextp= m_ip+1; + DBUG_RETURN(res); +} + + +void +sp_instr_cfetch::print(String *str) +{ + List_iterator_fast<struct sp_variable> li(m_varlist); + sp_variable_t *pv; + LEX_STRING n; + my_bool found= m_ctx->find_cursor(m_cursor, &n); + /* cfetch name@offset vars... 
*/ + uint rsrv= SP_INSTR_UINT_MAXLEN+8; + + if (found) + rsrv+= n.length; + if (str->reserve(rsrv)) + return; + str->qs_append(STRING_WITH_LEN("cfetch ")); + if (found) + { + str->qs_append(n.str, n.length); + str->qs_append('@'); + } + str->qs_append(m_cursor); + while ((pv= li++)) + { + if (str->reserve(pv->name.length+SP_INSTR_UINT_MAXLEN+2)) + return; + str->qs_append(' '); + str->qs_append(pv->name.str, pv->name.length); + str->qs_append('@'); + str->qs_append(pv->offset); + } +} + + +/* + sp_instr_error class functions +*/ + +int +sp_instr_error::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_error::execute"); + + my_message(m_errcode, ER(m_errcode), MYF(0)); + *nextp= m_ip+1; + DBUG_RETURN(-1); +} + + +void +sp_instr_error::print(String *str) +{ + /* error code */ + if (str->reserve(SP_INSTR_UINT_MAXLEN+6)) + return; + str->qs_append(STRING_WITH_LEN("error ")); + str->qs_append(m_errcode); +} + + +/************************************************************************** + sp_instr_set_case_expr class implementation +**************************************************************************/ + +int +sp_instr_set_case_expr::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_set_case_expr::execute"); + + DBUG_RETURN(m_lex_keeper.reset_lex_and_exec_core(thd, nextp, TRUE, this)); +} + + +int +sp_instr_set_case_expr::exec_core(THD *thd, uint *nextp) +{ + int res= thd->spcont->set_case_expr(thd, m_case_expr_id, &m_case_expr); + + if (res && + !thd->spcont->get_case_expr(m_case_expr_id) && + thd->spcont->found_handler_here()) + { + /* + Failed to evaluate the value, the case expression is still not + initialized, and a handler has been found. Set to NULL so we can continue. + */ + + Item *null_item= new Item_null(); + + if (!null_item || + thd->spcont->set_case_expr(thd, m_case_expr_id, &null_item)) + { + /* If this also failed, we have to abort. 
*/ + + sp_rcontext *spcont= thd->spcont; + + thd->spcont= 0; /* Avoid handlers */ + my_error(ER_OUT_OF_RESOURCES, MYF(0)); + spcont->clear_handler(); + thd->spcont= spcont; + } + } + else + *nextp= m_ip+1; + + return res; +} + + +void +sp_instr_set_case_expr::print(String *str) +{ + /* set_case_expr (cont) id ... */ + str->reserve(2*SP_INSTR_UINT_MAXLEN+18+32); // Add some extra for expr too + str->qs_append(STRING_WITH_LEN("set_case_expr (")); + str->qs_append(m_cont_dest); + str->qs_append(STRING_WITH_LEN(") ")); + str->qs_append(m_case_expr_id); + str->qs_append(' '); + m_case_expr->print(str); +} + +uint +sp_instr_set_case_expr::opt_mark(sp_head *sp, List<sp_instr> *leads) +{ + sp_instr *i; + + marked= 1; + if ((i= sp->get_instr(m_cont_dest))) + { + m_cont_dest= i->opt_shortcut_jump(sp, this); + m_cont_optdest= sp->get_instr(m_cont_dest); + } + sp->add_mark_lead(m_cont_dest, leads); + return m_ip+1; +} + +void +sp_instr_set_case_expr::opt_move(uint dst, List<sp_instr> *bp) +{ + if (m_cont_dest > m_ip) + bp->push_back(this); // Forward + else if (m_cont_optdest) + m_cont_dest= m_cont_optdest->m_ip; // Backward + m_ip= dst; +} + + +/* ------------------------------------------------------------------ */ + +/* + Security context swapping +*/ + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +bool +sp_change_security_context(THD *thd, sp_head *sp, Security_context **backup) +{ + *backup= 0; + if (sp->m_chistics->suid != SP_IS_NOT_SUID && + (strcmp(sp->m_definer_user.str, + thd->security_ctx->priv_user) || + my_strcasecmp(system_charset_info, sp->m_definer_host.str, + thd->security_ctx->priv_host))) + { + if (acl_getroot_no_password(&sp->m_security_ctx, sp->m_definer_user.str, + sp->m_definer_host.str, + sp->m_definer_host.str, + sp->m_db.str)) + { + my_error(ER_NO_SUCH_USER, MYF(0), sp->m_definer_user.str, + sp->m_definer_host.str); + return TRUE; + } + *backup= thd->security_ctx; + thd->security_ctx= &sp->m_security_ctx; + } + return FALSE; +} + +void 
+sp_restore_security_context(THD *thd, Security_context *backup) +{ + if (backup) + thd->security_ctx= backup; +} + +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + +/* + Structure that represent all instances of one table + in optimized multi-set of tables used by routine. +*/ + +typedef struct st_sp_table +{ + /* + Multi-set key: + db_name\0table_name\0alias\0 - for normal tables + db_name\0table_name\0 - for temporary tables + Note that in both cases we don't take last '\0' into account when + we count length of key. + */ + LEX_STRING qname; + uint db_length, table_name_length; + bool temp; /* true if corresponds to a temporary table */ + thr_lock_type lock_type; /* lock type used for prelocking */ + uint lock_count; + uint query_lock_count; +} SP_TABLE; + +byte * +sp_table_key(const byte *ptr, uint *plen, my_bool first) +{ + SP_TABLE *tab= (SP_TABLE *)ptr; + *plen= tab->qname.length; + return (byte *)tab->qname.str; +} + + +/* + Merge the list of tables used by some query into the multi-set of + tables used by routine. + + SYNOPSIS + merge_table_list() + thd - thread context + table - table list + lex_for_tmp_check - LEX of the query for which we are merging + table list. + + NOTE + This method will use LEX provided to check whenever we are creating + temporary table and mark it as such in target multi-set. 
+ + RETURN VALUE + TRUE - Success + FALSE - Error +*/ + +bool +sp_head::merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check) +{ + SP_TABLE *tab; + + if (lex_for_tmp_check->sql_command == SQLCOM_DROP_TABLE && + lex_for_tmp_check->drop_temporary) + return TRUE; + + for (uint i= 0 ; i < m_sptabs.records ; i++) + { + tab= (SP_TABLE *)hash_element(&m_sptabs, i); + tab->query_lock_count= 0; + } + + for (; table ; table= table->next_global) + if (!table->derived && !table->schema_table) + { + char tname[(NAME_LEN + 1) * 3]; // db\0table\0alias\0 + uint tlen, alen; + + tlen= table->db_length; + memcpy(tname, table->db, tlen); + tname[tlen++]= '\0'; + memcpy(tname+tlen, table->table_name, table->table_name_length); + tlen+= table->table_name_length; + tname[tlen++]= '\0'; + alen= strlen(table->alias); + memcpy(tname+tlen, table->alias, alen); + tlen+= alen; + tname[tlen]= '\0'; + + /* + We ignore alias when we check if table was already marked as temporary + (and therefore should not be prelocked). Otherwise we will erroneously + treat table with same name but with different alias as non-temporary. 
+ */ + if ((tab= (SP_TABLE *)hash_search(&m_sptabs, (byte *)tname, tlen)) || + ((tab= (SP_TABLE *)hash_search(&m_sptabs, (byte *)tname, + tlen - alen - 1)) && + tab->temp)) + { + if (tab->lock_type < table->lock_type) + tab->lock_type= table->lock_type; // Use the table with the highest lock type + tab->query_lock_count++; + if (tab->query_lock_count > tab->lock_count) + tab->lock_count++; + } + else + { + if (!(tab= (SP_TABLE *)thd->calloc(sizeof(SP_TABLE)))) + return FALSE; + if (lex_for_tmp_check->sql_command == SQLCOM_CREATE_TABLE && + lex_for_tmp_check->query_tables == table && + lex_for_tmp_check->create_info.options & HA_LEX_CREATE_TMP_TABLE) + { + tab->temp= TRUE; + tab->qname.length= tlen - alen - 1; + } + else + tab->qname.length= tlen; + tab->qname.str= (char*) thd->memdup(tname, tab->qname.length + 1); + if (!tab->qname.str) + return FALSE; + tab->table_name_length= table->table_name_length; + tab->db_length= table->db_length; + tab->lock_type= table->lock_type; + tab->lock_count= tab->query_lock_count= 1; + my_hash_insert(&m_sptabs, (byte *)tab); + } + } + return TRUE; +} + + +/* + Add tables used by routine to the table list. + + SYNOPSIS + add_used_tables_to_table_list() + thd [in] Thread context + query_tables_last_ptr [in/out] Pointer to the next_global member of + last element of the list where tables + will be added (or to its root). + belong_to_view [in] Uppermost view which uses this routine, + 0 if none. + + DESCRIPTION + Converts multi-set of tables used by this routine to table list and adds + this list to the end of table list specified by 'query_tables_last_ptr'. + + Elements of list will be allocated in PS memroot, so this list will be + persistent between PS executions. + + RETURN VALUE + TRUE - if some elements were added, FALSE - otherwise. 
+*/ + +bool +sp_head::add_used_tables_to_table_list(THD *thd, + TABLE_LIST ***query_tables_last_ptr, + TABLE_LIST *belong_to_view) +{ + uint i; + Query_arena *arena, backup; + bool result= FALSE; + DBUG_ENTER("sp_head::add_used_tables_to_table_list"); + + /* + Use persistent arena for table list allocation to be PS/SP friendly. + Note that we also have to copy database/table names and alias to PS/SP + memory since current instance of sp_head object can pass away before + next execution of PS/SP for which tables are added to prelocking list. + This will be fixed by introducing of proper invalidation mechanism + once new TDC is ready. + */ + arena= thd->activate_stmt_arena_if_needed(&backup); + + for (i=0 ; i < m_sptabs.records ; i++) + { + char *tab_buff, *key_buff; + TABLE_LIST *table; + SP_TABLE *stab= (SP_TABLE *)hash_element(&m_sptabs, i); + if (stab->temp) + continue; + + if (!(tab_buff= (char *)thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST)) * + stab->lock_count)) || + !(key_buff= (char*)thd->memdup(stab->qname.str, + stab->qname.length + 1))) + DBUG_RETURN(FALSE); + + for (uint j= 0; j < stab->lock_count; j++) + { + table= (TABLE_LIST *)tab_buff; + + table->db= key_buff; + table->db_length= stab->db_length; + table->table_name= table->db + table->db_length + 1; + table->table_name_length= stab->table_name_length; + table->alias= table->table_name + table->table_name_length + 1; + table->lock_type= stab->lock_type; + table->cacheable_table= 1; + table->prelocking_placeholder= 1; + table->belong_to_view= belong_to_view; + + /* Everyting else should be zeroed */ + + **query_tables_last_ptr= table; + table->prev_global= *query_tables_last_ptr; + *query_tables_last_ptr= &table->next_global; + + tab_buff+= ALIGN_SIZE(sizeof(TABLE_LIST)); + result= TRUE; + } + } + + if (arena) + thd->restore_active_arena(arena, &backup); + + DBUG_RETURN(result); +} + + +/* + Simple function for adding an explicetly named (systems) table to + the global table list, e.g. "mysql", "proc". 
+*/ + +TABLE_LIST * +sp_add_to_query_tables(THD *thd, LEX *lex, + const char *db, const char *name, + thr_lock_type locktype) +{ + TABLE_LIST *table; + + if (!(table= (TABLE_LIST *)thd->calloc(sizeof(TABLE_LIST)))) + { + my_error(ER_OUTOFMEMORY, MYF(0), sizeof(TABLE_LIST)); + return NULL; + } + table->db_length= strlen(db); + table->db= thd->strmake(db, table->db_length); + table->table_name_length= strlen(name); + table->table_name= thd->strmake(name, table->table_name_length); + table->alias= thd->strdup(name); + table->lock_type= locktype; + table->select_lex= lex->current_select; // QQ? + table->cacheable_table= 1; + + lex->add_to_query_tables(table); + return table; +} + diff --git a/sql/sp_head.h b/sql/sp_head.h new file mode 100644 index 00000000000..901b7a19c39 --- /dev/null +++ b/sql/sp_head.h @@ -0,0 +1,1216 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_HEAD_H_ +#define _SP_HEAD_H_ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +#include <stddef.h> + +// Values for the type enum. This reflects the order of the enum declaration +// in the CREATE TABLE command. 
+#define TYPE_ENUM_FUNCTION 1 +#define TYPE_ENUM_PROCEDURE 2 +#define TYPE_ENUM_TRIGGER 3 + +Item_result +sp_map_result_type(enum enum_field_types type); + +Item::Type +sp_map_item_type(enum enum_field_types type); + +uint +sp_get_flags_for_command(LEX *lex); + +struct sp_label; +class sp_instr; +class sp_instr_opt_meta; +class sp_instr_jump_if_not; +struct sp_cond_type; +struct sp_variable; + +class sp_name : public Sql_alloc +{ +public: + + LEX_STRING m_db; + LEX_STRING m_name; + LEX_STRING m_qname; + /* + Key representing routine in the set of stored routines used by statement. + Consists of 1-byte routine type and m_qname (which usually refences to + same buffer). Note that one must complete initialization of the key by + calling set_routine_type(). + */ + LEX_STRING m_sroutines_key; + + sp_name(LEX_STRING db, LEX_STRING name) + : m_db(db), m_name(name) + { + m_qname.str= m_sroutines_key.str= 0; + m_qname.length= m_sroutines_key.length= 0; + } + + /* + Creates temporary sp_name object from key, used mainly + for SP-cache lookups. + */ + sp_name(char *key, uint key_len) + { + m_sroutines_key.str= key; + m_sroutines_key.length= key_len; + m_name.str= m_qname.str= key + 1; + m_name.length= m_qname.length= key_len - 1; + m_db.str= 0; + m_db.length= 0; + } + + // Init. the qualified name from the db and name. 
+ void init_qname(THD *thd); // thd for memroot allocation + + void set_routine_type(char type) + { + m_sroutines_key.str[0]= type; + } + + ~sp_name() + {} +}; + + +bool +check_routine_name(LEX_STRING name); + +class sp_head :private Query_arena +{ + sp_head(const sp_head &); /* Prevent use of these */ + void operator=(sp_head &); + + MEM_ROOT main_mem_root; +public: + /* Possible values of m_flags */ + enum { + HAS_RETURN= 1, // For FUNCTIONs only: is set if has RETURN + MULTI_RESULTS= 8, // Is set if a procedure with SELECT(s) + CONTAINS_DYNAMIC_SQL= 16, // Is set if a procedure with PREPARE/EXECUTE + IS_INVOKED= 32, // Is set if this sp_head is being used + HAS_SET_AUTOCOMMIT_STMT= 64,// Is set if a procedure with 'set autocommit' + /* Is set if a procedure with COMMIT (implicit or explicit) | ROLLBACK */ + HAS_COMMIT_OR_ROLLBACK= 128, + HAS_SQLCOM_RESET= 2048, + HAS_SQLCOM_FLUSH= 4096 + }; + + /* TYPE_ENUM_FUNCTION, TYPE_ENUM_PROCEDURE or TYPE_ENUM_TRIGGER */ + int m_type; + uint m_flags; // Boolean attributes of a stored routine + + create_field m_return_field_def; /* This is used for FUNCTIONs only. */ + + uchar *m_tmp_query; // Temporary pointer to sub query string + uint m_old_cmq; // Old CLIENT_MULTI_QUERIES value + st_sp_chistics *m_chistics; + ulong m_sql_mode; // For SHOW CREATE and execution + LEX_STRING m_qname; // db.name + LEX_STRING m_db; + LEX_STRING m_name; + LEX_STRING m_params; + LEX_STRING m_body; + LEX_STRING m_defstr; + LEX_STRING m_definer_user; + LEX_STRING m_definer_host; + longlong m_created; + longlong m_modified; + /* Recursion level of the current SP instance. The levels are numbered from 0 */ + ulong m_recursion_level; + /* + A list of diferent recursion level instances for the same procedure. + For every recursion level we have a sp_head instance. This instances + connected in the list. The list ordered by increasing recursion level + (m_recursion_level). 
+ */ + sp_head *m_next_cached_sp; + /* + Pointer to the first element of the above list + */ + sp_head *m_first_instance; + /* + Pointer to the first free (non-INVOKED) routine in the list of + cached instances for this SP. This pointer is set only for the first + SP in the list of instences (see above m_first_cached_sp pointer). + The pointer equal to 0 if we have no free instances. + For non-first instance value of this pointer meanless (point to itself); + */ + sp_head *m_first_free_instance; + /* + Pointer to the last element in the list of instances of the SP. + For non-first instance value of this pointer meanless (point to itself); + */ + sp_head *m_last_cached_sp; + /* + Set containing names of stored routines used by this routine. + Note that unlike elements of similar set for statement elements of this + set are not linked in one list. Because of this we are able save memory + by using for this set same objects that are used in 'sroutines' sets + for statements of which this stored routine consists. + */ + HASH m_sroutines; + // Pointers set during parsing + uchar *m_param_begin, *m_param_end, *m_body_begin; + + /* + Security context for stored routine which should be run under + definer privileges. + */ + Security_context m_security_ctx; + + static void * + operator new(size_t size); + + static void + operator delete(void *ptr, size_t size); + + sp_head(); + + // Initialize after we have reset mem_root + void + init(LEX *lex); + + /* Copy sp name from parser. 
*/ + void + init_sp_name(THD *thd, sp_name *spname); + + // Initialize strings after parsing header + void + init_strings(THD *thd, LEX *lex); + + int + create(THD *thd); + + virtual ~sp_head(); + + // Free memory + void + destroy(); + + bool + execute_trigger(THD *thd, const char *db, const char *table, + GRANT_INFO *grant_onfo); + + bool + execute_function(THD *thd, Item **args, uint argcount, Field *return_fld); + + bool + execute_procedure(THD *thd, List<Item> *args); + + int + show_create_procedure(THD *thd); + + int + show_create_function(THD *thd); + + void + add_instr(sp_instr *instr); + + inline uint + instructions() + { + return m_instr.elements; + } + + inline sp_instr * + last_instruction() + { + sp_instr *i; + + get_dynamic(&m_instr, (gptr)&i, m_instr.elements-1); + return i; + } + + // Resets lex in 'thd' and keeps a copy of the old one. + void + reset_lex(THD *thd); + + // Restores lex in 'thd' from our copy, but keeps some status from the + // one in 'thd', like ptr, tables, fields, etc. + void + restore_lex(THD *thd); + + // Put the instruction on the backpatch list, associated with the label. + void + push_backpatch(sp_instr *, struct sp_label *); + + // Update all instruction with this label in the backpatch list to + // the current position. + void + backpatch(struct sp_label *); + + // Start a new cont. backpatch level. If 'i' is NULL, the level is just incr. + void + new_cont_backpatch(sp_instr_opt_meta *i); + + // Add an instruction to the current level + void + add_cont_backpatch(sp_instr_opt_meta *i); + + // Backpatch (and pop) the current level to the current position. 
+ void + do_cont_backpatch(); + + char *name(uint *lenp = 0) const + { + if (lenp) + *lenp= m_name.length; + return m_name.str; + } + + char *create_string(THD *thd, ulong *lenp); + + Field *create_result_field(uint field_max_length, const char *field_name, + TABLE *table); + + bool fill_field_definition(THD *thd, LEX *lex, + enum enum_field_types field_type, + create_field *field_def); + + void set_info(longlong created, longlong modified, + st_sp_chistics *chistics, ulong sql_mode); + + void set_definer(const char *definer, uint definerlen); + void set_definer(const LEX_STRING *user_name, const LEX_STRING *host_name); + + void reset_thd_mem_root(THD *thd); + + void restore_thd_mem_root(THD *thd); + + /** + Optimize the code. + */ + void optimize(); + + /** + Helper used during flow analysis during code optimization. + See the implementation of <code>opt_mark()</code>. + @param ip the instruction to add to the leads list + @param leads the list of remaining paths to explore in the graph that + represents the code, during flow analysis. + */ + void add_mark_lead(uint ip, List<sp_instr> *leads); + + void recursion_level_error(THD *thd); + + inline sp_instr * + get_instr(uint i) + { + sp_instr *ip; + + if (i < m_instr.elements) + get_dynamic(&m_instr, (gptr)&ip, i); + else + ip= NULL; + return ip; + } + + /* Add tables used by routine to the table list. */ + bool add_used_tables_to_table_list(THD *thd, + TABLE_LIST ***query_tables_last_ptr, + TABLE_LIST *belong_to_view); + + /* + Check if this stored routine contains statements disallowed + in a stored function or trigger, and set an appropriate error message + if this is the case. 
+ */ + bool is_not_allowed_in_function(const char *where) + { + if (m_flags & CONTAINS_DYNAMIC_SQL) + my_error(ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0), "Dynamic SQL"); + else if (m_flags & MULTI_RESULTS) + my_error(ER_SP_NO_RETSET, MYF(0), where); + else if (m_flags & HAS_SET_AUTOCOMMIT_STMT) + my_error(ER_SP_CANT_SET_AUTOCOMMIT, MYF(0)); + else if (m_flags & HAS_COMMIT_OR_ROLLBACK) + my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0)); + else if (m_flags & HAS_SQLCOM_RESET) + my_error(ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0), "RESET"); + else if (m_flags & HAS_SQLCOM_FLUSH) + my_error(ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0), "FLUSH"); + + return test(m_flags & + (CONTAINS_DYNAMIC_SQL|MULTI_RESULTS|HAS_SET_AUTOCOMMIT_STMT| + HAS_COMMIT_OR_ROLLBACK|HAS_SQLCOM_RESET|HAS_SQLCOM_FLUSH)); + } + +#ifndef DBUG_OFF + int show_routine_code(THD *thd); +#endif + + +private: + + MEM_ROOT *m_thd_root; // Temp. store for thd's mem_root + THD *m_thd; // Set if we have reset mem_root + + sp_pcontext *m_pcont; // Parse context + List<LEX> m_lex; // Temp. store for the other lex + DYNAMIC_ARRAY m_instr; // The "instructions" + typedef struct + { + struct sp_label *lab; + sp_instr *instr; + } bp_t; + List<bp_t> m_backpatch; // Instructions needing backpatching + /* + We need a special list for backpatching of instructions with a continue + destination (in the case of a continue handler catching an error in + the test), since it would otherwise interfere with the normal backpatch + mechanism - e.g. jump_if_not instructions have two different destinations + which are to be patched differently. + Since these occur in a more restricted way (always the same "level" in + the code), we don't need the label. + */ + List<sp_instr_opt_meta> m_cont_backpatch; + uint m_cont_level; // The current cont. backpatch level + + /* + Multi-set representing optimized list of tables to be locked by this + routine. Does not include tables which are used by invoked routines. 
+ + Note: for prelocking-free SPs this multiset is constructed too. + We do so because the same instance of sp_head may be called both + in prelocked mode and in non-prelocked mode. + */ + HASH m_sptabs; + + bool + execute(THD *thd); + + /** + Perform a forward flow analysis in the generated code. + Mark reachable instructions, for the optimizer. + */ + void opt_mark(); + + /* + Merge the list of tables used by query into the multi-set of tables used + by routine. + */ + bool merge_table_list(THD *thd, TABLE_LIST *table, LEX *lex_for_tmp_check); +}; // class sp_head : public Sql_alloc + + +// +// "Instructions"... +// + +class sp_instr :public Query_arena, public Sql_alloc +{ + sp_instr(const sp_instr &); /* Prevent use of these */ + void operator=(sp_instr &); + +public: + + uint marked; + uint m_ip; // My index + sp_pcontext *m_ctx; // My parse context + + // Should give each a name or type code for debugging purposes? + sp_instr(uint ip, sp_pcontext *ctx) + :Query_arena(0, INITIALIZED_FOR_SP), marked(0), m_ip(ip), m_ctx(ctx) + {} + + virtual ~sp_instr() + { free_items(); } + + + /* + Execute this instruction + + SYNOPSIS + execute() + thd Thread handle + nextp OUT index of the next instruction to execute. (For most + instructions this will be the instruction following this + one). Note that this parameter is undefined in case of + errors, use get_cont_dest() to find the continuation + instruction for CONTINUE error handlers. + + RETURN + 0 on success, + other if some error occurred + */ + + virtual int execute(THD *thd, uint *nextp) = 0; + + /** + Execute <code>open_and_lock_tables()</code> for this statement. + Open and lock the tables used by this statement, as a pre-requisite + to execute the core logic of this instruction with + <code>exec_core()</code>. + @param thd the current thread + @param tables the list of tables to open and lock + @return zero on success, non zero on failure. 
+ */ + int exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables); + + /** + Get the continuation destination of this instruction. + @return the continuation destination + */ + virtual uint get_cont_dest(); + + /* + Execute core function of instruction after all preparations (e.g. + setting of proper LEX, saving part of the thread context have been + done). + + Should be implemented for instructions using expressions or whole + statements (thus having to have own LEX). Used in concert with + sp_lex_keeper class and its descendants (there are none currently). + */ + virtual int exec_core(THD *thd, uint *nextp); + + virtual void print(String *str) = 0; + + virtual void backpatch(uint dest, sp_pcontext *dst_ctx) + {} + + /* + Mark this instruction as reachable during optimization and return the + index to the next instruction. Jump instruction will add their + destination to the leads list. + */ + virtual uint opt_mark(sp_head *sp, List<sp_instr> *leads) + { + marked= 1; + return m_ip+1; + } + + /* + Short-cut jumps to jumps during optimization. This is used by the + jump instructions' opt_mark() methods. 'start' is the starting point, + used to prevent the mark sweep from looping for ever. Return the + end destination. + */ + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + + /* + Inform the instruction that it has been moved during optimization. + Most instructions will simply update its index, but jump instructions + must also take care of their destination pointers. Forward jumps get + pushed to the backpatch list 'ibp'. + */ + virtual void opt_move(uint dst, List<sp_instr> *ibp) + { + m_ip= dst; + } + +}; // class sp_instr : public Sql_alloc + + +/* + Auxilary class to which instructions delegate responsibility + for handling LEX and preparations before executing statement + or calculating complex expression. + + Exist mainly to avoid having double hierarchy between instruction + classes. 
+ + TODO: Add ability to not store LEX and do any preparations if + expression used is simple. +*/ + +class sp_lex_keeper +{ + /* Prevent use of these */ + sp_lex_keeper(const sp_lex_keeper &); + void operator=(sp_lex_keeper &); +public: + + sp_lex_keeper(LEX *lex, bool lex_resp) + : m_lex(lex), m_lex_resp(lex_resp), + lex_query_tables_own_last(NULL) + { + lex->sp_lex_in_use= TRUE; + } + virtual ~sp_lex_keeper() + { + if (m_lex_resp) + { + lex_end(m_lex); + delete m_lex; + } + } + + /* + Prepare execution of instruction using LEX, if requested check whenever + we have read access to tables used and open/lock them, call instruction's + exec_core() method, perform cleanup afterwards. + */ + int reset_lex_and_exec_core(THD *thd, uint *nextp, bool open_tables, + sp_instr* instr); + + inline uint sql_command() const + { + return (uint)m_lex->sql_command; + } + + void disable_query_cache() + { + m_lex->safe_to_cache_query= 0; + } +private: + + LEX *m_lex; + /* + Indicates whenever this sp_lex_keeper instance responsible + for LEX deletion. + */ + bool m_lex_resp; + + /* + Support for being able to execute this statement in two modes: + a) inside prelocked mode set by the calling procedure or its ancestor. + b) outside of prelocked mode, when this statement enters/leaves + prelocked mode itself. + */ + + /* + List of additional tables this statement needs to lock when it + enters/leaves prelocked mode on its own. + */ + TABLE_LIST *prelocking_tables; + + /* + The value m_lex->query_tables_own_last should be set to this when the + statement enters/leaves prelocked mode on its own. + */ + TABLE_LIST **lex_query_tables_own_last; +}; + + +// +// Call out to some prepared SQL statement. 
+// +class sp_instr_stmt : public sp_instr +{ + sp_instr_stmt(const sp_instr_stmt &); /* Prevent use of these */ + void operator=(sp_instr_stmt &); + +public: + + LEX_STRING m_query; // For thd->query + + sp_instr_stmt(uint ip, sp_pcontext *ctx, LEX *lex) + : sp_instr(ip, ctx), m_lex_keeper(lex, TRUE) + { + m_query.str= 0; + m_query.length= 0; + } + + virtual ~sp_instr_stmt() + {}; + + virtual int execute(THD *thd, uint *nextp); + + virtual int exec_core(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + sp_lex_keeper m_lex_keeper; + +}; // class sp_instr_stmt : public sp_instr + + +class sp_instr_set : public sp_instr +{ + sp_instr_set(const sp_instr_set &); /* Prevent use of these */ + void operator=(sp_instr_set &); + +public: + + sp_instr_set(uint ip, sp_pcontext *ctx, + uint offset, Item *val, enum enum_field_types type_arg, + LEX *lex, bool lex_resp) + : sp_instr(ip, ctx), m_offset(offset), m_value(val), m_type(type_arg), + m_lex_keeper(lex, lex_resp) + {} + + virtual ~sp_instr_set() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual int exec_core(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_offset; // Frame offset + Item *m_value; + enum enum_field_types m_type; // The declared type + sp_lex_keeper m_lex_keeper; + +}; // class sp_instr_set : public sp_instr + + +/* + Set NEW/OLD row field value instruction. Used in triggers. 
+*/ +class sp_instr_set_trigger_field : public sp_instr +{ + sp_instr_set_trigger_field(const sp_instr_set_trigger_field &); + void operator=(sp_instr_set_trigger_field &); + +public: + + sp_instr_set_trigger_field(uint ip, sp_pcontext *ctx, + Item_trigger_field *trg_fld, + Item *val, LEX *lex) + : sp_instr(ip, ctx), + trigger_field(trg_fld), + value(val), m_lex_keeper(lex, TRUE) + {} + + virtual ~sp_instr_set_trigger_field() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual int exec_core(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + Item_trigger_field *trigger_field; + Item *value; + sp_lex_keeper m_lex_keeper; +}; // class sp_instr_trigger_field : public sp_instr + + +/* + An abstract class for all instructions with destinations that + needs to be updated by the optimizer. + Even if not all subclasses will use both the normal destination and + the continuation destination, we put them both here for simplicity. + */ +class sp_instr_opt_meta : public sp_instr +{ +public: + + uint m_dest; // Where we will go + uint m_cont_dest; // Where continue handlers will go + + sp_instr_opt_meta(uint ip, sp_pcontext *ctx) + : sp_instr(ip, ctx), + m_dest(0), m_cont_dest(0), m_optdest(0), m_cont_optdest(0) + {} + + sp_instr_opt_meta(uint ip, sp_pcontext *ctx, uint dest) + : sp_instr(ip, ctx), + m_dest(dest), m_cont_dest(0), m_optdest(0), m_cont_optdest(0) + {} + + virtual ~sp_instr_opt_meta() + {} + + virtual void set_destination(uint old_dest, uint new_dest) + = 0; + + virtual uint get_cont_dest(); + +protected: + + sp_instr *m_optdest; // Used during optimization + sp_instr *m_cont_optdest; // Used during optimization + +}; // class sp_instr_opt_meta : public sp_instr + +class sp_instr_jump : public sp_instr_opt_meta +{ + sp_instr_jump(const sp_instr_jump &); /* Prevent use of these */ + void operator=(sp_instr_jump &); + +public: + + sp_instr_jump(uint ip, sp_pcontext *ctx) + : sp_instr_opt_meta(ip, ctx) + {} + + sp_instr_jump(uint 
ip, sp_pcontext *ctx, uint dest) + : sp_instr_opt_meta(ip, ctx, dest) + {} + + virtual ~sp_instr_jump() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp, List<sp_instr> *leads); + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start); + + virtual void opt_move(uint dst, List<sp_instr> *ibp); + + virtual void backpatch(uint dest, sp_pcontext *dst_ctx) + { + if (m_dest == 0) // Don't reset + m_dest= dest; + } + + /* + Update the destination; used by the optimizer. + */ + virtual void set_destination(uint old_dest, uint new_dest) + { + if (m_dest == old_dest) + m_dest= new_dest; + } + +}; // class sp_instr_jump : public sp_instr_opt_meta + + +class sp_instr_jump_if_not : public sp_instr_jump +{ + sp_instr_jump_if_not(const sp_instr_jump_if_not &); /* Prevent use of these */ + void operator=(sp_instr_jump_if_not &); + +public: + + sp_instr_jump_if_not(uint ip, sp_pcontext *ctx, Item *i, LEX *lex) + : sp_instr_jump(ip, ctx), m_expr(i), + m_lex_keeper(lex, TRUE) + {} + + sp_instr_jump_if_not(uint ip, sp_pcontext *ctx, Item *i, uint dest, LEX *lex) + : sp_instr_jump(ip, ctx, dest), m_expr(i), + m_lex_keeper(lex, TRUE) + {} + + virtual ~sp_instr_jump_if_not() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual int exec_core(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp, List<sp_instr> *leads); + + /* Override sp_instr_jump's shortcut; we stop here */ + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + + virtual void opt_move(uint dst, List<sp_instr> *ibp); + + virtual void set_destination(uint old_dest, uint new_dest) + { + sp_instr_jump::set_destination(old_dest, new_dest); + if (m_cont_dest == old_dest) + m_cont_dest= new_dest; + } + +private: + + Item *m_expr; // The condition + sp_lex_keeper m_lex_keeper; + +}; // class sp_instr_jump_if_not : public sp_instr_jump + + +class 
sp_instr_freturn : public sp_instr +{ + sp_instr_freturn(const sp_instr_freturn &); /* Prevent use of these */ + void operator=(sp_instr_freturn &); + +public: + + sp_instr_freturn(uint ip, sp_pcontext *ctx, + Item *val, enum enum_field_types type_arg, LEX *lex) + : sp_instr(ip, ctx), m_value(val), m_type(type_arg), + m_lex_keeper(lex, TRUE) + {} + + virtual ~sp_instr_freturn() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual int exec_core(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp, List<sp_instr> *leads) + { + marked= 1; + return UINT_MAX; + } + +protected: + + Item *m_value; + enum enum_field_types m_type; + sp_lex_keeper m_lex_keeper; + +}; // class sp_instr_freturn : public sp_instr + + +class sp_instr_hpush_jump : public sp_instr_jump +{ + sp_instr_hpush_jump(const sp_instr_hpush_jump &); /* Prevent use of these */ + void operator=(sp_instr_hpush_jump &); + +public: + + sp_instr_hpush_jump(uint ip, sp_pcontext *ctx, int htype, uint fp) + : sp_instr_jump(ip, ctx), m_type(htype), m_frame(fp) + { + m_cond.empty(); + } + + virtual ~sp_instr_hpush_jump() + { + m_cond.empty(); + } + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp, List<sp_instr> *leads); + + /* Override sp_instr_jump's shortcut; we stop here. 
*/ + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + + inline void add_condition(struct sp_cond_type *cond) + { + m_cond.push_front(cond); + } + +private: + + int m_type; // Handler type + uint m_frame; + List<struct sp_cond_type> m_cond; + +}; // class sp_instr_hpush_jump : public sp_instr_jump + + +class sp_instr_hpop : public sp_instr +{ + sp_instr_hpop(const sp_instr_hpop &); /* Prevent use of these */ + void operator=(sp_instr_hpop &); + +public: + + sp_instr_hpop(uint ip, sp_pcontext *ctx, uint count) + : sp_instr(ip, ctx), m_count(count) + {} + + virtual ~sp_instr_hpop() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_count; + +}; // class sp_instr_hpop : public sp_instr + + +class sp_instr_hreturn : public sp_instr_jump +{ + sp_instr_hreturn(const sp_instr_hreturn &); /* Prevent use of these */ + void operator=(sp_instr_hreturn &); + +public: + + sp_instr_hreturn(uint ip, sp_pcontext *ctx, uint fp) + : sp_instr_jump(ip, ctx), m_frame(fp) + {} + + virtual ~sp_instr_hreturn() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp, List<sp_instr> *leads); + +private: + + uint m_frame; + +}; // class sp_instr_hreturn : public sp_instr_jump + + +/* This is DECLARE CURSOR */ +class sp_instr_cpush : public sp_instr +{ + sp_instr_cpush(const sp_instr_cpush &); /* Prevent use of these */ + void operator=(sp_instr_cpush &); + +public: + + sp_instr_cpush(uint ip, sp_pcontext *ctx, LEX *lex, uint offset) + : sp_instr(ip, ctx), m_lex_keeper(lex, TRUE), m_cursor(offset) + {} + + virtual ~sp_instr_cpush() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + /* + This call is used to cleanup the instruction when a sensitive + cursor is closed. For now stored procedures always use materialized + cursors and the call is not used. 
+ */ + virtual void cleanup_stmt() { /* no op */ } +private: + + sp_lex_keeper m_lex_keeper; + uint m_cursor; /* Frame offset (for debugging) */ + +}; // class sp_instr_cpush : public sp_instr + + +class sp_instr_cpop : public sp_instr +{ + sp_instr_cpop(const sp_instr_cpop &); /* Prevent use of these */ + void operator=(sp_instr_cpop &); + +public: + + sp_instr_cpop(uint ip, sp_pcontext *ctx, uint count) + : sp_instr(ip, ctx), m_count(count) + {} + + virtual ~sp_instr_cpop() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_count; + +}; // class sp_instr_cpop : public sp_instr + + +class sp_instr_copen : public sp_instr +{ + sp_instr_copen(const sp_instr_copen &); /* Prevent use of these */ + void operator=(sp_instr_copen &); + +public: + + sp_instr_copen(uint ip, sp_pcontext *ctx, uint c) + : sp_instr(ip, ctx), m_cursor(c) + {} + + virtual ~sp_instr_copen() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual int exec_core(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_cursor; // Stack index + +}; // class sp_instr_copen : public sp_instr_stmt + + +class sp_instr_cclose : public sp_instr +{ + sp_instr_cclose(const sp_instr_cclose &); /* Prevent use of these */ + void operator=(sp_instr_cclose &); + +public: + + sp_instr_cclose(uint ip, sp_pcontext *ctx, uint c) + : sp_instr(ip, ctx), m_cursor(c) + {} + + virtual ~sp_instr_cclose() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_cursor; + +}; // class sp_instr_cclose : public sp_instr + + +class sp_instr_cfetch : public sp_instr +{ + sp_instr_cfetch(const sp_instr_cfetch &); /* Prevent use of these */ + void operator=(sp_instr_cfetch &); + +public: + + sp_instr_cfetch(uint ip, sp_pcontext *ctx, uint c) + : sp_instr(ip, ctx), m_cursor(c) + { + m_varlist.empty(); + } + + virtual ~sp_instr_cfetch() + {} + + virtual int execute(THD *thd, uint 
*nextp); + + virtual void print(String *str); + + void add_to_varlist(struct sp_variable *var) + { + m_varlist.push_back(var); + } + +private: + + uint m_cursor; + List<struct sp_variable> m_varlist; + +}; // class sp_instr_cfetch : public sp_instr + + +class sp_instr_error : public sp_instr +{ + sp_instr_error(const sp_instr_error &); /* Prevent use of these */ + void operator=(sp_instr_error &); + +public: + + sp_instr_error(uint ip, sp_pcontext *ctx, int errcode) + : sp_instr(ip, ctx), m_errcode(errcode) + {} + + virtual ~sp_instr_error() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp, List<sp_instr> *leads) + { + marked= 1; + return UINT_MAX; + } + +private: + + int m_errcode; + +}; // class sp_instr_error : public sp_instr + + +class sp_instr_set_case_expr : public sp_instr_opt_meta +{ +public: + + sp_instr_set_case_expr(uint ip, sp_pcontext *ctx, uint case_expr_id, + Item *case_expr, LEX *lex) + : sp_instr_opt_meta(ip, ctx), + m_case_expr_id(case_expr_id), m_case_expr(case_expr), + m_lex_keeper(lex, TRUE) + {} + + virtual ~sp_instr_set_case_expr() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual int exec_core(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp, List<sp_instr> *leads); + + virtual void opt_move(uint dst, List<sp_instr> *ibp); + + virtual void set_destination(uint old_dest, uint new_dest) + { + if (m_cont_dest == old_dest) + m_cont_dest= new_dest; + } + +private: + + uint m_case_expr_id; + Item *m_case_expr; + sp_lex_keeper m_lex_keeper; + +}; // class sp_instr_set_case_expr : public sp_instr_opt_meta + + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +bool +sp_change_security_context(THD *thd, sp_head *sp, + Security_context **backup); +void +sp_restore_security_context(THD *thd, Security_context *backup); + +bool +set_routine_security_ctx(THD *thd, sp_head *sp, bool is_proc, + Security_context **save_ctx); 
+#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + +TABLE_LIST * +sp_add_to_query_tables(THD *thd, LEX *lex, + const char *db, const char *name, + thr_lock_type locktype); +Item * +sp_prepare_func_item(THD* thd, Item **it_addr); + +bool +sp_eval_expr(THD *thd, Field *result_field, Item **expr_item_ptr); + +#endif /* _SP_HEAD_H_ */ diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc new file mode 100644 index 00000000000..780243cc79f --- /dev/null +++ b/sql/sp_pcontext.cc @@ -0,0 +1,462 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "mysql_priv.h" +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation +#endif + +#if defined(WIN32) || defined(__WIN__) +#undef SAFEMALLOC /* Problems with threads */ +#endif + +#include "sp_pcontext.h" +#include "sp_head.h" + +/* Initial size for the dynamic arrays in sp_pcontext */ +#define PCONTEXT_ARRAY_INIT_ALLOC 16 +/* Increment size for the dynamic arrays in sp_pcontext */ +#define PCONTEXT_ARRAY_INCREMENT_ALLOC 8 + +/* + Sanity check for SQLSTATEs. Will not check if it's really an existing + state (there are just too many), but will check length and bad characters. + Returns TRUE if it's ok, FALSE if it's bad. 
+*/ +bool +sp_cond_check(LEX_STRING *sqlstate) +{ + int i; + const char *p; + + if (sqlstate->length != 5) + return FALSE; + for (p= sqlstate->str, i= 0 ; i < 5 ; i++) + { + char c = p[i]; + + if ((c < '0' || '9' < c) && + (c < 'A' || 'Z' < c)) + return FALSE; + } + return TRUE; +} + +sp_pcontext::sp_pcontext() + : Sql_alloc(), + m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0), + m_context_handlers(0), m_parent(NULL), m_pboundary(0), + m_label_scope(LABEL_DEFAULT_SCOPE) +{ + VOID(my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + m_label.empty(); + m_children.empty(); + + m_var_offset= m_cursor_offset= 0; + m_num_case_exprs= 0; +} + +sp_pcontext::sp_pcontext(sp_pcontext *prev, label_scope_type label_scope) + : Sql_alloc(), + m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0), + m_context_handlers(0), m_parent(prev), m_pboundary(0), + m_label_scope(label_scope) +{ + VOID(my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING), + PCONTEXT_ARRAY_INIT_ALLOC, + 
PCONTEXT_ARRAY_INCREMENT_ALLOC)); + VOID(my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *), + PCONTEXT_ARRAY_INIT_ALLOC, + PCONTEXT_ARRAY_INCREMENT_ALLOC)); + m_label.empty(); + m_children.empty(); + + m_var_offset= prev->m_var_offset + prev->m_max_var_index; + m_cursor_offset= prev->current_cursor_count(); + m_num_case_exprs= prev->get_num_case_exprs(); +} + +void +sp_pcontext::destroy() +{ + List_iterator_fast<sp_pcontext> li(m_children); + sp_pcontext *child; + + while ((child= li++)) + child->destroy(); + + m_children.empty(); + m_label.empty(); + delete_dynamic(&m_vars); + delete_dynamic(&m_case_expr_id_lst); + delete_dynamic(&m_conds); + delete_dynamic(&m_cursors); + delete_dynamic(&m_handlers); +} + +sp_pcontext * +sp_pcontext::push_context(label_scope_type label_scope) +{ + sp_pcontext *child= new sp_pcontext(this, label_scope); + + if (child) + m_children.push_back(child); + return child; +} + +sp_pcontext * +sp_pcontext::pop_context() +{ + m_parent->m_max_var_index+= m_max_var_index; + + uint submax= max_handler_index(); + if (submax > m_parent->m_max_handler_index) + m_parent->m_max_handler_index= submax; + + submax= max_cursor_index(); + if (submax > m_parent->m_max_cursor_index) + m_parent->m_max_cursor_index= submax; + + if (m_num_case_exprs > m_parent->m_num_case_exprs) + m_parent->m_num_case_exprs= m_num_case_exprs; + + return m_parent; +} + +uint +sp_pcontext::diff_handlers(sp_pcontext *ctx, bool exclusive) +{ + uint n= 0; + sp_pcontext *pctx= this; + sp_pcontext *last_ctx= NULL; + + while (pctx && pctx != ctx) + { + n+= pctx->m_context_handlers; + last_ctx= pctx; + pctx= pctx->parent_context(); + } + if (pctx) + return (exclusive && last_ctx ? 
n - last_ctx->m_context_handlers : n);
+  return 0; // Didn't find ctx
+}
+
+uint
+sp_pcontext::diff_cursors(sp_pcontext *ctx, bool exclusive)
+{
+  uint n= 0;
+  sp_pcontext *pctx= this;
+  sp_pcontext *last_ctx= NULL;
+
+  while (pctx && pctx != ctx)
+  {
+    n+= pctx->m_cursors.elements;
+    last_ctx= pctx;
+    pctx= pctx->parent_context();
+  }
+  if (pctx)
+    return (exclusive && last_ctx ? n - last_ctx->m_cursors.elements : n);
+  return 0; // Didn't find ctx
+}
+
+/*
+  This does a linear search (from newer to older variables, in case
+  we have shadowed names).
+  It's possible to have a more efficient allocation and search method,
+  but it might not be worth it. The typical number of parameters and
+  variables will in most cases be low (a handful).
+  ...and, this is only called during parsing.
+*/
+sp_variable_t *
+sp_pcontext::find_variable(LEX_STRING *name, my_bool scoped)
+{
+  uint i= m_vars.elements - m_pboundary;
+
+  while (i--)
+  {
+    sp_variable_t *p;
+
+    get_dynamic(&m_vars, (gptr)&p, i);
+    if (my_strnncoll(system_charset_info,
+                     (const uchar *)name->str, name->length,
+                     (const uchar *)p->name.str, p->name.length) == 0)
+    {
+      return p;
+    }
+  }
+  if (!scoped && m_parent)
+    return m_parent->find_variable(name, scoped);
+  return NULL;
+}
+
+/*
+  Find a variable by offset from the top.
+  This is used for two things:
+  - When evaluating parameters at the beginning, and setting out parameters
+    at the end, of invocation. (Top frame only, so no recursion then.)
+  - For printing of sp_instr_set. (Debug mode only.)
+*/ +sp_variable_t * +sp_pcontext::find_variable(uint offset) +{ + if (m_var_offset <= offset && offset < m_var_offset + m_vars.elements) + { // This frame + sp_variable_t *p; + + get_dynamic(&m_vars, (gptr)&p, offset - m_var_offset); + return p; + } + if (m_parent) + return m_parent->find_variable(offset); // Some previous frame + return NULL; // index out of bounds +} + +sp_variable_t * +sp_pcontext::push_variable(LEX_STRING *name, enum enum_field_types type, + sp_param_mode_t mode) +{ + sp_variable_t *p= (sp_variable_t *)sql_alloc(sizeof(sp_variable_t)); + + if (!p) + return NULL; + + ++m_max_var_index; + + p->name.str= name->str; + p->name.length= name->length; + p->type= type; + p->mode= mode; + p->offset= current_var_count(); + p->dflt= NULL; + insert_dynamic(&m_vars, (gptr)&p); + + return p; +} + + +sp_label_t * +sp_pcontext::push_label(char *name, uint ip) +{ + sp_label_t *lab = (sp_label_t *)sql_alloc(sizeof(sp_label_t)); + + if (lab) + { + lab->name= name; + lab->ip= ip; + lab->type= SP_LAB_IMPL; + lab->ctx= this; + m_label.push_front(lab); + } + return lab; +} + +sp_label_t * +sp_pcontext::find_label(char *name) +{ + List_iterator_fast<sp_label_t> li(m_label); + sp_label_t *lab; + + while ((lab= li++)) + if (my_strcasecmp(system_charset_info, name, lab->name) == 0) + return lab; + + /* + Note about exception handlers. + See SQL:2003 SQL/PSM (ISO/IEC 9075-4:2003), + section 13.1 <compound statement>, + syntax rule 4. + In short, a DECLARE HANDLER block can not refer + to labels from the parent context, as they are out of scope. 
+ */ + if (m_parent && (m_label_scope == LABEL_DEFAULT_SCOPE)) + return m_parent->find_label(name); + return NULL; +} + +void +sp_pcontext::push_cond(LEX_STRING *name, sp_cond_type_t *val) +{ + sp_cond_t *p= (sp_cond_t *)sql_alloc(sizeof(sp_cond_t)); + + if (p) + { + p->name.str= name->str; + p->name.length= name->length; + p->val= val; + insert_dynamic(&m_conds, (gptr)&p); + } +} + +/* + See comment for find_variable() above +*/ +sp_cond_type_t * +sp_pcontext::find_cond(LEX_STRING *name, my_bool scoped) +{ + uint i= m_conds.elements; + + while (i--) + { + sp_cond_t *p; + + get_dynamic(&m_conds, (gptr)&p, i); + if (my_strnncoll(system_charset_info, + (const uchar *)name->str, name->length, + (const uchar *)p->name.str, p->name.length) == 0) + { + return p->val; + } + } + if (!scoped && m_parent) + return m_parent->find_cond(name, scoped); + return NULL; +} + +/* + This only searches the current context, for error checking of + duplicates. + Returns TRUE if found. +*/ +bool +sp_pcontext::find_handler(sp_cond_type_t *cond) +{ + uint i= m_handlers.elements; + + while (i--) + { + sp_cond_type_t *p; + + get_dynamic(&m_handlers, (gptr)&p, i); + if (cond->type == p->type) + { + switch (p->type) + { + case sp_cond_type_t::number: + if (cond->mysqlerr == p->mysqlerr) + return TRUE; + break; + case sp_cond_type_t::state: + if (strcmp(cond->sqlstate, p->sqlstate) == 0) + return TRUE; + break; + default: + return TRUE; + } + } + } + return FALSE; +} + +void +sp_pcontext::push_cursor(LEX_STRING *name) +{ + LEX_STRING n; + + if (m_cursors.elements == m_max_cursor_index) + m_max_cursor_index+= 1; + n.str= name->str; + n.length= name->length; + insert_dynamic(&m_cursors, (gptr)&n); +} + +/* + See comment for find_variable() above +*/ +my_bool +sp_pcontext::find_cursor(LEX_STRING *name, uint *poff, my_bool scoped) +{ + uint i= m_cursors.elements; + + while (i--) + { + LEX_STRING n; + + get_dynamic(&m_cursors, (gptr)&n, i); + if (my_strnncoll(system_charset_info, + (const uchar 
*)name->str, name->length, + (const uchar *)n.str, n.length) == 0) + { + *poff= m_cursor_offset + i; + return TRUE; + } + } + if (!scoped && m_parent) + return m_parent->find_cursor(name, poff, scoped); + return FALSE; +} + + +void +sp_pcontext::retrieve_field_definitions(List<create_field> *field_def_lst) +{ + /* Put local/context fields in the result list. */ + + for (uint i = 0; i < m_vars.elements; ++i) + { + sp_variable_t *var_def; + get_dynamic(&m_vars, (gptr) &var_def, i); + + field_def_lst->push_back(&var_def->field_def); + } + + /* Put the fields of the enclosed contexts in the result list. */ + + List_iterator_fast<sp_pcontext> li(m_children); + sp_pcontext *ctx; + + while ((ctx = li++)) + ctx->retrieve_field_definitions(field_def_lst); +} + +/* + Find a cursor by offset from the top. + This is only used for debugging. +*/ +my_bool +sp_pcontext::find_cursor(uint offset, LEX_STRING *n) +{ + if (m_cursor_offset <= offset && + offset < m_cursor_offset + m_cursors.elements) + { // This frame + get_dynamic(&m_cursors, (gptr)n, offset - m_cursor_offset); + return TRUE; + } + if (m_parent) + return m_parent->find_cursor(offset, n); // Some previous frame + return FALSE; // index out of bounds +} diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h new file mode 100644 index 00000000000..5bffda79f98 --- /dev/null +++ b/sql/sp_pcontext.h @@ -0,0 +1,463 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef _SP_PCONTEXT_H_
+#define _SP_PCONTEXT_H_
+
+#ifdef USE_PRAGMA_INTERFACE
+#pragma interface /* gcc class implementation */
+#endif
+
+typedef enum
+{
+  sp_param_in,
+  sp_param_out,
+  sp_param_inout
+} sp_param_mode_t;
+
+typedef struct sp_variable
+{
+  LEX_STRING name;
+  enum enum_field_types type;
+  sp_param_mode_t mode;
+
+  /*
+    offset -- this is the index to the variable's value in the runtime frame.
+    This is calculated during parsing and used when creating sp_instr_set
+    instructions and Item_splocal items.
+    I.e. values are set/referred by array indexing in runtime.
+  */
+  uint offset;
+
+  Item *dflt;
+  create_field field_def;
+} sp_variable_t;
+
+
+#define SP_LAB_IMPL 0 // Implicit label generated by parser
+#define SP_LAB_BEGIN 1 // Label at BEGIN
+#define SP_LAB_ITER 2 // Label at iteration control
+
+/*
+  An SQL/PSM label. Can refer to the identifier used with the
+  "label_name:" construct which may precede some SQL/PSM statements, or
+  to an implicit implementation-dependent identifier which the parser
+  inserts before a high-level flow control statement such as
+  IF/WHILE/REPEAT/LOOP, when such statement is rewritten into
+  a combination of low-level jump/jump_if instructions and labels.
+*/
+
+typedef struct sp_label
+{
+  char *name;
+  uint ip; // Instruction index
+  int type; // begin/iter or ref/free
+  sp_pcontext *ctx; // The label's context
+} sp_label_t;
+
+typedef struct sp_cond_type
+{
+  enum { number, state, warning, notfound, exception } type;
+  char sqlstate[6];
+  uint mysqlerr;
+} sp_cond_type_t;
+
+/*
+  Sanity check for SQLSTATEs. Will not check if it's really an existing
+  state (there are just too many), but will check length and bad characters.
+*/ +extern bool +sp_cond_check(LEX_STRING *sqlstate); + +typedef struct sp_cond +{ + LEX_STRING name; + sp_cond_type_t *val; +} sp_cond_t; + +/** + The scope of a label in Stored Procedures, + for name resolution of labels in a parsing context. +*/ +enum label_scope_type +{ + /** + The labels declared in a parent context are in scope. + */ + LABEL_DEFAULT_SCOPE, + /** + The labels declared in a parent context are not in scope. + */ + LABEL_HANDLER_SCOPE +}; + +/** + The parse-time context, used to keep track of declared variables/parameters, + conditions, handlers, cursors and labels, during parsing. + sp_contexts are organized as a tree, with one object for each begin-end + block, one object for each exception handler, + plus a root-context for the parameters. + This is used during parsing for looking up defined names (e.g. declared + variables and visible labels), for error checking, and to calculate offsets + to be used at runtime. (During execution variable values, active handlers + and cursors, etc, are referred to by an index in a stack.) + Parsing contexts for exception handlers limit the visibility of labels. + The pcontext tree is also kept during execution and is used for error + checking (e.g. correct number of parameters), and in the future, used by + the debugger. +*/ + +class sp_pcontext : public Sql_alloc +{ +public: + + /** + Constructor. + Builds a parsing context root node. + */ + sp_pcontext(); + + // Free memory + void + destroy(); + + /** + Create and push a new context in the tree. + @param label_scope label scope for the new parsing context + @return the node created + */ + sp_pcontext * + push_context(label_scope_type label_scope); + + /** + Pop a node from the parsing context tree. + @return the parent node + */ + sp_pcontext * + pop_context(); + + sp_pcontext * + parent_context() + { + return m_parent; + } + + /* + Number of handlers/cursors to pop between this context and 'ctx'. 
+ If 'exclusive' is true, don't count the last block we are leaving; + this is used for LEAVE where we will jump to the cpop/hpop instructions. + */ + uint + diff_handlers(sp_pcontext *ctx, bool exclusive); + uint + diff_cursors(sp_pcontext *ctx, bool exclusive); + + + // + // Parameters and variables + // + + /* + The maximum number of variables used in this and all child contexts + In the root, this gives us the number of slots needed for variables + during execution. + */ + inline uint + max_var_index() + { + return m_max_var_index; + } + + /* + The current number of variables used in the parents (from the root), + including this context. + */ + inline uint + current_var_count() + { + return m_var_offset + m_vars.elements; + } + + /* The number of variables in this context alone */ + inline uint + context_var_count() + { + return m_vars.elements; + } + + /* Map index in this pcontext to runtime offset */ + inline uint + var_context2runtime(uint i) + { + return m_var_offset + i; + } + + /* Set type of variable. 'i' is the offset from the top */ + inline void + set_type(uint i, enum enum_field_types type) + { + sp_variable_t *p= find_variable(i); + + if (p) + p->type= type; + } + + /* Set default value of variable. 'i' is the offset from the top */ + inline void + set_default(uint i, Item *it) + { + sp_variable_t *p= find_variable(i); + + if (p) + p->dflt= it; + } + + sp_variable_t * + push_variable(LEX_STRING *name, enum enum_field_types type, + sp_param_mode_t mode); + + /* + Retrieve definitions of fields from the current context and its + children. + */ + void + retrieve_field_definitions(List<create_field> *field_def_lst); + + // Find by name + sp_variable_t * + find_variable(LEX_STRING *name, my_bool scoped=0); + + // Find by offset (from the top) + sp_variable_t * + find_variable(uint offset); + + /* + Set the current scope boundary (for default values). + The argument is the number of variables to skip. 
+ */ + inline void + declare_var_boundary(uint n) + { + m_pboundary= n; + } + + /* + CASE expressions support. + */ + + inline int + register_case_expr() + { + return m_num_case_exprs++; + } + + inline int + get_num_case_exprs() const + { + return m_num_case_exprs; + } + + inline bool + push_case_expr_id(int case_expr_id) + { + return insert_dynamic(&m_case_expr_id_lst, (gptr) &case_expr_id); + } + + inline void + pop_case_expr_id() + { + pop_dynamic(&m_case_expr_id_lst); + } + + inline int + get_current_case_expr_id() const + { + int case_expr_id; + + get_dynamic((DYNAMIC_ARRAY*)&m_case_expr_id_lst, (gptr) &case_expr_id, + m_case_expr_id_lst.elements - 1); + + return case_expr_id; + } + + // + // Labels + // + + sp_label_t * + push_label(char *name, uint ip); + + sp_label_t * + find_label(char *name); + + inline sp_label_t * + last_label() + { + sp_label_t *lab= m_label.head(); + + if (!lab && m_parent) + lab= m_parent->last_label(); + return lab; + } + + inline sp_label_t * + pop_label() + { + return m_label.pop(); + } + + // + // Conditions + // + + void + push_cond(LEX_STRING *name, sp_cond_type_t *val); + + inline void + pop_cond(uint num) + { + while (num--) + pop_dynamic(&m_conds); + } + + sp_cond_type_t * + find_cond(LEX_STRING *name, my_bool scoped=0); + + // + // Handlers + // + + inline void + push_handler(sp_cond_type_t *cond) + { + insert_dynamic(&m_handlers, (gptr)&cond); + } + + bool + find_handler(sp_cond_type *cond); + + inline uint + max_handler_index() + { + return m_max_handler_index + m_context_handlers; + } + + inline void + add_handlers(uint n) + { + m_context_handlers+= n; + } + + // + // Cursors + // + + void + push_cursor(LEX_STRING *name); + + my_bool + find_cursor(LEX_STRING *name, uint *poff, my_bool scoped=0); + + /* Find by offset (for debugging only) */ + my_bool + find_cursor(uint offset, LEX_STRING *n); + + inline uint + max_cursor_index() + { + return m_max_cursor_index + m_cursors.elements; + } + + inline uint + 
current_cursor_count() + { + return m_cursor_offset + m_cursors.elements; + } + +protected: + + /** + Constructor for a tree node. + @param prev the parent parsing context + @param label_scope label_scope for this parsing context + */ + sp_pcontext(sp_pcontext *prev, label_scope_type label_scope); + + /* + m_max_var_index -- number of variables (including all types of arguments) + in this context including all children contexts. + + m_max_var_index >= m_vars.elements. + + m_max_var_index of the root parsing context contains number of all + variables (including arguments) in all enclosed contexts. + */ + uint m_max_var_index; + + // The maximum sub context's framesizes + uint m_max_cursor_index; + uint m_max_handler_index; + uint m_context_handlers; // No. of handlers in this context + +private: + + sp_pcontext *m_parent; // Parent context + + /* + m_var_offset -- this is an index of the first variable in this + parsing context. + + m_var_offset is 0 for root context. + + Since now each variable is stored in separate place, no reuse is done, + so m_var_offset is different for all enclosed contexts. + */ + uint m_var_offset; + + uint m_cursor_offset; // Cursor offset for this context + + /* + Boundary for finding variables in this context. This is the number + of variables currently "invisible" to default clauses. + This is normally 0, but will be larger during parsing of + DECLARE ... DEFAULT, to get the scope right for DEFAULT values. + */ + uint m_pboundary; + + int m_num_case_exprs; + + DYNAMIC_ARRAY m_vars; // Parameters/variables + DYNAMIC_ARRAY m_case_expr_id_lst; /* Stack of CASE expression ids. */ + DYNAMIC_ARRAY m_conds; // Conditions + DYNAMIC_ARRAY m_cursors; // Cursors + DYNAMIC_ARRAY m_handlers; // Handlers, for checking for duplicates + + List<sp_label_t> m_label; // The label list + + List<sp_pcontext> m_children; // Children contexts, used for destruction + + /** + Scope of labels for this parsing context. 
+ */ + label_scope_type m_label_scope; + +private: + sp_pcontext(const sp_pcontext &); /* Prevent use of these */ + void operator=(sp_pcontext &); +}; // class sp_pcontext : public Sql_alloc + + +#endif /* _SP_PCONTEXT_H_ */ diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc new file mode 100644 index 00000000000..e49c4eb1240 --- /dev/null +++ b/sql/sp_rcontext.cc @@ -0,0 +1,608 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include "mysql_priv.h" +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation +#endif + +#if defined(WIN32) || defined(__WIN__) +#undef SAFEMALLOC /* Problems with threads */ +#endif + +#include "mysql.h" +#include "sp_head.h" +#include "sql_cursor.h" +#include "sp_rcontext.h" +#include "sp_pcontext.h" + + +sp_rcontext::sp_rcontext(sp_pcontext *root_parsing_ctx, + Field *return_value_fld, + sp_rcontext *prev_runtime_ctx) + :m_root_parsing_ctx(root_parsing_ctx), + m_var_table(0), + m_var_items(0), + m_return_value_fld(return_value_fld), + m_return_value_set(FALSE), + m_hcount(0), + m_hsp(0), + m_ihsp(0), + m_hfound(-1), + m_ccount(0), + m_case_expr_holders(0), + m_prev_runtime_ctx(prev_runtime_ctx) +{ +} + + +sp_rcontext::~sp_rcontext() +{ + if (m_var_table) + free_blobs(m_var_table); +} + + +/* + Initialize sp_rcontext instance. 
+ + SYNOPSIS + thd Thread handle + RETURN + FALSE on success + TRUE on error +*/ + +bool sp_rcontext::init(THD *thd) +{ + if (init_var_table(thd) || init_var_items()) + return TRUE; + + return + !(m_handler= + (sp_handler_t*)thd->alloc(m_root_parsing_ctx->max_handler_index() * + sizeof(sp_handler_t))) || + !(m_hstack= + (uint*)thd->alloc(m_root_parsing_ctx->max_handler_index() * + sizeof(uint))) || + !(m_in_handler= + (uint*)thd->alloc(m_root_parsing_ctx->max_handler_index() * + sizeof(uint))) || + !(m_cstack= + (sp_cursor**)thd->alloc(m_root_parsing_ctx->max_cursor_index() * + sizeof(sp_cursor*))) || + !(m_case_expr_holders= + (Item_cache**)thd->calloc(m_root_parsing_ctx->get_num_case_exprs() * + sizeof (Item_cache*))); +} + + +/* + Create and initialize a table to store SP-vars. + + SYNOPSIS + thd Thread handler. + RETURN + FALSE on success + TRUE on error +*/ + +bool +sp_rcontext::init_var_table(THD *thd) +{ + List<create_field> field_def_lst; + + if (!m_root_parsing_ctx->max_var_index()) + return FALSE; + + m_root_parsing_ctx->retrieve_field_definitions(&field_def_lst); + + DBUG_ASSERT(field_def_lst.elements == m_root_parsing_ctx->max_var_index()); + + if (!(m_var_table= create_virtual_tmp_table(thd, field_def_lst))) + return TRUE; + + m_var_table->copy_blobs= TRUE; + m_var_table->alias= ""; + + return FALSE; +} + + +/* + Create and initialize an Item-adapter (Item_field) for each SP-var field. 
+ + RETURN + FALSE on success + TRUE on error +*/ + +bool +sp_rcontext::init_var_items() +{ + uint idx; + uint num_vars= m_root_parsing_ctx->max_var_index(); + + if (!(m_var_items= (Item**) sql_alloc(num_vars * sizeof (Item *)))) + return TRUE; + + for (idx = 0; idx < num_vars; ++idx) + { + if (!(m_var_items[idx]= new Item_field(m_var_table->field[idx]))) + return TRUE; + } + + return FALSE; +} + + +bool +sp_rcontext::set_return_value(THD *thd, Item **return_value_item) +{ + DBUG_ASSERT(m_return_value_fld); + + m_return_value_set = TRUE; + + return sp_eval_expr(thd, m_return_value_fld, return_value_item); +} + + +#define IS_WARNING_CONDITION(S) ((S)[0] == '0' && (S)[1] == '1') +#define IS_NOT_FOUND_CONDITION(S) ((S)[0] == '0' && (S)[1] == '2') +#define IS_EXCEPTION_CONDITION(S) ((S)[0] != '0' || (S)[1] > '2') + +/* + Find a handler for the given errno. + This is called from all error message functions (e.g. push_warning, + net_send_error, et al) when a sp_rcontext is in effect. If a handler + is found, no error is sent, and the the SP execution loop will instead + invoke the found handler. + This might be called several times before we get back to the execution + loop, so m_hfound can be >= 0 if a handler has already been found. + (In which case we don't search again - the first found handler will + be used.) + Handlers are pushed on the stack m_handler, with the latest/innermost + one on the top; we then search for matching handlers from the top and + down. + We search through all the handlers, looking for the most specific one + (sql_errno more specific than sqlstate more specific than the rest). + Note that mysql error code handlers is a MySQL extension, not part of + the standard. 
+ + SYNOPSIS + sql_errno The error code + level Warning level + + RETURN + 1 if a handler was found, m_hfound is set to its index (>= 0) + 0 if not found, m_hfound is -1 +*/ + +bool +sp_rcontext::find_handler(uint sql_errno, + MYSQL_ERROR::enum_warning_level level) +{ + if (m_hfound >= 0) + return 1; // Already got one + + const char *sqlstate= mysql_errno_to_sqlstate(sql_errno); + int i= m_hcount, found= -1; + + /* Search handlers from the latest (innermost) to the oldest (outermost) */ + while (i--) + { + sp_cond_type_t *cond= m_handler[i].cond; + int j= m_ihsp; + + /* Check active handlers, to avoid invoking one recursively */ + while (j--) + if (m_in_handler[j] == m_handler[i].handler) + break; + if (j >= 0) + continue; // Already executing this handler + + switch (cond->type) + { + case sp_cond_type_t::number: + if (sql_errno == cond->mysqlerr && + (found < 0 || m_handler[found].cond->type > sp_cond_type_t::number)) + found= i; // Always the most specific + break; + case sp_cond_type_t::state: + if (strcmp(sqlstate, cond->sqlstate) == 0 && + (found < 0 || m_handler[found].cond->type > sp_cond_type_t::state)) + found= i; + break; + case sp_cond_type_t::warning: + if ((IS_WARNING_CONDITION(sqlstate) || + level == MYSQL_ERROR::WARN_LEVEL_WARN) && + found < 0) + found= i; + break; + case sp_cond_type_t::notfound: + if (IS_NOT_FOUND_CONDITION(sqlstate) && found < 0) + found= i; + break; + case sp_cond_type_t::exception: + if (IS_EXCEPTION_CONDITION(sqlstate) && + level == MYSQL_ERROR::WARN_LEVEL_ERROR && + found < 0) + found= i; + break; + } + } + if (found < 0) + { + /* + Only "exception conditions" are propagated to handlers in calling + contexts. If no handler is found locally for a "completion condition" + (warning or "not found") we will simply resume execution. 
+ */ + if (m_prev_runtime_ctx && IS_EXCEPTION_CONDITION(sqlstate) && + level == MYSQL_ERROR::WARN_LEVEL_ERROR) + return m_prev_runtime_ctx->find_handler(sql_errno, level); + return FALSE; + } + m_hfound= found; + return TRUE; +} + +/* + Handle the error for a given errno. + The severity of the error is adjusted depending of the current sql_mode. + If an handler is present for the error (see find_handler()), + this function will return true. + If a handler is found and if the severity of the error indicate + that the current instruction executed should abort, + the flag thd->net.report_error is also set. + This will cause the execution of the current instruction in a + sp_instr* to fail, and give control to the handler code itself + in the sp_head::execute() loop. + + SYNOPSIS + sql_errno The error code + level Warning level + thd The current thread + - thd->net.report_error is an optional output. + + RETURN + TRUE if a handler was found. + FALSE if no handler was found. +*/ +bool +sp_rcontext::handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level level, + THD *thd) +{ + bool handled= FALSE; + MYSQL_ERROR::enum_warning_level elevated_level= level; + + + /* Depending on the sql_mode of execution, + warnings may be considered errors */ + if ((level == MYSQL_ERROR::WARN_LEVEL_WARN) && + thd->really_abort_on_warning()) + { + elevated_level= MYSQL_ERROR::WARN_LEVEL_ERROR; + } + + if (find_handler(sql_errno, elevated_level)) + { + if (elevated_level == MYSQL_ERROR::WARN_LEVEL_ERROR) + { + /* + Forces to abort the current instruction execution. + NOTE: This code is altering the original meaning of + the net.report_error flag (send an error to the client). + In the context of stored procedures with error handlers, + the flag is reused to cause error propagation, + until the error handler is reached. + No messages will be sent to the client in that context. 
+ */ + thd->net.report_error= 1; + } + handled= TRUE; + } + + return handled; +} + +void +sp_rcontext::push_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i) +{ + m_cstack[m_ccount++]= new sp_cursor(lex_keeper, i); +} + + +void +sp_rcontext::pop_cursors(uint count) +{ + while (count--) + { + delete m_cstack[--m_ccount]; + } +} + + +int +sp_rcontext::set_variable(THD *thd, uint var_idx, Item **value) +{ + return set_variable(thd, m_var_table->field[var_idx], value); +} + + +int +sp_rcontext::set_variable(THD *thd, Field *field, Item **value) +{ + if (!value) + { + field->set_null(); + return 0; + } + + return sp_eval_expr(thd, field, value); +} + + +Item * +sp_rcontext::get_item(uint var_idx) +{ + return m_var_items[var_idx]; +} + + +Item ** +sp_rcontext::get_item_addr(uint var_idx) +{ + return m_var_items + var_idx; +} + + +/* + * + * sp_cursor + * + */ + +sp_cursor::sp_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i) + :m_lex_keeper(lex_keeper), + server_side_cursor(NULL), + m_i(i) +{ + /* + currsor can't be stored in QC, so we should prevent opening QC for + try to write results which are absent. + */ + lex_keeper->disable_query_cache(); +} + + +/* + Open an SP cursor + + SYNOPSIS + open() + THD Thread handler + + + RETURN + 0 in case of success, -1 otherwise +*/ + +int +sp_cursor::open(THD *thd) +{ + if (server_side_cursor) + { + my_message(ER_SP_CURSOR_ALREADY_OPEN, ER(ER_SP_CURSOR_ALREADY_OPEN), + MYF(0)); + return -1; + } + if (mysql_open_cursor(thd, (uint) ALWAYS_MATERIALIZED_CURSOR, &result, + &server_side_cursor)) + return -1; + return 0; +} + + +int +sp_cursor::close(THD *thd) +{ + if (! server_side_cursor) + { + my_message(ER_SP_CURSOR_NOT_OPEN, ER(ER_SP_CURSOR_NOT_OPEN), MYF(0)); + return -1; + } + destroy(); + return 0; +} + + +void +sp_cursor::destroy() +{ + delete server_side_cursor; + server_side_cursor= 0; +} + + +int +sp_cursor::fetch(THD *thd, List<struct sp_variable> *vars) +{ + if (! 
server_side_cursor) + { + my_message(ER_SP_CURSOR_NOT_OPEN, ER(ER_SP_CURSOR_NOT_OPEN), MYF(0)); + return -1; + } + if (vars->elements != result.get_field_count()) + { + my_message(ER_SP_WRONG_NO_OF_FETCH_ARGS, + ER(ER_SP_WRONG_NO_OF_FETCH_ARGS), MYF(0)); + return -1; + } + + result.set_spvar_list(vars); + + /* Attempt to fetch one row */ + if (server_side_cursor->is_open()) + server_side_cursor->fetch(1); + + /* + If the cursor was pointing after the last row, the fetch will + close it instead of sending any rows. + */ + if (! server_side_cursor->is_open()) + { + my_message(ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA), MYF(0)); + return -1; + } + + return 0; +} + + +/* + Create an instance of appropriate Item_cache class depending on the + specified type in the callers arena. + + SYNOPSIS + thd thread handler + result_type type of the expression + + RETURN + Pointer to valid object on success + NULL on error + + NOTE + We should create cache items in the callers arena, as they are used + between in several instructions. +*/ + +Item_cache * +sp_rcontext::create_case_expr_holder(THD *thd, Item_result result_type) +{ + Item_cache *holder; + Query_arena current_arena; + + thd->set_n_backup_active_arena(thd->spcont->callers_arena, ¤t_arena); + + holder= Item_cache::get_cache(result_type); + + thd->restore_active_arena(thd->spcont->callers_arena, ¤t_arena); + + return holder; +} + + +/* + Set CASE expression to the specified value. + + SYNOPSIS + thd thread handler + case_expr_id identifier of the CASE expression + case_expr_item a value of the CASE expression + + RETURN + FALSE on success + TRUE on error + + NOTE + The idea is to reuse Item_cache for the expression of the one CASE + statement. This optimization takes place when there is CASE statement + inside of a loop. So, in other words, we will use the same object on each + iteration instead of creating a new one for each iteration. 
+ + TODO + Hypothetically, a type of CASE expression can be different for each + iteration. For instance, this can happen if the expression contains a + session variable (something like @@VAR) and its type is changed from one + iteration to another. + + In order to cope with this problem, we check type each time, when we use + already created object. If the type does not match, we re-create Item. + This also can (should?) be optimized. +*/ + +int +sp_rcontext::set_case_expr(THD *thd, int case_expr_id, Item **case_expr_item_ptr) +{ + Item *case_expr_item= sp_prepare_func_item(thd, case_expr_item_ptr); + if (!case_expr_item) + return TRUE; + + if (!m_case_expr_holders[case_expr_id] || + m_case_expr_holders[case_expr_id]->result_type() != + case_expr_item->result_type()) + { + m_case_expr_holders[case_expr_id]= + create_case_expr_holder(thd, case_expr_item->result_type()); + } + + m_case_expr_holders[case_expr_id]->store(case_expr_item); + + return FALSE; +} + + +Item * +sp_rcontext::get_case_expr(int case_expr_id) +{ + return m_case_expr_holders[case_expr_id]; +} + + +Item ** +sp_rcontext::get_case_expr_addr(int case_expr_id) +{ + return (Item**) m_case_expr_holders + case_expr_id; +} + + +/*************************************************************************** + Select_fetch_into_spvars +****************************************************************************/ + +int Select_fetch_into_spvars::prepare(List<Item> &fields, SELECT_LEX_UNIT *u) +{ + /* + Cache the number of columns in the result set in order to easily + return an error if column count does not match value count. 
+ */ + field_count= fields.elements; + return select_result_interceptor::prepare(fields, u); +} + + +bool Select_fetch_into_spvars::send_data(List<Item> &items) +{ + List_iterator_fast<struct sp_variable> spvar_iter(*spvar_list); + List_iterator_fast<Item> item_iter(items); + sp_variable_t *spvar; + Item *item; + + /* Must be ensured by the caller */ + DBUG_ASSERT(spvar_list->elements == items.elements); + + /* + Assign the row fetched from a server side cursor to stored + procedure variables. + */ + for (; spvar= spvar_iter++, item= item_iter++; ) + { + if (thd->spcont->set_variable(thd, spvar->offset, &item)) + return TRUE; + } + return FALSE; +} diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h new file mode 100644 index 00000000000..fbf479f52aa --- /dev/null +++ b/sql/sp_rcontext.h @@ -0,0 +1,334 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_RCONTEXT_H_ +#define _SP_RCONTEXT_H_ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class implementation */ +#endif + +struct sp_cond_type; +class sp_cursor; +struct sp_variable; +class sp_lex_keeper; +class sp_instr_cpush; + +#define SP_HANDLER_NONE 0 +#define SP_HANDLER_EXIT 1 +#define SP_HANDLER_CONTINUE 2 +#define SP_HANDLER_UNDO 3 + +typedef struct +{ + struct sp_cond_type *cond; + uint handler; // Location of handler + int type; + uint foffset; // Frame offset for the handlers declare level +} sp_handler_t; + + +/* + This class is a runtime context of a Stored Routine. It is used in an + execution and is intended to contain all dynamic objects (i.e. objects, which + can be changed during execution), such as: + - stored routine variables; + - cursors; + - handlers; + + Runtime context is used with sp_head class. sp_head class is intended to + contain all static things, related to the stored routines (code, for example). + sp_head instance creates runtime context for the execution of a stored + routine. + + There is a parsing context (an instance of sp_pcontext class), which is used + on parsing stage. However, now it contains some necessary for an execution + things, such as definition of used stored routine variables. That's why + runtime context needs a reference to the parsing context. +*/ + +class sp_rcontext : public Sql_alloc +{ + sp_rcontext(const sp_rcontext &); /* Prevent use of these */ + void operator=(sp_rcontext &); + + public: + + /* + Arena used to (re) allocate items on . E.g. reallocate INOUT/OUT + SP parameters when they don't fit into prealloced items. This + is common situation with String items. It is used mainly in + sp_eval_func_item(). 
+ */ + Query_arena *callers_arena; + +#ifndef DBUG_OFF + /* + The routine for which this runtime context is created. Used for checking + if correct runtime context is used for variable handling. + */ + sp_head *sp; +#endif + + sp_rcontext(sp_pcontext *root_parsing_ctx, Field *return_value_fld, + sp_rcontext *prev_runtime_ctx); + bool init(THD *thd); + + ~sp_rcontext(); + + int + set_variable(THD *thd, uint var_idx, Item **value); + + Item * + get_item(uint var_idx); + + Item ** + get_item_addr(uint var_idx); + + bool + set_return_value(THD *thd, Item **return_value_item); + + inline bool + is_return_value_set() const + { + return m_return_value_set; + } + + inline void + push_handler(struct sp_cond_type *cond, uint h, int type, uint f) + { + m_handler[m_hcount].cond= cond; + m_handler[m_hcount].handler= h; + m_handler[m_hcount].type= type; + m_handler[m_hcount].foffset= f; + m_hcount+= 1; + } + + inline void + pop_handlers(uint count) + { + m_hcount-= count; + } + + // Returns 1 if a handler was found, 0 otherwise. + bool + find_handler(uint sql_errno,MYSQL_ERROR::enum_warning_level level); + + // If there is an error handler for this error, handle it and return TRUE. 
+ bool + handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level level, + THD *thd); + + // Returns handler type and sets *ip to location if one was found + inline int + found_handler(uint *ip, uint *fp) + { + if (m_hfound < 0) + return SP_HANDLER_NONE; + *ip= m_handler[m_hfound].handler; + *fp= m_handler[m_hfound].foffset; + return m_handler[m_hfound].type; + } + + // Returns true if we found a handler in this context + inline bool + found_handler_here() + { + return (m_hfound >= 0); + } + + // Clears the handler find state + inline void + clear_handler() + { + m_hfound= -1; + } + + inline void + push_hstack(uint h) + { + m_hstack[m_hsp++]= h; + } + + inline uint + pop_hstack() + { + return m_hstack[--m_hsp]; + } + + inline void + enter_handler(int hid) + { + m_in_handler[m_ihsp++]= hid; + } + + inline void + exit_handler() + { + m_ihsp-= 1; + } + + void + push_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i); + + void + pop_cursors(uint count); + + void + pop_all_cursors() + { + pop_cursors(m_ccount); + } + + inline sp_cursor * + get_cursor(uint i) + { + return m_cstack[i]; + } + + /* + CASE expressions support. + */ + + int + set_case_expr(THD *thd, int case_expr_id, Item **case_expr_item_ptr); + + Item * + get_case_expr(int case_expr_id); + + Item ** + get_case_expr_addr(int case_expr_id); + +private: + sp_pcontext *m_root_parsing_ctx; + + /* Virtual table for storing variables. */ + TABLE *m_var_table; + + /* + Collection of Item_field proxies, each of them points to the corresponding + field in m_var_table. + */ + Item **m_var_items; + + /* + This is a pointer to a field, which should contain return value for stored + functions (only). For stored procedures, this pointer is NULL. + */ + Field *m_return_value_fld; + + /* + Indicates whether the return value (in m_return_value_fld) has been set + during execution. 
+ */ + bool m_return_value_set; + + sp_handler_t *m_handler; // Visible handlers + uint m_hcount; // Stack pointer for m_handler + uint *m_hstack; // Return stack for continue handlers + uint m_hsp; // Stack pointer for m_hstack + uint *m_in_handler; // Active handler, for recursion check + uint m_ihsp; // Stack pointer for m_in_handler + int m_hfound; // Set by find_handler; -1 if not found + + sp_cursor **m_cstack; + uint m_ccount; + + Item_cache **m_case_expr_holders; + + /* Previous runtime context (NULL if none) */ + sp_rcontext *m_prev_runtime_ctx; + +private: + bool init_var_table(THD *thd); + bool init_var_items(); + + Item_cache *create_case_expr_holder(THD *thd, Item_result result_type); + + int set_variable(THD *thd, Field *field, Item **value); +}; // class sp_rcontext : public Sql_alloc + + +/* + An interceptor of cursor result set used to implement + FETCH <cname> INTO <varlist>. +*/ + +class Select_fetch_into_spvars: public select_result_interceptor +{ + List<struct sp_variable> *spvar_list; + uint field_count; +public: + Select_fetch_into_spvars() {} /* Remove gcc warning */ + uint get_field_count() { return field_count; } + void set_spvar_list(List<struct sp_variable> *vars) { spvar_list= vars; } + + virtual bool send_eof() { return FALSE; } + virtual bool send_data(List<Item> &items); + virtual int prepare(List<Item> &list, SELECT_LEX_UNIT *u); +}; + + +/* A mediator between stored procedures and server side cursors */ + +class sp_cursor : public Sql_alloc +{ +public: + + sp_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i); + + virtual ~sp_cursor() + { + destroy(); + } + + sp_lex_keeper * + get_lex_keeper() { return m_lex_keeper; } + + int + open(THD *thd); + + int + close(THD *thd); + + inline my_bool + is_open() + { + return test(server_side_cursor); + } + + int + fetch(THD *, List<struct sp_variable> *vars); + + inline sp_instr_cpush * + get_instr() + { + return m_i; + } + +private: + + Select_fetch_into_spvars result; + sp_lex_keeper 
*m_lex_keeper; + Server_side_cursor *server_side_cursor; + sp_instr_cpush *m_i; // My push instruction + void + destroy(); + +}; // class sp_cursor : public Sql_alloc + +#endif /* _SP_RCONTEXT_H_ */ diff --git a/sql/spatial.cc b/sql/spatial.cc index 4e17e766090..939e7d2a3b4 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -22,13 +21,15 @@ /***************************** Gis_class_info *******************************/ -Geometry::Class_info *Geometry::ci_collection[Geometry::wkb_end+1]= +String Geometry::bad_geometry_data("Bad object", &my_charset_bin); + +Geometry::Class_info *Geometry::ci_collection[Geometry::wkb_last+1]= { NULL, NULL, NULL, NULL, NULL, NULL, NULL }; static Geometry::Class_info **ci_collection_end= - Geometry::ci_collection+Geometry::wkb_end + 1; + Geometry::ci_collection+Geometry::wkb_last + 1; Geometry::Class_info::Class_info(const char *name, int type_id, void(*create_func)(void *)): @@ -176,7 +177,9 @@ static double wkb_get_double(const char *ptr, Geometry::wkbByteOrder bo) { double res; if (bo != Geometry::wkb_xdr) + { float8get(res, ptr); + } else { char inv_array[8]; @@ -547,7 +550,7 @@ bool Gis_line_string::get_mbr(MBR *mbr, const char **end) const } -int Gis_line_string::length(double *len) const +int Gis_line_string::geom_length(double *len) const { uint32 n_points; double prev_x, prev_y; @@ -943,15 +946,14 @@ int Gis_polygon::centroid_xy(double *x, double *y) const while (--n_points) // One point is already read { - double x, y; - get_point(&x, &y, data); + double tmp_x, tmp_y; + get_point(&tmp_x, 
&tmp_y, data); data+= (SIZEOF_STORED_DOUBLE*2); - /* QQ: Is the following prev_x+x right ? */ - cur_area+= (prev_x + x) * (prev_y - y); - cur_cx+= x; - cur_cy+= y; - prev_x= x; - prev_y= y; + cur_area+= (prev_x + tmp_x) * (prev_y - tmp_y); + cur_cx+= tmp_x; + cur_cy+= tmp_y; + prev_x= tmp_x; + prev_y= tmp_y; } cur_area= fabs(cur_area) / 2; cur_cx= cur_cx / (org_n_points - 1); @@ -1295,7 +1297,7 @@ int Gis_multi_line_string::geometry_n(uint32 num, String *result) const } -int Gis_multi_line_string::length(double *len) const +int Gis_multi_line_string::geom_length(double *len) const { uint32 n_line_strings; const char *data= m_data; @@ -1312,7 +1314,7 @@ int Gis_multi_line_string::length(double *len) const Gis_line_string ls; data+= WKB_HEADER_SIZE; ls.set_data_ptr(data, (uint32) (m_data_end - data)); - if (ls.length(&ls_len)) + if (ls.geom_length(&ls_len)) return 1; *len+= ls_len; /* @@ -1790,7 +1792,7 @@ bool Gis_geometry_collection::get_data_as_wkt(String *txt, geom->set_data_ptr(data, (uint) (m_data_end - data)); if (geom->as_wkt(txt, &data)) return 1; - if (txt->append(",", 1, 512)) + if (txt->append(STRING_WITH_LEN(","), 512)) return 1; } txt->length(txt->length() - 1); diff --git a/sql/spatial.h b/sql/spatial.h index 553544d4c3f..86232fcd524 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2002-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -165,8 +164,8 @@ struct Geometry_buffer; class Geometry { public: - Geometry() {} /* remove gcc warning */ - virtual ~Geometry() {} /* remove gcc warning */ + Geometry() {} /* Remove gcc warning */ + virtual ~Geometry() {} /* Remove gcc warning */ static void *operator new(size_t size, void *buffer) { return buffer; @@ -175,7 +174,10 @@ public: static void operator delete(void *ptr, void *buffer) {} - static void operator delete(void *buffer) {} /* remove gcc warning */ + static void operator delete(void *buffer) + {} + + static String bad_geometry_data; enum wkbType { @@ -186,7 +188,7 @@ public: wkb_multilinestring= 5, wkb_multipolygon= 6, wkb_geometrycollection= 7, - wkb_end=7 + wkb_last=7 }; enum wkbByteOrder { @@ -215,7 +217,7 @@ public: virtual bool dimension(uint32 *dim, const char **end) const=0; virtual int get_x(double *x) const { return -1; } virtual int get_y(double *y) const { return -1; } - virtual int length(double *len) const { return -1; } + virtual int geom_length(double *len) const { return -1; } virtual int area(double *ar, const char **end) const { return -1;} virtual int is_closed(int *closed) const { return -1; } virtual int num_interior_ring(uint32 *n_int_rings) const { return -1; } @@ -271,12 +273,12 @@ public: } bool envelope(String *result) const; - static Class_info *ci_collection[wkb_end+1]; + static Class_info *ci_collection[wkb_last+1]; protected: static Class_info *find_class(int type_id) { - return ((type_id < wkb_point) || (type_id > wkb_end)) ? + return ((type_id < wkb_point) || (type_id > wkb_last)) ? 
NULL : ci_collection[type_id]; } static Class_info *find_class(const char *name, uint32 len); @@ -301,6 +303,8 @@ protected: class Gis_point: public Geometry { public: + Gis_point() {} /* Remove gcc warning */ + virtual ~Gis_point() {} /* Remove gcc warning */ uint32 get_data_size() const; bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); @@ -348,12 +352,14 @@ public: class Gis_line_string: public Geometry { public: + Gis_line_string() {} /* Remove gcc warning */ + virtual ~Gis_line_string() {} /* Remove gcc warning */ uint32 get_data_size() const; bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); bool get_data_as_wkt(String *txt, const char **end) const; bool get_mbr(MBR *mbr, const char **end) const; - int length(double *len) const; + int geom_length(double *len) const; int is_closed(int *closed) const; int num_points(uint32 *n_points) const; int start_point(String *point) const; @@ -374,6 +380,8 @@ public: class Gis_polygon: public Geometry { public: + Gis_polygon() {} /* Remove gcc warning */ + virtual ~Gis_polygon() {} /* Remove gcc warning */ uint32 get_data_size() const; bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); @@ -400,6 +408,8 @@ public: class Gis_multi_point: public Geometry { public: + Gis_multi_point() {} /* Remove gcc warning */ + virtual ~Gis_multi_point() {} /* Remove gcc warning */ uint32 get_data_size() const; bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); @@ -422,6 +432,8 @@ public: class Gis_multi_line_string: public Geometry { public: + Gis_multi_line_string() {} /* Remove gcc warning */ + virtual ~Gis_multi_line_string() {} /* Remove gcc warning */ uint32 get_data_size() const; bool init_from_wkt(Gis_read_stream 
*trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); @@ -429,7 +441,7 @@ public: bool get_mbr(MBR *mbr, const char **end) const; int num_geometries(uint32 *num) const; int geometry_n(uint32 num, String *result) const; - int length(double *len) const; + int geom_length(double *len) const; int is_closed(int *closed) const; bool dimension(uint32 *dim, const char **end) const { @@ -446,6 +458,8 @@ public: class Gis_multi_polygon: public Geometry { public: + Gis_multi_polygon() {} /* Remove gcc warning */ + virtual ~Gis_multi_polygon() {} /* Remove gcc warning */ uint32 get_data_size() const; bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); @@ -470,6 +484,8 @@ public: class Gis_geometry_collection: public Geometry { public: + Gis_geometry_collection() {} /* Remove gcc warning */ + virtual ~Gis_geometry_collection() {} /* Remove gcc warning */ uint32 get_data_size() const; bool init_from_wkt(Gis_read_stream *trs, String *wkb); uint init_from_wkb(const char *wkb, uint len, wkbByteOrder bo, String *res); diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 6cbe6554235..ee15f95f305 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -17,8 +16,8 @@ /* The privileges are saved in the following tables: - mysql/user ; super user who are allowed to do almoust anything - mysql/host ; host priviliges. This is used if host is empty in mysql/db. 
+ mysql/user ; super user who are allowed to do almost anything + mysql/host ; host privileges. This is used if host is empty in mysql/db. mysql/db ; database privileges / user data in tables is sorted according to how many not-wild-cards there is @@ -32,6 +31,8 @@ #endif #include <m_ctype.h> #include <stdarg.h> +#include "sp_head.h" +#include "sp.h" #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -58,15 +59,15 @@ static DYNAMIC_ARRAY acl_hosts,acl_users,acl_dbs; static MEM_ROOT mem, memex; static bool initialized=0; static bool allow_all_hosts=1; -static HASH acl_check_hosts, column_priv_hash; +static HASH acl_check_hosts, column_priv_hash, proc_priv_hash, func_priv_hash; static DYNAMIC_ARRAY acl_wild_hosts; static hash_filo *acl_cache; -static uint grant_version=0; -static uint priv_version=0; /* Version of priv tables. incremented by acl_load */ +static uint grant_version=0; /* Version of priv tables. incremented by acl_load */ static ulong get_access(TABLE *form,uint fieldnr, uint *next_field=0); static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b); static ulong get_sort(uint count,...); static void init_check_host(void); +static void rebuild_check_host(void); static ACL_USER *find_acl_user(const char *host, const char *user, my_bool exact); static bool update_user_table(THD *thd, TABLE *table, @@ -159,6 +160,7 @@ my_bool acl_init(bool dont_read_acl_tables) */ if (!(thd=new THD)) DBUG_RETURN(1); /* purecov: inspected */ + thd->thread_stack= (char*) &thd; thd->store_globals(); /* It is safe to call acl_reload() since acl_* arrays and hashes which @@ -198,7 +200,7 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) int password_length; DBUG_ENTER("acl_load"); - priv_version++; /* Privileges updated */ + grant_version++; /* Privileges updated */ acl_cache->clear(1); // Clear locked hostname cache @@ -213,19 +215,18 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) if (lower_case_table_names && host.db) { /* - We make a temporary copy of the database, force it to 
lower case, - and then check it against the original name. + convert db to lower case and give a warning if the db wasn't + already in lower case */ - (void)strnmov(tmp_name, host.db, sizeof(tmp_name)); + (void) strmov(tmp_name, host.db); my_casedn_str(files_charset_info, host.db); if (strcmp(host.db, tmp_name) != 0) - { sql_print_warning("'host' entry '%s|%s' had database in mixed " "case that has been forced to lowercase because " "lower_case_table_names is set. It will not be " "possible to remove this privilege using REVOKE.", - host.host.hostname, host.db); - } + host.host.hostname ? host.host.hostname : "", + host.db ? host.db : ""); } host.access= get_access(table,2); host.access= fix_rights_for_db(host.access); @@ -234,11 +235,12 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) { sql_print_warning("'host' entry '%s|%s' " "ignored in --skip-name-resolve mode.", - host.host.hostname, host.db?host.db:""); + host.host.hostname ? host.host.hostname : "", + host.db ? host.db : ""); continue; } #ifndef TO_BE_REMOVED - if (table->fields == 8) + if (table->s->fields == 8) { // Without grant if (host.access & CREATE_ACL) host.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL; @@ -263,8 +265,8 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) } DBUG_PRINT("info",("user table fields: %d, password length: %d", - table->fields, password_length)); - + table->s->fields, password_length)); + pthread_mutex_lock(&LOCK_global_system_variables); if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH) { @@ -304,7 +306,8 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) { sql_print_warning("'user' entry '%s@%s' " "ignored in --skip-name-resolve mode.", - user.user, user.host.hostname); + user.user ? user.user : "", + user.host.hostname ? 
user.host.hostname : ""); continue; } @@ -332,10 +335,34 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) { uint next_field; user.access= get_access(table,3,&next_field) & GLOBAL_ACLS; + /* + if it is pre 5.0.1 privilege table then map CREATE privilege on + CREATE VIEW & SHOW VIEW privileges + */ + if (table->s->fields <= 31 && (user.access & CREATE_ACL)) + user.access|= (CREATE_VIEW_ACL | SHOW_VIEW_ACL); + + /* + if it is pre 5.0.2 privilege table then map CREATE/ALTER privilege on + CREATE PROCEDURE & ALTER PROCEDURE privileges + */ + if (table->s->fields <= 33 && (user.access & CREATE_ACL)) + user.access|= CREATE_PROC_ACL; + if (table->s->fields <= 33 && (user.access & ALTER_ACL)) + user.access|= ALTER_PROC_ACL; + + /* + pre 5.0.3 did not have CREATE_USER_ACL + */ + if (table->s->fields <= 36 && (user.access & GRANT_ACL)) + user.access|= CREATE_USER_ACL; + user.sort= get_sort(2,user.host.hostname,user.user); user.hostname_length= (user.host.hostname ? (uint) strlen(user.host.hostname) : 0); - if (table->fields >= 31) /* Starting from 4.0.2 we have more fields */ + + /* Starting from 4.0.2 we have more fields */ + if (table->s->fields >= 31) { char *ssl_type=get_field(&mem, table->field[next_field++]); if (!ssl_type) @@ -356,17 +383,26 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) ptr = get_field(&mem, table->field[next_field++]); user.user_resource.updates=ptr ? atoi(ptr) : 0; ptr = get_field(&mem, table->field[next_field++]); - user.user_resource.connections=ptr ? atoi(ptr) : 0; + user.user_resource.conn_per_hour= ptr ? atoi(ptr) : 0; if (user.user_resource.questions || user.user_resource.updates || - user.user_resource.connections) + user.user_resource.conn_per_hour) mqh_used=1; + + if (table->s->fields >= 36) + { + /* Starting from 5.0.3 we have max_user_connections field */ + ptr= get_field(&mem, table->field[next_field++]); + user.user_resource.user_conn= ptr ? 
atoi(ptr) : 0; + } + else + user.user_resource.user_conn= 0; } else { user.ssl_type=SSL_TYPE_NONE; bzero((char *)&(user.user_resource),sizeof(user.user_resource)); #ifndef TO_BE_REMOVED - if (table->fields <= 13) + if (table->s->fields <= 13) { // Without grant if (user.access & CREATE_ACL) user.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL; @@ -380,8 +416,8 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) #endif } VOID(push_dynamic(&acl_users,(gptr) &user)); - if (!user.host.hostname || user.host.hostname[0] == wild_many && - !user.host.hostname[1]) + if (!user.host.hostname || + (user.host.hostname[0] == wild_many && !user.host.hostname[1])) allow_all_hosts=1; // Anyone can connect } } @@ -407,7 +443,9 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) { sql_print_warning("'db' entry '%s %s@%s' " "ignored in --skip-name-resolve mode.", - db.db, db.user, db.host.hostname); + db.db, + db.user ? db.user : "", + db.host.hostname ? db.host.hostname : ""); continue; } db.access=get_access(table,3); @@ -415,10 +453,10 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) if (lower_case_table_names) { /* - We make a temporary copy of the database, force it to lower case, - and then check it against the original name. + convert db to lower case and give a warning if the db wasn't + already in lower case */ - (void)strnmov(tmp_name, db.db, sizeof(tmp_name)); + (void)strmov(tmp_name, db.db); my_casedn_str(files_charset_info, db.db); if (strcmp(db.db, tmp_name) != 0) { @@ -426,12 +464,14 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables) "case that has been forced to lowercase because " "lower_case_table_names is set. It will not be " "possible to remove this privilege using REVOKE.", - db.db, db.user, db.host.hostname); + db.db, + db.user ? db.user : "", + db.host.hostname ? 
db.host.hostname : ""); } } db.sort=get_sort(3,db.host.hostname,db.db,db.user); #ifndef TO_BE_REMOVED - if (table->fields <= 9) + if (table->s->fields <= 9) { // Without grant if (db.access & CREATE_ACL) db.access|=REFERENCES_ACL | INDEX_ACL | ALTER_ACL; @@ -511,12 +551,12 @@ my_bool acl_reload(THD *thd) obtaining acl_cache->lock mutex. */ bzero((char*) tables, sizeof(tables)); - tables[0].alias=tables[0].real_name=(char*) "host"; - tables[1].alias=tables[1].real_name=(char*) "user"; - tables[2].alias=tables[2].real_name=(char*) "db"; - tables[0].db=tables[1].db=tables[2].db= (char*) "mysql"; - tables[0].next= tables+1; - tables[1].next= tables+2; + tables[0].alias= tables[0].table_name= (char*) "host"; + tables[1].alias= tables[1].table_name= (char*) "user"; + tables[2].alias= tables[2].table_name= (char*) "db"; + tables[0].db=tables[1].db=tables[2].db=(char*) "mysql"; + tables[0].next_local= tables[0].next_global= tables+1; + tables[1].next_local= tables[1].next_global= tables+2; tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_READ; if (simple_open_n_lock_tables(thd, tables)) @@ -565,8 +605,8 @@ end: Get all access bits from table after fieldnr IMPLEMENTATION - We know that the access privileges ends when there is no more fields - or the field is not an enum with two elements. + We know that the access privileges ends when there is no more fields + or the field is not an enum with two elements. SYNOPSIS get_access() @@ -665,11 +705,11 @@ static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b) SYNOPSIS acl_getroot() thd thread handle. If all checks are OK, - thd->priv_user, thd->master_access are updated. - thd->host, thd->ip, thd->user are used for checks. + thd->security_ctx->priv_user/master_access are updated. + thd->security_ctx->host/ip/user are used for checks. 
mqh user resources; on success mqh is reset, else unchanged - passwd scrambled & crypted password, recieved from client + passwd scrambled & crypted password, received from client (to check): thd->scramble or thd->scramble_323 is used to decrypt passwd, so they must contain original random string, @@ -680,7 +720,7 @@ static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b) RETURN VALUE 0 success: thd->priv_user, thd->priv_host, thd->master_access, mqh are updated - 1 user not found or authentification failure + 1 user not found or authentication failure 2 user found, has long (4.1.1) salt, but passwd is in old (3.23) format. -1 user found, has short (3.23) salt, but passwd is in new (4.1.1) format. */ @@ -691,6 +731,7 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, ulong user_access= NO_ACCESS; int res= 1; ACL_USER *acl_user= 0; + Security_context *sctx= thd->security_ctx; DBUG_ENTER("acl_getroot"); if (!initialized) @@ -698,9 +739,7 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, /* here if mysqld's been started with --skip-grant-tables option. 
*/ - thd->priv_user= (char *) ""; // privileges for - *thd->priv_host= '\0'; // the user are unknown - thd->master_access= ~NO_ACCESS; // everything is allowed + sctx->skip_grants(); bzero((char*) mqh, sizeof(*mqh)); DBUG_RETURN(0); } @@ -716,9 +755,9 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, for (uint i=0 ; i < acl_users.elements ; i++) { ACL_USER *acl_user_tmp= dynamic_element(&acl_users,i,ACL_USER*); - if (!acl_user_tmp->user || !strcmp(thd->user, acl_user_tmp->user)) + if (!acl_user_tmp->user || !strcmp(sctx->user, acl_user_tmp->user)) { - if (compare_hostname(&acl_user_tmp->host, thd->host, thd->ip)) + if (compare_hostname(&acl_user_tmp->host, sctx->host, sctx->ip)) { /* check password: it should be empty or valid */ if (passwd_len == acl_user_tmp->salt_len) @@ -820,12 +859,12 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, break; } DBUG_PRINT("info",("checkpoint 2")); - /* If X509 issuer is speified, we check it... */ + /* If X509 issuer is specified, we check it... */ if (acl_user->x509_issuer) { DBUG_PRINT("info",("checkpoint 3")); - char *ptr = X509_NAME_oneline(X509_get_issuer_name(cert), 0, 0); - DBUG_PRINT("info",("comparing issuers: '%s' and '%s'", + char *ptr = X509_NAME_oneline(X509_get_issuer_name(cert), 0, 0); + DBUG_PRINT("info",("comparing issuers: '%s' and '%s'", acl_user->x509_issuer, ptr)); if (strcmp(acl_user->x509_issuer, ptr)) { @@ -833,6 +872,7 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, sql_print_information("X509 issuer mismatch: should be '%s' " "but is '%s'", acl_user->x509_issuer, ptr); free(ptr); + user_access=NO_ACCESS; break; } user_access= acl_user->access; @@ -848,11 +888,13 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, if (strcmp(acl_user->x509_subject,ptr)) { if (global_system_variables.log_warnings) - sql_print_information("X509 subject mismatch: '%s' vs '%s'", + sql_print_information("X509 subject mismatch: should be '%s' but is '%s'", acl_user->x509_subject, ptr); + free(ptr); + user_access=NO_ACCESS; + 
break; } - else - user_access= acl_user->access; + user_access= acl_user->access; free(ptr); } break; @@ -865,20 +907,119 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, break; #endif /* HAVE_OPENSSL */ } - thd->master_access= user_access; - thd->priv_user= acl_user->user ? thd->user : (char *) ""; + sctx->master_access= user_access; + sctx->priv_user= acl_user->user ? sctx->user : (char *) ""; *mqh= acl_user->user_resource; if (acl_user->host.hostname) - strmake(thd->priv_host, acl_user->host.hostname, MAX_HOSTNAME); + strmake(sctx->priv_host, acl_user->host.hostname, MAX_HOSTNAME); else - *thd->priv_host= 0; + *sctx->priv_host= 0; } VOID(pthread_mutex_unlock(&acl_cache->lock)); DBUG_RETURN(res); } +/* + This is like acl_getroot() above, but it doesn't check password, + and we don't care about the user resources. + + SYNOPSIS + acl_getroot_no_password() + sctx Context which should be initialized + user user name + host host name + ip IP + db current data base name + + RETURN + FALSE OK + TRUE Error +*/ + +bool acl_getroot_no_password(Security_context *sctx, char *user, char *host, + char *ip, char *db) +{ + int res= 1; + uint i; + ACL_USER *acl_user= 0; + DBUG_ENTER("acl_getroot_no_password"); + + DBUG_PRINT("enter", ("Host: '%s', Ip: '%s', User: '%s', db: '%s'", + (host ? host : "(NULL)"), (ip ? ip : "(NULL)"), + user, (db ? db : "(NULL)"))); + sctx->user= user; + sctx->host= host; + sctx->ip= ip; + sctx->host_or_ip= host ? host : (ip ? ip : ""); + + if (!initialized) + { + /* + here if mysqld's been started with --skip-grant-tables option. + */ + sctx->skip_grants(); + DBUG_RETURN(FALSE); + } + + VOID(pthread_mutex_lock(&acl_cache->lock)); + + sctx->master_access= 0; + sctx->db_access= 0; + sctx->priv_user= (char *) ""; + *sctx->priv_host= 0; + + /* + Find acl entry in user database. + This is specially tailored to suit the check we do for CALL of + a stored procedure; user is set to what is actually a + priv_user, which can be ''. 
+ */ + for (i=0 ; i < acl_users.elements ; i++) + { + acl_user= dynamic_element(&acl_users,i,ACL_USER*); + if ((!acl_user->user && !user[0]) || + (acl_user->user && strcmp(user, acl_user->user) == 0)) + { + if (compare_hostname(&acl_user->host, host, ip)) + { + res= 0; + break; + } + } + } + + if (acl_user) + { + for (i=0 ; i < acl_dbs.elements ; i++) + { + ACL_DB *acl_db= dynamic_element(&acl_dbs, i, ACL_DB*); + if (!acl_db->user || + (user && user[0] && !strcmp(user, acl_db->user))) + { + if (compare_hostname(&acl_db->host, host, ip)) + { + if (!acl_db->db || (db && !wild_compare(db, acl_db->db, 0))) + { + sctx->db_access= acl_db->access; + break; + } + } + } + } + sctx->master_access= acl_user->access; + sctx->priv_user= acl_user->user ? user : (char *) ""; + + if (acl_user->host.hostname) + strmake(sctx->priv_host, acl_user->host.hostname, MAX_HOSTNAME); + else + *sctx->priv_host= 0; + } + VOID(pthread_mutex_unlock(&acl_cache->lock)); + DBUG_RETURN(res); +} + static byte* check_get_key(ACL_USER *buff,uint *length, my_bool not_used __attribute__((unused))) { @@ -902,20 +1043,21 @@ static void acl_update_user(const char *user, const char *host, { ACL_USER *acl_user=dynamic_element(&acl_users,i,ACL_USER*); if (!acl_user->user && !user[0] || - acl_user->user && - !strcmp(user,acl_user->user)) + acl_user->user && !strcmp(user,acl_user->user)) { if (!acl_user->host.hostname && !host[0] || acl_user->host.hostname && !my_strcasecmp(system_charset_info, host, acl_user->host.hostname)) { acl_user->access=privileges; - if (mqh->bits & 1) + if (mqh->specified_limits & USER_RESOURCES::QUERIES_PER_HOUR) acl_user->user_resource.questions=mqh->questions; - if (mqh->bits & 2) + if (mqh->specified_limits & USER_RESOURCES::UPDATES_PER_HOUR) acl_user->user_resource.updates=mqh->updates; - if (mqh->bits & 4) - acl_user->user_resource.connections=mqh->connections; + if (mqh->specified_limits & USER_RESOURCES::CONNECTIONS_PER_HOUR) + acl_user->user_resource.conn_per_hour= 
mqh->conn_per_hour; + if (mqh->specified_limits & USER_RESOURCES::USER_CONNECTIONS) + acl_user->user_resource.user_conn= mqh->user_conn; if (ssl_type != SSL_TYPE_NOT_SPECIFIED) { acl_user->ssl_type= ssl_type; @@ -964,16 +1106,14 @@ static void acl_insert_user(const char *user, const char *host, set_user_salt(&acl_user, password, password_len); VOID(push_dynamic(&acl_users,(gptr) &acl_user)); - if (!acl_user.host.hostname || acl_user.host.hostname[0] == wild_many - && !acl_user.host.hostname[1]) + if (!acl_user.host.hostname || + (acl_user.host.hostname[0] == wild_many && !acl_user.host.hostname[1])) allow_all_hosts=1; // Anyone can connect /* purecov: tested */ qsort((gptr) dynamic_element(&acl_users,0,ACL_USER*),acl_users.elements, sizeof(ACL_USER),(qsort_cmp) acl_compare); - /* We must free acl_check_hosts as its memory is mapped to acl_user */ - delete_dynamic(&acl_wild_hosts); - hash_free(&acl_check_hosts); - init_check_host(); + /* Rebuild 'acl_check_hosts' since 'acl_users' has been modified */ + rebuild_check_host(); } @@ -1027,7 +1167,7 @@ static void acl_insert_db(const char *user, const char *host, const char *db, ACL_DB acl_db; safe_mutex_assert_owner(&acl_cache->lock); acl_db.user=strdup_root(&mem,user); - update_hostname(&acl_db.host,strdup_root(&mem,host)); + update_hostname(&acl_db.host, *host ? 
strdup_root(&mem,host) : 0); acl_db.db=strdup_root(&mem,db); acl_db.access=privileges; acl_db.sort=get_sort(3,acl_db.host.hostname,acl_db.db,acl_db.user); @@ -1048,7 +1188,7 @@ static void acl_insert_db(const char *user, const char *host, const char *db, ulong acl_get(const char *host, const char *ip, const char *user, const char *db, my_bool db_is_pattern) { - ulong host_access= ~(ulong)0,db_access= 0; + ulong host_access= ~(ulong)0, db_access= 0; uint i,key_length; char key[ACL_KEY_LENGTH],*tmp_db,*end; acl_entry *entry; @@ -1177,6 +1317,22 @@ static void init_check_host(void) } +/* + Rebuild lists used for checking of allowed hosts + + We need to rebuild 'acl_check_hosts' and 'acl_wild_hosts' after adding, + dropping or renaming user, since they contain pointers to elements of + 'acl_user' array, which are invalidated by drop operation, and use + ACL_USER::host::hostname as a key, which is changed by rename. +*/ +void rebuild_check_host(void) +{ + delete_dynamic(&acl_wild_hosts); + hash_free(&acl_check_hosts); + init_check_host(); +} + + /* Return true if there is no users that can match the given host */ bool acl_check_host(const char *host, const char *ip) @@ -1228,29 +1384,28 @@ bool check_change_password(THD *thd, const char *host, const char *user, { if (!initialized) { - net_printf(thd,ER_OPTION_PREVENTS_STATEMENT, - "--skip-grant-tables"); + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); return(1); } if (!thd->slave_thread && - (strcmp(thd->user,user) || - my_strcasecmp(system_charset_info, host, thd->priv_host))) + (strcmp(thd->security_ctx->user, user) || + my_strcasecmp(system_charset_info, host, + thd->security_ctx->priv_host))) { - if (check_access(thd, UPDATE_ACL, "mysql",0,1,0)) + if (check_access(thd, UPDATE_ACL, "mysql",0,1,0,0)) return(1); } - if (!thd->slave_thread && !thd->user[0]) + if (!thd->slave_thread && !thd->security_ctx->user[0]) { - send_error(thd, ER_PASSWORD_ANONYMOUS_USER); + 
my_message(ER_PASSWORD_ANONYMOUS_USER, ER(ER_PASSWORD_ANONYMOUS_USER), + MYF(0)); return(1); } uint len=strlen(new_password); if (len && len != SCRAMBLED_PASSWORD_CHAR_LENGTH && len != SCRAMBLED_PASSWORD_CHAR_LENGTH_323) { - net_printf(thd, 0, - "Password hash should be a %d-digit hexadecimal number", - SCRAMBLED_PASSWORD_CHAR_LENGTH); + my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); return -1; } return(0); @@ -1291,7 +1446,7 @@ bool change_password(THD *thd, const char *host, const char *user, DBUG_RETURN(1); bzero((char*) &tables, sizeof(tables)); - tables.alias=tables.real_name= (char*) "user"; + tables.alias= tables.table_name= (char*) "user"; tables.db= (char*) "mysql"; #ifdef HAVE_REPLICATION @@ -1307,7 +1462,7 @@ bool change_password(THD *thd, const char *host, const char *user, */ tables.updating= 1; /* Thanks to bzero, tables.next==0 */ - if (!tables_ok(0, &tables)) + if (!tables_ok(thd, &tables)) DBUG_RETURN(0); } #endif @@ -1320,7 +1475,7 @@ bool change_password(THD *thd, const char *host, const char *user, if (!(acl_user= find_acl_user(host, user, TRUE))) { VOID(pthread_mutex_unlock(&acl_cache->lock)); - send_error(thd, ER_PASSWORD_NO_MATCH); + my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), MYF(0)); goto end; } /* update loaded acl entry: */ @@ -1332,22 +1487,20 @@ bool change_password(THD *thd, const char *host, const char *user, new_password, new_password_len)) { VOID(pthread_mutex_unlock(&acl_cache->lock)); /* purecov: deadcode */ - send_error(thd,0); /* purecov: deadcode */ goto end; } acl_cache->clear(1); // Clear locked hostname cache VOID(pthread_mutex_unlock(&acl_cache->lock)); result= 0; - query_length= - my_sprintf(buff, - (buff,"SET PASSWORD FOR \"%-.120s\"@\"%-.120s\"=\"%-.120s\"", - acl_user->user ? acl_user->user : "", - acl_user->host.hostname ? 
acl_user->host.hostname : "", - new_password)); - mysql_update_log.write(thd, buff, query_length); if (mysql_bin_log.is_open()) { + query_length= + my_sprintf(buff, + (buff,"SET PASSWORD FOR '%-.120s'@'%-.120s'='%-.120s'", + acl_user->user ? acl_user->user : "", + acl_user->host.hostname ? acl_user->host.hostname : "", + new_password)); thd->clear_error(); Query_log_event qinfo(thd, buff, query_length, 0, FALSE); mysql_bin_log.write(&qinfo); @@ -1359,6 +1512,34 @@ end: /* + Find user in ACL + + SYNOPSIS + is_acl_user() + host host name + user user name + + RETURN + FALSE user not fond + TRUE there are such user +*/ + +bool is_acl_user(const char *host, const char *user) +{ + bool res; + + /* --skip-grants */ + if (!initialized) + return TRUE; + + VOID(pthread_mutex_lock(&acl_cache->lock)); + res= find_acl_user(host, user, TRUE) != NULL; + VOID(pthread_mutex_unlock(&acl_cache->lock)); + return res; +} + + +/* Find first entry that matches the current user */ @@ -1374,11 +1555,10 @@ find_acl_user(const char *host, const char *user, my_bool exact) { ACL_USER *acl_user=dynamic_element(&acl_users,i,ACL_USER*); DBUG_PRINT("info",("strcmp('%s','%s'), compare_hostname('%s','%s'),", - user, - acl_user->user ? acl_user->user : "", - host, - acl_user->host.hostname ? acl_user->host.hostname : - "")); + user, acl_user->user ? acl_user->user : "", + host, + acl_user->host.hostname ? acl_user->host.hostname : + "")); if (!acl_user->user && !user[0] || acl_user->user && !strcmp(user,acl_user->user)) { @@ -1428,7 +1608,7 @@ static const char *calc_ip(const char *ip, long *val, char end) static void update_hostname(acl_host_and_ip *host, const char *hostname) { - host->hostname=(char*) hostname; // This will not be modified! + host->hostname=(char*) hostname; // This will not be modified! 
if (!hostname || (!(hostname=calc_ip(hostname,&host->ip,'/')) || !(hostname=calc_ip(hostname+1,&host->ip_mask,'\0')))) @@ -1448,8 +1628,8 @@ static bool compare_hostname(const acl_host_and_ip *host, const char *hostname, } return (!host->hostname || (hostname && !wild_case_compare(system_charset_info, - hostname,host->hostname)) || - (ip && !wild_compare(ip,host->hostname,0))); + hostname, host->hostname)) || + (ip && !wild_compare(ip, host->hostname, 0))); } bool hostname_requires_resolving(const char *hostname) @@ -1490,20 +1670,23 @@ static bool update_user_table(THD *thd, TABLE *table, const char *host, const char *user, const char *new_password, uint new_password_len) { + char user_key[MAX_KEY_LENGTH]; int error; DBUG_ENTER("update_user_table"); DBUG_PRINT("enter",("user: %s host: %s",user,host)); table->field[0]->store(host,(uint) strlen(host), system_charset_info); table->field[1]->store(user,(uint) strlen(user), system_charset_info); + key_copy((byte *) user_key, table->record[0], table->key_info, + table->key_info->key_length); table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr, - table->key_info[0].key_length, + if (table->file->index_read_idx(table->record[0], 0, + (byte *) user_key, table->key_info->key_length, HA_READ_KEY_EXACT)) { - my_error(ER_PASSWORD_NO_MATCH,MYF(0)); /* purecov: deadcode */ + my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), + MYF(0)); /* purecov: deadcode */ DBUG_RETURN(1); /* purecov: deadcode */ } store_record(table,record[1]); @@ -1517,21 +1700,30 @@ static bool update_user_table(THD *thd, TABLE *table, } -/* Return 1 if we are allowed to create new users */ +/* + Return 1 if we are allowed to create new users + the logic here is: INSERT_ACL is sufficient. + It's also a requirement in opt_safe_user_create, + otherwise CREATE_USER_ACL is enough. 
+*/ static bool test_if_create_new_users(THD *thd) { - bool create_new_users=1; // Assume that we are allowed to create new users - if (opt_safe_user_create && !(thd->master_access & INSERT_ACL)) + Security_context *sctx= thd->security_ctx; + bool create_new_users= test(sctx->master_access & INSERT_ACL) || + (!opt_safe_user_create && + test(sctx->master_access & CREATE_USER_ACL)); + if (!create_new_users) { TABLE_LIST tl; ulong db_access; bzero((char*) &tl,sizeof(tl)); tl.db= (char*) "mysql"; - tl.real_name= (char*) "user"; + tl.table_name= (char*) "user"; + create_new_users= 1; - db_access=acl_get(thd->host, thd->ip, - thd->priv_user, tl.db, 0); + db_access=acl_get(sctx->host, sctx->ip, + sctx->priv_user, tl.db, 0); if (!(db_access & INSERT_ACL)) { if (check_grant(thd, INSERT_ACL, &tl, 0, UINT_MAX, 1)) @@ -1548,14 +1740,17 @@ static bool test_if_create_new_users(THD *thd) static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, ulong rights, bool revoke_grant, - bool create_user) + bool can_create_user, bool no_auto_create) { int error = -1; bool old_row_exists=0; const char *password= ""; uint password_len= 0; char what= (revoke_grant) ? 
'N' : 'Y'; + byte user_key[MAX_KEY_LENGTH]; + LEX *lex= thd->lex; DBUG_ENTER("replace_user_table"); + safe_mutex_assert_owner(&acl_cache->lock); if (combo.password.str && combo.password.str[0]) @@ -1563,9 +1758,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, if (combo.password.length != SCRAMBLED_PASSWORD_CHAR_LENGTH && combo.password.length != SCRAMBLED_PASSWORD_CHAR_LENGTH_323) { - my_printf_error(ER_UNKNOWN_ERROR, - "Password hash should be a %d-digit hexadecimal number", - MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); + my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); DBUG_RETURN(-1); } password_len= combo.password.length; @@ -1574,23 +1767,46 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); table->field[1]->store(combo.user.str,combo.user.length, system_charset_info); + key_copy(user_key, table->record[0], table->key_info, + table->key_info->key_length); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read_idx(table->record[0], 0, - (byte*) table->field[0]->ptr, - table->key_info[0].key_length, - HA_READ_KEY_EXACT)) + user_key, table->key_info->key_length, + HA_READ_KEY_EXACT)) { - if (!create_user) + /* what == 'N' means revoke */ + if (what == 'N') { - if (what == 'N') - my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str); - else - my_error(ER_NO_PERMISSION_TO_CREATE_USER, MYF(0), - thd->user, thd->host_or_ip); + my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str); + goto end; + } + /* + There are four options which affect the process of creation of + a new user (mysqld option --safe-create-user, 'insert' privilege + on 'mysql.user' table, using 'GRANT' with 'IDENTIFIED BY' and + SQL_MODE flag NO_AUTO_CREATE_USER). Below is the simplified rule + how it should work. + if (safe-user-create && ! 
INSERT_priv) => reject + else if (identified_by) => create + else if (no_auto_create_user) => reject + else create + + see also test_if_create_new_users() + */ + else if (!password_len && no_auto_create) + { + my_error(ER_PASSWORD_NO_MATCH, MYF(0), combo.user.str, combo.host.str); + goto end; + } + else if (!can_create_user) + { + my_error(ER_CANT_CREATE_USER_WITH_GRANT, MYF(0), + thd->security_ctx->user, thd->security_ctx->host_or_ip); goto end; } old_row_exists = 0; - restore_record(table,default_values); // cp empty row from default_values + restore_record(table,s->default_values); table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); table->field[1]->store(combo.user.str,combo.user.length, @@ -1604,8 +1820,9 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, store_record(table,record[1]); // Save copy for update if (combo.password.str) // If password given table->field[2]->store(password, password_len, system_charset_info); - else if (!rights && !revoke_grant && thd->lex->ssl_type == SSL_TYPE_NOT_SPECIFIED && - !thd->lex->mqh.bits) + else if (!rights && !revoke_grant && + lex->ssl_type == SSL_TYPE_NOT_SPECIFIED && + !lex->mqh.specified_limits) { DBUG_RETURN(0); } @@ -1625,40 +1842,40 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, (*tmp_field)->store(&what, 1, &my_charset_latin1); } rights= get_access(table, 3, &next_field); - DBUG_PRINT("info",("table->fields: %d",table->fields)); - if (table->fields >= 31) /* From 4.0.0 we have more fields */ + DBUG_PRINT("info",("table fields: %d",table->s->fields)); + if (table->s->fields >= 31) /* From 4.0.0 we have more fields */ { /* We write down SSL related ACL stuff */ - switch (thd->lex->ssl_type) { + switch (lex->ssl_type) { case SSL_TYPE_ANY: - table->field[next_field]->store("ANY", 3, &my_charset_latin1); + table->field[next_field]->store(STRING_WITH_LEN("ANY"), + &my_charset_latin1); table->field[next_field+1]->store("", 0, 
&my_charset_latin1); table->field[next_field+2]->store("", 0, &my_charset_latin1); table->field[next_field+3]->store("", 0, &my_charset_latin1); break; case SSL_TYPE_X509: - table->field[next_field]->store("X509", 4, &my_charset_latin1); + table->field[next_field]->store(STRING_WITH_LEN("X509"), + &my_charset_latin1); table->field[next_field+1]->store("", 0, &my_charset_latin1); table->field[next_field+2]->store("", 0, &my_charset_latin1); table->field[next_field+3]->store("", 0, &my_charset_latin1); break; case SSL_TYPE_SPECIFIED: - table->field[next_field]->store("SPECIFIED", 9, &my_charset_latin1); + table->field[next_field]->store(STRING_WITH_LEN("SPECIFIED"), + &my_charset_latin1); table->field[next_field+1]->store("", 0, &my_charset_latin1); table->field[next_field+2]->store("", 0, &my_charset_latin1); table->field[next_field+3]->store("", 0, &my_charset_latin1); - if (thd->lex->ssl_cipher) - table->field[next_field+1]->store(thd->lex->ssl_cipher, - strlen(thd->lex->ssl_cipher), - system_charset_info); - if (thd->lex->x509_issuer) - table->field[next_field+2]->store(thd->lex->x509_issuer, - strlen(thd->lex->x509_issuer), - system_charset_info); - if (thd->lex->x509_subject) - table->field[next_field+3]->store(thd->lex->x509_subject, - strlen(thd->lex->x509_subject), - system_charset_info); + if (lex->ssl_cipher) + table->field[next_field+1]->store(lex->ssl_cipher, + strlen(lex->ssl_cipher), system_charset_info); + if (lex->x509_issuer) + table->field[next_field+2]->store(lex->x509_issuer, + strlen(lex->x509_issuer), system_charset_info); + if (lex->x509_subject) + table->field[next_field+3]->store(lex->x509_subject, + strlen(lex->x509_subject), system_charset_info); break; case SSL_TYPE_NOT_SPECIFIED: break; @@ -1669,18 +1886,19 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, table->field[next_field+3]->store("", 0, &my_charset_latin1); break; } - - /* Skip over SSL related fields to first user limits related field */ - 
next_field+= 4; - - USER_RESOURCES mqh= thd->lex->mqh; - if (mqh.bits & 1) - table->field[next_field]->store((longlong) mqh.questions); - if (mqh.bits & 2) - table->field[next_field+1]->store((longlong) mqh.updates); - if (mqh.bits & 4) - table->field[next_field+2]->store((longlong) mqh.connections); - mqh_used = mqh_used || mqh.questions || mqh.updates || mqh.connections; + next_field+=4; + + USER_RESOURCES mqh= lex->mqh; + if (mqh.specified_limits & USER_RESOURCES::QUERIES_PER_HOUR) + table->field[next_field]->store((longlong) mqh.questions, TRUE); + if (mqh.specified_limits & USER_RESOURCES::UPDATES_PER_HOUR) + table->field[next_field+1]->store((longlong) mqh.updates, TRUE); + if (mqh.specified_limits & USER_RESOURCES::CONNECTIONS_PER_HOUR) + table->field[next_field+2]->store((longlong) mqh.conn_per_hour, TRUE); + if (table->s->fields >= 36 && + (mqh.specified_limits & USER_RESOURCES::USER_CONNECTIONS)) + table->field[next_field+3]->store((longlong) mqh.user_conn, TRUE); + mqh_used= mqh_used || mqh.questions || mqh.updates || mqh.conn_per_hour; } if (old_row_exists) { @@ -1716,19 +1934,19 @@ end: if (old_row_exists) acl_update_user(combo.user.str, combo.host.str, combo.password.str, password_len, - thd->lex->ssl_type, - thd->lex->ssl_cipher, - thd->lex->x509_issuer, - thd->lex->x509_subject, - &thd->lex->mqh, + lex->ssl_type, + lex->ssl_cipher, + lex->x509_issuer, + lex->x509_subject, + &lex->mqh, rights); else acl_insert_user(combo.user.str, combo.host.str, password, password_len, - thd->lex->ssl_type, - thd->lex->ssl_cipher, - thd->lex->x509_issuer, - thd->lex->x509_subject, - &thd->lex->mqh, + lex->ssl_type, + lex->ssl_cipher, + lex->x509_issuer, + lex->x509_subject, + &lex->mqh, rights); } DBUG_RETURN(error); @@ -1748,6 +1966,7 @@ static int replace_db_table(TABLE *table, const char *db, bool old_row_exists=0; int error; char what= (revoke_grant) ? 
'N' : 'Y'; + byte user_key[MAX_KEY_LENGTH]; DBUG_ENTER("replace_db_table"); if (!initialized) @@ -1759,18 +1978,20 @@ static int replace_db_table(TABLE *table, const char *db, /* Check if there is such a user in user table in memory? */ if (!find_acl_user(combo.host.str,combo.user.str, FALSE)) { - my_error(ER_PASSWORD_NO_MATCH,MYF(0)); + my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), MYF(0)); DBUG_RETURN(-1); } table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); table->field[1]->store(db,(uint) strlen(db), system_charset_info); table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); + key_copy(user_key, table->record[0], table->key_info, + table->key_info->key_length); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr, - table->key_info[0].key_length, - HA_READ_KEY_EXACT)) + user_key, table->key_info->key_length, + HA_READ_KEY_EXACT)) { if (what == 'N') { // no row, no revoke @@ -1778,7 +1999,7 @@ static int replace_db_table(TABLE *table, const char *db, goto abort; } old_row_exists = 0; - restore_record(table,default_values); // cp empty row from default_values + restore_record(table, s->default_values); table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); table->field[1]->store(db,(uint) strlen(db), system_charset_info); table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); @@ -1790,7 +2011,7 @@ static int replace_db_table(TABLE *table, const char *db, } store_rights=get_rights_for_db(rights); - for (i= 3, priv= 1; i < table->fields; i++, priv <<= 1) + for (i= 3, priv= 1; i < table->s->fields; i++, priv <<= 1) { if (priv & store_rights) // do it if priv is chosen table->field [i]->store(&what,1, &my_charset_latin1);// set requested privileges @@ -1857,27 +2078,40 @@ static byte* get_key_column(GRANT_COLUMN *buff,uint *length, } -class GRANT_TABLE :public Sql_alloc +class 
GRANT_NAME :public Sql_alloc { public: acl_host_and_ip host; char *db, *user, *tname, *hash_key; - ulong privs, cols; + ulong privs; ulong sort; uint key_length; + GRANT_NAME(const char *h, const char *d,const char *u, + const char *t, ulong p); + GRANT_NAME (TABLE *form); + virtual ~GRANT_NAME() {}; + virtual bool ok() { return privs != 0; } +}; + + +class GRANT_TABLE :public GRANT_NAME +{ +public: + ulong cols; HASH hash_columns; GRANT_TABLE(const char *h, const char *d,const char *u, const char *t, ulong p, ulong c); GRANT_TABLE (TABLE *form, TABLE *col_privs); + ~GRANT_TABLE(); bool ok() { return privs != 0 || cols != 0; } }; -GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u, - const char *t, ulong p, ulong c) - :privs(p), cols(c) +GRANT_NAME::GRANT_NAME(const char *h, const char *d,const char *u, + const char *t, ulong p) + :privs(p) { /* Host given by user */ update_hostname(&host, strdup_root(&memex, h)); @@ -1893,15 +2127,20 @@ GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u, key_length =(uint) strlen(d)+(uint) strlen(u)+(uint) strlen(t)+3; hash_key = (char*) alloc_root(&memex,key_length); strmov(strmov(strmov(hash_key,user)+1,db)+1,tname); +} + + +GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u, + const char *t, ulong p, ulong c) + :GRANT_NAME(h,d,u,t,p), cols(c) +{ (void) hash_init(&hash_columns,system_charset_info, 0,0,0, (hash_get_key) get_key_column,0,0); } -GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) +GRANT_NAME::GRANT_NAME(TABLE *form) { - byte key[MAX_KEY_LENGTH]; - update_hostname(&host, get_field(&memex, form->field[0])); db= get_field(&memex,form->field[1]); user= get_field(&memex,form->field[2]); @@ -1912,7 +2151,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) if (!db || !tname) { /* Wrong table row; Ignore it */ - privs = cols = 0; /* purecov: inspected */ + privs= 0; return; /* purecov: inspected */ } if (lower_case_table_names) @@ -1925,31 +2164,49 @@ 
GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) hash_key = (char*) alloc_root(&memex,key_length); strmov(strmov(strmov(hash_key,user)+1,db)+1,tname); privs = (ulong) form->field[6]->val_int(); - cols = (ulong) form->field[7]->val_int(); privs = fix_rights_for_table(privs); +} + + +GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) + :GRANT_NAME(form) +{ + byte key[MAX_KEY_LENGTH]; + + if (!db || !tname) + { + /* Wrong table row; Ignore it */ + hash_clear(&hash_columns); /* allow for destruction */ + cols= 0; + return; + } + cols= (ulong) form->field[7]->val_int(); cols = fix_rights_for_column(cols); (void) hash_init(&hash_columns,system_charset_info, 0,0,0, (hash_get_key) get_key_column,0,0); if (cols) { - int key_len; + uint key_prefix_len; + KEY_PART_INFO *key_part= col_privs->key_info->key_part; col_privs->field[0]->store(host.hostname, host.hostname ? (uint) strlen(host.hostname) : 0, system_charset_info); col_privs->field[1]->store(db,(uint) strlen(db), system_charset_info); col_privs->field[2]->store(user,(uint) strlen(user), system_charset_info); col_privs->field[3]->store(tname,(uint) strlen(tname), system_charset_info); - key_len=(col_privs->field[0]->pack_length()+ - col_privs->field[1]->pack_length()+ - col_privs->field[2]->pack_length()+ - col_privs->field[3]->pack_length()); - key_copy(key,col_privs,0,key_len); + + key_prefix_len= (key_part[0].store_length + + key_part[1].store_length + + key_part[2].store_length + + key_part[3].store_length); + key_copy(key, col_privs->record[0], col_privs->key_info, key_prefix_len); col_privs->field[4]->store("",0, &my_charset_latin1); + col_privs->file->ha_index_init(0); if (col_privs->file->index_read(col_privs->record[0], - (byte*) col_privs->field[0]->ptr, - key_len, HA_READ_KEY_EXACT)) + (byte*) key, + key_prefix_len, HA_READ_KEY_EXACT)) { cols = 0; /* purecov: deadcode */ col_privs->file->ha_index_end(); @@ -1971,13 +2228,19 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) } 
my_hash_insert(&hash_columns, (byte *) mem_check); } while (!col_privs->file->index_next(col_privs->record[0]) && - !key_cmp_if_same(col_privs,key,0,key_len)); + !key_cmp_if_same(col_privs,key,0,key_prefix_len)); col_privs->file->ha_index_end(); } } -static byte* get_grant_table(GRANT_TABLE *buff,uint *length, +GRANT_TABLE::~GRANT_TABLE() +{ + hash_free(&hash_columns); +} + + +static byte* get_grant_table(GRANT_NAME *buff,uint *length, my_bool not_used __attribute__((unused))) { *length=buff->key_length; @@ -1993,43 +2256,62 @@ void free_grant_table(GRANT_TABLE *grant_table) /* Search after a matching grant. Prefer exact grants before not exact ones */ -static GRANT_TABLE *table_hash_search(const char *host,const char* ip, +static GRANT_NAME *name_hash_search(HASH *name_hash, + const char *host,const char* ip, const char *db, const char *user, const char *tname, bool exact) { char helping [NAME_LEN*2+USERNAME_LENGTH+3]; uint len; - GRANT_TABLE *grant_table,*found=0; + GRANT_NAME *grant_name,*found=0; HASH_SEARCH_STATE state; len = (uint) (strmov(strmov(strmov(helping,user)+1,db)+1,tname)-helping)+ 1; - for (grant_table=(GRANT_TABLE*) hash_first(&column_priv_hash, - (byte*) helping, - len, &state) ; - grant_table ; - grant_table= (GRANT_TABLE*) hash_next(&column_priv_hash,(byte*) helping, - len, &state)) + for (grant_name= (GRANT_NAME*) hash_first(name_hash, (byte*) helping, + len, &state); + grant_name ; + grant_name= (GRANT_NAME*) hash_next(name_hash,(byte*) helping, + len, &state)) { if (exact) { - if ((host && + if (!grant_name->host.hostname || + (host && !my_strcasecmp(system_charset_info, host, - grant_table->host.hostname)) || - (ip && !strcmp(ip, grant_table->host.hostname))) - return grant_table; + grant_name->host.hostname)) || + (ip && !strcmp(ip, grant_name->host.hostname))) + return grant_name; } else { - if (compare_hostname(&grant_table->host, host, ip) && - (!found || found->sort < grant_table->sort)) - found=grant_table; // Host ok + if 
(compare_hostname(&grant_name->host, host, ip) && + (!found || found->sort < grant_name->sort)) + found=grant_name; // Host ok } } return found; } +inline GRANT_NAME * +routine_hash_search(const char *host, const char *ip, const char *db, + const char *user, const char *tname, bool proc, bool exact) +{ + return (GRANT_TABLE*) + name_hash_search(proc ? &proc_priv_hash : &func_priv_hash, + host, ip, db, user, tname, exact); +} + + +inline GRANT_TABLE * +table_hash_search(const char *host, const char *ip, const char *db, + const char *user, const char *tname, bool exact) +{ + return (GRANT_TABLE*) name_hash_search(&column_priv_hash, host, ip, db, + user, tname, exact); +} + inline GRANT_COLUMN * column_hash_search(GRANT_TABLE *t, const char *cname, uint length) @@ -2045,49 +2327,64 @@ static int replace_column_table(GRANT_TABLE *g_t, ulong rights, bool revoke_grant) { int error=0,result=0; - uint key_length; byte key[MAX_KEY_LENGTH]; + uint key_prefix_length; + KEY_PART_INFO *key_part= table->key_info->key_part; DBUG_ENTER("replace_column_table"); - table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); - table->field[1]->store(db,(uint) strlen(db), system_charset_info); - table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); - table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info); - key_length=(table->field[0]->pack_length()+ table->field[1]->pack_length()+ - table->field[2]->pack_length()+ table->field[3]->pack_length()); - key_copy(key,table,0,key_length); + table->field[0]->store(combo.host.str,combo.host.length, + system_charset_info); + table->field[1]->store(db,(uint) strlen(db), + system_charset_info); + table->field[2]->store(combo.user.str,combo.user.length, + system_charset_info); + table->field[3]->store(table_name,(uint) strlen(table_name), + system_charset_info); + + /* Get length of 3 first key parts */ + key_prefix_length= (key_part[0].store_length + key_part[1].store_length + 
+ key_part[2].store_length + key_part[3].store_length); + key_copy(key, table->record[0], table->key_info, key_prefix_length); - rights &= COL_ACLS; // Only ACL for columns + rights&= COL_ACLS; // Only ACL for columns /* first fix privileges for all columns in column list */ List_iterator <LEX_COLUMN> iter(columns); - class LEX_COLUMN *xx; + class LEX_COLUMN *column; table->file->ha_index_init(0); - while ((xx=iter++)) + while ((column= iter++)) { - ulong privileges = xx->rights; + ulong privileges= column->rights; bool old_row_exists=0; - key_restore(table,key,0,key_length); - table->field[4]->store(xx->column.ptr(),xx->column.length(), + byte user_key[MAX_KEY_LENGTH]; + + key_restore(table->record[0],key,table->key_info, + key_prefix_length); + table->field[4]->store(column->column.ptr(), column->column.length(), system_charset_info); + /* Get key for the first 4 columns */ + key_copy(user_key, table->record[0], table->key_info, + table->key_info->key_length); table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (table->file->index_read(table->record[0],(byte*) table->field[0]->ptr, - table->key_info[0].key_length, - HA_READ_KEY_EXACT)) + if (table->file->index_read(table->record[0], user_key, + table->key_info->key_length, + HA_READ_KEY_EXACT)) { if (revoke_grant) { my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0), - combo.user.str, combo.host.str, table_name); /* purecov: inspected */ - result= -1; /* purecov: inspected */ - continue; /* purecov: inspected */ + combo.user.str, combo.host.str, + table_name); /* purecov: inspected */ + result= -1; /* purecov: inspected */ + continue; /* purecov: inspected */ } old_row_exists = 0; - restore_record(table,default_values); // Get empty record - key_restore(table,key,0,key_length); - table->field[4]->store(xx->column.ptr(),xx->column.length(), + restore_record(table, s->default_values); // Get empty record + key_restore(table->record[0],key,table->key_info, + key_prefix_length); + 
table->field[4]->store(column->column.ptr(),column->column.length(), system_charset_info); } else @@ -2103,10 +2400,11 @@ static int replace_column_table(GRANT_TABLE *g_t, store_record(table,record[1]); // copy original row } - table->field[6]->store((longlong) get_rights_for_column(privileges)); + table->field[6]->store((longlong) get_rights_for_column(privileges), TRUE); if (old_row_exists) { + GRANT_COLUMN *grant_column; if (privileges) error=table->file->update_row(table->record[1],table->record[0]); else @@ -2117,21 +2415,21 @@ static int replace_column_table(GRANT_TABLE *g_t, result= -1; /* purecov: inspected */ goto end; /* purecov: inspected */ } - GRANT_COLUMN *grant_column = column_hash_search(g_t, - xx->column.ptr(), - xx->column.length()); + grant_column= column_hash_search(g_t, column->column.ptr(), + column->column.length()); if (grant_column) // Should always be true - grant_column->rights = privileges; // Update hash + grant_column->rights= privileges; // Update hash } else // new grant { + GRANT_COLUMN *grant_column; if ((error=table->file->write_row(table->record[0]))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ result= -1; /* purecov: inspected */ goto end; /* purecov: inspected */ } - GRANT_COLUMN *grant_column = new GRANT_COLUMN(xx->column,privileges); + grant_column= new GRANT_COLUMN(column->column,privileges); my_hash_insert(&g_t->hash_columns,(byte*) grant_column); } } @@ -2143,10 +2441,14 @@ static int replace_column_table(GRANT_TABLE *g_t, if (revoke_grant) { + byte user_key[MAX_KEY_LENGTH]; + key_copy(user_key, table->record[0], table->key_info, + key_prefix_length); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (table->file->index_read(table->record[0], (byte*) table->field[0]->ptr, - key_length, - HA_READ_KEY_EXACT)) + if (table->file->index_read(table->record[0], user_key, + key_prefix_length, + HA_READ_KEY_EXACT)) goto end; /* Scan through all rows with the same host,db,user and table */ @@ -2165,7 
+2467,7 @@ static int replace_column_table(GRANT_TABLE *g_t, privileges&= ~rights; table->field[6]->store((longlong) - get_rights_for_column(privileges)); + get_rights_for_column(privileges), TRUE); table->field[4]->val_str(&column_name); grant_column = column_hash_search(g_t, column_name.ptr(), @@ -2197,7 +2499,7 @@ static int replace_column_table(GRANT_TABLE *g_t, } } } while (!table->file->index_next(table->record[0]) && - !key_cmp_if_same(table,key,0,key_length)); + !key_cmp_if_same(table, key, 0, key_prefix_length)); } end: @@ -2212,13 +2514,15 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, ulong rights, ulong col_rights, bool revoke_grant) { - char grantor[HOSTNAME_LENGTH+USERNAME_LENGTH+2]; + char grantor[USER_HOST_BUFF_SIZE]; int old_row_exists = 1; int error=0; ulong store_table_rights, store_col_rights; + byte user_key[MAX_KEY_LENGTH]; DBUG_ENTER("replace_table_table"); - strxmov(grantor, thd->user, "@", thd->host_or_ip, NullS); + strxmov(grantor, thd->security_ctx->user, "@", + thd->security_ctx->host_or_ip, NullS); /* The following should always succeed as new users are created before @@ -2226,20 +2530,23 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, */ if (!find_acl_user(combo.host.str,combo.user.str, FALSE)) { - my_error(ER_PASSWORD_NO_MATCH,MYF(0)); /* purecov: deadcode */ + my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), + MYF(0)); /* purecov: deadcode */ DBUG_RETURN(-1); /* purecov: deadcode */ } - restore_record(table,default_values); // Get empty record + restore_record(table, s->default_values); // Get empty record table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); table->field[1]->store(db,(uint) strlen(db), system_charset_info); table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info); store_record(table,record[1]); // store at pos 1 + key_copy(user_key, 
table->record[0], table->key_info, + table->key_info->key_length); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr, - table->key_info[0].key_length, + if (table->file->index_read_idx(table->record[0], 0, + user_key, table->key_info->key_length, HA_READ_KEY_EXACT)) { /* @@ -2251,7 +2558,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, { // no row, no revoke my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0), combo.user.str, combo.host.str, - table_name); /* purecov: deadcode */ + table_name); /* purecov: deadcode */ DBUG_RETURN(-1); /* purecov: deadcode */ } old_row_exists = 0; @@ -2280,8 +2587,8 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, } table->field[4]->store(grantor,(uint) strlen(grantor), system_charset_info); - table->field[6]->store((longlong) store_table_rights); - table->field[7]->store((longlong) store_col_rights); + table->field[6]->store((longlong) store_table_rights, TRUE); + table->field[7]->store((longlong) store_col_rights, TRUE); rights=fix_rights_for_table(store_table_rights); col_rights=fix_rights_for_column(store_col_rights); @@ -2320,6 +2627,122 @@ table_error: } +static int replace_routine_table(THD *thd, GRANT_NAME *grant_name, + TABLE *table, const LEX_USER &combo, + const char *db, const char *routine_name, + bool is_proc, ulong rights, bool revoke_grant) +{ + char grantor[USER_HOST_BUFF_SIZE]; + int old_row_exists= 1; + int error=0; + ulong store_proc_rights; + DBUG_ENTER("replace_routine_table"); + + if (!initialized) + { + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); + DBUG_RETURN(-1); + } + + strxmov(grantor, thd->security_ctx->user, "@", + thd->security_ctx->host_or_ip, NullS); + + /* + The following should always succeed as new users are created before + this function is called! 
+ */ + if (!find_acl_user(combo.host.str, combo.user.str, FALSE)) + { + my_error(ER_PASSWORD_NO_MATCH,MYF(0)); + DBUG_RETURN(-1); + } + + restore_record(table, s->default_values); // Get empty record + table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1); + table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1); + table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); + table->field[3]->store(routine_name,(uint) strlen(routine_name), + &my_charset_latin1); + table->field[4]->store((longlong)(is_proc ? + TYPE_ENUM_PROCEDURE : TYPE_ENUM_FUNCTION), + TRUE); + store_record(table,record[1]); // store at pos 1 + + if (table->file->index_read_idx(table->record[0],0, + (byte*) table->field[0]->ptr,0, + HA_READ_KEY_EXACT)) + { + /* + The following should never happen as we first check the in memory + grant tables for the user. There is however always a small change that + the user has modified the grant tables directly. + */ + if (revoke_grant) + { // no row, no revoke + my_error(ER_NONEXISTING_PROC_GRANT, MYF(0), + combo.user.str, combo.host.str, routine_name); + DBUG_RETURN(-1); + } + old_row_exists= 0; + restore_record(table,record[1]); // Get saved record + } + + store_proc_rights= get_rights_for_procedure(rights); + if (old_row_exists) + { + ulong j; + store_record(table,record[1]); + j= (ulong) table->field[6]->val_int(); + + if (revoke_grant) + { + /* column rights are already fixed in mysql_table_grant */ + store_proc_rights=j & ~store_proc_rights; + } + else + { + store_proc_rights|= j; + } + } + + table->field[5]->store(grantor,(uint) strlen(grantor), &my_charset_latin1); + table->field[6]->store((longlong) store_proc_rights, TRUE); + rights=fix_rights_for_procedure(store_proc_rights); + + if (old_row_exists) + { + if (store_proc_rights) + { + if ((error=table->file->update_row(table->record[1],table->record[0]))) + goto table_error; + } + else if ((error= table->file->delete_row(table->record[1]))) + goto 
table_error; + } + else + { + error=table->file->write_row(table->record[0]); + if (error && error != HA_ERR_FOUND_DUPP_KEY) + goto table_error; + } + + if (rights) + { + grant_name->privs= rights; + } + else + { + hash_delete(is_proc ? &proc_priv_hash : &func_priv_hash,(byte*) grant_name); + } + DBUG_RETURN(0); + + /* This should never happen */ +table_error: + table->file->print_error(error,MYF(0)); + DBUG_RETURN(-1); +} + + /* Store table level and column level grants in the privilege tables @@ -2333,59 +2756,63 @@ table_error: revoke_grant Set to 1 if this is a REVOKE command RETURN - 0 ok - 1 error + FALSE ok + TRUE error */ -int mysql_table_grant(THD *thd, TABLE_LIST *table_list, +bool mysql_table_grant(THD *thd, TABLE_LIST *table_list, List <LEX_USER> &user_list, List <LEX_COLUMN> &columns, ulong rights, bool revoke_grant) { ulong column_priv= 0; List_iterator <LEX_USER> str_list (user_list); - LEX_USER *Str; + LEX_USER *Str, *tmp_Str; TABLE_LIST tables[3]; bool create_new_users=0; + char *db_name, *table_name; DBUG_ENTER("mysql_table_grant"); if (!initialized) { my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); /* purecov: inspected */ - DBUG_RETURN(-1); /* purecov: inspected */ + DBUG_RETURN(TRUE); /* purecov: inspected */ } if (rights & ~TABLE_ACLS) { - my_error(ER_ILLEGAL_GRANT_FOR_TABLE,MYF(0)); - DBUG_RETURN(-1); + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, ER(ER_ILLEGAL_GRANT_FOR_TABLE), + MYF(0)); + DBUG_RETURN(TRUE); } if (!revoke_grant) { - if (columns.elements && !revoke_grant) + if (columns.elements) { - TABLE *table; class LEX_COLUMN *column; List_iterator <LEX_COLUMN> column_iter(columns); - if (!(table=open_ltable(thd,table_list,TL_READ))) - DBUG_RETURN(-1); + if (open_and_lock_tables(thd, table_list)) + DBUG_RETURN(TRUE); + while ((column = column_iter++)) { uint unused_field_idx= NO_CACHED_FIELD_INDEX; - Field *f= find_field_in_table(thd,table,column->column.ptr(), - column->column.length(),1,0,&unused_field_idx); - if (!f) 
+ TABLE_LIST *dummy; + Field *f=find_field_in_table_ref(thd, table_list, column->column.ptr(), + column->column.length(), + column->column.ptr(), NULL, NULL, + NULL, TRUE, FALSE, + &unused_field_idx, FALSE, &dummy); + if (f == (Field*)0) { my_error(ER_BAD_FIELD_ERROR, MYF(0), column->column.c_ptr(), table_list->alias); - DBUG_RETURN(-1); - } - if (f == (Field*)-1) - { - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } + if (f == (Field *)-1) + DBUG_RETURN(TRUE); column_priv|= column->rights; } close_thread_tables(thd); @@ -2396,12 +2823,12 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, { char buf[FN_REFLEN]; sprintf(buf,"%s/%s/%s.frm",mysql_data_home, table_list->db, - table_list->real_name); + table_list->table_name); fn_format(buf,buf,"","",4+16+32); if (access(buf,F_OK)) { my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db, table_list->alias); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } if (table_list->grant.want_privilege) @@ -2410,7 +2837,8 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, get_privilege_desc(command, sizeof(command), table_list->grant.want_privilege); my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), - command, thd->priv_user, thd->host_or_ip, table_list->alias); + command, thd->security_ctx->priv_user, + thd->security_ctx->host_or_ip, table_list->alias); DBUG_RETURN(-1); } } @@ -2419,14 +2847,16 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, /* open the mysql.tables_priv and mysql.columns_priv tables */ bzero((char*) &tables,sizeof(tables)); - tables[0].alias=tables[0].real_name= (char*) "user"; - tables[1].alias=tables[1].real_name= (char*) "tables_priv"; - tables[2].alias=tables[2].real_name= (char*) "columns_priv"; - tables[0].next=tables+1; + tables[0].alias=tables[0].table_name= (char*) "user"; + tables[1].alias=tables[1].table_name= (char*) "tables_priv"; + tables[2].alias=tables[2].table_name= (char*) "columns_priv"; + tables[0].next_local= tables[0].next_global= tables+1; /* Don't open column table if we don't need 
it ! */ - tables[1].next=((column_priv || - (revoke_grant && ((rights & COL_ACLS) || columns.elements))) - ? tables+2 : 0); + tables[1].next_local= + tables[1].next_global= ((column_priv || + (revoke_grant && + ((rights & COL_ACLS) || columns.elements))) + ? tables+2 : 0); tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_WRITE; tables[0].db=tables[1].db=tables[2].db=(char*) "mysql"; @@ -2442,66 +2872,72 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, account in tests. */ tables[0].updating= tables[1].updating= tables[2].updating= 1; - if (!tables_ok(0, tables)) - DBUG_RETURN(0); + if (!tables_ok(thd, tables)) + DBUG_RETURN(FALSE); } #endif if (simple_open_n_lock_tables(thd,tables)) { // Should never happen close_thread_tables(thd); /* purecov: deadcode */ - DBUG_RETURN(-1); /* purecov: deadcode */ + DBUG_RETURN(TRUE); /* purecov: deadcode */ } if (!revoke_grant) create_new_users= test_if_create_new_users(thd); - int result=0; + bool result= FALSE; rw_wrlock(&LOCK_grant); pthread_mutex_lock(&acl_cache->lock); MEM_ROOT *old_root= thd->mem_root; thd->mem_root= &memex; + grant_version++; - while ((Str = str_list++)) + while ((tmp_Str = str_list++)) { int error; GRANT_TABLE *grant_table; - if (Str->host.length > HOSTNAME_LENGTH || - Str->user.length > USERNAME_LENGTH) + if (!(Str= get_current_user(thd, tmp_Str))) { - my_error(ER_GRANT_WRONG_HOST_OR_USER,MYF(0)); - result= -1; + result= TRUE; continue; - } + } /* Create user if needed */ error=replace_user_table(thd, tables[0].table, *Str, - 0, revoke_grant, create_new_users); + 0, revoke_grant, create_new_users, + test(thd->variables.sql_mode & + MODE_NO_AUTO_CREATE_USER)); if (error) { - result= -1; // Remember error + result= TRUE; // Remember error continue; // Add next user } + db_name= (table_list->view_db.length ? + table_list->view_db.str : + table_list->db); + table_name= (table_list->view_name.length ? 
+ table_list->view_name.str : + table_list->table_name); + /* Find/create cached table grant */ - grant_table= table_hash_search(Str->host.str,NullS,table_list->db, - Str->user.str, - table_list->real_name,1); + grant_table= table_hash_search(Str->host.str, NullS, db_name, + Str->user.str, table_name, 1); if (!grant_table) { if (revoke_grant) { my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0), - Str->user.str, Str->host.str, table_list->real_name); - result= -1; + Str->user.str, Str->host.str, table_list->table_name); + result= TRUE; continue; } - grant_table = new GRANT_TABLE (Str->host.str,table_list->db, - Str->user.str, - table_list->real_name, + grant_table = new GRANT_TABLE (Str->host.str, db_name, + Str->user.str, table_name, rights, column_priv); if (!grant_table) // end of memory { - result= -1; /* purecov: deadcode */ + result= TRUE; /* purecov: deadcode */ continue; /* purecov: deadcode */ } my_hash_insert(&column_priv_hash,(byte*) grant_table); @@ -2541,42 +2977,219 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, /* update table and columns */ - if (replace_table_table(thd,grant_table,tables[1].table,*Str, - table_list->db, - table_list->real_name, + if (replace_table_table(thd, grant_table, tables[1].table, *Str, + db_name, table_name, rights, column_priv, revoke_grant)) { /* Should only happen if table is crashed */ - result= -1; /* purecov: deadcode */ + result= TRUE; /* purecov: deadcode */ } else if (tables[2].table) { - if ((replace_column_table(grant_table,tables[2].table, *Str, + if ((replace_column_table(grant_table, tables[2].table, *Str, columns, - table_list->db, - table_list->real_name, + db_name, table_name, rights, revoke_grant))) { - result= -1; + result= TRUE; } } } grant_option=TRUE; thd->mem_root= old_root; pthread_mutex_unlock(&acl_cache->lock); + + if (!result) /* success */ + { + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + 
mysql_bin_log.write(&qinfo); + } + } + rw_unlock(&LOCK_grant); - if (!result) + + if (!result) /* success */ + send_ok(thd); + + /* Tables are automatically closed */ + DBUG_RETURN(result); +} + + +/* + Store routine level grants in the privilege tables + + SYNOPSIS + mysql_routine_grant() + thd Thread handle + table_list List of routines to give grant + is_proc true indicates routine list are procedures + user_list List of users to give grant + rights Table level grant + revoke_grant Set to 1 if this is a REVOKE command + + RETURN + 0 ok + 1 error +*/ + +bool mysql_routine_grant(THD *thd, TABLE_LIST *table_list, bool is_proc, + List <LEX_USER> &user_list, ulong rights, + bool revoke_grant, bool no_error) +{ + List_iterator <LEX_USER> str_list (user_list); + LEX_USER *Str, *tmp_Str; + TABLE_LIST tables[2]; + bool create_new_users=0, result=0; + char *db_name, *table_name; + DBUG_ENTER("mysql_routine_grant"); + + if (!initialized) + { + if (!no_error) + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), + "--skip-grant-tables"); + DBUG_RETURN(TRUE); + } + if (rights & ~PROC_ACLS) + { + if (!no_error) + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, ER(ER_ILLEGAL_GRANT_FOR_TABLE), + MYF(0)); + DBUG_RETURN(TRUE); + } + + if (!revoke_grant) + { + if (sp_exist_routines(thd, table_list, is_proc, no_error)<0) + DBUG_RETURN(TRUE); + } + + /* open the mysql.user and mysql.procs_priv tables */ + + bzero((char*) &tables,sizeof(tables)); + tables[0].alias=tables[0].table_name= (char*) "user"; + tables[1].alias=tables[1].table_name= (char*) "procs_priv"; + tables[0].next_local= tables[0].next_global= tables+1; + tables[0].lock_type=tables[1].lock_type=TL_WRITE; + tables[0].db=tables[1].db=(char*) "mysql"; + +#ifdef HAVE_REPLICATION + /* + GRANT and REVOKE are applied the slave in/exclusion rules as they are + some kind of updates to the mysql.% tables. 
+ */ + if (thd->slave_thread && table_rules_on) + { + /* + The tables must be marked "updating" so that tables_ok() takes them into + account in tests. + */ + tables[0].updating= tables[1].updating= 1; + if (!tables_ok(thd, tables)) + DBUG_RETURN(FALSE); + } +#endif + + if (simple_open_n_lock_tables(thd,tables)) + { // Should never happen + close_thread_tables(thd); + DBUG_RETURN(TRUE); + } + + if (!revoke_grant) + create_new_users= test_if_create_new_users(thd); + rw_wrlock(&LOCK_grant); + pthread_mutex_lock(&acl_cache->lock); + MEM_ROOT *old_root= thd->mem_root; + thd->mem_root= &memex; + + DBUG_PRINT("info",("now time to iterate and add users")); + + while ((tmp_Str= str_list++)) + { + int error; + GRANT_NAME *grant_name; + if (!(Str= get_current_user(thd, tmp_Str))) + { + result= TRUE; + continue; + } + /* Create user if needed */ + error=replace_user_table(thd, tables[0].table, *Str, + 0, revoke_grant, create_new_users, + test(thd->variables.sql_mode & + MODE_NO_AUTO_CREATE_USER)); + if (error) + { + result= TRUE; // Remember error + continue; // Add next user + } + + db_name= table_list->db; + table_name= table_list->table_name; + + grant_name= routine_hash_search(Str->host.str, NullS, db_name, + Str->user.str, table_name, is_proc, 1); + if (!grant_name) + { + if (revoke_grant) + { + if (!no_error) + my_error(ER_NONEXISTING_PROC_GRANT, MYF(0), + Str->user.str, Str->host.str, table_name); + result= TRUE; + continue; + } + grant_name= new GRANT_NAME(Str->host.str, db_name, + Str->user.str, table_name, + rights); + if (!grant_name) + { + result= TRUE; + continue; + } + my_hash_insert(is_proc ? 
&proc_priv_hash : &func_priv_hash,(byte*) grant_name); + } + + if (replace_routine_table(thd, grant_name, tables[1].table, *Str, + db_name, table_name, is_proc, rights, revoke_grant)) + { + result= TRUE; + continue; + } + } + grant_option=TRUE; + thd->mem_root= old_root; + pthread_mutex_unlock(&acl_cache->lock); + if (!result && !no_error) + { + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + } + + rw_unlock(&LOCK_grant); + + if (!result && !no_error) send_ok(thd); + /* Tables are automatically closed */ DBUG_RETURN(result); } -int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, - ulong rights, bool revoke_grant) +bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, + ulong rights, bool revoke_grant) { List_iterator <LEX_USER> str_list (list); - LEX_USER *Str; + LEX_USER *Str, *tmp_Str; char tmp_db[NAME_LEN+1]; bool create_new_users=0; TABLE_LIST tables[2]; @@ -2585,7 +3198,7 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, { my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); /* purecov: tested */ - DBUG_RETURN(-1); /* purecov: tested */ + DBUG_RETURN(TRUE); /* purecov: tested */ } if (lower_case_table_names && db) @@ -2597,13 +3210,11 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, /* open the mysql.user and mysql.db tables */ bzero((char*) &tables,sizeof(tables)); - tables[0].alias=tables[0].real_name=(char*) "user"; - tables[1].alias=tables[1].real_name=(char*) "db"; - tables[0].next=tables+1; - tables[1].next=0; + tables[0].alias=tables[0].table_name=(char*) "user"; + tables[1].alias=tables[1].table_name=(char*) "db"; + tables[0].next_local= tables[0].next_global= tables+1; tables[0].lock_type=tables[1].lock_type=TL_WRITE; tables[0].db=tables[1].db=(char*) "mysql"; - tables[0].table=tables[1].table=0; #ifdef HAVE_REPLICATION /* @@ -2617,15 +3228,15 @@ int 
mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, account in tests. */ tables[0].updating= tables[1].updating= 1; - if (!tables_ok(0, tables)) - DBUG_RETURN(0); + if (!tables_ok(thd, tables)) + DBUG_RETURN(FALSE); } #endif if (simple_open_n_lock_tables(thd,tables)) { // This should never happen close_thread_tables(thd); /* purecov: deadcode */ - DBUG_RETURN(-1); /* purecov: deadcode */ + DBUG_RETURN(TRUE); /* purecov: deadcode */ } if (!revoke_grant) @@ -2637,20 +3248,17 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, grant_version++; int result=0; - while ((Str = str_list++)) + while ((tmp_Str = str_list++)) { - if (Str->host.length > HOSTNAME_LENGTH || - Str->user.length > USERNAME_LENGTH) + if (!(Str= get_current_user(thd, tmp_Str))) { - my_error(ER_GRANT_WRONG_HOST_OR_USER,MYF(0)); - result= -1; + result= TRUE; continue; } - if ((replace_user_table(thd, - tables[0].table, - *Str, - (!db ? rights : 0), revoke_grant, - create_new_users))) + if (replace_user_table(thd, tables[0].table, *Str, + (!db ? 
rights : 0), revoke_grant, create_new_users, + test(thd->variables.sql_mode & + MODE_NO_AUTO_CREATE_USER))) result= -1; else if (db) { @@ -2669,11 +3277,23 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, } } VOID(pthread_mutex_unlock(&acl_cache->lock)); + + if (!result) + { + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + } + rw_unlock(&LOCK_grant); close_thread_tables(thd); if (!result) send_ok(thd); + DBUG_RETURN(result); } @@ -2685,6 +3305,8 @@ void grant_free(void) DBUG_ENTER("grant_free"); grant_option = FALSE; hash_free(&column_priv_hash); + hash_free(&proc_priv_hash); + hash_free(&func_priv_hash); free_root(&memex,MYF(0)); DBUG_VOID_RETURN; } @@ -2710,6 +3332,7 @@ my_bool grant_init() if (!(thd= new THD)) DBUG_RETURN(1); /* purecov: deadcode */ + thd->thread_stack= (char*) &thd; thd->store_globals(); return_val= grant_reload(thd); delete thd; @@ -2738,7 +3361,7 @@ static my_bool grant_load(TABLE_LIST *tables) { MEM_ROOT *memex_ptr; my_bool return_val= 1; - TABLE *t_table, *c_table; + TABLE *t_table, *c_table, *p_table; bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE; MEM_ROOT **save_mem_root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); @@ -2748,53 +3371,119 @@ static my_bool grant_load(TABLE_LIST *tables) (void) hash_init(&column_priv_hash,system_charset_info, 0,0,0, (hash_get_key) get_grant_table, (hash_free_key) free_grant_table,0); + (void) hash_init(&proc_priv_hash,system_charset_info, + 0,0,0, (hash_get_key) get_grant_table, + 0,0); + (void) hash_init(&func_priv_hash,system_charset_info, + 0,0,0, (hash_get_key) get_grant_table, + 0,0); init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0); t_table = tables[0].table; c_table = tables[1].table; + p_table= tables[2].table; t_table->file->ha_index_init(0); - if (t_table->file->index_first(t_table->record[0])) + p_table->file->ha_index_init(0); + if 
(!t_table->file->index_first(t_table->record[0])) { - return_val= 0; - goto end_unlock; - } - grant_option= TRUE; + memex_ptr= &memex; + my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr); + do + { + GRANT_TABLE *mem_check; + if (!(mem_check=new GRANT_TABLE(t_table,c_table))) + { + /* This could only happen if we are out memory */ + grant_option= FALSE; + goto end_unlock; + } - memex_ptr= &memex; - my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr); - do + if (check_no_resolve) + { + if (hostname_requires_resolving(mem_check->host.hostname)) + { + sql_print_warning("'tables_priv' entry '%s %s@%s' " + "ignored in --skip-name-resolve mode.", + mem_check->tname, + mem_check->user ? mem_check->user : "", + mem_check->host.hostname ? + mem_check->host.hostname : ""); + continue; + } + } + + if (! mem_check->ok()) + delete mem_check; + else if (my_hash_insert(&column_priv_hash,(byte*) mem_check)) + { + delete mem_check; + grant_option= FALSE; + goto end_unlock; + } + } + while (!t_table->file->index_next(t_table->record[0])); + } + if (!p_table->file->index_first(p_table->record[0])) { - GRANT_TABLE *mem_check; - if (!(mem_check=new GRANT_TABLE(t_table,c_table))) + memex_ptr= &memex; + my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr); + do { - /* This could only happen if we are out memory */ - grant_option= FALSE; /* purecov: deadcode */ - goto end_unlock; - } + GRANT_NAME *mem_check; + HASH *hash; + if (!(mem_check=new GRANT_NAME(p_table))) + { + /* This could only happen if we are out memory */ + grant_option= FALSE; + goto end_unlock; + } - if (check_no_resolve) - { - if (hostname_requires_resolving(mem_check->host.hostname)) + if (check_no_resolve) { - sql_print_warning("'tables_priv' entry '%s %s@%s' " - "ignored in --skip-name-resolve mode.", - mem_check->tname, mem_check->user, - mem_check->host.hostname); + if (hostname_requires_resolving(mem_check->host.hostname)) + { + sql_print_warning("'procs_priv' entry '%s %s@%s' " + "ignored in --skip-name-resolve 
mode.", + mem_check->tname, mem_check->user, + mem_check->host.hostname ? + mem_check->host.hostname : ""); + continue; + } + } + if (p_table->field[4]->val_int() == TYPE_ENUM_PROCEDURE) + { + hash= &proc_priv_hash; + } + else + if (p_table->field[4]->val_int() == TYPE_ENUM_FUNCTION) + { + hash= &func_priv_hash; + } + else + { + sql_print_warning("'procs_priv' entry '%s' " + "ignored, bad routine type", + mem_check->tname); continue; } - } - if (mem_check->ok() && my_hash_insert(&column_priv_hash,(byte*) mem_check)) - { - grant_option= FALSE; - goto end_unlock; + mem_check->privs= fix_rights_for_procedure(mem_check->privs); + if (! mem_check->ok()) + delete mem_check; + else if (my_hash_insert(hash, (byte*) mem_check)) + { + delete mem_check; + grant_option= FALSE; + goto end_unlock; + } } + while (!p_table->file->index_next(p_table->record[0])); } - while (!t_table->file->index_next(t_table->record[0])); - + grant_option= TRUE; return_val=0; // Return ok end_unlock: t_table->file->ha_index_end(); + p_table->file->ha_index_end(); my_pthread_setspecific_ptr(THR_MALLOC, save_mem_root_ptr); DBUG_RETURN(return_val); } @@ -2820,8 +3509,8 @@ end_unlock: my_bool grant_reload(THD *thd) { - TABLE_LIST tables[2]; - HASH old_column_priv_hash; + TABLE_LIST tables[3]; + HASH old_column_priv_hash, old_proc_priv_hash, old_func_priv_hash; bool old_grant_option; MEM_ROOT old_mem; my_bool return_val= 1; @@ -2832,11 +3521,13 @@ my_bool grant_reload(THD *thd) DBUG_RETURN(0); bzero((char*) tables, sizeof(tables)); - tables[0].alias=tables[0].real_name= (char*) "tables_priv"; - tables[1].alias=tables[1].real_name= (char*) "columns_priv"; - tables[0].db=tables[1].db= (char *) "mysql"; - tables[0].next=tables+1; - tables[0].lock_type=tables[1].lock_type=TL_READ; + tables[0].alias= tables[0].table_name= (char*) "tables_priv"; + tables[1].alias= tables[1].table_name= (char*) "columns_priv"; + tables[2].alias= tables[2].table_name= (char*) "procs_priv"; + tables[0].db= tables[1].db= 
tables[2].db= (char *) "mysql"; + tables[0].next_local= tables[0].next_global= tables+1; + tables[1].next_local= tables[1].next_global= tables+2; + tables[0].lock_type= tables[1].lock_type= tables[2].lock_type= TL_READ; /* To avoid deadlocks we should obtain table locks before @@ -2848,6 +3539,8 @@ my_bool grant_reload(THD *thd) rw_wrlock(&LOCK_grant); grant_version++; old_column_priv_hash= column_priv_hash; + old_proc_priv_hash= proc_priv_hash; + old_func_priv_hash= func_priv_hash; old_grant_option= grant_option; old_mem= memex; @@ -2856,12 +3549,16 @@ my_bool grant_reload(THD *thd) DBUG_PRINT("error",("Reverting to old privileges")); grant_free(); /* purecov: deadcode */ column_priv_hash= old_column_priv_hash; /* purecov: deadcode */ + proc_priv_hash= old_proc_priv_hash; + func_priv_hash= old_func_priv_hash; grant_option= old_grant_option; /* purecov: deadcode */ memex= old_mem; /* purecov: deadcode */ } else { hash_free(&old_column_priv_hash); + hash_free(&old_proc_priv_hash); + hash_free(&old_func_priv_hash); free_root(&old_mem,MYF(0)); } rw_unlock(&LOCK_grant); @@ -2873,32 +3570,94 @@ end: /**************************************************************************** Check table level grants - All errors are written directly to the client if no_errors is given ! + + SYNOPSIS + bool check_grant() + thd Thread handler + want_access Bits of privileges user needs to have + tables List of tables to check. The user should have 'want_access' + to all tables in list. + show_table <> 0 if we are in show table. In this case it's enough to have + any privilege for the table + number Check at most this number of tables. + no_errors If 0 then we write an error. The error is sent directly to + the client + + RETURN + 0 ok + 1 Error: User did not have the requested privileges + + NOTE + This functions assumes that either number of tables to be inspected + by it is limited explicitly (i.e. 
is is not UINT_MAX) or table list + used and thd->lex->query_tables_own_last value correspond to each + other (the latter should be either 0 or point to next_global member + of one of elements of this table list). ****************************************************************************/ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, uint show_table, uint number, bool no_errors) { - TABLE_LIST *table; - char *user = thd->priv_user; + TABLE_LIST *table, *first_not_own_table= thd->lex->first_not_own_table(); + Security_context *sctx= thd->security_ctx; + uint i; + ulong orig_want_access= want_access; DBUG_ENTER("check_grant"); + DBUG_ASSERT(number > 0); - want_access &= ~thd->master_access; - if (!want_access) - DBUG_RETURN(0); // ok + /* + Walk through the list of tables that belong to the query and save the + requested access (orig_want_privilege) to be able to use it when + checking access rights to the underlying tables of a view. Our grant + system gradually eliminates checked bits from want_privilege and thus + after all checks are done we can no longer use it. + The check that first_not_own_table is not reached is for the case when + the given table list refers to the list for prelocking (contains tables + of other queries). For simple queries first_not_own_table is 0. + */ + for (i= 0, table= tables; + table != first_not_own_table && i < number; + table= table->next_global, i++) + { + /* Remove SHOW_VIEW_ACL, because it will be checked during making view */ + table->grant.orig_want_privilege= (want_access & ~SHOW_VIEW_ACL); + } rw_rdlock(&LOCK_grant); - for (table= tables; table && number--; table= table->next) + for (table= tables; + table && number-- && table != first_not_own_table; + table= table->next_global) { - if (!(~table->grant.privilege & want_access) || table->derived) + GRANT_TABLE *grant_table; + sctx = test(table->security_ctx) ? 
+ table->security_ctx : thd->security_ctx; + + want_access= orig_want_access; + want_access&= ~sctx->master_access; + if (!want_access) + continue; // ok + + if (!(~table->grant.privilege & want_access) || + table->derived || table->schema_table) { - table->grant.want_privilege=0; - continue; // Already checked + /* + It is subquery in the FROM clause. VIEW set table->derived after + table opening, but this function always called before table opening. + */ + if (!table->referencing_view) + { + /* + If it's a temporary table created for a subquery in the FROM + clause, or an INFORMATION_SCHEMA table, drop the request for + a privilege. + */ + table->grant.want_privilege= 0; + } + continue; } - GRANT_TABLE *grant_table = table_hash_search(thd->host,thd->ip, - table->db,user, - table->real_name,0); - if (!grant_table) + if (!(grant_table= table_hash_search(sctx->host, sctx->ip, + table->db, sctx->priv_user, + table->table_name,0))) { want_access &= ~table->grant.privilege; goto err; // No grants @@ -2930,147 +3689,224 @@ err: { char command[128]; get_privilege_desc(command, sizeof(command), want_access); - net_printf(thd,ER_TABLEACCESS_DENIED_ERROR, - command, - thd->priv_user, - thd->host_or_ip, - table ? table->real_name : "unknown"); + my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), + command, + sctx->priv_user, + sctx->host_or_ip, + table ? 
table->table_name : "unknown"); } DBUG_RETURN(1); } -bool check_grant_column(THD *thd,TABLE *table, const char *name, - uint length, uint show_tables) +/* + Check column rights in given security context + + SYNOPSIS + check_grant_column() + thd thread handler + grant grant information structure + db_name db name + table_name table name + name column name + length column name length + sctx security context + + RETURN + FALSE OK + TRUE access denied +*/ + +bool check_grant_column(THD *thd, GRANT_INFO *grant, + const char *db_name, const char *table_name, + const char *name, uint length, Security_context *sctx) { GRANT_TABLE *grant_table; GRANT_COLUMN *grant_column; + ulong want_access= grant->want_privilege & ~grant->privilege; + DBUG_ENTER("check_grant_column"); + DBUG_PRINT("enter", ("table: %s want_access: %lu", table_name, want_access)); - ulong want_access=table->grant.want_privilege; if (!want_access) - return 0; // Already checked + DBUG_RETURN(0); // Already checked rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ - if (table->grant.version != grant_version) + if (grant->version != grant_version) { - table->grant.grant_table= - table_hash_search(thd->host, thd->ip, table->table_cache_key, - thd->priv_user, - table->real_name, 0); /* purecov: inspected */ - table->grant.version=grant_version; /* purecov: inspected */ + grant->grant_table= + table_hash_search(sctx->host, sctx->ip, db_name, + sctx->priv_user, + table_name, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ } - if (!(grant_table=table->grant.grant_table)) + if (!(grant_table= grant->grant_table)) goto err; /* purecov: deadcode */ grant_column=column_hash_search(grant_table, name, length); if (grant_column && !(~grant_column->rights & want_access)) { rw_unlock(&LOCK_grant); - return 0; - } -#ifdef NOT_USED - if (show_tables && (grant_column || table->grant.privilege & COL_ACLS)) - { - rw_unlock(&LOCK_grant); /* purecov: deadcode */ - 
return 0; /* purecov: deadcode */ + DBUG_RETURN(0); } -#endif - /* We must use my_printf_error() here! */ err: rw_unlock(&LOCK_grant); - if (!show_tables) + char command[128]; + get_privilege_desc(command, sizeof(command), want_access); + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), + command, + sctx->priv_user, + sctx->host_or_ip, + name, + table_name); + DBUG_RETURN(1); +} + + +/* + Check the access right to a column depending on the type of table. + + SYNOPSIS + check_column_grant_in_table_ref() + thd thread handler + table_ref table reference where to check the field + name name of field to check + length length of name + + DESCRIPTION + Check the access rights to a column depending on the type of table + reference where the column is checked. The function provides a + generic interface to check column access rights that hides the + heterogeneity of the column representation - whether it is a view + or a stored table colum. + + RETURN + FALSE OK + TRUE access denied +*/ + +bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref, + const char *name, uint length) +{ + GRANT_INFO *grant; + const char *db_name; + const char *table_name; + Security_context *sctx= test(table_ref->security_ctx) ? + table_ref->security_ctx : thd->security_ctx; + + if (table_ref->view || table_ref->field_translation) { - char command[128]; - get_privilege_desc(command, sizeof(command), want_access); - my_printf_error(ER_COLUMNACCESS_DENIED_ERROR, - ER(ER_COLUMNACCESS_DENIED_ERROR), - MYF(0), - command, - thd->priv_user, - thd->host_or_ip, - name, - table ? table->real_name : "unknown"); + /* View or derived information schema table. 
*/ + ulong view_privs; + grant= &(table_ref->grant); + db_name= table_ref->view_db.str; + table_name= table_ref->view_name.str; + if (table_ref->belong_to_view && + (thd->lex->sql_command == SQLCOM_SHOW_FIELDS || + thd->lex->sql_command == SQLCOM_SHOW_CREATE)) + { + view_privs= get_column_grant(thd, grant, db_name, table_name, name); + if (view_privs & VIEW_ANY_ACL) + { + table_ref->belong_to_view->allowed_show= TRUE; + return FALSE; + } + table_ref->belong_to_view->allowed_show= FALSE; + my_message(ER_VIEW_NO_EXPLAIN, ER(ER_VIEW_NO_EXPLAIN), MYF(0)); + return TRUE; + } } - return 1; + else + { + /* Normal or temporary table. */ + TABLE *table= table_ref->table; + grant= &(table->grant); + db_name= table->s->db; + table_name= table->s->table_name; + } + + if (grant->want_privilege) + return check_grant_column(thd, grant, db_name, table_name, name, + length, sctx); + else + return FALSE; + } -bool check_grant_all_columns(THD *thd, ulong want_access, TABLE *table) +bool check_grant_all_columns(THD *thd, ulong want_access, GRANT_INFO *grant, + const char* db_name, const char *table_name, + Field_iterator *fields) { + Security_context *sctx= thd->security_ctx; GRANT_TABLE *grant_table; GRANT_COLUMN *grant_column; - Field *field=0,**ptr; - want_access &= ~table->grant.privilege; + want_access &= ~grant->privilege; if (!want_access) return 0; // Already checked if (!grant_option) - { - field= table->field[0]; // To give a meaningful error message goto err2; - } rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ - if (table->grant.version != grant_version) + if (grant->version != grant_version) { - table->grant.grant_table= - table_hash_search(thd->host, thd->ip, table->table_cache_key, - thd->priv_user, - table->real_name,0); /* purecov: inspected */ - table->grant.version=grant_version; /* purecov: inspected */ + grant->grant_table= + table_hash_search(sctx->host, sctx->ip, db_name, + sctx->priv_user, + table_name, 0); /* purecov: inspected */ 
+ grant->version= grant_version; /* purecov: inspected */ } /* The following should always be true */ - if (!(grant_table=table->grant.grant_table)) + if (!(grant_table= grant->grant_table)) goto err; /* purecov: inspected */ - for (ptr=table->field; (field= *ptr) ; ptr++) + for (; !fields->end_of_fields(); fields->next()) { - grant_column=column_hash_search(grant_table, field->field_name, - (uint) strlen(field->field_name)); + const char *field_name= fields->name(); + grant_column= column_hash_search(grant_table, field_name, + (uint) strlen(field_name)); if (!grant_column || (~grant_column->rights & want_access)) goto err; } rw_unlock(&LOCK_grant); return 0; - /* We must use my_printf_error() here! */ err: rw_unlock(&LOCK_grant); err2: char command[128]; get_privilege_desc(command, sizeof(command), want_access); - my_printf_error(ER_COLUMNACCESS_DENIED_ERROR, - ER(ER_COLUMNACCESS_DENIED_ERROR), - MYF(0), - command, - thd->priv_user, - thd->host_or_ip, - field ? field->field_name : "unknown", - table->real_name); + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), + command, + sctx->priv_user, + sctx->host_or_ip, + fields->name(), + table_name); return 1; } /* Check if a user has the right to access a database - Access is accepted if he has a grant for any table in the database + Access is accepted if he has a grant for any table/routine in the database Return 1 if access is denied */ bool check_grant_db(THD *thd,const char *db) { + Security_context *sctx= thd->security_ctx; char helping [NAME_LEN+USERNAME_LENGTH+2]; uint len; - bool error=1; + bool error= 1; - len = (uint) (strmov(strmov(helping,thd->priv_user)+1,db)-helping)+ 1; + len= (uint) (strmov(strmov(helping, sctx->priv_user) + 1, db) - helping) + 1; rw_rdlock(&LOCK_grant); for (uint idx=0 ; idx < column_priv_hash.records ; idx++) @@ -3079,7 +3915,7 @@ bool check_grant_db(THD *thd,const char *db) idx); if (len < grant_table->key_length && !memcmp(grant_table->hash_key,helping,len) && - 
compare_hostname(&grant_table->host, thd->host, thd->ip)) + compare_hostname(&grant_table->host, sctx->host, sctx->ip)) { error=0; // Found match break; @@ -3089,6 +3925,109 @@ bool check_grant_db(THD *thd,const char *db) return error; } + +/**************************************************************************** + Check routine level grants + + SYNPOSIS + bool check_grant_routine() + thd Thread handler + want_access Bits of privileges user needs to have + procs List of routines to check. The user should have 'want_access' + is_proc True if the list is all procedures, else functions + no_errors If 0 then we write an error. The error is sent directly to + the client + + RETURN + 0 ok + 1 Error: User did not have the requested privielges +****************************************************************************/ + +bool check_grant_routine(THD *thd, ulong want_access, + TABLE_LIST *procs, bool is_proc, bool no_errors) +{ + TABLE_LIST *table; + Security_context *sctx= thd->security_ctx; + char *user= sctx->priv_user; + char *host= sctx->priv_host; + DBUG_ENTER("check_grant_routine"); + + want_access&= ~sctx->master_access; + if (!want_access) + DBUG_RETURN(0); // ok + + rw_rdlock(&LOCK_grant); + for (table= procs; table; table= table->next_global) + { + GRANT_NAME *grant_proc; + if ((grant_proc= routine_hash_search(host, sctx->ip, table->db, user, + table->table_name, is_proc, 0))) + table->grant.privilege|= grant_proc->privs; + + if (want_access & ~table->grant.privilege) + { + want_access &= ~table->grant.privilege; + goto err; + } + } + rw_unlock(&LOCK_grant); + DBUG_RETURN(0); +err: + rw_unlock(&LOCK_grant); + if (!no_errors) + { + char buff[1024]; + const char *command=""; + if (table) + strxmov(buff, table->db, ".", table->table_name, NullS); + if (want_access & EXECUTE_ACL) + command= "execute"; + else if (want_access & ALTER_PROC_ACL) + command= "alter routine"; + else if (want_access & GRANT_ACL) + command= "grant"; + 
my_error(ER_PROCACCESS_DENIED_ERROR, MYF(0), + command, user, host, table ? buff : "unknown"); + } + DBUG_RETURN(1); +} + + +/* + Check if routine has any of the + routine level grants + + SYNPOSIS + bool check_routine_level_acl() + thd Thread handler + db Database name + name Routine name + + RETURN + 0 Ok + 1 error +*/ + +bool check_routine_level_acl(THD *thd, const char *db, const char *name, + bool is_proc) +{ + bool no_routine_acl= 1; + if (grant_option) + { + GRANT_NAME *grant_proc; + Security_context *sctx= thd->security_ctx; + rw_rdlock(&LOCK_grant); + if ((grant_proc= routine_hash_search(sctx->priv_host, + sctx->ip, db, + sctx->priv_user, + name, is_proc, 0))) + no_routine_acl= !(grant_proc->privs & SHOW_PROC_ACLS); + rw_unlock(&LOCK_grant); + } + return no_routine_acl; +} + + /***************************************************************************** Functions to retrieve the grant for a table/column (for SHOW functions) *****************************************************************************/ @@ -3096,7 +4035,7 @@ bool check_grant_db(THD *thd,const char *db) ulong get_table_grant(THD *thd, TABLE_LIST *table) { ulong privilege; - char *user = thd->priv_user; + Security_context *sctx= thd->security_ctx; const char *db = table->db ? table->db : thd->db; GRANT_TABLE *grant_table; @@ -3104,8 +4043,8 @@ ulong get_table_grant(THD *thd, TABLE_LIST *table) #ifdef EMBEDDED_LIBRARY grant_table= NULL; #else - grant_table= table_hash_search(thd->host, thd->ip, db, user, - table->real_name, 0); + grant_table= table_hash_search(sctx->host, sctx->ip, db, sctx->priv_user, + table->table_name, 0); #endif table->grant.grant_table=grant_table; // Remember for column test table->grant.version=grant_version; @@ -3117,7 +4056,27 @@ ulong get_table_grant(THD *thd, TABLE_LIST *table) } -ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field) +/* + Determine the access priviliges for a field. 
+ + SYNOPSIS + get_column_grant() + thd thread handler + grant grants table descriptor + db_name name of database that the field belongs to + table_name name of table that the field belongs to + field_name name of field + + DESCRIPTION + The procedure may also modify: grant->grant_table and grant->version. + + RETURN + The access priviliges for the field db_name.table_name.field_name +*/ + +ulong get_column_grant(THD *thd, GRANT_INFO *grant, + const char *db_name, const char *table_name, + const char *field_name) { GRANT_TABLE *grant_table; GRANT_COLUMN *grant_column; @@ -3125,30 +4084,32 @@ ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field) rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ - if (table->grant.version != grant_version) + if (grant->version != grant_version) { - table->grant.grant_table= - table_hash_search(thd->host, thd->ip, table->db, - thd->priv_user, - table->real_name,0); /* purecov: inspected */ - table->grant.version=grant_version; /* purecov: inspected */ + Security_context *sctx= thd->security_ctx; + grant->grant_table= + table_hash_search(sctx->host, sctx->ip, + db_name, sctx->priv_user, + table_name, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ } - if (!(grant_table=table->grant.grant_table)) - priv=table->grant.privilege; + if (!(grant_table= grant->grant_table)) + priv= grant->privilege; else { - grant_column=column_hash_search(grant_table, field->field_name, - (uint) strlen(field->field_name)); + grant_column= column_hash_search(grant_table, field_name, + (uint) strlen(field_name)); if (!grant_column) - priv=table->grant.privilege; + priv= (grant->privilege | grant_table->privs); else - priv=table->grant.privilege | grant_column->rights; + priv= (grant->privilege | grant_table->privs | grant_column->rights); } rw_unlock(&LOCK_grant); return priv; } + /* Help function for mysql_show_grants */ static void add_user_option(String *grant, ulong value, const 
char *name) @@ -3166,18 +4127,26 @@ static void add_user_option(String *grant, ulong value, const char *name) static const char *command_array[]= { - "SELECT", "INSERT","UPDATE","DELETE","CREATE", "DROP", "RELOAD","SHUTDOWN", - "PROCESS","FILE","GRANT","REFERENCES","INDEX", "ALTER", "SHOW DATABASES", - "SUPER", "CREATE TEMPORARY TABLES", "LOCK TABLES", "EXECUTE", - "REPLICATION SLAVE", "REPLICATION CLIENT", + "SELECT", "INSERT", "UPDATE", "DELETE", "CREATE", "DROP", "RELOAD", + "SHUTDOWN", "PROCESS","FILE", "GRANT", "REFERENCES", "INDEX", + "ALTER", "SHOW DATABASES", "SUPER", "CREATE TEMPORARY TABLES", + "LOCK TABLES", "EXECUTE", "REPLICATION SLAVE", "REPLICATION CLIENT", + "CREATE VIEW", "SHOW VIEW", "CREATE ROUTINE", "ALTER ROUTINE", + "CREATE USER" }; static uint command_lengths[]= { - 6,6,6,6,6,4,6,8,7,4,5,10,5,5,14,5,23,11,7,17,18 + 6, 6, 6, 6, 6, 4, 6, 8, 7, 4, 5, 10, 5, 5, 14, 5, 23, 11, 7, 17, 18, 11, 9, + 14, 13, 11 }; +static int show_routine_grants(THD *thd, LEX_USER *lex_user, HASH *hash, + const char *type, int typelen, + char *buff, int buffsize); + + /* SHOW GRANTS; Send grants for a user to the client @@ -3185,7 +4154,7 @@ static uint command_lengths[]= Send to client grant-like strings depicting user@host privileges */ -int mysql_show_grants(THD *thd,LEX_USER *lex_user) +bool mysql_show_grants(THD *thd,LEX_USER *lex_user) { ulong want_access; uint counter,index; @@ -3200,44 +4169,21 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) if (!initialized) { my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); - DBUG_RETURN(-1); - } - - if (!lex_user->host.str) - { - lex_user->host.str= (char*) "%"; - lex_user->host.length=1; - } - if (lex_user->host.length > HOSTNAME_LENGTH || - lex_user->user.length > USERNAME_LENGTH) - { - my_error(ER_GRANT_WRONG_HOST_OR_USER,MYF(0)); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } rw_rdlock(&LOCK_grant); VOID(pthread_mutex_lock(&acl_cache->lock)); - for (counter=0 ; counter < acl_users.elements ; 
counter++) - { - const char *user,*host; - acl_user=dynamic_element(&acl_users,counter,ACL_USER*); - if (!(user=acl_user->user)) - user= ""; - if (!(host=acl_user->host.hostname)) - host= ""; - if (!strcmp(lex_user->user.str,user) && - !my_strcasecmp(system_charset_info, lex_user->host.str, host)) - break; - } - if (counter == acl_users.elements) + acl_user= find_acl_user(lex_user->host.str, lex_user->user.str, TRUE); + if (!acl_user) { VOID(pthread_mutex_unlock(&acl_cache->lock)); rw_unlock(&LOCK_grant); my_error(ER_NONEXISTING_GRANT, MYF(0), lex_user->user.str, lex_user->host.str); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } Item_string *field=new Item_string("",0,&my_charset_latin1); @@ -3247,25 +4193,26 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) strxmov(buff,"Grants for ",lex_user->user.str,"@", lex_user->host.str,NullS); field_list.push_back(field); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) { VOID(pthread_mutex_unlock(&acl_cache->lock)); rw_unlock(&LOCK_grant); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } /* Add first global access grants */ { String global(buff,sizeof(buff),system_charset_info); global.length(0); - global.append("GRANT ",6); + global.append(STRING_WITH_LEN("GRANT ")); want_access= acl_user->access; if (test_all_bits(want_access, (GLOBAL_ACLS & ~ GRANT_ACL))) - global.append("ALL PRIVILEGES",14); + global.append(STRING_WITH_LEN("ALL PRIVILEGES")); else if (!(want_access & ~GRANT_ACL)) - global.append("USAGE",5); + global.append(STRING_WITH_LEN("USAGE")); else { bool found=0; @@ -3275,17 +4222,18 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) if (test_access & j) { if (found) - global.append(", ",2); + global.append(STRING_WITH_LEN(", ")); found=1; global.append(command_array[counter],command_lengths[counter]); } } } - global.append (" ON *.* TO '",12); + global.append (STRING_WITH_LEN(" ON *.* TO '")); global.append(lex_user->user.str, 
lex_user->user.length, system_charset_info); - global.append ("'@'",3); - global.append(lex_user->host.str,lex_user->host.length); + global.append (STRING_WITH_LEN("'@'")); + global.append(lex_user->host.str,lex_user->host.length, + system_charset_info); global.append ('\''); if (acl_user->salt_len) { @@ -3294,23 +4242,23 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) make_password_from_salt(passwd_buff, acl_user->salt); else make_password_from_salt_323(passwd_buff, (ulong *) acl_user->salt); - global.append(" IDENTIFIED BY PASSWORD '",25); + global.append(STRING_WITH_LEN(" IDENTIFIED BY PASSWORD '")); global.append(passwd_buff); global.append('\''); } /* "show grants" SSL related stuff */ if (acl_user->ssl_type == SSL_TYPE_ANY) - global.append(" REQUIRE SSL",12); + global.append(STRING_WITH_LEN(" REQUIRE SSL")); else if (acl_user->ssl_type == SSL_TYPE_X509) - global.append(" REQUIRE X509",13); + global.append(STRING_WITH_LEN(" REQUIRE X509")); else if (acl_user->ssl_type == SSL_TYPE_SPECIFIED) { int ssl_options = 0; - global.append(" REQUIRE ",9); + global.append(STRING_WITH_LEN(" REQUIRE ")); if (acl_user->x509_issuer) { ssl_options++; - global.append("ISSUER \'",8); + global.append(STRING_WITH_LEN("ISSUER \'")); global.append(acl_user->x509_issuer,strlen(acl_user->x509_issuer)); global.append('\''); } @@ -3318,32 +4266,38 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) { if (ssl_options++) global.append(' '); - global.append("SUBJECT \'",9); - global.append(acl_user->x509_subject,strlen(acl_user->x509_subject)); + global.append(STRING_WITH_LEN("SUBJECT \'")); + global.append(acl_user->x509_subject,strlen(acl_user->x509_subject), + system_charset_info); global.append('\''); } if (acl_user->ssl_cipher) { if (ssl_options++) global.append(' '); - global.append("CIPHER '",8); - global.append(acl_user->ssl_cipher,strlen(acl_user->ssl_cipher)); + global.append(STRING_WITH_LEN("CIPHER '")); + global.append(acl_user->ssl_cipher,strlen(acl_user->ssl_cipher), 
+ system_charset_info); global.append('\''); } } if ((want_access & GRANT_ACL) || - (acl_user->user_resource.questions | acl_user->user_resource.updates | - acl_user->user_resource.connections)) + (acl_user->user_resource.questions || + acl_user->user_resource.updates || + acl_user->user_resource.conn_per_hour || + acl_user->user_resource.user_conn)) { - global.append(" WITH",5); + global.append(STRING_WITH_LEN(" WITH")); if (want_access & GRANT_ACL) - global.append(" GRANT OPTION",13); + global.append(STRING_WITH_LEN(" GRANT OPTION")); add_user_option(&global, acl_user->user_resource.questions, "MAX_QUERIES_PER_HOUR"); add_user_option(&global, acl_user->user_resource.updates, "MAX_UPDATES_PER_HOUR"); - add_user_option(&global, acl_user->user_resource.connections, + add_user_option(&global, acl_user->user_resource.conn_per_hour, "MAX_CONNECTIONS_PER_HOUR"); + add_user_option(&global, acl_user->user_resource.user_conn, + "MAX_USER_CONNECTIONS"); } protocol->prepare_for_resend(); protocol->store(global.ptr(),global.length(),global.charset()); @@ -3373,12 +4327,12 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) { String db(buff,sizeof(buff),system_charset_info); db.length(0); - db.append("GRANT ",6); + db.append(STRING_WITH_LEN("GRANT ")); if (test_all_bits(want_access,(DB_ACLS & ~GRANT_ACL))) - db.append("ALL PRIVILEGES",14); + db.append(STRING_WITH_LEN("ALL PRIVILEGES")); else if (!(want_access & ~GRANT_ACL)) - db.append("USAGE",5); + db.append(STRING_WITH_LEN("USAGE")); else { int found=0, cnt; @@ -3388,22 +4342,23 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) if (test_access & j) { if (found) - db.append(", ",2); + db.append(STRING_WITH_LEN(", ")); found = 1; db.append(command_array[cnt],command_lengths[cnt]); } } } - db.append (" ON ",4); + db.append (STRING_WITH_LEN(" ON ")); append_identifier(thd, &db, acl_db->db, strlen(acl_db->db)); - db.append (".* TO '",7); + db.append (STRING_WITH_LEN(".* TO '")); db.append(lex_user->user.str, 
lex_user->user.length, system_charset_info); - db.append ("'@'",3); - db.append(lex_user->host.str, lex_user->host.length); + db.append (STRING_WITH_LEN("'@'")); + db.append(lex_user->host.str, lex_user->host.length, + system_charset_info); db.append ('\''); if (want_access & GRANT_ACL) - db.append(" WITH GRANT OPTION",18); + db.append(STRING_WITH_LEN(" WITH GRANT OPTION")); protocol->prepare_for_resend(); protocol->store(db.ptr(),db.length(),db.charset()); if (protocol->write()) @@ -3418,16 +4373,17 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) /* Add table & column access */ for (index=0 ; index < column_priv_hash.records ; index++) { - const char *user; + const char *user, *host; GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash, index); if (!(user=grant_table->user)) user= ""; + if (!(host= grant_table->host.hostname)) + host= ""; if (!strcmp(lex_user->user.str,user) && - !my_strcasecmp(system_charset_info, lex_user->host.str, - grant_table->host.hostname)) + !my_strcasecmp(system_charset_info, lex_user->host.str, host)) { ulong table_access= grant_table->privs; if ((table_access | grant_table->cols) != 0) @@ -3436,12 +4392,12 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) ulong test_access= (table_access | grant_table->cols) & ~GRANT_ACL; global.length(0); - global.append("GRANT ",6); + global.append(STRING_WITH_LEN("GRANT ")); if (test_all_bits(table_access, (TABLE_ACLS & ~GRANT_ACL))) - global.append("ALL PRIVILEGES",14); + global.append(STRING_WITH_LEN("ALL PRIVILEGES")); else if (!test_access) - global.append("USAGE",5); + global.append(STRING_WITH_LEN("USAGE")); else { /* Add specific column access */ @@ -3453,7 +4409,7 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) if (test_access & j) { if (found) - global.append(", ",2); + global.append(STRING_WITH_LEN(", ")); found= 1; global.append(command_array[counter],command_lengths[counter]); @@ -3477,14 +4433,14 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) */ 
if (table_access & j) { - global.append(", ", 2); + global.append(STRING_WITH_LEN(", ")); global.append(command_array[counter], command_lengths[counter]); } - global.append(" (",2); + global.append(STRING_WITH_LEN(" (")); } else - global.append(", ",2); + global.append(STRING_WITH_LEN(", ")); global.append(grant_column->column, grant_column->key_length, system_charset_info); @@ -3496,20 +4452,21 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) } } } - global.append(" ON ",4); + global.append(STRING_WITH_LEN(" ON ")); append_identifier(thd, &global, grant_table->db, strlen(grant_table->db)); global.append('.'); append_identifier(thd, &global, grant_table->tname, strlen(grant_table->tname)); - global.append(" TO '",5); + global.append(STRING_WITH_LEN(" TO '")); global.append(lex_user->user.str, lex_user->user.length, system_charset_info); - global.append("'@'",3); - global.append(lex_user->host.str,lex_user->host.length); + global.append(STRING_WITH_LEN("'@'")); + global.append(lex_user->host.str,lex_user->host.length, + system_charset_info); global.append('\''); if (table_access & GRANT_ACL) - global.append(" WITH GRANT OPTION",18); + global.append(STRING_WITH_LEN(" WITH GRANT OPTION")); protocol->prepare_for_resend(); protocol->store(global.ptr(),global.length(),global.charset()); if (protocol->write()) @@ -3520,6 +4477,21 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) } } } + + if (show_routine_grants(thd, lex_user, &proc_priv_hash, + STRING_WITH_LEN("PROCEDURE"), buff, sizeof(buff))) + { + error= -1; + goto end; + } + + if (show_routine_grants(thd, lex_user, &func_priv_hash, + STRING_WITH_LEN("FUNCTION"), buff, sizeof(buff))) + { + error= -1; + goto end; + } + end: VOID(pthread_mutex_unlock(&acl_cache->lock)); rw_unlock(&LOCK_grant); @@ -3528,6 +4500,84 @@ end: DBUG_RETURN(error); } +static int show_routine_grants(THD* thd, LEX_USER *lex_user, HASH *hash, + const char *type, int typelen, + char *buff, int buffsize) +{ + uint counter, index; + int 
error= 0; + Protocol *protocol= thd->protocol; + /* Add routine access */ + for (index=0 ; index < hash->records ; index++) + { + const char *user, *host; + GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(hash, index); + + if (!(user=grant_proc->user)) + user= ""; + if (!(host= grant_proc->host.hostname)) + host= ""; + + if (!strcmp(lex_user->user.str,user) && + !my_strcasecmp(system_charset_info, lex_user->host.str, host)) + { + ulong proc_access= grant_proc->privs; + if (proc_access != 0) + { + String global(buff, buffsize, system_charset_info); + ulong test_access= proc_access & ~GRANT_ACL; + + global.length(0); + global.append(STRING_WITH_LEN("GRANT ")); + + if (!test_access) + global.append(STRING_WITH_LEN("USAGE")); + else + { + /* Add specific procedure access */ + int found= 0; + ulong j; + + for (counter= 0, j= SELECT_ACL; j <= PROC_ACLS; counter++, j<<= 1) + { + if (test_access & j) + { + if (found) + global.append(STRING_WITH_LEN(", ")); + found= 1; + global.append(command_array[counter],command_lengths[counter]); + } + } + } + global.append(STRING_WITH_LEN(" ON ")); + global.append(type,typelen); + global.append(' '); + append_identifier(thd, &global, grant_proc->db, + strlen(grant_proc->db)); + global.append('.'); + append_identifier(thd, &global, grant_proc->tname, + strlen(grant_proc->tname)); + global.append(STRING_WITH_LEN(" TO '")); + global.append(lex_user->user.str, lex_user->user.length, + system_charset_info); + global.append(STRING_WITH_LEN("'@'")); + global.append(lex_user->host.str,lex_user->host.length, + system_charset_info); + global.append('\''); + if (proc_access & GRANT_ACL) + global.append(STRING_WITH_LEN(" WITH GRANT OPTION")); + protocol->prepare_for_resend(); + protocol->store(global.ptr(),global.length(),global.charset()); + if (protocol->write()) + { + error= -1; + break; + } + } + } + } + return error; +} /* Make a clear-text version of the requested privilege. 
@@ -3571,28 +4621,53 @@ void get_mqh(const char *user, const char *host, USER_CONN *uc) pthread_mutex_unlock(&acl_cache->lock); } +/* + Open the grant tables. + + SYNOPSIS + open_grant_tables() + thd The current thread. + tables (out) The 4 elements array for the opened tables. + + DESCRIPTION + Tables are numbered as follows: + 0 user + 1 db + 2 tables_priv + 3 columns_priv + + RETURN + 1 Skip GRANT handling during replication. + 0 OK. + < 0 Error. +*/ + +#define GRANT_TABLES 5 int open_grant_tables(THD *thd, TABLE_LIST *tables) { DBUG_ENTER("open_grant_tables"); if (!initialized) { - net_printf(thd,ER_OPTION_PREVENTS_STATEMENT, "--skip-grant-tables"); + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); DBUG_RETURN(-1); } - bzero((char*) tables, 4*sizeof(*tables)); - tables->alias= tables->real_name= (char*) "user"; - (tables+1)->alias= (tables+1)->real_name= (char*) "db"; - (tables+2)->alias= (tables+2)->real_name= (char*) "tables_priv"; - (tables+3)->alias= (tables+3)->real_name= (char*) "columns_priv"; - tables->next= tables+1; - (tables+1)->next= tables+2; - (tables+2)->next= tables+3; - (tables+3)->next= 0; + bzero((char*) tables, GRANT_TABLES*sizeof(*tables)); + tables->alias= tables->table_name= (char*) "user"; + (tables+1)->alias= (tables+1)->table_name= (char*) "db"; + (tables+2)->alias= (tables+2)->table_name= (char*) "tables_priv"; + (tables+3)->alias= (tables+3)->table_name= (char*) "columns_priv"; + (tables+4)->alias= (tables+4)->table_name= (char*) "procs_priv"; + tables->next_local= tables->next_global= tables+1; + (tables+1)->next_local= (tables+1)->next_global= tables+2; + (tables+2)->next_local= (tables+2)->next_global= tables+3; + (tables+3)->next_local= (tables+3)->next_global= tables+4; tables->lock_type= (tables+1)->lock_type= - (tables+2)->lock_type= (tables+3)->lock_type= TL_WRITE; - tables->db= (tables+1)->db= (tables+2)->db= (tables+3)->db=(char*) "mysql"; + (tables+2)->lock_type= (tables+3)->lock_type= + 
(tables+4)->lock_type= TL_WRITE; + tables->db= (tables+1)->db= (tables+2)->db= + (tables+3)->db= (tables+4)->db= (char*) "mysql"; #ifdef HAVE_REPLICATION /* @@ -3605,10 +4680,12 @@ int open_grant_tables(THD *thd, TABLE_LIST *tables) The tables must be marked "updating" so that tables_ok() takes them into account in tests. */ - tables[0].updating=tables[1].updating=tables[2].updating=tables[3].updating=1; - if (!tables_ok(0, tables)) + tables[0].updating=tables[1].updating=tables[2].updating= + tables[3].updating=tables[4].updating=1; + if (!tables_ok(thd, tables)) DBUG_RETURN(1); - tables[0].updating=tables[1].updating=tables[2].updating=tables[3].updating=0; + tables[0].updating=tables[1].updating=tables[2].updating= + tables[3].updating=tables[4].updating=0;; } #endif @@ -3648,145 +4725,769 @@ ACL_USER *check_acl_user(LEX_USER *user_name, return acl_user; } +/* + Modify a privilege table. + + SYNOPSIS + modify_grant_table() + table The table to modify. + host_field The host name field. + user_field The user name field. + user_to The new name for the user if to be renamed, + NULL otherwise. + + DESCRIPTION + Update user/host in the current record if user_to is not NULL. + Delete the current record if user_to is NULL. -int mysql_drop_user(THD *thd, List <LEX_USER> &list) + RETURN + 0 OK. + != 0 Error. 
+*/ + +static int modify_grant_table(TABLE *table, Field *host_field, + Field *user_field, LEX_USER *user_to) { - uint counter, acl_userd; - int result; - ACL_USER *acl_user; - ACL_DB *acl_db; - TABLE_LIST tables[4]; + int error; + DBUG_ENTER("modify_grant_table"); - DBUG_ENTER("mysql_drop_user"); + if (user_to) + { + /* rename */ + store_record(table, record[1]); + host_field->store(user_to->host.str, user_to->host.length, + system_charset_info); + user_field->store(user_to->user.str, user_to->user.length, + system_charset_info); + if ((error= table->file->update_row(table->record[1], table->record[0]))) + table->file->print_error(error, MYF(0)); + } + else + { + /* delete */ + if ((error=table->file->delete_row(table->record[0]))) + table->file->print_error(error, MYF(0)); + } - if ((result= open_grant_tables(thd, tables))) - DBUG_RETURN(result == 1 ? 0 : 1); + DBUG_RETURN(error); +} - rw_wrlock(&LOCK_grant); - VOID(pthread_mutex_lock(&acl_cache->lock)); +/* + Handle a privilege table. - LEX_USER *user_name; - List_iterator <LEX_USER> user_list(list); - while ((user_name=user_list++)) + SYNOPSIS + handle_grant_table() + tables The array with the four open tables. + table_no The number of the table to handle (0..4). + drop If user_from is to be dropped. + user_from The the user to be searched/dropped/renamed. + user_to The new name for the user if to be renamed, + NULL otherwise. + + DESCRIPTION + Scan through all records in a grant table and apply the requested + operation. For the "user" table, a single index access is sufficient, + since there is an unique index on (host, user). + Delete from grant table if drop is true. + Update in grant table if drop is false and user_to is not NULL. + Search in grant table if drop is false and user_to is NULL. + Tables are numbered as follows: + 0 user + 1 db + 2 tables_priv + 3 columns_priv + 4 procs_priv + + RETURN + > 0 At least one record matched. + 0 OK, but no record matched. + < 0 Error. 
+*/ + +static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, + LEX_USER *user_from, LEX_USER *user_to) +{ + int result= 0; + int error; + TABLE *table= tables[table_no].table; + Field *host_field= table->field[0]; + Field *user_field= table->field[table_no ? 2 : 1]; + char *host_str= user_from->host.str; + char *user_str= user_from->user.str; + const char *host; + const char *user; + byte user_key[MAX_KEY_LENGTH]; + uint key_prefix_length; + DBUG_ENTER("handle_grant_table"); + + if (! table_no) // mysql.user table { - if (!(acl_user= check_acl_user(user_name, &counter))) + /* + The 'user' table has an unique index on (host, user). + Thus, we can handle everything with a single index access. + The host- and user fields are consecutive in the user table records. + So we set host- and user fields of table->record[0] and use the + pointer to the host field as key. + index_read_idx() will replace table->record[0] (its first argument) + by the searched record, if it exists. + */ + DBUG_PRINT("info",("read table: '%s' search: '%s'@'%s'", + table->s->table_name, user_str, host_str)); + host_field->store(host_str, user_from->host.length, system_charset_info); + user_field->store(user_str, user_from->user.length, system_charset_info); + + key_prefix_length= (table->key_info->key_part[0].store_length + + table->key_info->key_part[1].store_length); + key_copy(user_key, table->record[0], table->key_info, key_prefix_length); + + if ((error= table->file->index_read_idx(table->record[0], 0, + user_key, key_prefix_length, + HA_READ_KEY_EXACT))) { - result= -1; - continue; + if (error != HA_ERR_KEY_NOT_FOUND) + { + table->file->print_error(error, MYF(0)); + result= -1; + } } - if ((acl_user->access & ~0)) + else + { + /* If requested, delete or update the record. */ + result= ((drop || user_to) && + modify_grant_table(table, host_field, user_field, user_to)) ? + -1 : 1; /* Error or found. 
*/ + } + DBUG_PRINT("info",("read result: %d", result)); + } + else + { + /* + The non-'user' table do not have indexes on (host, user). + And their host- and user fields are not consecutive. + Thus, we need to do a table scan to find all matching records. + */ + if ((error= table->file->ha_rnd_init(1))) { + table->file->print_error(error, MYF(0)); result= -1; - continue; } - acl_userd= counter; + else + { +#ifdef EXTRA_DEBUG + DBUG_PRINT("info",("scan table: '%s' search: '%s'@'%s'", + table->s->table_name, user_str, host_str)); +#endif + while ((error= table->file->rnd_next(table->record[0])) != + HA_ERR_END_OF_FILE) + { + if (error) + { + /* Most probable 'deleted record'. */ + DBUG_PRINT("info",("scan error: %d", error)); + continue; + } + if (! (host= get_field(&mem, host_field))) + host= ""; + if (! (user= get_field(&mem, user_field))) + user= ""; + +#ifdef EXTRA_DEBUG + DBUG_PRINT("loop",("scan fields: '%s'@'%s' '%s' '%s' '%s'", + user, host, + get_field(&mem, table->field[1]) /*db*/, + get_field(&mem, table->field[3]) /*table*/, + get_field(&mem, table->field[4]) /*column*/)); +#endif + if (strcmp(user_str, user) || + my_strcasecmp(system_charset_info, host_str, host)) + continue; + + /* If requested, delete or update the record. */ + result= ((drop || user_to) && + modify_grant_table(table, host_field, user_field, user_to)) ? + -1 : result ? result : 1; /* Error or keep result or found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + break ; + } + (void) table->file->ha_rnd_end(); + DBUG_PRINT("info",("scan result: %d", result)); + } + } - for (counter= 0 ; counter < acl_dbs.elements ; counter++) + DBUG_RETURN(result); +} + + +/* + Handle an in-memory privilege structure. + + SYNOPSIS + handle_grant_struct() + struct_no The number of the structure to handle (0..3). + drop If user_from is to be dropped. + user_from The the user to be searched/dropped/renamed. 
+ user_to The new name for the user if to be renamed, + NULL otherwise. + + DESCRIPTION + Scan through all elements in an in-memory grant structure and apply + the requested operation. + Delete from grant structure if drop is true. + Update in grant structure if drop is false and user_to is not NULL. + Search in grant structure if drop is false and user_to is NULL. + Structures are numbered as follows: + 0 acl_users + 1 acl_dbs + 2 column_priv_hash + 3 procs_priv_hash + + RETURN + > 0 At least one element matched. + 0 OK, but no element matched. + -1 Wrong arguments to function +*/ + +static int handle_grant_struct(uint struct_no, bool drop, + LEX_USER *user_from, LEX_USER *user_to) +{ + int result= 0; + uint idx; + uint elements; + const char *user; + const char *host; + ACL_USER *acl_user; + ACL_DB *acl_db; + GRANT_NAME *grant_name; + DBUG_ENTER("handle_grant_struct"); + DBUG_PRINT("info",("scan struct: %u search: '%s'@'%s'", + struct_no, user_from->user.str, user_from->host.str)); + + LINT_INIT(acl_user); + LINT_INIT(acl_db); + LINT_INIT(grant_name); + + safe_mutex_assert_owner(&acl_cache->lock); + + /* Get the number of elements in the in-memory structure. */ + switch (struct_no) { + case 0: + elements= acl_users.elements; + break; + case 1: + elements= acl_dbs.elements; + break; + case 2: + elements= column_priv_hash.records; + break; + case 3: + elements= proc_priv_hash.records; + break; + default: + return -1; + } + +#ifdef EXTRA_DEBUG + DBUG_PRINT("loop",("scan struct: %u search user: '%s' host: '%s'", + struct_no, user_from->user.str, user_from->host.str)); +#endif + /* Loop over all elements. */ + for (idx= 0; idx < elements; idx++) + { + /* + Get a pointer to the element. 
+ */ + switch (struct_no) { + case 0: + acl_user= dynamic_element(&acl_users, idx, ACL_USER*); + user= acl_user->user; + host= acl_user->host.hostname; + break; + + case 1: + acl_db= dynamic_element(&acl_dbs, idx, ACL_DB*); + user= acl_db->user; + host= acl_db->host.hostname; + break; + + case 2: + grant_name= (GRANT_NAME*) hash_element(&column_priv_hash, idx); + user= grant_name->user; + host= grant_name->host.hostname; + break; + + case 3: + grant_name= (GRANT_NAME*) hash_element(&proc_priv_hash, idx); + user= grant_name->user; + host= grant_name->host.hostname; + break; + default: + assert(0); + } + if (! user) + user= ""; + if (! host) + host= ""; + +#ifdef EXTRA_DEBUG + DBUG_PRINT("loop",("scan struct: %u index: %u user: '%s' host: '%s'", + struct_no, idx, user, host)); +#endif + if (strcmp(user_from->user.str, user) || + my_strcasecmp(system_charset_info, user_from->host.str, host)) + continue; + + result= 1; /* At least one element found. */ + if ( drop ) { - const char *user,*host; - acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*); - if (!(user= acl_db->user)) - user= ""; - if (!(host= acl_db->host.hostname)) - host= ""; + switch ( struct_no ) + { + case 0: + delete_dynamic_element(&acl_users, idx); + break; + + case 1: + delete_dynamic_element(&acl_dbs, idx); + break; + + case 2: + hash_delete(&column_priv_hash, (byte*) grant_name); + break; - if (!strcmp(user_name->user.str,user) && - !my_strcasecmp(system_charset_info, user_name->host.str, host)) + case 3: + hash_delete(&proc_priv_hash, (byte*) grant_name); break; + } + elements--; + idx--; } - if (counter != acl_dbs.elements) + else if ( user_to ) { - result= -1; - continue; + switch ( struct_no ) { + case 0: + acl_user->user= strdup_root(&mem, user_to->user.str); + acl_user->host.hostname= strdup_root(&mem, user_to->host.str); + break; + + case 1: + acl_db->user= strdup_root(&mem, user_to->user.str); + acl_db->host.hostname= strdup_root(&mem, user_to->host.str); + break; + + case 2: + case 3: + 
grant_name->user= strdup_root(&mem, user_to->user.str); + update_hostname(&grant_name->host, + strdup_root(&mem, user_to->host.str)); + break; + } + } + else + { + /* If search is requested, we do not need to search further. */ + break; + } + } +#ifdef EXTRA_DEBUG + DBUG_PRINT("loop",("scan struct: %u result %d", struct_no, result)); +#endif + + DBUG_RETURN(result); +} + + +/* + Handle all privilege tables and in-memory privilege structures. + + SYNOPSIS + handle_grant_data() + tables The array with the four open tables. + drop If user_from is to be dropped. + user_from The the user to be searched/dropped/renamed. + user_to The new name for the user if to be renamed, + NULL otherwise. + + DESCRIPTION + Go through all grant tables and in-memory grant structures and apply + the requested operation. + Delete from grant data if drop is true. + Update in grant data if drop is false and user_to is not NULL. + Search in grant data if drop is false and user_to is NULL. + + RETURN + > 0 At least one element matched. + 0 OK, but no element matched. + < 0 Error. +*/ + +static int handle_grant_data(TABLE_LIST *tables, bool drop, + LEX_USER *user_from, LEX_USER *user_to) +{ + int result= 0; + int found; + DBUG_ENTER("handle_grant_data"); + + /* Handle user table. */ + if ((found= handle_grant_table(tables, 0, drop, user_from, user_to)) < 0) + { + /* Handle of table failed, don't touch the in-memory array. */ + result= -1; + } + else + { + /* Handle user array. */ + if ((handle_grant_struct(0, drop, user_from, user_to) && ! result) || + found) + { + result= 1; /* At least one record/element found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; } + } - for (counter= 0 ; counter < column_priv_hash.records ; counter++) + /* Handle db table. */ + if ((found= handle_grant_table(tables, 1, drop, user_from, user_to)) < 0) + { + /* Handle of table failed, don't touch the in-memory array. 
*/ + result= -1; + } + else + { + /* Handle db array. */ + if (((handle_grant_struct(1, drop, user_from, user_to) && ! result) || + found) && ! result) { - const char *user,*host; - GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash, - counter); - if (!(user=grant_table->user)) - user= ""; - if (!(host=grant_table->host.hostname)) - host= ""; + result= 1; /* At least one record/element found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; + } + } - if (!strcmp(user_name->user.str,user) && - !my_strcasecmp(system_charset_info, user_name->host.str, host)) - break; + /* Handle procedures table. */ + if ((found= handle_grant_table(tables, 4, drop, user_from, user_to)) < 0) + { + /* Handle of table failed, don't touch in-memory array. */ + result= -1; + } + else + { + /* Handle procs array. */ + if (((handle_grant_struct(3, drop, user_from, user_to) && ! result) || + found) && ! result) + { + result= 1; /* At least one record/element found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; } - if (counter != column_priv_hash.records) + } + + /* Handle tables table. */ + if ((found= handle_grant_table(tables, 2, drop, user_from, user_to)) < 0) + { + /* Handle of table failed, don't touch columns and in-memory array. */ + result= -1; + } + else + { + if (found && ! result) + { + result= 1; /* At least one record found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; + } + + /* Handle columns table. */ + if ((found= handle_grant_table(tables, 3, drop, user_from, user_to)) < 0) { + /* Handle of table failed, don't touch the in-memory array. */ result= -1; + } + else + { + /* Handle columns hash. */ + if (((handle_grant_struct(2, drop, user_from, user_to) && ! result) || + found) && ! result) + result= 1; /* At least one record/element found. 
*/ + } + } + end: + DBUG_RETURN(result); +} + + +static void append_user(String *str, LEX_USER *user) +{ + if (str->length()) + str->append(','); + str->append('\''); + str->append(user->user.str); + str->append(STRING_WITH_LEN("'@'")); + str->append(user->host.str); + str->append('\''); +} + + +/* + Create a list of users. + + SYNOPSIS + mysql_create_user() + thd The current thread. + list The users to create. + + RETURN + FALSE OK. + TRUE Error. +*/ + +bool mysql_create_user(THD *thd, List <LEX_USER> &list) +{ + int result; + String wrong_users; + ulong sql_mode; + LEX_USER *user_name, *tmp_user_name; + List_iterator <LEX_USER> user_list(list); + TABLE_LIST tables[GRANT_TABLES]; + DBUG_ENTER("mysql_create_user"); + + /* CREATE USER may be skipped on replication client. */ + if ((result= open_grant_tables(thd, tables))) + DBUG_RETURN(result != 1); + + rw_wrlock(&LOCK_grant); + VOID(pthread_mutex_lock(&acl_cache->lock)); + + while ((tmp_user_name= user_list++)) + { + if (!(user_name= get_current_user(thd, tmp_user_name))) + { + result= TRUE; continue; } - tables[0].table->field[0]->store(user_name->host.str,(uint) - user_name->host.length, - system_charset_info); - tables[0].table->field[1]->store(user_name->user.str,(uint) - user_name->user.length, - system_charset_info); - tables[0].table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (!tables[0].table->file->index_read_idx(tables[0].table->record[0],0, - (byte*) tables[0].table-> - field[0]->ptr, - tables[0].table-> - key_info[0].key_length, - HA_READ_KEY_EXACT)) - { - int error; - if ((error = tables[0].table->file->delete_row(tables[0].table-> - record[0]))) - { - tables[0].table->file->print_error(error, MYF(0)); - result= -1; - goto end; - } - delete_dynamic_element(&acl_users, acl_userd); + /* + Search all in-memory structures and grant tables + for a mention of the new user name. 
+ */ + if (handle_grant_data(tables, 0, user_name, NULL)) + { + append_user(&wrong_users, user_name); + result= TRUE; + continue; + } + + sql_mode= thd->variables.sql_mode; + if (replace_user_table(thd, tables[0].table, *user_name, 0, 0, 1, 0)) + { + append_user(&wrong_users, user_name); + result= TRUE; } } + VOID(pthread_mutex_unlock(&acl_cache->lock)); + + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + + rw_unlock(&LOCK_grant); + close_thread_tables(thd); if (result) - my_error(ER_DROP_USER, MYF(0)); + my_error(ER_CANNOT_USER, MYF(0), "CREATE USER", wrong_users.c_ptr_safe()); + DBUG_RETURN(result); +} -end: - /* Reload acl_check_hosts as its memory is mapped to acl_user */ - delete_dynamic(&acl_wild_hosts); - hash_free(&acl_check_hosts); - init_check_host(); + +/* + Drop a list of users and all their privileges. + + SYNOPSIS + mysql_drop_user() + thd The current thread. + list The users to drop. + + RETURN + FALSE OK. + TRUE Error. +*/ + +bool mysql_drop_user(THD *thd, List <LEX_USER> &list) +{ + int result; + String wrong_users; + LEX_USER *user_name, *tmp_user_name; + List_iterator <LEX_USER> user_list(list); + TABLE_LIST tables[GRANT_TABLES]; + DBUG_ENTER("mysql_drop_user"); + + /* DROP USER may be skipped on replication client. 
*/ + if ((result= open_grant_tables(thd, tables))) + DBUG_RETURN(result != 1); + + rw_wrlock(&LOCK_grant); + VOID(pthread_mutex_lock(&acl_cache->lock)); + + while ((tmp_user_name= user_list++)) + { + user_name= get_current_user(thd, tmp_user_name); + if (!(user_name= get_current_user(thd, tmp_user_name))) + { + result= TRUE; + continue; + } + if (handle_grant_data(tables, 1, user_name, NULL) <= 0) + { + append_user(&wrong_users, user_name); + result= TRUE; + } + } + + /* Rebuild 'acl_check_hosts' since 'acl_users' has been modified */ + rebuild_check_host(); VOID(pthread_mutex_unlock(&acl_cache->lock)); + + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + rw_unlock(&LOCK_grant); close_thread_tables(thd); + if (result) + my_error(ER_CANNOT_USER, MYF(0), "DROP USER", wrong_users.c_ptr_safe()); DBUG_RETURN(result); } -int mysql_revoke_all(THD *thd, List <LEX_USER> &list) + +/* + Rename a user. + + SYNOPSIS + mysql_rename_user() + thd The current thread. + list The user name pairs: (from, to). + + RETURN + FALSE OK. + TRUE Error. +*/ + +bool mysql_rename_user(THD *thd, List <LEX_USER> &list) { - uint counter, revoked; + int result; + String wrong_users; + LEX_USER *user_from, *tmp_user_from; + LEX_USER *user_to, *tmp_user_to; + List_iterator <LEX_USER> user_list(list); + TABLE_LIST tables[GRANT_TABLES]; + DBUG_ENTER("mysql_rename_user"); + + /* RENAME USER may be skipped on replication client. */ + if ((result= open_grant_tables(thd, tables))) + DBUG_RETURN(result != 1); + + rw_wrlock(&LOCK_grant); + VOID(pthread_mutex_lock(&acl_cache->lock)); + + while ((tmp_user_from= user_list++)) + { + if (!(user_from= get_current_user(thd, tmp_user_from))) + { + result= TRUE; + continue; + } + tmp_user_to= user_list++; + if (!(user_to= get_current_user(thd, tmp_user_to))) + { + result= TRUE; + continue; + } + DBUG_ASSERT(user_to != 0); /* Syntax enforces pairs of users. 
*/ + + /* + Search all in-memory structures and grant tables + for a mention of the new user name. + */ + if (handle_grant_data(tables, 0, user_to, NULL) || + handle_grant_data(tables, 0, user_from, user_to) <= 0) + { + append_user(&wrong_users, user_from); + result= TRUE; + } + } + + /* Rebuild 'acl_check_hosts' since 'acl_users' has been modified */ + rebuild_check_host(); + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + + rw_unlock(&LOCK_grant); + close_thread_tables(thd); + if (result) + my_error(ER_CANNOT_USER, MYF(0), "RENAME USER", wrong_users.c_ptr_safe()); + DBUG_RETURN(result); +} + + +/* + Revoke all privileges from a list of users. + + SYNOPSIS + mysql_revoke_all() + thd The current thread. + list The users to revoke all privileges from. + + RETURN + > 0 Error. Error message already sent. + 0 OK. + < 0 Error. Error message not yet sent. +*/ + +bool mysql_revoke_all(THD *thd, List <LEX_USER> &list) +{ + uint counter, revoked, is_proc; int result; ACL_DB *acl_db; - TABLE_LIST tables[4]; + TABLE_LIST tables[GRANT_TABLES]; DBUG_ENTER("mysql_revoke_all"); if ((result= open_grant_tables(thd, tables))) - DBUG_RETURN(result == 1 ? 
0 : 1); + DBUG_RETURN(result != 1); rw_wrlock(&LOCK_grant); VOID(pthread_mutex_lock(&acl_cache->lock)); - LEX_USER *lex_user; + LEX_USER *lex_user, *tmp_lex_user; List_iterator <LEX_USER> user_list(list); - while ((lex_user=user_list++)) + while ((tmp_lex_user= user_list++)) { - if (!check_acl_user(lex_user, &counter)) + if (!(lex_user= get_current_user(thd, tmp_lex_user))) + { + result= -1; + continue; + } + if (!find_acl_user(lex_user->host.str, lex_user->user.str, TRUE)) { result= -1; continue; } if (replace_user_table(thd, tables[0].table, - *lex_user, ~(ulong)0, 1, 0)) + *lex_user, ~(ulong)0, 1, 0, 0)) { result= -1; continue; @@ -3803,13 +5504,13 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list) for (counter= 0, revoked= 0 ; counter < acl_dbs.elements ; ) { const char *user,*host; - + acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*); if (!(user=acl_db->user)) user= ""; if (!(host=acl_db->host.hostname)) host= ""; - + if (!strcmp(lex_user->user.str,user) && !my_strcasecmp(system_charset_info, lex_user->host.str, host)) { @@ -3840,7 +5541,7 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list) user= ""; if (!(host=grant_table->host.hostname)) host= ""; - + if (!strcmp(lex_user->user.str,user) && !my_strcasecmp(system_charset_info, lex_user->host.str, host)) { @@ -3874,15 +5575,218 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list) counter++; } } while (revoked); + + /* Remove procedure access */ + for (is_proc=0; is_proc<2; is_proc++) do { + HASH *hash= is_proc ? 
&proc_priv_hash : &func_priv_hash; + for (counter= 0, revoked= 0 ; counter < hash->records ; ) + { + const char *user,*host; + GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(hash, counter); + if (!(user=grant_proc->user)) + user= ""; + if (!(host=grant_proc->host.hostname)) + host= ""; + + if (!strcmp(lex_user->user.str,user) && + !my_strcasecmp(system_charset_info, lex_user->host.str, host)) + { + if (!replace_routine_table(thd,grant_proc,tables[4].table,*lex_user, + grant_proc->db, + grant_proc->tname, + is_proc, + ~(ulong)0, 1)) + { + revoked= 1; + continue; + } + result= -1; // Something went wrong + } + counter++; + } + } while (revoked); } - + VOID(pthread_mutex_unlock(&acl_cache->lock)); + + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + rw_unlock(&LOCK_grant); close_thread_tables(thd); - + if (result) - my_error(ER_REVOKE_GRANTS, MYF(0)); - + my_message(ER_REVOKE_GRANTS, ER(ER_REVOKE_GRANTS), MYF(0)); + + DBUG_RETURN(result); +} + + +/* + Revoke privileges for all users on a stored procedure + + SYNOPSIS + sp_revoke_privileges() + thd The current thread. + db DB of the stored procedure + name Name of the stored procedure + + RETURN + 0 OK. + < 0 Error. Error message not yet sent. +*/ + +bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name, + bool is_proc) +{ + uint counter, revoked; + int result; + TABLE_LIST tables[GRANT_TABLES]; + HASH *hash= is_proc ? 
&proc_priv_hash : &func_priv_hash; + DBUG_ENTER("sp_revoke_privileges"); + + if ((result= open_grant_tables(thd, tables))) + DBUG_RETURN(result != 1); + + rw_wrlock(&LOCK_grant); + VOID(pthread_mutex_lock(&acl_cache->lock)); + + /* Remove procedure access */ + do + { + for (counter= 0, revoked= 0 ; counter < hash->records ; ) + { + GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(hash, counter); + if (!my_strcasecmp(system_charset_info, grant_proc->db, sp_db) && + !my_strcasecmp(system_charset_info, grant_proc->tname, sp_name)) + { + LEX_USER lex_user; + lex_user.user.str= grant_proc->user; + lex_user.user.length= strlen(grant_proc->user); + lex_user.host.str= grant_proc->host.hostname ? + grant_proc->host.hostname : (char*)""; + lex_user.host.length= grant_proc->host.hostname ? + strlen(grant_proc->host.hostname) : 0; + if (!replace_routine_table(thd,grant_proc,tables[4].table,lex_user, + grant_proc->db, grant_proc->tname, + is_proc, ~(ulong)0, 1)) + { + revoked= 1; + continue; + } + result= -1; // Something went wrong + } + counter++; + } + } while (revoked); + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + rw_unlock(&LOCK_grant); + close_thread_tables(thd); + + if (result) + my_message(ER_REVOKE_GRANTS, ER(ER_REVOKE_GRANTS), MYF(0)); + + DBUG_RETURN(result); +} + + +/* + Grant EXECUTE,ALTER privilege for a stored procedure + + SYNOPSIS + sp_grant_privileges() + thd The current thread. + db DB of the stored procedure + name Name of the stored procedure + + RETURN + 0 OK. + < 0 Error. Error message not yet sent. 
+*/ + +bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name, + bool is_proc) +{ + Security_context *sctx= thd->security_ctx; + LEX_USER *combo; + TABLE_LIST tables[1]; + List<LEX_USER> user_list; + bool result; + ACL_USER *au; + char passwd_buff[SCRAMBLED_PASSWORD_CHAR_LENGTH+1]; + DBUG_ENTER("sp_grant_privileges"); + + if (!(combo=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) + DBUG_RETURN(TRUE); + + combo->user.str= sctx->user; + + VOID(pthread_mutex_lock(&acl_cache->lock)); + + if ((au= find_acl_user(combo->host.str=(char*)sctx->host_or_ip,combo->user.str,FALSE))) + goto found_acl; + if ((au= find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str,FALSE))) + goto found_acl; + if ((au= find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str,FALSE))) + goto found_acl; + if((au= find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE))) + goto found_acl; + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + DBUG_RETURN(TRUE); + + found_acl: + VOID(pthread_mutex_unlock(&acl_cache->lock)); + + bzero((char*)tables, sizeof(TABLE_LIST)); + user_list.empty(); + + tables->db= (char*)sp_db; + tables->table_name= tables->alias= (char*)sp_name; + + combo->host.length= strlen(combo->host.str); + combo->user.length= strlen(combo->user.str); + combo->host.str= thd->strmake(combo->host.str,combo->host.length); + combo->user.str= thd->strmake(combo->user.str,combo->user.length); + + + if(au && au->salt_len) + { + if (au->salt_len == SCRAMBLE_LENGTH) + { + make_password_from_salt(passwd_buff, au->salt); + combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH; + } + else if (au->salt_len == SCRAMBLE_LENGTH_323) + { + make_password_from_salt_323(passwd_buff, (ulong *) au->salt); + combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323; + } + else + { + my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); + return -1; + } + combo->password.str= passwd_buff; + } + else + { + combo->password.str= (char*)""; + 
combo->password.length= 0; + } + + if (user_list.push_back(combo)) + DBUG_RETURN(TRUE); + + thd->lex->ssl_type= SSL_TYPE_NOT_SPECIFIED; + bzero((char*) &thd->lex->mqh, sizeof(thd->lex->mqh)); + + result= mysql_routine_grant(thd, tables, is_proc, user_list, + DEFAULT_CREATE_PROC_ACLS, 0, 1); DBUG_RETURN(result); } @@ -3891,7 +5795,7 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list) Instantiate used templates *****************************************************************************/ -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class List_iterator<LEX_COLUMN>; template class List_iterator<LEX_USER>; template class List<LEX_COLUMN>; @@ -3944,3 +5848,373 @@ int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr) DBUG_RETURN (*str != '\0'); } + +void update_schema_privilege(TABLE *table, char *buff, const char* db, + const char* t_name, const char* column, + uint col_length, const char *priv, + uint priv_length, const char* is_grantable) +{ + int i= 2; + CHARSET_INFO *cs= system_charset_info; + restore_record(table, s->default_values); + table->field[0]->store(buff, strlen(buff), cs); + if (db) + table->field[i++]->store(db, strlen(db), cs); + if (t_name) + table->field[i++]->store(t_name, strlen(t_name), cs); + if (column) + table->field[i++]->store(column, col_length, cs); + table->field[i++]->store(priv, priv_length, cs); + table->field[i]->store(is_grantable, strlen(is_grantable), cs); + table->file->write_row(table->record[0]); +} + + +int fill_schema_user_privileges(THD *thd, TABLE_LIST *tables, COND *cond) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint counter; + ACL_USER *acl_user; + ulong want_access; + char buff[100]; + TABLE *table= tables->table; + bool no_global_access= check_access(thd, SELECT_ACL, "mysql",0,1,1,0); + char *curr_host= thd->security_ctx->priv_host_name(); + DBUG_ENTER("fill_schema_user_privileges"); + + if (!initialized) + DBUG_RETURN(0); + pthread_mutex_lock(&acl_cache->lock); + + for 
(counter=0 ; counter < acl_users.elements ; counter++) + { + const char *user,*host, *is_grantable="YES"; + acl_user=dynamic_element(&acl_users,counter,ACL_USER*); + if (!(user=acl_user->user)) + user= ""; + if (!(host=acl_user->host.hostname)) + host= ""; + + if (no_global_access && + (strcmp(thd->security_ctx->priv_user, user) || + my_strcasecmp(system_charset_info, curr_host, host))) + continue; + + want_access= acl_user->access; + if (!(want_access & GRANT_ACL)) + is_grantable= "NO"; + + strxmov(buff,"'",user,"'@'",host,"'",NullS); + if (!(want_access & ~GRANT_ACL)) + update_schema_privilege(table, buff, 0, 0, 0, 0, + STRING_WITH_LEN("USAGE"), is_grantable); + else + { + uint priv_id; + ulong j,test_access= want_access & ~GRANT_ACL; + for (priv_id=0, j = SELECT_ACL;j <= GLOBAL_ACLS; priv_id++,j <<= 1) + { + if (test_access & j) + update_schema_privilege(table, buff, 0, 0, 0, 0, + command_array[priv_id], + command_lengths[priv_id], is_grantable); + } + } + } + + pthread_mutex_unlock(&acl_cache->lock); + + DBUG_RETURN(0); +#else + return(0); +#endif +} + + +int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint counter; + ACL_DB *acl_db; + ulong want_access; + char buff[100]; + TABLE *table= tables->table; + bool no_global_access= check_access(thd, SELECT_ACL, "mysql",0,1,1,0); + char *curr_host= thd->security_ctx->priv_host_name(); + DBUG_ENTER("fill_schema_schema_privileges"); + + if (!initialized) + DBUG_RETURN(0); + pthread_mutex_lock(&acl_cache->lock); + + for (counter=0 ; counter < acl_dbs.elements ; counter++) + { + const char *user, *host, *is_grantable="YES"; + + acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*); + if (!(user=acl_db->user)) + user= ""; + if (!(host=acl_db->host.hostname)) + host= ""; + + if (no_global_access && + (strcmp(thd->security_ctx->priv_user, user) || + my_strcasecmp(system_charset_info, curr_host, host))) + continue; + + want_access=acl_db->access; + if 
(want_access) + { + if (!(want_access & GRANT_ACL)) + { + is_grantable= "NO"; + } + strxmov(buff,"'",user,"'@'",host,"'",NullS); + if (!(want_access & ~GRANT_ACL)) + update_schema_privilege(table, buff, acl_db->db, 0, 0, + 0, STRING_WITH_LEN("USAGE"), is_grantable); + else + { + int cnt; + ulong j,test_access= want_access & ~GRANT_ACL; + for (cnt=0, j = SELECT_ACL; j <= DB_ACLS; cnt++,j <<= 1) + if (test_access & j) + update_schema_privilege(table, buff, acl_db->db, 0, 0, 0, + command_array[cnt], command_lengths[cnt], + is_grantable); + } + } + } + + pthread_mutex_unlock(&acl_cache->lock); + + DBUG_RETURN(0); +#else + return (0); +#endif +} + + +int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint index; + char buff[100]; + TABLE *table= tables->table; + bool no_global_access= check_access(thd, SELECT_ACL, "mysql",0,1,1,0); + char *curr_host= thd->security_ctx->priv_host_name(); + DBUG_ENTER("fill_schema_table_privileges"); + + rw_rdlock(&LOCK_grant); + + for (index=0 ; index < column_priv_hash.records ; index++) + { + const char *user, *host, *is_grantable= "YES"; + GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash, + index); + if (!(user=grant_table->user)) + user= ""; + if (!(host= grant_table->host.hostname)) + host= ""; + + if (no_global_access && + (strcmp(thd->security_ctx->priv_user, user) || + my_strcasecmp(system_charset_info, curr_host, host))) + continue; + + ulong table_access= grant_table->privs; + if (table_access) + { + ulong test_access= table_access & ~GRANT_ACL; + /* + We should skip 'usage' privilege on table if + we have any privileges on column(s) of this table + */ + if (!test_access && grant_table->cols) + continue; + if (!(table_access & GRANT_ACL)) + is_grantable= "NO"; + + strxmov(buff, "'", user, "'@'", host, "'", NullS); + if (!test_access) + update_schema_privilege(table, buff, grant_table->db, grant_table->tname, + 0, 0, STRING_WITH_LEN("USAGE"), 
is_grantable); + else + { + ulong j; + int cnt; + for (cnt= 0, j= SELECT_ACL; j <= TABLE_ACLS; cnt++, j<<= 1) + { + if (test_access & j) + update_schema_privilege(table, buff, grant_table->db, + grant_table->tname, 0, 0, command_array[cnt], + command_lengths[cnt], is_grantable); + } + } + } + } + + rw_unlock(&LOCK_grant); + + DBUG_RETURN(0); +#else + return (0); +#endif +} + + +int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint index; + char buff[100]; + TABLE *table= tables->table; + bool no_global_access= check_access(thd, SELECT_ACL, "mysql",0,1,1,0); + char *curr_host= thd->security_ctx->priv_host_name(); + DBUG_ENTER("fill_schema_table_privileges"); + + rw_rdlock(&LOCK_grant); + + for (index=0 ; index < column_priv_hash.records ; index++) + { + const char *user, *host, *is_grantable= "YES"; + GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash, + index); + if (!(user=grant_table->user)) + user= ""; + if (!(host= grant_table->host.hostname)) + host= ""; + + if (no_global_access && + (strcmp(thd->security_ctx->priv_user, user) || + my_strcasecmp(system_charset_info, curr_host, host))) + continue; + + ulong table_access= grant_table->cols; + if (table_access != 0) + { + if (!(grant_table->privs & GRANT_ACL)) + is_grantable= "NO"; + + ulong test_access= table_access & ~GRANT_ACL; + strxmov(buff, "'", user, "'@'", host, "'", NullS); + if (!test_access) + continue; + else + { + ulong j; + int cnt; + for (cnt= 0, j= SELECT_ACL; j <= TABLE_ACLS; cnt++, j<<= 1) + { + if (test_access & j) + { + for (uint col_index=0 ; + col_index < grant_table->hash_columns.records ; + col_index++) + { + GRANT_COLUMN *grant_column = (GRANT_COLUMN*) + hash_element(&grant_table->hash_columns,col_index); + if ((grant_column->rights & j) && (table_access & j)) + update_schema_privilege(table, buff, grant_table->db, + grant_table->tname, + grant_column->column, + grant_column->key_length, + 
command_array[cnt], + command_lengths[cnt], is_grantable); + } + } + } + } + } + } + + rw_unlock(&LOCK_grant); + + DBUG_RETURN(0); +#else + return (0); +#endif +} + + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +/* + fill effective privileges for table + + SYNOPSIS + fill_effective_table_privileges() + thd thread handler + grant grants table descriptor + db db name + table table name +*/ + +void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant, + const char *db, const char *table) +{ + Security_context *sctx= thd->security_ctx; + DBUG_ENTER("fill_effective_table_privileges"); + DBUG_PRINT("enter", ("Host: '%s', Ip: '%s', User: '%s', table: `%s`.`%s`", + sctx->priv_host, (sctx->ip ? sctx->ip : "(NULL)"), + (sctx->priv_user ? sctx->priv_user : "(NULL)"), + db, table)); + /* --skip-grants */ + if (!initialized) + { + DBUG_PRINT("info", ("skip grants")); + grant->privilege= ~NO_ACCESS; // everything is allowed + DBUG_PRINT("info", ("privilege 0x%lx", grant->privilege)); + DBUG_VOID_RETURN; + } + + /* global privileges */ + grant->privilege= sctx->master_access; + + if (!sctx->priv_user) + { + DBUG_PRINT("info", ("privilege 0x%lx", grant->privilege)); + DBUG_VOID_RETURN; // it is slave + } + + /* db privileges */ + grant->privilege|= acl_get(sctx->host, sctx->ip, sctx->priv_user, db, 0); + + if (!grant_option) + { + DBUG_PRINT("info", ("privilege 0x%lx", grant->privilege)); + DBUG_VOID_RETURN; + } + + /* table privileges */ + rw_rdlock(&LOCK_grant); + if (grant->version != grant_version) + { + grant->grant_table= + table_hash_search(sctx->host, sctx->ip, db, + sctx->priv_user, + table, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ + } + if (grant->grant_table != 0) + { + grant->privilege|= grant->grant_table->privs; + } + rw_unlock(&LOCK_grant); + + DBUG_PRINT("info", ("privilege 0x%lx", grant->privilege)); + DBUG_VOID_RETURN; +} + +#else /* NO_EMBEDDED_ACCESS_CHECKS */ + 
+/**************************************************************************** + Dummy wrappers when we don't have any access checks +****************************************************************************/ + +bool check_routine_level_acl(THD *thd, const char *db, const char *name, + bool is_proc) +{ + return FALSE; +} + +#endif diff --git a/sql/sql_acl.h b/sql/sql_acl.h index 256101ec7d8..d08f2663af5 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -35,54 +34,94 @@ #define EXECUTE_ACL (1L << 18) #define REPL_SLAVE_ACL (1L << 19) #define REPL_CLIENT_ACL (1L << 20) - +#define CREATE_VIEW_ACL (1L << 21) +#define SHOW_VIEW_ACL (1L << 22) +#define CREATE_PROC_ACL (1L << 23) +#define ALTER_PROC_ACL (1L << 24) +#define CREATE_USER_ACL (1L << 25) /* don't forget to update - static struct show_privileges_st sys_privileges[] - in sql_show.cc when adding new privileges! + 1. static struct show_privileges_st sys_privileges[] + 2. static const char *command_array[] and static uint command_lengths[] + 3. mysql_system_tables.sql and mysql_system_tables_fix.sql + 4. acl_init() or whatever - to define behaviour for old privilege tables + 5. 
sql_yacc.yy - for GRANT/REVOKE to work */ - +#define EXTRA_ACL (1L << 29) +#define NO_ACCESS (1L << 30) #define DB_ACLS \ (UPDATE_ACL | SELECT_ACL | INSERT_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \ - GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL | LOCK_TABLES_ACL) + GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL | \ + LOCK_TABLES_ACL | EXECUTE_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL | \ + CREATE_PROC_ACL | ALTER_PROC_ACL) #define TABLE_ACLS \ (SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \ - GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL) + GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_VIEW_ACL | \ + SHOW_VIEW_ACL) #define COL_ACLS \ (SELECT_ACL | INSERT_ACL | UPDATE_ACL | REFERENCES_ACL) +#define PROC_ACLS \ +(ALTER_PROC_ACL | EXECUTE_ACL | GRANT_ACL) + +#define SHOW_PROC_ACLS \ +(ALTER_PROC_ACL | EXECUTE_ACL | CREATE_PROC_ACL) + #define GLOBAL_ACLS \ (SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \ RELOAD_ACL | SHUTDOWN_ACL | PROCESS_ACL | FILE_ACL | GRANT_ACL | \ REFERENCES_ACL | INDEX_ACL | ALTER_ACL | SHOW_DB_ACL | SUPER_ACL | \ CREATE_TMP_ACL | LOCK_TABLES_ACL | REPL_SLAVE_ACL | REPL_CLIENT_ACL | \ - EXECUTE_ACL) + EXECUTE_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL | CREATE_PROC_ACL | \ + ALTER_PROC_ACL | CREATE_USER_ACL) -#define EXTRA_ACL (1L << 29) -#define NO_ACCESS (1L << 30) +#define DEFAULT_CREATE_PROC_ACLS \ +(ALTER_PROC_ACL | EXECUTE_ACL) /* Defines to change the above bits to how things are stored in tables This is needed as the 'host' and 'db' table is missing a few privileges */ -/* Continius bit-segments that needs to be shifted */ -#define DB_REL1 (RELOAD_ACL | SHUTDOWN_ACL | PROCESS_ACL | FILE_ACL) -#define DB_REL2 (GRANT_ACL | REFERENCES_ACL) - /* Privileges that needs to be reallocated (in continous chunks) */ +#define DB_CHUNK0 (SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL | \ + CREATE_ACL | DROP_ACL) #define DB_CHUNK1 
(GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL) #define DB_CHUNK2 (CREATE_TMP_ACL | LOCK_TABLES_ACL) - -#define fix_rights_for_db(A) (((A) & 63) | (((A) & DB_REL1) << 4) | (((A) & DB_REL2) << 6)) -#define get_rights_for_db(A) (((A) & 63) | (((A) & DB_CHUNK1) >> 4) | (((A) & DB_CHUNK2) >> 6)) -#define fix_rights_for_table(A) (((A) & 63) | (((A) & ~63) << 4)) -#define get_rights_for_table(A) (((A) & 63) | (((A) & ~63) >> 4)) +#define DB_CHUNK3 (CREATE_VIEW_ACL | SHOW_VIEW_ACL | \ + CREATE_PROC_ACL | ALTER_PROC_ACL ) +#define DB_CHUNK4 (EXECUTE_ACL) + +#define fix_rights_for_db(A) (((A) & DB_CHUNK0) | \ + (((A) << 4) & DB_CHUNK1) | \ + (((A) << 6) & DB_CHUNK2) | \ + (((A) << 9) & DB_CHUNK3) | \ + (((A) << 2) & DB_CHUNK4)) +#define get_rights_for_db(A) (((A) & DB_CHUNK0) | \ + (((A) & DB_CHUNK1) >> 4) | \ + (((A) & DB_CHUNK2) >> 6) | \ + (((A) & DB_CHUNK3) >> 9) | \ + (((A) & DB_CHUNK4) >> 2)) +#define TBL_CHUNK0 DB_CHUNK0 +#define TBL_CHUNK1 DB_CHUNK1 +#define TBL_CHUNK2 (CREATE_VIEW_ACL | SHOW_VIEW_ACL) +#define fix_rights_for_table(A) (((A) & TBL_CHUNK0) | \ + (((A) << 4) & TBL_CHUNK1) | \ + (((A) << 11) & TBL_CHUNK2)) +#define get_rights_for_table(A) (((A) & TBL_CHUNK0) | \ + (((A) & TBL_CHUNK1) >> 4) | \ + (((A) & TBL_CHUNK2) >> 11)) #define fix_rights_for_column(A) (((A) & 7) | (((A) & ~7) << 8)) #define get_rights_for_column(A) (((A) & 7) | ((A) >> 8)) +#define fix_rights_for_procedure(A) ((((A) << 18) & EXECUTE_ACL) | \ + (((A) << 23) & ALTER_PROC_ACL) | \ + (((A) << 8) & GRANT_ACL)) +#define get_rights_for_procedure(A) ((((A) & EXECUTE_ACL) >> 18) | \ + (((A) & ALTER_PROC_ACL) >> 23) | \ + (((A) & GRANT_ACL) >> 8)) /* Classes */ @@ -141,33 +180,57 @@ ulong acl_get(const char *host, const char *ip, const char *user, const char *db, my_bool db_is_pattern); int acl_getroot(THD *thd, USER_RESOURCES *mqh, const char *passwd, uint passwd_len); +bool acl_getroot_no_password(Security_context *sctx, char *user, char *host, + char *ip, char *db); bool 
acl_check_host(const char *host, const char *ip); bool check_change_password(THD *thd, const char *host, const char *user, char *password, uint password_len); bool change_password(THD *thd, const char *host, const char *user, char *password); -int mysql_grant(THD *thd, const char *db, List <LEX_USER> &user_list, - ulong rights, bool revoke); -int mysql_table_grant(THD *thd, TABLE_LIST *table, List <LEX_USER> &user_list, - List <LEX_COLUMN> &column_list, ulong rights, - bool revoke); +bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &user_list, + ulong rights, bool revoke); +bool mysql_table_grant(THD *thd, TABLE_LIST *table, List <LEX_USER> &user_list, + List <LEX_COLUMN> &column_list, ulong rights, + bool revoke); +bool mysql_routine_grant(THD *thd, TABLE_LIST *table, bool is_proc, + List <LEX_USER> &user_list, ulong rights, + bool revoke, bool no_error); my_bool grant_init(); void grant_free(void); my_bool grant_reload(THD *thd); bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, uint show_command, uint number, bool dont_print_error); -bool check_grant_column (THD *thd,TABLE *table, const char *name, uint length, - uint show_command=0); -bool check_grant_all_columns(THD *thd, ulong want_access, TABLE *table); +bool check_grant_column (THD *thd, GRANT_INFO *grant, + const char *db_name, const char *table_name, + const char *name, uint length, Security_context *sctx); +bool check_column_grant_in_table_ref(THD *thd, TABLE_LIST * table_ref, + const char *name, uint length); +bool check_grant_all_columns(THD *thd, ulong want_access, GRANT_INFO *grant, + const char* db_name, const char *table_name, + Field_iterator *fields); +bool check_grant_routine(THD *thd, ulong want_access, + TABLE_LIST *procs, bool is_proc, bool no_error); bool check_grant_db(THD *thd,const char *db); ulong get_table_grant(THD *thd, TABLE_LIST *table); -ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field); -int mysql_show_grants(THD *thd, LEX_USER *user); 
+ulong get_column_grant(THD *thd, GRANT_INFO *grant, + const char *db_name, const char *table_name, + const char *field_name); +bool mysql_show_grants(THD *thd, LEX_USER *user); void get_privilege_desc(char *to, uint max_length, ulong access); void get_mqh(const char *user, const char *host, USER_CONN *uc); -int mysql_drop_user(THD *thd, List <LEX_USER> &list); -int mysql_revoke_all(THD *thd, List <LEX_USER> &list); - +bool mysql_create_user(THD *thd, List <LEX_USER> &list); +bool mysql_drop_user(THD *thd, List <LEX_USER> &list); +bool mysql_rename_user(THD *thd, List <LEX_USER> &list); +bool mysql_revoke_all(THD *thd, List <LEX_USER> &list); +void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant, + const char *db, const char *table); +bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name, + bool is_proc); +bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name, + bool is_proc); +bool check_routine_level_acl(THD *thd, const char *db, const char *name, + bool is_proc); +bool is_acl_user(const char *host, const char *user); #ifdef NO_EMBEDDED_ACCESS_CHECKS #define check_grant(A,B,C,D,E,F) 0 #define check_grant_db(A,B) 0 diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index 3420368a026..95a7fb3d34d 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -27,6 +26,8 @@ #pragma implementation // gcc: Class implementation #endif +#define MYSQL_LEX 1 + #include "mysql_priv.h" #include "procedure.h" #include "sql_analyse.h" @@ -59,7 +60,11 @@ int compare_ulonglong2(void* cmp_arg __attribute__((unused)), return compare_ulonglong(s,t); } -static bool append_escaped(String *to_str, String *from_str); +int compare_decimal2(int* len, const char *s, const char *t) +{ + return memcmp(s, t, *len); +} + Procedure * proc_analyse_init(THD *thd, ORDER *param, select_result *result, @@ -81,8 +86,13 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result, else if (param->next) { // first parameter + if (!(*param->item)->fixed && (*param->item)->fix_fields(thd, param->item)) + { + DBUG_PRINT("info", ("fix_fields() for the first parameter failed")); + goto err; + } if ((*param->item)->type() != Item::INT_ITEM || - (*param->item)->val() < 0) + (*param->item)->val_real() < 0) { my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name); goto err; @@ -95,8 +105,13 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result, goto err; } // second parameter + if (!(*param->item)->fixed && (*param->item)->fix_fields(thd, param->item)) + { + DBUG_PRINT("info", ("fix_fields() for the second parameter failed")); + goto err; + } if ((*param->item)->type() != Item::INT_ITEM || - (*param->item)->val() < 0) + (*param->item)->val_real() < 0) { my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name); goto err; @@ -104,7 +119,7 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result, pc->max_treemem = (uint) (*param->item)->val_int(); } else if ((*param->item)->type() != Item::INT_ITEM || - (*param->item)->val() < 0) + (*param->item)->val_real() < 0) { my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name); goto err; @@ -129,20 +144,30 @@ proc_analyse_init(THD *thd, ORDER 
*param, select_result *result, Item *item; while ((item = it++)) { - if (item->result_type() == INT_RESULT) - { + field_info *new_field; + switch (item->result_type()) { + case INT_RESULT: // Check if fieldtype is ulonglong if (item->type() == Item::FIELD_ITEM && ((Item_field*) item)->field->type() == FIELD_TYPE_LONGLONG && ((Field_longlong*) ((Item_field*) item)->field)->unsigned_flag) - *f_info++ = new field_ulonglong(item, pc); + new_field= new field_ulonglong(item, pc); else - *f_info++ = new field_longlong(item, pc); + new_field= new field_longlong(item, pc); + break; + case REAL_RESULT: + new_field= new field_real(item, pc); + break; + case DECIMAL_RESULT: + new_field= new field_decimal(item, pc); + break; + case STRING_RESULT: + new_field= new field_str(item, pc); + break; + default: + goto err; } - if (item->result_type() == REAL_RESULT) - *f_info++ = new field_real(item, pc); - if (item->result_type() == STRING_RESULT) - *f_info++ = new field_str(item, pc); + *f_info++= new_field; } } DBUG_RETURN(pc); @@ -262,7 +287,7 @@ bool get_ev_num_info(EV_NUM_INFO *ev_info, NUM_INFO *info, const char *num) } else // ulonglong is as big as bigint in MySQL { - if ((check_ulonglong(num, info->integers) == REAL_NUM)) + if ((check_ulonglong(num, info->integers) == DECIMAL_NUM)) return 0; ev_info->ullval = (ulonglong) max(ev_info->ullval, info->ullval); ev_info->max_dval = (double) max(ev_info->max_dval, info->dval); @@ -370,7 +395,7 @@ void field_str::add() void field_real::add() { char buff[MAX_FIELD_WIDTH], *ptr, *end; - double num = item->val(); + double num= item->val_real(); uint length, zero_count, decs; TREE_ELEMENT *element; @@ -450,6 +475,88 @@ void field_real::add() } // field_real::add +void field_decimal::add() +{ + /*TODO - remove rounding stuff after decimal_div returns proper frac */ + my_decimal dec_buf, *dec= item->val_decimal(&dec_buf); + my_decimal rounded; + uint length; + TREE_ELEMENT *element; + + if (item->null_value) + { + nulls++; + return; + } + 
+ my_decimal_round(E_DEC_FATAL_ERROR, dec, item->decimals, FALSE,&rounded); + dec= &rounded; + + length= my_decimal_string_length(dec); + + if (decimal_is_zero(dec)) + empty++; + + if (room_in_tree) + { + char buf[DECIMAL_MAX_FIELD_SIZE]; + my_decimal2binary(E_DEC_FATAL_ERROR, dec, buf, + item->max_length, item->decimals); + if (!(element = tree_insert(&tree, (void*)buf, 0, tree.custom_arg))) + { + room_in_tree = 0; // Remove tree, out of RAM ? + delete_tree(&tree); + } + /* + if element->count == 1, this element can be found only once from tree + if element->count == 2, or more, this element is already in tree + */ + else if (element->count == 1 && (tree_elements++) >= pc->max_tree_elements) + { + room_in_tree = 0; // Remove tree, too many elements + delete_tree(&tree); + } + } + + if (!found) + { + found = 1; + min_arg = max_arg = sum[0] = *dec; + min_arg.fix_buffer_pointer(); + max_arg.fix_buffer_pointer(); + sum[0].fix_buffer_pointer(); + my_decimal_mul(E_DEC_FATAL_ERROR, sum_sqr, dec, dec); + cur_sum= 0; + min_length = max_length = length; + } + else if (!decimal_is_zero(dec)) + { + int next_cur_sum= cur_sum ^ 1; + my_decimal sqr_buf; + + my_decimal_add(E_DEC_FATAL_ERROR, sum+next_cur_sum, sum+cur_sum, dec); + my_decimal_mul(E_DEC_FATAL_ERROR, &sqr_buf, dec, dec); + my_decimal_add(E_DEC_FATAL_ERROR, + sum_sqr+next_cur_sum, sum_sqr+cur_sum, &sqr_buf); + cur_sum= next_cur_sum; + if (length < min_length) + min_length = length; + if (length > max_length) + max_length = length; + if (my_decimal_cmp(dec, &min_arg) < 0) + { + min_arg= *dec; + min_arg.fix_buffer_pointer(); + } + if (my_decimal_cmp(dec, &max_arg) > 0) + { + max_arg= *dec; + max_arg.fix_buffer_pointer(); + } + } +} + + void field_longlong::add() { char buff[MAX_FIELD_WIDTH]; @@ -637,13 +744,13 @@ bool analyse::end_of_records() tree_info.found = 0; tree_info.item = (*f)->item; - tmp_str.set("ENUM(", 5,&my_charset_bin); + tmp_str.set(STRING_WITH_LEN("ENUM("),&my_charset_bin); tree_walk(&(*f)->tree, 
(*f)->collect_enum(), (char*) &tree_info, left_root_right); tmp_str.append(')'); if (!(*f)->nulls) - tmp_str.append(" NOT NULL"); + tmp_str.append(STRING_WITH_LEN(" NOT NULL")); output_str_length = tmp_str.length(); func_items[9]->set(tmp_str.ptr(), tmp_str.length(), tmp_str.charset()); if (result->send_data(result_fields)) @@ -653,35 +760,35 @@ bool analyse::end_of_records() ans.length(0); if (!(*f)->treemem && !(*f)->tree_elements) - ans.append("CHAR(0)", 7); + ans.append(STRING_WITH_LEN("CHAR(0)")); else if ((*f)->item->type() == Item::FIELD_ITEM) { switch (((Item_field*) (*f)->item)->field->real_type()) { case FIELD_TYPE_TIMESTAMP: - ans.append("TIMESTAMP", 9); + ans.append(STRING_WITH_LEN("TIMESTAMP")); break; case FIELD_TYPE_DATETIME: - ans.append("DATETIME", 8); + ans.append(STRING_WITH_LEN("DATETIME")); break; case FIELD_TYPE_DATE: case FIELD_TYPE_NEWDATE: - ans.append("DATE", 4); + ans.append(STRING_WITH_LEN("DATE")); break; case FIELD_TYPE_SET: - ans.append("SET", 3); + ans.append(STRING_WITH_LEN("SET")); break; case FIELD_TYPE_YEAR: - ans.append("YEAR", 4); + ans.append(STRING_WITH_LEN("YEAR")); break; case FIELD_TYPE_TIME: - ans.append("TIME", 4); + ans.append(STRING_WITH_LEN("TIME")); break; case FIELD_TYPE_DECIMAL: - ans.append("DECIMAL", 7); + ans.append(STRING_WITH_LEN("DECIMAL")); // if item is FIELD_ITEM, it _must_be_ Field_num in this case if (((Field_num*) ((Item_field*) (*f)->item)->field)->zerofill) - ans.append(" ZEROFILL"); + ans.append(STRING_WITH_LEN(" ZEROFILL")); break; default: (*f)->get_opt_type(&ans, rows); @@ -689,7 +796,7 @@ bool analyse::end_of_records() } } if (!(*f)->nulls) - ans.append(" NOT NULL"); + ans.append(STRING_WITH_LEN(" NOT NULL")); func_items[9]->set(ans.ptr(), ans.length(), ans.charset()); if (result->send_data(result_fields)) return -1; @@ -733,18 +840,18 @@ void field_str::get_opt_type(String *answer, ha_rows total_rows) sprintf(buff, "BIGINT(%d)", num_info.integers); answer->append(buff, (uint) strlen(buff)); if 
(ev_num_info.llval >= 0 && ev_num_info.min_dval >= 0) - answer->append(" UNSIGNED"); + answer->append(STRING_WITH_LEN(" UNSIGNED")); if (num_info.zerofill) - answer->append(" ZEROFILL"); + answer->append(STRING_WITH_LEN(" ZEROFILL")); } else if (max_length < 256) { if (must_be_blob) { if (item->collation.collation == &my_charset_bin) - answer->append("TINYBLOB", 8); + answer->append(STRING_WITH_LEN("TINYBLOB")); else - answer->append("TINYTEXT", 8); + answer->append(STRING_WITH_LEN("TINYTEXT")); } else if ((max_length * (total_rows - nulls)) < (sum + total_rows)) { @@ -760,23 +867,23 @@ void field_str::get_opt_type(String *answer, ha_rows total_rows) else if (max_length < (1L << 16)) { if (item->collation.collation == &my_charset_bin) - answer->append("BLOB", 4); + answer->append(STRING_WITH_LEN("BLOB")); else - answer->append("TEXT", 4); + answer->append(STRING_WITH_LEN("TEXT")); } else if (max_length < (1L << 24)) { if (item->collation.collation == &my_charset_bin) - answer->append("MEDIUMBLOB", 10); + answer->append(STRING_WITH_LEN("MEDIUMBLOB")); else - answer->append("MEDIUMTEXT", 10); + answer->append(STRING_WITH_LEN("MEDIUMTEXT")); } else { if (item->collation.collation == &my_charset_bin) - answer->append("LONGBLOB", 8); + answer->append(STRING_WITH_LEN("LONGBLOB")); else - answer->append("LONGTEXT", 8); + answer->append(STRING_WITH_LEN("LONGTEXT")); } } // field_str::get_opt_type @@ -806,14 +913,14 @@ void field_real::get_opt_type(String *answer, sprintf(buff, "BIGINT(%d)", len); answer->append(buff, (uint) strlen(buff)); if (min_arg >= 0) - answer->append(" UNSIGNED"); + answer->append(STRING_WITH_LEN(" UNSIGNED")); } else if (item->decimals == NOT_FIXED_DEC) { if (min_arg >= -FLT_MAX && max_arg <= FLT_MAX) - answer->append("FLOAT", 5); + answer->append(STRING_WITH_LEN("FLOAT")); else - answer->append("DOUBLE", 6); + answer->append(STRING_WITH_LEN("DOUBLE")); } else { @@ -830,7 +937,7 @@ void field_real::get_opt_type(String *answer, // a single number 
shouldn't be zerofill (max_length - (item->decimals + 1)) != 1 && ((Field_num*) ((Item_field*) item)->field)->zerofill) - answer->append(" ZEROFILL"); + answer->append(STRING_WITH_LEN(" ZEROFILL")); } // field_real::get_opt_type @@ -854,14 +961,14 @@ void field_longlong::get_opt_type(String *answer, sprintf(buff, "BIGINT(%d)", (int) max_length); answer->append(buff, (uint) strlen(buff)); if (min_arg >= 0) - answer->append(" UNSIGNED"); + answer->append(STRING_WITH_LEN(" UNSIGNED")); // if item is FIELD_ITEM, it _must_be_ Field_num in this class if ((item->type() == Item::FIELD_ITEM) && // a single number shouldn't be zerofill max_length != 1 && ((Field_num*) ((Item_field*) item)->field)->zerofill) - answer->append(" ZEROFILL"); + answer->append(STRING_WITH_LEN(" ZEROFILL")); } // field_longlong::get_opt_type @@ -886,10 +993,88 @@ void field_ulonglong::get_opt_type(String *answer, // a single number shouldn't be zerofill max_length != 1 && ((Field_num*) ((Item_field*) item)->field)->zerofill) - answer->append(" ZEROFILL"); + answer->append(STRING_WITH_LEN(" ZEROFILL")); } //field_ulonglong::get_opt_type +void field_decimal::get_opt_type(String *answer, + ha_rows total_rows __attribute__((unused))) +{ + my_decimal zero; + char buff[MAX_FIELD_WIDTH]; + uint length; + + my_decimal_set_zero(&zero); + my_bool is_unsigned= (my_decimal_cmp(&zero, &min_arg) >= 0); + + length= my_sprintf(buff, (buff, "DECIMAL(%d, %d)", + (int) (max_length - (item->decimals ? 
1 : 0)), + item->decimals)); + if (is_unsigned) + length= (uint) (strmov(buff+length, " UNSIGNED")- buff); + answer->append(buff, length); +} + + +String *field_decimal::get_min_arg(String *str) +{ + my_decimal2string(E_DEC_FATAL_ERROR, &min_arg, 0, 0, '0', str); + return str; +} + + +String *field_decimal::get_max_arg(String *str) +{ + my_decimal2string(E_DEC_FATAL_ERROR, &max_arg, 0, 0, '0', str); + return str; +} + + +String *field_decimal::avg(String *s, ha_rows rows) +{ + if (!(rows - nulls)) + { + s->set((double) 0.0, 1,my_thd_charset); + return s; + } + my_decimal num, avg_val, rounded_avg; + int prec_increment= current_thd->variables.div_precincrement; + + int2my_decimal(E_DEC_FATAL_ERROR, rows - nulls, FALSE, &num); + my_decimal_div(E_DEC_FATAL_ERROR, &avg_val, sum+cur_sum, &num, prec_increment); + /* TODO remove this after decimal_div returns proper frac */ + my_decimal_round(E_DEC_FATAL_ERROR, &avg_val, + min(sum[cur_sum].frac + prec_increment, DECIMAL_MAX_SCALE), + FALSE,&rounded_avg); + my_decimal2string(E_DEC_FATAL_ERROR, &rounded_avg, 0, 0, '0', s); + return s; +} + + +String *field_decimal::std(String *s, ha_rows rows) +{ + if (!(rows - nulls)) + { + s->set((double) 0.0, 1,my_thd_charset); + return s; + } + my_decimal num, tmp, sum2, sum2d; + double std_sqr; + int prec_increment= current_thd->variables.div_precincrement; + + int2my_decimal(E_DEC_FATAL_ERROR, rows - nulls, FALSE, &num); + my_decimal_mul(E_DEC_FATAL_ERROR, &sum2, sum+cur_sum, sum+cur_sum); + my_decimal_div(E_DEC_FATAL_ERROR, &tmp, &sum2, &num, prec_increment); + my_decimal_sub(E_DEC_FATAL_ERROR, &sum2, sum_sqr+cur_sum, &tmp); + my_decimal_div(E_DEC_FATAL_ERROR, &tmp, &sum2, &num, prec_increment); + my_decimal2double(E_DEC_FATAL_ERROR, &tmp, &std_sqr); + s->set(((double) std_sqr <= 0.0 ? 
0.0 : sqrt(std_sqr)), + min(item->decimals + prec_increment, NOT_FIXED_DEC), my_thd_charset); + + return s; +} + + int collect_string(String *element, element_count count __attribute__((unused)), TREE_INFO *info) @@ -924,6 +1109,28 @@ int collect_real(double *element, element_count count __attribute__((unused)), } // collect_real +int collect_decimal(char *element, element_count count, + TREE_INFO *info) +{ + char buff[DECIMAL_MAX_STR_LENGTH]; + String s(buff, sizeof(buff),&my_charset_bin); + + if (info->found) + info->str->append(','); + else + info->found = 1; + my_decimal dec; + binary2my_decimal(E_DEC_FATAL_ERROR, element, &dec, + info->item->max_length, info->item->decimals); + + info->str->append('\''); + my_decimal2string(E_DEC_FATAL_ERROR, &dec, 0, 0, '0', &s); + info->str->append(s); + info->str->append('\''); + return 0; +} + + int collect_longlong(longlong *element, element_count count __attribute__((unused)), TREE_INFO *info) @@ -1025,12 +1232,12 @@ uint check_ulonglong(const char *str, uint length) bigger = LONG_NUM; } else if (length > ulonglong_len) - return REAL_NUM; + return DECIMAL_NUM; else { cmp = ulonglong_str; smaller = LONG_NUM; - bigger = REAL_NUM; + bigger = DECIMAL_NUM; } while (*cmp && *cmp++ == *str++) ; return ((uchar) str[-1] <= (uchar) cmp[-1]) ? 
smaller : bigger; @@ -1055,7 +1262,7 @@ uint check_ulonglong(const char *str, uint length) 1 Out of memory */ -static bool append_escaped(String *to_str, String *from_str) +bool append_escaped(String *to_str, String *from_str) { char *from, *end, c; diff --git a/sql/sql_analyse.h b/sql/sql_analyse.h index 8523b05a1de..21a37209e89 100644 --- a/sql/sql_analyse.h +++ b/sql/sql_analyse.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2003, 2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -61,6 +60,7 @@ int compare_longlong2(void* cmp_arg __attribute__((unused)), int compare_ulonglong(const ulonglong *s, const ulonglong *t); int compare_ulonglong2(void* cmp_arg __attribute__((unused)), const ulonglong *s, const ulonglong *t); +int compare_decimal2(int* len, const char *s, const char *t); Procedure *proc_analyse_init(THD *thd, ORDER *param, select_result *result, List<Item> &field_list); void free_string(String*); @@ -143,6 +143,36 @@ public: }; +int collect_decimal(char *element, element_count count, + TREE_INFO *info); + +class field_decimal :public field_info +{ + my_decimal min_arg, max_arg; + my_decimal sum[2], sum_sqr[2]; + int cur_sum; + int bin_size; +public: + field_decimal(Item* a, analyse* b) :field_info(a,b) + { + bin_size= my_decimal_get_binary_size(a->max_length, a->decimals); + init_tree(&tree, 0, 0, bin_size, (qsort_cmp2)compare_decimal2, + 0, 0, (void *)&bin_size); + }; + + void add(); + void get_opt_type(String*, ha_rows); + String *get_min_arg(String *); + String *get_max_arg(String *); + String 
*avg(String *s, ha_rows rows); + friend int collect_decimal(char *element, element_count count, + TREE_INFO *info); + tree_walk_action collect_enum() + { return (tree_walk_action) collect_decimal; } + String *std(String *s, ha_rows rows); +}; + + int collect_real(double *element, element_count count, TREE_INFO *info); class field_real: public field_info diff --git a/sql/sql_array.h b/sql/sql_array.h new file mode 100644 index 00000000000..e2e12bee241 --- /dev/null +++ b/sql/sql_array.h @@ -0,0 +1,68 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include <my_sys.h> + +/* + A typesafe wrapper around DYNAMIC_ARRAY +*/ + +template <class Elem> class Dynamic_array +{ + DYNAMIC_ARRAY array; +public: + Dynamic_array(uint prealloc=16, uint increment=16) + { + my_init_dynamic_array(&array, sizeof(Elem), prealloc, increment); + } + + Elem& at(int idx) + { + return *(((Elem*)array.buffer) + idx); + } + + Elem *front() + { + return (Elem*)array.buffer; + } + + Elem *back() + { + return ((Elem*)array.buffer) + array.elements; + } + + bool append(Elem &el) + { + return (insert_dynamic(&array, (gptr)&el)); + } + + int elements() + { + return array.elements; + } + + ~Dynamic_array() + { + delete_dynamic(&array); + } + + typedef int (*CMP_FUNC)(const Elem *el1, const Elem *el2); + + void sort(CMP_FUNC cmp_func) + { + qsort(array.buffer, 
array.elements, sizeof(Elem), (qsort_cmp)cmp_func); + } +}; + diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 2f7661182a6..1689e5c65c0 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -19,29 +18,89 @@ #include "mysql_priv.h" #include "sql_select.h" +#include "sp_head.h" +#include "sp.h" +#include "sql_trigger.h" #include <m_ctype.h> #include <my_dir.h> #include <hash.h> -#include <nisam.h> #ifdef __WIN__ #include <io.h> #endif +/** + This internal handler is used to trap internally + errors that can occur when executing open table + during the prelocking phase. 
+*/ +class Prelock_error_handler : public Internal_error_handler +{ +public: + Prelock_error_handler() + : m_handled_errors(0), m_unhandled_errors(0) + {} + + virtual ~Prelock_error_handler() {} + + virtual bool handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level level, + THD *thd); + + bool safely_trapped_errors(); + +private: + int m_handled_errors; + int m_unhandled_errors; +}; + + +bool +Prelock_error_handler::handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level /* level */, + THD * /* thd */) +{ + if (sql_errno == ER_NO_SUCH_TABLE) + { + m_handled_errors++; + return TRUE; // 'TRUE', as per coding style + } + + m_unhandled_errors++; + return FALSE; // 'FALSE', as per coding style +} + + +bool Prelock_error_handler::safely_trapped_errors() +{ + /* + If m_unhandled_errors != 0, something else, unanticipated, happened, + so the error is not trapped but returned to the caller. + Multiple ER_NO_SUCH_TABLE can be raised in case of views. + */ + return ((m_handled_errors > 0) && (m_unhandled_errors == 0)); +} + + TABLE *unused_tables; /* Used by mysql_test */ HASH open_cache; /* Used by mysql_test */ -HASH assign_cache; -static int open_unireg_entry(THD *thd,TABLE *entry,const char *db, - const char *name, const char *alias); +static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, + const char *name, const char *alias, + TABLE_LIST *table_list, MEM_ROOT *mem_root, + uint flags); static void free_cache_entry(TABLE *entry); - +static bool open_new_frm(THD *thd, const char *path, const char *alias, + const char *db, const char *table_name, + uint db_stat, uint prgflag, + uint ha_open_flags, TABLE *outparam, + TABLE_LIST *table_desc, MEM_ROOT *mem_root); extern "C" byte *table_cache_key(const byte *record,uint *length, my_bool not_used __attribute__((unused))) { TABLE *entry=(TABLE*) record; - *length=entry->key_length; - return (byte*) entry->table_cache_key; + *length= entry->s->key_length; + return (byte*) entry->s->table_cache_key; } 
bool table_cache_init(void) @@ -121,12 +180,11 @@ static void check_unused(void) # Pointer to list of names of open tables. */ -OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) +OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild) { int result = 0; OPEN_TABLE_LIST **start_list, *open_list; TABLE_LIST table_list; - char name[NAME_LEN*2]; DBUG_ENTER("list_open_tables"); VOID(pthread_mutex_lock(&LOCK_open)); @@ -138,20 +196,19 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) { OPEN_TABLE_LIST *table; TABLE *entry=(TABLE*) hash_element(&open_cache,idx); + TABLE_SHARE *share= entry->s; - DBUG_ASSERT(entry->real_name); - if ((!entry->real_name)) // To be removed + DBUG_ASSERT(share->table_name != 0); + if ((!share->table_name)) // To be removed continue; // Shouldn't happen - if (wild) - { - strxmov(name,entry->table_cache_key,".",entry->real_name,NullS); - if (wild_compare(name,wild,0)) - continue; - } + if (db && my_strcasecmp(system_charset_info, db, share->db)) + continue; + if (wild && wild_compare(share->table_name,wild,0)) + continue; /* Check if user has SELECT privilege for any column in the table */ - table_list.db= (char*) entry->table_cache_key; - table_list.real_name= entry->real_name; + table_list.db= (char*) share->db; + table_list.table_name= (char*) share->table_name; table_list.grant.privilege=0; if (check_table_access(thd,SELECT_ACL | EXTRA_ACL,&table_list,1)) @@ -159,8 +216,8 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) /* need to check if we haven't already listed it */ for (table= open_list ; table ; table=table->next) { - if (!strcmp(table->table,entry->real_name) && - !strcmp(table->db,entry->table_cache_key)) + if (!strcmp(table->table,share->table_name) && + !strcmp(table->db,entry->s->db)) { if (entry->in_use) table->in_use++; @@ -172,15 +229,15 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) if (table) continue; if (!(*start_list = (OPEN_TABLE_LIST *) - 
sql_alloc(sizeof(**start_list)+entry->key_length))) + sql_alloc(sizeof(**start_list)+share->key_length))) { open_list=0; // Out of memory break; } strmov((*start_list)->table= strmov(((*start_list)->db= (char*) ((*start_list)+1)), - entry->table_cache_key)+1, - entry->real_name); + entry->s->db)+1, + entry->s->table_name); (*start_list)->in_use= entry->in_use ? 1 : 0; (*start_list)->locked= entry->locked_by_name ? 1 : 0; start_list= &(*start_list)->next; @@ -198,6 +255,7 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) void intern_close_table(TABLE *table) { // Free all structures free_io_cache(table); + delete table->triggers; if (table->file) VOID(closefrm(table)); // close file } @@ -280,9 +338,9 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, else { bool found=0; - for (TABLE_LIST *table=tables ; table ; table=table->next) + for (TABLE_LIST *table= tables; table; table= table->next_local) { - if (remove_table_from_cache(thd, table->db, table->real_name, + if (remove_table_from_cache(thd, table->db, table->table_name, RTFC_OWNED_BY_THD_FLAG)) found=1; } @@ -309,16 +367,17 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, bool found=1; /* Wait until all threads has closed all the tables we had locked */ DBUG_PRINT("info", - ("Waiting for others threads to close their open tables")); + ("Waiting for other threads to close their open tables")); while (found && ! 
thd->killed) { found=0; for (uint idx=0 ; idx < open_cache.records ; idx++) { TABLE *table=(TABLE*) hash_element(&open_cache,idx); - if ((table->version) < refresh_version && table->db_stat) + if ((table->s->version) < refresh_version && table->db_stat) { found=1; + DBUG_PRINT("signal", ("Waiting for COND_refresh")); pthread_cond_wait(&COND_refresh,&LOCK_open); break; } @@ -333,8 +392,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, result=reopen_tables(thd,1,1); thd->in_lock_tables=0; /* Set version for table */ - for (TABLE *table=thd->open_tables; table ; table=table->next) - table->version=refresh_version; + for (TABLE *table=thd->open_tables; table ; table= table->next) + table->s->version= refresh_version; } VOID(pthread_mutex_unlock(&LOCK_open)); if (if_wait_for_refresh) @@ -350,7 +409,39 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, /* - Close all tables used by thread + Mark all tables in the list which were used by current substatement + as free for reuse. + + SYNOPSIS + mark_used_tables_as_free_for_reuse() + thd - thread context + table - head of the list of tables + + DESCRIPTION + Marks all tables in the list which were used by current substatement + (they are marked by its query_id) as free for reuse. + + NOTE + The reason we reset query_id is that it's not enough to just test + if table->query_id != thd->query_id to know if a table is in use. + + For example + SELECT f1_that_uses_t1() FROM t1; + In f1_that_uses_t1() we will see one instance of t1 where query_id is + set to query_id of original query. +*/ + +static void mark_used_tables_as_free_for_reuse(THD *thd, TABLE *table) +{ + for (; table ; table= table->next) + if (table->query_id == thd->query_id) + table->query_id= 0; +} + + +/* + Close all tables used by the current substatement, or all tables + used by this thread if we are on the upper level. 
SYNOPSIS close_thread_tables() @@ -359,23 +450,42 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, LOCK_open skip_derived Set to 1 (0 = default) if we should not free derived tables. + stopper When closing tables from thd->open_tables(->next)*, + don't close/remove tables starting from stopper. IMPLEMENTATION Unlocks tables and frees derived tables. Put all normal tables used by thread in free list. + + When in prelocked mode it will only close/mark as free for reuse + tables opened by this substatement, it will also check if we are + closing tables after execution of complete query (i.e. we are on + upper level) and will leave prelocked mode if needed. */ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) { bool found_old_table; + prelocked_mode_type prelocked_mode= thd->prelocked_mode; DBUG_ENTER("close_thread_tables"); + /* + We are assuming here that thd->derived_tables contains ONLY derived + tables for this substatement. i.e. instead of approach which uses + query_id matching for determining which of the derived tables belong + to this substatement we rely on the ability of substatements to + save/restore thd->derived_tables during their execution. + + TODO: Probably even better approach is to simply associate list of + derived tables with (sub-)statement instead of thread and destroy + them at the end of its execution. 
+ */ if (thd->derived_tables && !skip_derived) { TABLE *table, *next; /* - Close all derived tables generated from questions like - SELECT * from (select * from t1)) + Close all derived tables generated in queries like + SELECT * FROM (SELECT * FROM t1) */ for (table= thd->derived_tables ; table ; table= next) { @@ -384,10 +494,55 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) } thd->derived_tables= 0; } - if (thd->locked_tables) + + if (prelocked_mode) { - ha_commit_stmt(thd); // If select statement - DBUG_VOID_RETURN; // LOCK TABLES in use + /* + Mark all temporary tables used by this substatement as free for reuse. + */ + mark_used_tables_as_free_for_reuse(thd, thd->temporary_tables); + } + + if (thd->locked_tables || prelocked_mode) + { + /* + Let us commit transaction for statement. Since in 5.0 we only have + one statement transaction and don't allow several nested statement + transactions this call will do nothing if we are inside of stored + function or trigger (i.e. statement transaction is already active and + does not belong to statement for which we do close_thread_tables()). + TODO: This should be fixed in later releases. + */ + ha_commit_stmt(thd); + + /* We are under simple LOCK TABLES so should not do anything else. */ + if (!prelocked_mode) + DBUG_VOID_RETURN; + + if (!thd->lex->requires_prelocking()) + { + /* + If we are executing one of substatements we have to mark + all tables which it used as free for reuse. + */ + mark_used_tables_as_free_for_reuse(thd, thd->open_tables); + DBUG_VOID_RETURN; + } + + DBUG_ASSERT(prelocked_mode); + /* + We are in prelocked mode, so we have to leave it now with doing + implicit UNLOCK TABLES if need. 
+ */ + DBUG_PRINT("info",("thd->prelocked_mode= NON_PRELOCKED")); + thd->prelocked_mode= NON_PRELOCKED; + + if (prelocked_mode == PRELOCKED_UNDER_LOCK_TABLES) + DBUG_VOID_RETURN; + + thd->lock= thd->locked_tables; + thd->locked_tables= 0; + /* Fallthrough */ } if (thd->lock) @@ -395,14 +550,34 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) mysql_unlock_tables(thd, thd->lock); thd->lock=0; } + /* + assume handlers auto-commit (if some doesn't - transaction handling + in MySQL should be redesigned to support it; it's a big change, + and it's not worth it - better to commit explicitly only writing + transactions, read-only ones should better take care of themselves. + saves some work in 2pc too) + see also sql_parse.cc - dispatch_command() + */ + bzero(&thd->transaction.stmt, sizeof(thd->transaction.stmt)); + if (!thd->active_transaction()) + thd->transaction.xid_state.xid.null(); + /* VOID(pthread_sigmask(SIG_SETMASK,&thd->block_signals,NULL)); */ if (!lock_in_use) VOID(pthread_mutex_lock(&LOCK_open)); safe_mutex_assert_owner(&LOCK_open); - DBUG_PRINT("info", ("thd->open_tables=%p", thd->open_tables)); + DBUG_PRINT("info", ("thd->open_tables: %p", thd->open_tables)); - found_old_table= 0; + + /* + End open index scans and table scans and remove references to the tables + from the handler tables hash. After this preparation it is safe to close + the tables. 
+ */ + mysql_ha_mark_tables_for_reopen(thd, thd->open_tables); + + found_old_table= 0; while (thd->open_tables) found_old_table|=close_thread_table(thd, &thd->open_tables); thd->some_tables_deleted=0; @@ -414,11 +589,22 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) if (found_old_table) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } if (!lock_in_use) VOID(pthread_mutex_unlock(&LOCK_open)); /* VOID(pthread_sigmask(SIG_SETMASK,&thd->signals,NULL)); */ + + if (prelocked_mode == PRELOCKED) + { + /* + If we are here then we are leaving normal prelocked mode, so it is + good idea to turn off OPTION_TABLE_LOCK flag. + */ + DBUG_ASSERT(thd->lex->requires_prelocking()); + thd->options&= ~(ulong) (OPTION_TABLE_LOCK); + } + DBUG_VOID_RETURN; } @@ -426,14 +612,14 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) bool close_thread_table(THD *thd, TABLE **table_ptr) { - DBUG_ENTER("close_thread_table"); - bool found_old_table= 0; TABLE *table= *table_ptr; + DBUG_ENTER("close_thread_table"); DBUG_ASSERT(table->key_read == 0); + DBUG_ASSERT(table->file->inited == handler::NONE); *table_ptr=table->next; - if (table->version != refresh_version || + if (table->s->version != refresh_version || thd->version != refresh_version || !table->db_stat) { VOID(hash_delete(&open_cache,(byte*) table)); @@ -441,9 +627,9 @@ bool close_thread_table(THD *thd, TABLE **table_ptr) } else { - if (table->flush_version != flush_version) + if (table->s->flush_version != flush_version) { - table->flush_version=flush_version; + table->s->flush_version= flush_version; table->file->extra(HA_EXTRA_FLUSH); } else @@ -471,8 +657,8 @@ void close_temporary(TABLE *table,bool delete_table) { DBUG_ENTER("close_temporary"); char path[FN_REFLEN]; - db_type table_type=table->db_type; - strmov(path,table->path); + db_type table_type=table->s->db_type; + 
strmov(path,table->s->path); free_io_cache(table); closefrm(table); my_free((char*) table,MYF(0)); @@ -484,7 +670,7 @@ void close_temporary(TABLE *table,bool delete_table) /* close_temporary_tables' internal, 4 is due to uint4korr definition */ static inline uint tmpkeyval(THD *thd, TABLE *table) { - return uint4korr(table->table_cache_key + table->key_length - 4); + return uint4korr(table->s->table_cache_key + table->s->key_length - 4); } /* Creates one DROP TEMPORARY TABLE binlog event for each pseudo-thread */ @@ -519,11 +705,10 @@ void close_temporary_tables(THD *thd) bool found_user_tables= false; LINT_INIT(next); - /* - insertion sort of temp tables by pseudo_thread_id to build ordered list + /* + insertion sort of temp tables by pseudo_thread_id to build ordered list of sublists of equal pseudo_thread_id */ - for (prev_table= thd->temporary_tables, table= prev_table->next; table; prev_table= table, table= table->next) @@ -533,7 +718,7 @@ void close_temporary_tables(THD *thd) { if (!found_user_tables) found_user_tables= true; - for (prev_sorted= NULL, sorted= thd->temporary_tables; sorted != table; + for (prev_sorted= NULL, sorted= thd->temporary_tables; sorted != table; prev_sorted= sorted, sorted= sorted->next) { if (!is_user_table(sorted) || @@ -542,7 +727,7 @@ void close_temporary_tables(THD *thd) /* move into the sorted part of the list from the unsorted */ prev_table->next= table->next; table->next= sorted; - if (prev_sorted) + if (prev_sorted) { prev_sorted->next= table; } @@ -559,15 +744,15 @@ void close_temporary_tables(THD *thd) /* We always quote db,table names though it is slight overkill */ if (found_user_tables && - !(was_quote_show= (thd->options & OPTION_QUOTE_SHOW_CREATE))) + !(was_quote_show= test(thd->options & OPTION_QUOTE_SHOW_CREATE))) { thd->options |= OPTION_QUOTE_SHOW_CREATE; } - + /* scan sorted tmps to generate sequence of DROP */ for (table= thd->temporary_tables; table; table= next) { - if (is_user_table(table)) + if 
(is_user_table(table)) { /* Set pseudo_thread_id to be that of the processed table */ thd->variables.pseudo_thread_id= tmpkeyval(thd, table); @@ -582,18 +767,21 @@ void close_temporary_tables(THD *thd) We are going to add 4 ` around the db/table names and possible more due to special characters in the names */ - append_identifier(thd, &s_query, table->table_cache_key, strlen(table->table_cache_key)); + append_identifier(thd, &s_query, table->s->db, strlen(table->s->db)); s_query.q_append('.'); - append_identifier(thd, &s_query, table->real_name, - strlen(table->real_name)); + append_identifier(thd, &s_query, table->s->table_name, + strlen(table->s->table_name)); s_query.q_append(','); next= table->next; close_temporary(table, 1); } thd->clear_error(); + CHARSET_INFO *cs_save= thd->variables.character_set_client; + thd->variables.character_set_client= system_charset_info; Query_log_event qinfo(thd, s_query.ptr(), s_query.length() - 1 /* to remove trailing ',' */, 0, FALSE); + thd->variables.character_set_client= cs_save; /* Imagine the thread had created a temp table, then was doing a SELECT, and the SELECT was killed. Then it's not clever to mark the statement above as @@ -604,9 +792,9 @@ void close_temporary_tables(THD *thd) rightfully causing the slave to stop. */ qinfo.error_code= 0; - write_binlog_with_system_charset(thd, &qinfo); + mysql_bin_log.write(&qinfo); } - else + else { next= table->next; close_temporary(table, 1); @@ -617,56 +805,194 @@ void close_temporary_tables(THD *thd) thd->temporary_tables=0; } + /* - Find first suitable table by alias in given list. + Find table in list. SYNOPSIS find_table_in_list() - table - pointer to table list - db_name - data base name or 0 for any - table_name - table name or 0 for any + table Pointer to table list + offset Offset to which list in table structure to use + db_name Data base name + table_name Table name + + NOTES: + This is called by find_table_in_local_list() and + find_table_in_global_list(). 
RETURN VALUES NULL Table not found # Pointer to found table. */ -TABLE_LIST * find_table_in_list(TABLE_LIST *table, - const char *db_name, const char *table_name) +TABLE_LIST *find_table_in_list(TABLE_LIST *table, + st_table_list *TABLE_LIST::*link, + const char *db_name, + const char *table_name) { - for (; table; table= table->next) - if ((!db_name || !strcmp(table->db, db_name)) && - (!table_name || !my_strcasecmp(table_alias_charset, - table->alias, table_name))) + for (; table; table= table->*link ) + { + if ((table->table == 0 || table->table->s->tmp_table == NO_TMP_TABLE) && + strcmp(table->db, db_name) == 0 && + strcmp(table->table_name, table_name) == 0) break; + } return table; } + /* - Find real table in given list. + Test that table is unique (It's only exists once in the table list) SYNOPSIS - find_real_table_in_list() - table - pointer to table list - db_name - data base name - table_name - table name + unique_table() + thd thread handle + table table which should be checked + table_list list of tables + check_alias whether to check tables' aliases + + NOTE: to exclude derived tables from check we use following mechanism: + a) during derived table processing set THD::derived_tables_processing + b) JOIN::prepare set SELECT::exclude_from_table_unique_test if + THD::derived_tables_processing set. (we can't use JOIN::execute + because for PS we perform only JOIN::prepare, but we can't set this + flag in JOIN::prepare if we are not sure that we are in derived table + processing loop, because multi-update call fix_fields() for some its + items (which mean JOIN::prepare for subqueries) before unique_table + call to detect which tables should be locked for write). + c) unique_table skip all tables which belong to SELECT with + SELECT::exclude_from_table_unique_test set. 
+ Also SELECT::exclude_from_table_unique_test used to exclude from check + tables of main SELECT of multi-delete and multi-update + + We also skip tables with TABLE_LIST::prelocking_placeholder set, + because we want to allow SELECTs from them, and their modification + will rise the error anyway. + + TODO: when we will have table/view change detection we can do this check + only once for PS/SP - RETURN VALUES - NULL Table not found - # Pointer to found table. + RETURN + found duplicate + 0 if table is unique */ -TABLE_LIST * find_real_table_in_list(TABLE_LIST *table, - const char *db_name, - const char *table_name) +TABLE_LIST* unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list, + bool check_alias) { - for (; table; table= table->next) - if (!strcmp(table->db, db_name) && - !strcmp(table->real_name, table_name)) + TABLE_LIST *res; + const char *d_name, *t_name, *t_alias; + DBUG_ENTER("unique_table"); + DBUG_PRINT("enter", ("table alias: %s", table->alias)); + + /* + If this function called for query which update table (INSERT/UPDATE/...) + then we have in table->table pointer to TABLE object which we are + updating even if it is VIEW so we need TABLE_LIST of this TABLE object + to get right names (even if lower_case_table_names used). + + If this function called for CREATE command that we have not opened table + (table->table equal to 0) and right names is in current TABLE_LIST + object. + */ + if (table->table) + { + /* temporary table is always unique */ + if (table->table && table->table->s->tmp_table != NO_TMP_TABLE) + DBUG_RETURN(0); + table= table->find_underlying_table(table->table); + /* + as far as we have table->table we have to find real TABLE_LIST of + it in underlying tables + */ + DBUG_ASSERT(table); + } + d_name= table->db; + t_name= table->table_name; + t_alias= table->alias; + + DBUG_PRINT("info", ("real table: %s.%s", d_name, t_name)); + for (;;) + { + if (((! (res= find_table_in_global_list(table_list, d_name, t_name))) && + (! 
(res= mysql_lock_have_duplicate(thd, table, table_list)))) || + ((!res->table || res->table != table->table) && + (!check_alias || !(lower_case_table_names ? + my_strcasecmp(files_charset_info, t_alias, res->alias) : + strcmp(t_alias, res->alias))) && + res->select_lex && !res->select_lex->exclude_from_table_unique_test && + !res->prelocking_placeholder)) break; - return table; + /* + If we found entry of this table or table of SELECT which already + processed in derived table or top select of multi-update/multi-delete + (exclude_from_table_unique_test) or prelocking placeholder. + */ + table_list= res->next_global; + DBUG_PRINT("info", + ("found same copy of table or table which we should skip")); + } + DBUG_RETURN(res); } + +/* + Issue correct error message in case we found 2 duplicate tables which + prevent some update operation + + SYNOPSIS + update_non_unique_table_error() + update table which we try to update + operation name of update operation + duplicate duplicate table which we found + + NOTE: + here we hide view underlying tables if we have them +*/ + +void update_non_unique_table_error(TABLE_LIST *update, + const char *operation, + TABLE_LIST *duplicate) +{ + update= update->top_table(); + duplicate= duplicate->top_table(); + if (!update->view || !duplicate->view || + update->view == duplicate->view || + update->view_name.length != duplicate->view_name.length || + update->view_db.length != duplicate->view_db.length || + my_strcasecmp(table_alias_charset, + update->view_name.str, duplicate->view_name.str) != 0 || + my_strcasecmp(table_alias_charset, + update->view_db.str, duplicate->view_db.str) != 0) + { + /* + it is not the same view repeated (but it can be parts of the same copy + of view), so we have to hide underlying tables. + */ + if (update->view) + { + /* Issue the ER_NON_INSERTABLE_TABLE error for an INSERT */ + if (update->view == duplicate->view) + my_error(!strncmp(operation, "INSERT", 6) ? 
+ ER_NON_INSERTABLE_TABLE : ER_NON_UPDATABLE_TABLE, MYF(0), + update->alias, operation); + else + my_error(ER_VIEW_PREVENT_UPDATE, MYF(0), + (duplicate->view ? duplicate->alias : update->alias), + operation, update->alias); + return; + } + if (duplicate->view) + { + my_error(ER_VIEW_PREVENT_UPDATE, MYF(0), duplicate->alias, operation, + update->alias); + return; + } + } + my_error(ER_UPDATE_TABLE_USED, MYF(0), update->alias); +} + + TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name) { char key[MAX_DBKEY_LENGTH]; @@ -681,8 +1007,8 @@ TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name) prev= &thd->temporary_tables; for (table=thd->temporary_tables ; table ; table=table->next) { - if (table->key_length == key_length && - !memcmp(table->table_cache_key,key,key_length)) + if (table->s->key_length == key_length && + !memcmp(table->s->table_cache_key,key,key_length)) return prev; prev= &table->next; } @@ -697,7 +1023,7 @@ bool close_temporary_table(THD *thd, const char *db, const char *table_name) return 1; table= *prev; *prev= table->next; - close_temporary(table); + close_temporary(table, 1); if (thd->slave_thread) --slave_open_temp_tables; return 0; @@ -710,22 +1036,26 @@ bool close_temporary_table(THD *thd, const char *db, const char *table_name) Prepares a table cache key, which is the concatenation of db, table_name and thd->slave_proxy_id, separated by '\0'. 
*/ + bool rename_temporary_table(THD* thd, TABLE *table, const char *db, const char *table_name) { char *key; + TABLE_SHARE *share= table->s; + if (!(key=(char*) alloc_root(&table->mem_root, (uint) strlen(db)+ (uint) strlen(table_name)+6+4))) return 1; /* purecov: inspected */ - table->key_length=(uint) - (strmov((table->real_name=strmov(table->table_cache_key=key, - db)+1), - table_name) - table->table_cache_key)+1; - int4store(key+table->key_length,thd->server_id); - table->key_length += 4; - int4store(key+table->key_length,thd->variables.pseudo_thread_id); - table->key_length += 4; + share->key_length= (uint) + (strmov((char*) (share->table_name= strmov(share->table_cache_key= key, + db)+1), + table_name) - share->table_cache_key)+1; + share->db= share->table_cache_key; + int4store(key+share->key_length, thd->server_id); + share->key_length+= 4; + int4store(key+share->key_length, thd->variables.pseudo_thread_id); + share->key_length+= 4; return 0; } @@ -756,15 +1086,16 @@ static void relink_unused(TABLE *table) TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find) { char key[MAX_DBKEY_LENGTH]; - uint key_length=find->key_length; + uint key_length= find->s->key_length; TABLE *start=list,**prev,*next; prev= &start; - memcpy(key,find->table_cache_key,key_length); + + memcpy(key, find->s->table_cache_key, key_length); for (; list ; list=next) { next=list->next; - if (list->key_length == key_length && - !memcmp(list->table_cache_key,key,key_length)) + if (list->s->key_length == key_length && + !memcmp(list->s->table_cache_key, key, key_length)) { if (thd->locked_tables) mysql_lock_remove(thd, thd->locked_tables,list); @@ -778,7 +1109,7 @@ TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find) } *prev=0; // Notify any 'refresh' threads - pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); return start; } @@ -790,6 +1121,7 @@ TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find) void wait_for_refresh(THD *thd) { + 
DBUG_ENTER("wait_for_refresh"); safe_mutex_assert_owner(&LOCK_open); /* Wait until the current table is up to date */ @@ -807,158 +1139,404 @@ void wait_for_refresh(THD *thd) thd->mysys_var->current_cond= 0; thd->proc_info= proc_info; pthread_mutex_unlock(&thd->mysys_var->mutex); + DBUG_VOID_RETURN; } -TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) +/* + Open table which is already name-locked by this thread. + + SYNOPSIS + reopen_name_locked_table() + thd Thread handle + table_list TABLE_LIST object for table to be open, TABLE_LIST::table + member should point to TABLE object which was used for + name-locking. + + NOTE + This function assumes that its caller already acquired LOCK_open mutex. + + RETURN VALUE + FALSE - Success + TRUE - Error +*/ + +bool reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) { + TABLE *table= table_list->table; + TABLE_SHARE *share; + char *db= table_list->db; + char *table_name= table_list->table_name; + char key[MAX_DBKEY_LENGTH]; + uint key_length; + TABLE orig_table; DBUG_ENTER("reopen_name_locked_table"); - if (thd->killed) - DBUG_RETURN(0); - TABLE* table; - if (!(table = table_list->table)) - DBUG_RETURN(0); - char *db= table_list->db; - char* table_name = table_list->real_name; - char key[MAX_DBKEY_LENGTH]; - uint key_length; + safe_mutex_assert_owner(&LOCK_open); + + if (thd->killed || !table) + DBUG_RETURN(TRUE); + + orig_table= *table; key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1; - pthread_mutex_lock(&LOCK_open); - if (open_unireg_entry(thd, table, db, table_name, table_name) || - !(table->table_cache_key =memdup_root(&table->mem_root,(char*) key, - key_length))) + if (open_unireg_entry(thd, table, db, table_name, table_name, 0, + thd->mem_root, 0) || + !(table->s->table_cache_key= memdup_root(&table->mem_root, (char*) key, + key_length))) { - closefrm(table); - pthread_mutex_unlock(&LOCK_open); - DBUG_RETURN(0); + intern_close_table(table); + /* + If there was an error during 
opening of table (for example if it + does not exist) '*table' object can be wiped out. To be able + properly release name-lock in this case we should restore this + object to its original state. + */ + *table= orig_table; + DBUG_RETURN(TRUE); } - table->key_length=key_length; - table->version=0; - table->flush_version=0; + share= table->s; + share->db= share->table_cache_key; + share->key_length=key_length; + share->version=0; + share->flush_version=0; table->in_use = thd; check_unused(); - pthread_mutex_unlock(&LOCK_open); table->next = thd->open_tables; thd->open_tables = table; table->tablenr=thd->current_tablenr++; table->used_fields=0; table->const_table=0; - table->outer_join= table->null_row= table->maybe_null= table->force_index= 0; + table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; - table->keys_in_use_for_query= table->keys_in_use; - table->used_keys= table->keys_for_keyread; - DBUG_RETURN(table); + table->keys_in_use_for_query= share->keys_in_use; + table->used_keys= share->keys_for_keyread; + DBUG_RETURN(FALSE); } -/****************************************************************************** -** open a table -** Uses a cache of open tables to find a table not in use. -** If refresh is a NULL pointer, then the is no version number checking and -** the table is not put in the thread-open-list -** If the return value is NULL and refresh is set then one must close -** all tables and retry the open -******************************************************************************/ +/* + Open a table. + + SYNOPSIS + open_table() + thd Thread context. + table_list Open first table in list. + refresh INOUT Pointer to memory that will be set to 1 if + we need to close all tables and reopen them. + If this is a NULL pointer, then the table is not + put in the thread-open-list. + flags Bitmap of flags to modify how open works: + MYSQL_LOCK_IGNORE_FLUSH - Open table even if + someone has done a flush or namelock on it. 
+ No version number checking is done. + MYSQL_OPEN_IGNORE_LOCKED_TABLES - Open table + ignoring set of locked tables and prelocked mode. + + IMPLEMENTATION + Uses a cache of open tables to find a table not in use. + + RETURN + NULL Open failed. If refresh is set then one should close + all other tables and retry the open. + # Success. Pointer to TABLE object for open table. +*/ -TABLE *open_table(THD *thd,const char *db,const char *table_name, - const char *alias,bool *refresh) +TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, + bool *refresh, uint flags) { reg1 TABLE *table; char key[MAX_DBKEY_LENGTH]; uint key_length; + char *alias= table_list->alias; HASH_SEARCH_STATE state; DBUG_ENTER("open_table"); /* find a unused table in the open table cache */ if (refresh) *refresh=0; + + /* an open table operation needs a lot of the stack space */ + if (check_stack_overrun(thd, STACK_MIN_SIZE_FOR_OPEN, (char *)&alias)) + DBUG_RETURN(0); + if (thd->killed) DBUG_RETURN(0); - key_length= (uint) (strmov(strmov(key,db)+1,table_name)-key)+1; + key_length= (uint) (strmov(strmov(key, table_list->db)+1, + table_list->table_name)-key)+1; int4store(key + key_length, thd->server_id); int4store(key + key_length + 4, thd->variables.pseudo_thread_id); - for (table=thd->temporary_tables; table ; table=table->next) + /* + Unless requested otherwise, try to resolve this table in the list + of temporary tables of this thread. In MySQL temporary tables + are always thread-local and "shadow" possible base tables with the + same name. This block implements the behaviour. + TODO: move this block into a separate function. 
+ */ + if (!table_list->skip_temporary) { - if (table->key_length == key_length + TMP_TABLE_KEY_EXTRA && - !memcmp(table->table_cache_key, key, - key_length + TMP_TABLE_KEY_EXTRA)) + for (table= thd->temporary_tables; table ; table=table->next) { - if (table->query_id == thd->query_id) + if (table->s->key_length == key_length + TMP_TABLE_KEY_EXTRA && + !memcmp(table->s->table_cache_key, key, + key_length + TMP_TABLE_KEY_EXTRA)) { - my_printf_error(ER_CANT_REOPEN_TABLE, - ER(ER_CANT_REOPEN_TABLE),MYF(0),table->table_name); - DBUG_RETURN(0); + /* + We're trying to use the same temporary table twice in a query. + Right now we don't support this because a temporary table + is always represented by only one TABLE object in THD, and + it can not be cloned. Emit an error for an unsupported behaviour. + */ + if (table->query_id == thd->query_id || + thd->prelocked_mode && table->query_id) + { + my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias); + DBUG_RETURN(0); + } + table->query_id= thd->query_id; + table->clear_query_id= 1; + thd->tmp_table_used= 1; + DBUG_PRINT("info",("Using temporary table")); + goto reset; } - table->query_id=thd->query_id; - table->clear_query_id=1; - thd->tmp_table_used= 1; - DBUG_PRINT("info",("Using temporary table")); - goto reset; } } - if (thd->locked_tables) + /* + The table is not temporary - if we're in pre-locked or LOCK TABLES + mode, let's try to find the requested table in the list of pre-opened + and locked tables. If the table is not there, return an error - we can't + open not pre-opened tables in pre-locked/LOCK TABLES mode. + TODO: move this block into a separate function. 
+ */ + if (!(flags & MYSQL_OPEN_IGNORE_LOCKED_TABLES) && + (thd->locked_tables || thd->prelocked_mode)) { // Using table locks + TABLE *best_table= 0; + int best_distance= INT_MIN; + bool check_if_used= thd->prelocked_mode && + ((int) table_list->lock_type >= + (int) TL_WRITE_ALLOW_WRITE); for (table=thd->open_tables; table ; table=table->next) { - if (table->key_length == key_length && - !memcmp(table->table_cache_key,key,key_length) && - !my_strcasecmp(system_charset_info, table->table_name, alias) && - table->query_id != thd->query_id) + if (table->s->key_length == key_length && + !memcmp(table->s->table_cache_key, key, key_length)) { - table->query_id=thd->query_id; - DBUG_PRINT("info",("Using locked table")); - goto reset; + if (check_if_used && table->query_id && + table->query_id != thd->query_id) + { + /* + If we are in stored function or trigger we should ensure that + we won't change table that is already used by calling statement. + So if we are opening table for writing, we should check that it + is not already open by some calling stamement. + */ + my_error(ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG, MYF(0), + table->s->table_name); + DBUG_RETURN(0); + } + if (!my_strcasecmp(system_charset_info, table->alias, alias) && + table->query_id != thd->query_id && /* skip tables already used */ + !(thd->prelocked_mode && table->query_id)) + { + int distance= ((int) table->reginfo.lock_type - + (int) table_list->lock_type); + /* + Find a table that either has the exact lock type requested, + or has the best suitable lock. In case there is no locked + table that has an equal or higher lock than requested, + we us the closest matching lock to be able to produce an error + message about wrong lock mode on the table. The best_table + is changed if bd < 0 <= d or bd < d < 0 or 0 <= d < bd. 
+ + distance < 0 - No suitable lock found + distance > 0 - we have lock mode higher then we require + distance == 0 - we have lock mode exactly which we need + */ + if (best_distance < 0 && distance > best_distance || + distance >= 0 && distance < best_distance) + { + best_distance= distance; + best_table= table; + if (best_distance == 0 && !check_if_used) + { + /* + If we have found perfect match and we don't need to check that + table is not used by one of calling statements (assuming that + we are inside of function or trigger) we can finish iterating + through open tables list. + */ + break; + } + } + } } } - my_printf_error(ER_TABLE_NOT_LOCKED,ER(ER_TABLE_NOT_LOCKED),MYF(0),alias); + if (best_table) + { + table= best_table; + table->query_id= thd->query_id; + DBUG_PRINT("info",("Using locked table")); + goto reset; + } + /* + Is this table a view and not a base table? + (it is work around to allow to open view with locked tables, + real fix will be made after definition cache will be made) + */ + { + char path[FN_REFLEN]; + db_type not_used; + strxnmov(path, FN_REFLEN, mysql_data_home, "/", table_list->db, "/", + table_list->table_name, reg_ext, NullS); + (void) unpack_filename(path, path); + if (mysql_frm_type(thd, path, ¬_used) == FRMTYPE_VIEW) + { + /* + Will not be used (because it's VIEW) but has to be passed. + Also we will not free it (because it is a stack variable). 
+ */ + TABLE tab; + table= &tab; + VOID(pthread_mutex_lock(&LOCK_open)); + if (!open_unireg_entry(thd, table, table_list->db, + table_list->table_name, + alias, table_list, mem_root, 0)) + { + DBUG_ASSERT(table_list->view != 0); + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(0); // VIEW + } + VOID(pthread_mutex_unlock(&LOCK_open)); + } + } + if ((thd->locked_tables) && (thd->locked_tables->lock_count > 0)) + my_error(ER_TABLE_NOT_LOCKED, MYF(0), alias); + else + my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db, table_list->alias); DBUG_RETURN(0); } + /* + Non pre-locked/LOCK TABLES mode, and the table is not temporary: + this is the normal use case. + Now we should: + - try to find the table in the table cache. + - if one of the discovered TABLE instances is name-locked + (table->s->version == 0) or some thread has started FLUSH TABLES + (refresh_version > table->s->version), back off -- we have to wait + until no one holds a name lock on the table. + - if there is no such TABLE in the name cache, read the table definition + and insert it into the cache. + We perform all of the above under LOCK_open which currently protects + the open cache (also known as table cache) and table definitions stored + on disk. + */ + VOID(pthread_mutex_lock(&LOCK_open)); + /* + If it's the first table from a list of tables used in a query, + remember refresh_version (the version of open_cache state). + If the version changes while we're opening the remaining tables, + we will have to back off, close all the tables opened-so-far, + and try to reopen them. + Note: refresh_version is currently changed only during FLUSH TABLES. + */ if (!thd->open_tables) thd->version=refresh_version; - else if (thd->version != refresh_version && refresh) + else if ((thd->version != refresh_version) && + ! 
(flags & MYSQL_LOCK_IGNORE_FLUSH)) { /* Someone did a refresh while thread was opening tables */ - *refresh=1; + if (refresh) + *refresh=1; VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(0); } /* close handler tables which are marked for flush */ - mysql_ha_flush(thd, (TABLE_LIST*) NULL, MYSQL_HA_REOPEN_ON_USAGE, TRUE); + if (thd->handler_tables) + mysql_ha_flush(thd, (TABLE_LIST*) NULL, MYSQL_HA_REOPEN_ON_USAGE, TRUE); + /* + Actually try to find the table in the open_cache. + The cache may contain several "TABLE" instances for the same + physical table. The instances that are currently "in use" by + some thread have their "in_use" member != NULL. + There is no good reason for having more than one entry in the + hash for the same physical table, except that we use this as + an implicit "pending locks queue" - see + wait_for_locked_table_names for details. + */ for (table= (TABLE*) hash_first(&open_cache, (byte*) key, key_length, &state); table && table->in_use ; table= (TABLE*) hash_next(&open_cache, (byte*) key, key_length, &state)) { - if (table->version != refresh_version) + /* + Normally, table->s->version contains the value of + refresh_version from the moment when this table was + (re-)opened and added to the cache. + If since then we did (or just started) FLUSH TABLES + statement, refresh_version has been increased. + For "name-locked" TABLE instances, table->s->version is set + to 0 (see lock_table_name for details). + In case there is a pending FLUSH TABLES or a name lock, we + need to back off and re-start opening tables. + If we do not back off now, we may dead lock in case of lock + order mismatch with some other thread: + c1: name lock t1; -- sort of exclusive lock + c2: open t2; -- sort of shared lock + c1: name lock t2; -- blocks + c2: open t1; -- blocks + */ + if (table->s->version != refresh_version) { - if (! 
refresh) + DBUG_PRINT("note", + ("Found table '%s.%s' with different refresh version", + table_list->db, table_list->table_name)); + if (flags & MYSQL_LOCK_IGNORE_FLUSH) { - /* Ignore flush for now, but force close after usage. */ - thd->version= table->version; + /* Force close at once after usage */ + thd->version= table->s->version; continue; } /* - ** There is a refresh in progress for this table - ** Wait until the table is freed or the thread is killed. + Back off, part 1: mark the table as "unused" for the + purpose of name-locking by setting table->db_stat to 0. Do + that only for the tables in this thread that have an old + table->s->version (this is an optimization (?)). + table->db_stat == 0 signals wait_for_locked_table_names + that the tables in question are not used any more. See + table_is_used call for details. */ close_old_data_files(thd,thd->open_tables,0,0); + /* + Back-off part 2: try to avoid "busy waiting" on the table: + if the table is in use by some other thread, we suspend + and wait till the operation is complete: when any + operation that juggles with table->s->version completes, + it broadcasts COND_refresh condition variable. + */ if (table->in_use != thd) + { wait_for_refresh(thd); + /* wait_for_refresh will unlock LOCK_open for us */ + } else + { VOID(pthread_mutex_unlock(&LOCK_open)); + } + /* + There is a refresh in progress for this table. + Signal the caller that it has to try again. + */ if (refresh) *refresh=1; DBUG_RETURN(0); @@ -966,6 +1544,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, } if (table) { + /* Unlink the table from "unused_tables" list. 
*/ if (table == unused_tables) { // First unused unused_tables=unused_tables->next; // Remove from link @@ -974,10 +1553,13 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, } table->prev->next=table->next; /* Remove from unused list */ table->next->prev=table->prev; - + table->in_use= thd; } else { + /* Insert a new TABLE instance into the open cache */ + TABLE_SHARE *share; + int error; /* Free cache if too big */ while (open_cache.records > table_cache_size && unused_tables) VOID(hash_delete(&open_cache,(byte*) unused_tables)); /* purecov: tested */ @@ -988,25 +1570,45 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(NULL); } - if (open_unireg_entry(thd, table,db,table_name,alias) || - !(table->table_cache_key=memdup_root(&table->mem_root,(char*) key, - key_length))) + error= open_unireg_entry(thd, table, table_list->db, + table_list->table_name, + alias, table_list, mem_root, + (flags & OPEN_VIEW_NO_PARSE)); + if ((error > 0) || + (!table_list->view && !error && + !(table->s->table_cache_key= memdup_root(&table->mem_root, + (char*) key, + key_length)))) { table->next=table->prev=table; free_cache_entry(table); VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(NULL); } - table->key_length=key_length; - table->version=refresh_version; - table->flush_version=flush_version; + if (table_list->view || error < 0) + { + /* + VIEW not really opened, only frm were read. 
+ Set 1 as a flag here + */ + if (error < 0) + table_list->view= (st_lex*)1; + + my_free((gptr)table, MYF(0)); + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(0); // VIEW + } + share= table->s; + share->db= share->table_cache_key; + share->key_length= key_length; + share->version= refresh_version; + share->flush_version= flush_version; DBUG_PRINT("info", ("inserting table %p into the cache", table)); VOID(my_hash_insert(&open_cache,(byte*) table)); } - table->in_use=thd; check_unused(); // Debugging call - + VOID(pthread_mutex_unlock(&LOCK_open)); if (refresh) { @@ -1016,55 +1618,36 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, table->reginfo.lock_type=TL_READ; /* Assume read */ reset: + if (thd->lex->need_correct_ident()) + table->alias_name_used= my_strcasecmp(table_alias_charset, + table->s->table_name, alias); /* Fix alias if table name changes */ - if (strcmp(table->table_name,alias)) + if (strcmp(table->alias, alias)) { uint length=(uint) strlen(alias)+1; - table->table_name= (char*) my_realloc(table->table_name,length, - MYF(MY_WME)); - memcpy(table->table_name,alias,length); - for (uint i=0 ; i < table->fields ; i++) - table->field[i]->table_name=table->table_name; - } -#if MYSQL_VERSION_ID < 40100 - /* - If per-connection "new" variable (represented by variables.new_mode) - is set then we should pretend that the length of TIMESTAMP field is 19. - The cheapest (from perfomance viewpoint) way to achieve that is to set - field_length of all Field_timestamp objects in a table after opening - it (to 19 if new_mode is true or to original field length otherwise). - We save value of new_mode variable in TABLE::timestamp_mode to - not perform this setup if new_mode value is the same between sequential - table opens. 
- */ - my_bool new_mode= thd->variables.new_mode; - if (table->timestamp_mode != new_mode) - { - for (uint i=0 ; i < table->fields ; i++) - { - Field *field= table->field[i]; - - if (field->type() == FIELD_TYPE_TIMESTAMP) - field->field_length= new_mode ? 19 : - ((Field_timestamp *)(field))->orig_field_length; - } - table->timestamp_mode= new_mode; + table->alias= (char*) my_realloc((char*) table->alias, length, + MYF(MY_WME)); + memcpy((char*) table->alias, alias, length); } -#endif /* These variables are also set in reopen_table() */ table->tablenr=thd->current_tablenr++; table->used_fields=0; table->const_table=0; - table->outer_join= table->null_row= table->maybe_null= table->force_index= 0; + table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; - table->keys_in_use_for_query= table->keys_in_use; - table->used_keys= table->keys_for_keyread; - table->file->ft_handler=0; - table->fulltext_searched=0; + table->keys_in_use_for_query= table->s->keys_in_use; + table->insert_values= 0; + table->used_keys= table->s->keys_for_keyread; + table->fulltext_searched= 0; + table->file->ft_handler= 0; + /* Catch wrong handling of the auto_increment_field_not_null. 
*/ + DBUG_ASSERT(!table->auto_increment_field_not_null); + table->auto_increment_field_not_null= FALSE; if (table->timestamp_field) table->timestamp_field_type= table->timestamp_field->get_auto_set_type(); + table->pos_in_table_list= table_list; + table_list->updatable= 1; // It is not derived table nor non-updatable VIEW DBUG_ASSERT(table->key_read == 0); - DBUG_ASSERT(table->insert_values == 0); DBUG_RETURN(table); } @@ -1076,8 +1659,8 @@ TABLE *find_locked_table(THD *thd, const char *db,const char *table_name) for (TABLE *table=thd->open_tables; table ; table=table->next) { - if (table->key_length == key_length && - !memcmp(table->table_cache_key,key,key_length)) + if (table->s->key_length == key_length && + !memcmp(table->s->table_cache_key,key,key_length)) return table; } return(0); @@ -1104,9 +1687,9 @@ TABLE *find_locked_table(THD *thd, const char *db,const char *table_name) bool reopen_table(TABLE *table,bool locked) { TABLE tmp; - char *db=table->table_cache_key; - char *table_name=table->real_name; - bool error=1; + char *db= table->s->table_cache_key; + const char *table_name= table->s->table_name; + bool error= 1; Field **field; uint key,part; DBUG_ENTER("reopen_table"); @@ -1114,65 +1697,71 @@ bool reopen_table(TABLE *table,bool locked) #ifdef EXTRA_DEBUG if (table->db_stat) sql_print_error("Table %s had a open data handler in reopen_table", - table->table_name); + table->alias); #endif if (!locked) VOID(pthread_mutex_lock(&LOCK_open)); safe_mutex_assert_owner(&LOCK_open); - if (open_unireg_entry(current_thd,&tmp,db,table_name,table->table_name)) + if (open_unireg_entry(table->in_use, &tmp, db, table_name, + table->alias, 0, table->in_use->mem_root, 0)) goto end; free_io_cache(table); - if (!(tmp.table_cache_key= memdup_root(&tmp.mem_root,db, - table->key_length))) + if (!(tmp.s->table_cache_key= memdup_root(&tmp.mem_root,db, + table->s->key_length))) { + delete tmp.triggers; closefrm(&tmp); // End of memory goto end; } + tmp.s->db= 
tmp.s->table_cache_key; /* This list copies variables set by open_table */ tmp.tablenr= table->tablenr; tmp.used_fields= table->used_fields; tmp.const_table= table->const_table; - tmp.outer_join= table->outer_join; tmp.null_row= table->null_row; tmp.maybe_null= table->maybe_null; tmp.status= table->status; - tmp.keys_in_use_for_query= tmp.keys_in_use; - tmp.used_keys= tmp.keys_for_keyread; - tmp.force_index= tmp.force_index; + tmp.keys_in_use_for_query= tmp.s->keys_in_use; + tmp.used_keys= tmp.s->keys_for_keyread; /* Get state */ - tmp.key_length= table->key_length; + tmp.s->key_length= table->s->key_length; tmp.in_use= table->in_use; tmp.reginfo.lock_type=table->reginfo.lock_type; - tmp.version= refresh_version; - tmp.tmp_table= table->tmp_table; + tmp.s->version= refresh_version; + tmp.s->tmp_table= table->s->tmp_table; tmp.grant= table->grant; /* Replace table in open list */ tmp.next= table->next; tmp.prev= table->prev; + delete table->triggers; if (table->file) VOID(closefrm(table)); // close file, free everything - *table=tmp; + *table= tmp; + table->s= &table->share_not_to_be_used; table->file->change_table_ptr(table); - DBUG_ASSERT(table->table_name); + DBUG_ASSERT(table->alias != 0); for (field=table->field ; *field ; field++) { (*field)->table= (*field)->orig_table= table; - (*field)->table_name=table->table_name; + (*field)->table_name= &table->alias; } - for (key=0 ; key < table->keys ; key++) + for (key=0 ; key < table->s->keys ; key++) { for (part=0 ; part < table->key_info[key].usable_key_parts ; part++) table->key_info[key].key_part[part].field->table= table; } - VOID(pthread_cond_broadcast(&COND_refresh)); + if (table->triggers) + table->triggers->set_table(table); + + broadcast_refresh(); error=0; end: @@ -1193,8 +1782,8 @@ bool close_data_tables(THD *thd,const char *db, const char *table_name) TABLE *table; for (table=thd->open_tables; table ; table=table->next) { - if (!strcmp(table->real_name,table_name) && - !strcmp(table->table_cache_key,db)) 
+ if (!strcmp(table->s->table_name, table_name) && + !strcmp(table->s->db, db)) { mysql_lock_remove(thd, thd->locked_tables,table); table->file->close(); @@ -1220,7 +1809,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) TABLE *table,*next,**prev; TABLE **tables,**tables_ptr; // For locks - bool error=0; + bool error=0, not_used; if (get_locks) { /* The ptr is checked later */ @@ -1239,7 +1828,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) next=table->next; if (!tables || (!db_stat && reopen_table(table,1))) { - my_error(ER_CANT_REOPEN_TABLE,MYF(0),table->table_name); + my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias); VOID(hash_delete(&open_cache,(byte*) table)); error=1; } @@ -1251,7 +1840,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) *tables_ptr++= table; // need new lock on this if (in_refresh) { - table->version=0; + table->s->version=0; table->locked_by_flush=0; } } @@ -1261,7 +1850,8 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) MYSQL_LOCK *lock; /* We should always get these locks */ thd->some_tables_deleted=0; - if ((lock= mysql_lock_tables(thd, tables, (uint) (tables_ptr-tables), 0))) + if ((lock= mysql_lock_tables(thd, tables, (uint) (tables_ptr - tables), + 0, ¬_used))) { thd->locked_tables=mysql_lock_merge(thd->locked_tables,lock); } @@ -1272,7 +1862,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) { my_afree((gptr) tables); } - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); *prev=0; DBUG_RETURN(error); } @@ -1290,11 +1880,11 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks, bool found=send_refresh; for (; table ; table=table->next) { - if (table->version != refresh_version) + if (table->s->version != refresh_version) { found=1; if (!abort_locks) // If not from flush tables - table->version = refresh_version; // Let other threads use table + table->s->version= refresh_version; // Let other threads use 
table if (table->db_stat) { if (abort_locks) @@ -1309,7 +1899,7 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks, } } if (found) - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); DBUG_VOID_RETURN; } @@ -1324,18 +1914,18 @@ bool table_is_used(TABLE *table, bool wait_for_name_lock) { do { + char *key= table->s->table_cache_key; + uint key_length= table->s->key_length; HASH_SEARCH_STATE state; - char *key= table->table_cache_key; - uint key_length=table->key_length; for (TABLE *search= (TABLE*) hash_first(&open_cache, (byte*) key, - key_length, &state); + key_length, &state); search ; search= (TABLE*) hash_next(&open_cache, (byte*) key, key_length, &state)) { if (search->locked_by_flush || search->locked_by_name && wait_for_name_lock || - search->db_stat && search->version < refresh_version) + search->db_stat && search->s->version < refresh_version) return 1; // Table is used } } while ((table=table->next)); @@ -1383,11 +1973,11 @@ bool drop_locked_tables(THD *thd,const char *db, const char *table_name) TABLE *table,*next,**prev; bool found=0; prev= &thd->open_tables; - for (table=thd->open_tables; table ; table=next) + for (table= thd->open_tables; table ; table=next) { next=table->next; - if (!strcmp(table->real_name,table_name) && - !strcmp(table->table_cache_key,db)) + if (!strcmp(table->s->table_name, table_name) && + !strcmp(table->s->db, db)) { mysql_lock_remove(thd, thd->locked_tables,table); VOID(hash_delete(&open_cache,(byte*) table)); @@ -1401,7 +1991,7 @@ bool drop_locked_tables(THD *thd,const char *db, const char *table_name) } *prev=0; if (found) - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); if (thd->locked_tables && thd->locked_tables->table_count == 0) { my_free((gptr) thd->locked_tables,MYF(0)); @@ -1422,8 +2012,8 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name) TABLE *table; for (table= thd->open_tables; table ; 
table= table->next) { - if (!strcmp(table->real_name,table_name) && - !strcmp(table->table_cache_key,db)) + if (!strcmp(table->s->table_name,table_name) && + !strcmp(table->s->db, db)) { mysql_lock_abort(thd,table); break; @@ -1442,6 +2032,10 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name) db Database name name Table name alias Alias name + table_desc TABLE_LIST descriptor (used with views) + mem_root temporary mem_root for parsing + flags the OPEN_VIEW_NO_PARSE flag to be passed to + openfrm()/open_new_frm() NOTES Extra argument for open is taken from thd->open_options @@ -1450,9 +2044,10 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name) 0 ok # Error */ - static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, - const char *name, const char *alias) + const char *name, const char *alias, + TABLE_LIST *table_desc, MEM_ROOT *mem_root, + uint flags) { char path[FN_REFLEN]; int error; @@ -1460,19 +2055,30 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, DBUG_ENTER("open_unireg_entry"); strxmov(path, mysql_data_home, "/", db, "/", name, NullS); - while (openfrm(path,alias, - (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX | - HA_TRY_READ_ONLY), - READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, - thd->open_options, entry)) + while ((error= openfrm(thd, path, alias, + (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | + HA_GET_INDEX | HA_TRY_READ_ONLY | + NO_ERR_ON_NEW_FRM), + READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD | + (flags & OPEN_VIEW_NO_PARSE), + thd->open_options, entry)) && + (error != 5 || + (fn_format(path, path, 0, reg_ext, MY_UNPACK_FILENAME), + open_new_frm(thd, path, alias, db, name, + (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | + HA_GET_INDEX | HA_TRY_READ_ONLY), + READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD | + (flags & OPEN_VIEW_NO_PARSE), + thd->open_options, entry, table_desc, mem_root)))) + { - if (!entry->crashed) + if (!entry->s || !entry->s->crashed) { /* - 
Frm file could not be found on disk - Since it does not exist, no one can be using it - LOCK_open has been locked to protect from someone else - trying to discover the table at the same time. + Frm file could not be found on disk + Since it does not exist, no one can be using it + LOCK_open has been locked to protect from someone else + trying to discover the table at the same time. */ if (discover_retry_count++ != 0) goto err; @@ -1480,7 +2086,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, { /* Give right error message */ thd->clear_error(); - DBUG_PRINT("error", ("Dicovery of %s/%s failed", db, name)); + DBUG_PRINT("error", ("Discovery of %s/%s failed", db, name)); my_printf_error(ER_UNKNOWN_ERROR, "Failed to open '%-.64s', error while " "unpacking from engine", @@ -1489,7 +2095,8 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, goto err; } - thd->clear_error(); // Clear error message + mysql_reset_errors(thd, 1); // Clear warnings + thd->clear_error(); // Clear error message continue; } @@ -1497,7 +2104,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, TABLE_LIST table_list; bzero((char*) &table_list, sizeof(table_list)); // just for safe table_list.db=(char*) db; - table_list.real_name=(char*) name; + table_list.table_name=(char*) name; safe_mutex_assert_owner(&LOCK_open); @@ -1516,7 +2123,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, pthread_mutex_unlock(&LOCK_open); thd->clear_error(); // Clear error message error= 0; - if (openfrm(path,alias, + if (openfrm(thd, path, alias, (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX | HA_TRY_READ_ONLY), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, @@ -1541,6 +2148,22 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, goto err; break; } + + if (error == 5) + DBUG_RETURN((flags & OPEN_VIEW_NO_PARSE)? 
-1 : 0); // we have just opened VIEW + + /* + We can't mark all tables in 'mysql' database as system since we don't + allow to lock such tables for writing with any other tables (even with + other system tables) and some privilege tables need this. + */ + if (!my_strcasecmp(system_charset_info, db, "mysql") && + !my_strcasecmp(system_charset_info, name, "proc")) + entry->s->system_table= 1; + + if (Table_triggers_list::check_n_load(thd, db, name, entry, 0)) + goto err; + /* If we are here, there was no fatal error (but error may be still unitialized). @@ -1569,6 +2192,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, */ sql_print_error("When opening HEAP table, could not allocate \ memory to write 'DELETE FROM `%s`.`%s`' to the binary log",db,name); + delete entry->triggers; if (entry->file) closefrm(entry); goto err; @@ -1577,91 +2201,295 @@ memory to write 'DELETE FROM `%s`.`%s`' to the binary log",db,name); } DBUG_RETURN(0); err: + /* Hide "Table doesn't exist" errors if table belong to view */ + if (thd->net.last_errno == ER_NO_SUCH_TABLE && + table_desc && table_desc->belong_to_view) + { + TABLE_LIST *view= table_desc->belong_to_view; + thd->clear_error(); + my_error(ER_VIEW_INVALID, MYF(0), view->view_db.str, view->view_name.str); + } DBUG_RETURN(1); } + /* Open all tables in list SYNOPSIS open_tables() thd - thread handler - start - list of tables + start - list of tables in/out counter - number of opened tables will be return using this parameter + flags - bitmap of flags to modify how the tables will be open: + MYSQL_LOCK_IGNORE_FLUSH - open table even if someone has + done a flush or namelock on it. + + NOTE + Unless we are already in prelocked mode, this function will also precache + all SP/SFs explicitly or implicitly (via views and triggers) used by the + query and add tables needed for their execution to table list. If resulting + tables list will be non empty it will mark query as requiring precaching. 
+ Prelocked mode will be enabled for such query during lock_tables() call. + + If query for which we are opening tables is already marked as requiring + prelocking it won't do such precaching and will simply reuse table list + which is already built. RETURN 0 - OK -1 - error */ -int open_tables(THD *thd, TABLE_LIST *start, uint *counter) +int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags) { TABLE_LIST *tables; bool refresh; int result=0; + MEM_ROOT new_frm_mem; + /* Also used for indicating that prelocking is need */ + TABLE_LIST **query_tables_last_own; + bool safe_to_ignore_table; + DBUG_ENTER("open_tables"); + /* + temporary mem_root for new .frm parsing. + TODO: variables for size + */ + init_alloc_root(&new_frm_mem, 8024, 8024); thd->current_tablenr= 0; restart: *counter= 0; + query_tables_last_own= 0; thd->proc_info="Opening tables"; - for (tables=start ; tables ; tables=tables->next) + + /* + If we are not already executing prelocked statement and don't have + statement for which table list for prelocking is already built, let + us cache routines and try to build such table list. + + NOTE: We will mark statement as requiring prelocking only if we will + have non empty table list. But this does not guarantee that in prelocked + mode we will have some locked tables, because queries which use only + derived/information schema tables and views possible. Thus "counter" + may be still zero for prelocked statement... + */ + + if (!thd->prelocked_mode && !thd->lex->requires_prelocking() && + thd->lex->sroutines_list.elements) { + bool first_no_prelocking, need_prelocking, tabs_changed; + TABLE_LIST **save_query_tables_last= thd->lex->query_tables_last; + + DBUG_ASSERT(thd->lex->query_tables == *start); + sp_get_prelocking_info(thd, &need_prelocking, &first_no_prelocking); + + if (sp_cache_routines_and_add_tables(thd, thd->lex, + first_no_prelocking, + &tabs_changed)) + { + /* + Serious error during reading stored routines from mysql.proc table. 
+ Something's wrong with the table or its contents, and an error has + been emitted; we must abort. + */ + result= -1; + goto err; + } + else if ((tabs_changed || *start) && need_prelocking) + { + query_tables_last_own= save_query_tables_last; + *start= thd->lex->query_tables; + } + } + + /* + For every table in the list of tables to open, try to find or open + a table. + */ + for (tables= *start; tables ;tables= tables->next_global) + { + safe_to_ignore_table= FALSE; // 'FALSE', as per coding style /* Ignore placeholders for derived tables. After derived tables processing, link to created temporary table will be put here. + If this is derived table for view then we still want to process + routines used by this view. */ if (tables->derived) + { + if (tables->view) + goto process_view_routines; continue; + } + /* + If this TABLE_LIST object is a placeholder for an information_schema + table, create a temporary table to represent the information_schema + table in the query. Do not fill it yet - will be filled during + execution. + */ + if (tables->schema_table) + { + if (!mysql_schema_table(thd, thd->lex, tables)) + continue; + DBUG_RETURN(-1); + } (*counter)++; - if (!tables->table && - !(tables->table= open_table(thd, - tables->db, - tables->real_name, - tables->alias, &refresh))) + + /* + Not a placeholder: must be a base table or a view, and the table is + not opened yet. Try to open the table. + */ + if (!tables->table) { + if (tables->prelocking_placeholder) + { + /* + For the tables added by the pre-locking code, attempt to open + the table but fail silently if the table does not exist. + The real failure will occur when/if a statement attempts to use + that table. 
+ */ + Prelock_error_handler prelock_handler; + thd->push_internal_handler(& prelock_handler); + tables->table= open_table(thd, tables, &new_frm_mem, &refresh, flags); + thd->pop_internal_handler(); + safe_to_ignore_table= prelock_handler.safely_trapped_errors(); + } + else + tables->table= open_table(thd, tables, &new_frm_mem, &refresh, flags); + } + + if (!tables->table) + { + free_root(&new_frm_mem, MYF(MY_KEEP_PREALLOC)); + + if (tables->view) + { + /* VIEW placeholder */ + (*counter)--; + + /* + tables->next_global list consists of two parts: + 1) Query tables and underlying tables of views. + 2) Tables used by all stored routines that this statement invokes on + execution. + We need to know where the bound between these two parts is. If we've + just opened a view, which was the last table in part #1, and it + has added its base tables after itself, adjust the boundary pointer + accordingly. + */ + if (query_tables_last_own == &(tables->next_global) && + tables->view->query_tables) + query_tables_last_own= tables->view->query_tables_last; + /* + Let us free memory used by 'sroutines' hash here since we never + call destructor for this LEX. + */ + hash_free(&tables->view->sroutines); + goto process_view_routines; + } + if (refresh) // Refresh in progress { - /* close all 'old' tables used by this thread */ - pthread_mutex_lock(&LOCK_open); - // if query_id is not reset, we will get an error - // re-opening a temp table - thd->version=refresh_version; - TABLE **prev_table= &thd->open_tables; - bool found=0; - for (TABLE_LIST *tmp=start ; tmp ; tmp=tmp->next) - { - /* Close normal (not temporary) changed tables */ - if (tmp->table && ! tmp->table->tmp_table) - { - if (tmp->table->version != refresh_version || - ! 
tmp->table->db_stat) - { - VOID(hash_delete(&open_cache,(byte*) tmp->table)); - tmp->table=0; - found=1; - } - else - { - *prev_table= tmp->table; // Relink open list - prev_table= &tmp->table->next; - } - } - } - *prev_table=0; - pthread_mutex_unlock(&LOCK_open); - if (found) - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + /* + We have met name-locked or old version of table. Now we have + to close all tables which are not up to date. We also have to + throw away set of prelocked tables (and thus close tables from + this set that were open by now) since it possible that one of + tables which determined its content was changed. + + Instead of implementing complex/non-robust logic mentioned + above we simply close and then reopen all tables. + + In order to prepare for recalculation of set of prelocked tables + we pretend that we have finished calculation which we were doing + currently. + */ + if (query_tables_last_own) + thd->lex->mark_as_requiring_prelocking(query_tables_last_own); + close_tables_for_reopen(thd, start); goto restart; } + + if (safe_to_ignore_table) + { + DBUG_PRINT("info", ("open_table: ignoring table '%s'.'%s'", + tables->db, tables->alias)); + continue; + } + result= -1; // Fatal error break; } + else + { + /* + If we are not already in prelocked mode and extended table list is not + yet built and we have trigger for table being opened then we should + cache all routines used by its triggers and add their tables to + prelocking list. + If we lock table for reading we won't update it so there is no need to + process its triggers since they never will be activated. 
+ */ + if (!thd->prelocked_mode && !thd->lex->requires_prelocking() && + tables->table->triggers && + tables->lock_type >= TL_WRITE_ALLOW_WRITE) + { + if (!query_tables_last_own) + query_tables_last_own= thd->lex->query_tables_last; + if (sp_cache_routines_and_add_tables_for_triggers(thd, thd->lex, + tables)) + { + /* + Serious error during reading stored routines from mysql.proc table. + Something's wrong with the table or its contents, and an error has + been emitted; we must abort. + */ + result= -1; + goto err; + } + } + free_root(&new_frm_mem, MYF(MY_KEEP_PREALLOC)); + } + if (tables->lock_type != TL_UNLOCK && ! thd->locked_tables) tables->table->reginfo.lock_type=tables->lock_type; tables->table->grant= tables->grant; + +process_view_routines: + /* + Again we may need cache all routines used by this view and add + tables used by them to table list. + */ + if (tables->view && !thd->prelocked_mode && + !thd->lex->requires_prelocking() && + tables->view->sroutines_list.elements) + { + /* We have at least one table in TL here. */ + if (!query_tables_last_own) + query_tables_last_own= thd->lex->query_tables_last; + if (sp_cache_routines_and_add_tables_for_view(thd, thd->lex, tables)) + { + /* + Serious error during reading stored routines from mysql.proc table. + Something is wrong with the table or its contents, and an error has + been emitted; we must abort. 
+ */ + result= -1; + goto err; + } + } } + + err: thd->proc_info=0; + free_root(&new_frm_mem, MYF(0)); // Free pre-alloced block + + if (query_tables_last_own) + thd->lex->mark_as_requiring_prelocking(query_tables_last_own); + DBUG_RETURN(result); } @@ -1689,12 +2517,10 @@ static bool check_lock_and_start_stmt(THD *thd, TABLE *table, if ((int) lock_type >= (int) TL_WRITE_ALLOW_READ && (int) table->reginfo.lock_type < (int) TL_WRITE_ALLOW_READ) { - my_printf_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, - ER(ER_TABLE_NOT_LOCKED_FOR_WRITE), - MYF(0),table->table_name); + my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0),table->alias); DBUG_RETURN(1); } - if ((error=table->file->start_stmt(thd))) + if ((error=table->file->start_stmt(thd, lock_type))) { table->file->print_error(error,MYF(0)); DBUG_RETURN(1); @@ -1712,6 +2538,11 @@ static bool check_lock_and_start_stmt(THD *thd, TABLE *table, table_list Table to open is first table in this list lock_type Lock to use for open + NOTE + This function don't do anything like SP/SF/views/triggers analysis done + in open_tables(). It is intended for opening of only one concrete table. + And used only in special contexts. + RETURN VALUES table Opened table 0 Error @@ -1729,9 +2560,11 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type) thd->proc_info="Opening table"; thd->current_tablenr= 0; - while (!(table=open_table(thd,table_list->db, - table_list->real_name,table_list->alias, - &refresh)) && refresh) ; + /* open_ltable can be used only for BASIC TABLEs */ + table_list->required_type= FRMTYPE_TABLE; + while (!(table= open_table(thd, table_list, thd->mem_root, &refresh, 0)) && + refresh) + ; if (table) { @@ -1754,7 +2587,8 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type) { DBUG_ASSERT(thd->lock == 0); // You must lock everything at once if ((table->reginfo.lock_type= lock_type) != TL_UNLOCK) - if (! (thd->lock= mysql_lock_tables(thd, &table_list->table, 1, 0))) + if (! 
(thd->lock= mysql_lock_tables(thd, &table_list->table, 1, 0, + &refresh))) table= 0; } } @@ -1777,15 +2611,25 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type) -1 - error NOTE - The lock will automaticly be freed by close_thread_tables() + The lock will automaticaly be freed by close_thread_tables() */ int simple_open_n_lock_tables(THD *thd, TABLE_LIST *tables) { - DBUG_ENTER("simple_open_n_lock_tables"); uint counter; - if (open_tables(thd, tables, &counter) || lock_tables(thd, tables, counter)) - DBUG_RETURN(-1); /* purecov: inspected */ + bool need_reopen; + DBUG_ENTER("simple_open_n_lock_tables"); + + for ( ; ; ) + { + if (open_tables(thd, &tables, &counter, 0)) + DBUG_RETURN(-1); + if (!lock_tables(thd, tables, counter, &need_reopen)) + break; + if (!need_reopen) + DBUG_RETURN(-1); + close_tables_for_reopen(thd, &tables); + } DBUG_RETURN(0); } @@ -1800,21 +2644,34 @@ int simple_open_n_lock_tables(THD *thd, TABLE_LIST *tables) tables - list of tables for open&locking RETURN - 0 - ok - -1 - error + FALSE - ok + TRUE - error NOTE - The lock will automaticly be freed by close_thread_tables() + The lock will automaticaly be freed by close_thread_tables() */ -int open_and_lock_tables(THD *thd, TABLE_LIST *tables) +bool open_and_lock_tables(THD *thd, TABLE_LIST *tables) { - DBUG_ENTER("open_and_lock_tables"); uint counter; - if (open_tables(thd, tables, &counter) || lock_tables(thd, tables, counter)) - DBUG_RETURN(-1); /* purecov: inspected */ - relink_tables_for_derived(thd); - DBUG_RETURN(mysql_handle_derived(thd->lex)); + bool need_reopen; + DBUG_ENTER("open_and_lock_tables"); + + for ( ; ; ) + { + if (open_tables(thd, &tables, &counter, 0)) + DBUG_RETURN(-1); + if (!lock_tables(thd, tables, counter, &need_reopen)) + break; + if (!need_reopen) + DBUG_RETURN(-1); + close_tables_for_reopen(thd, &tables); + } + if (mysql_handle_derived(thd->lex, &mysql_derived_prepare) || + (thd->fill_derived_tables() && + mysql_handle_derived(thd->lex, 
&mysql_derived_filling))) + DBUG_RETURN(TRUE); /* purecov: inspected */ + DBUG_RETURN(0); } @@ -1825,6 +2682,9 @@ int open_and_lock_tables(THD *thd, TABLE_LIST *tables) open_normal_and_derived_tables thd - thread handler tables - list of tables for open + flags - bitmap of flags to modify how the tables will be open: + MYSQL_LOCK_IGNORE_FLUSH - open table even if someone has + done a flush or namelock on it. RETURN FALSE - ok @@ -1835,36 +2695,36 @@ int open_and_lock_tables(THD *thd, TABLE_LIST *tables) data from the tables. */ -int open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables) +bool open_normal_and_derived_tables(THD *thd, TABLE_LIST *tables, uint flags) { uint counter; DBUG_ENTER("open_normal_and_derived_tables"); - if (open_tables(thd, tables, &counter)) - DBUG_RETURN(-1); /* purecov: inspected */ - relink_tables_for_derived(thd); - DBUG_RETURN(mysql_handle_derived(thd->lex)); + DBUG_ASSERT(!thd->fill_derived_tables()); + if (open_tables(thd, &tables, &counter, flags) || + mysql_handle_derived(thd->lex, &mysql_derived_prepare)) + DBUG_RETURN(TRUE); /* purecov: inspected */ + DBUG_RETURN(0); } /* - Let us propagate pointers to open tables from global table list - to table lists in particular selects if needed. + Mark all real tables in the list as free for reuse. + + SYNOPSIS + mark_real_tables_as_free_for_reuse() + thd - thread context + table - head of the list of tables + + DESCRIPTION + Marks all real tables in the list (i.e. not views, derived + or schema tables) as free for reuse. 
*/ -void relink_tables_for_derived(THD *thd) +static void mark_real_tables_as_free_for_reuse(TABLE_LIST *table) { - if (thd->lex->all_selects_list->next_select_in_list() || - thd->lex->time_zone_tables_used) - { - for (SELECT_LEX *sl= thd->lex->all_selects_list; - sl; - sl= sl->next_select_in_list()) - for (TABLE_LIST *cursor= (TABLE_LIST *) sl->table_list.first; - cursor; - cursor=cursor->next) - if (cursor->table_list) - cursor->table= cursor->table_list->table; - } + for (; table; table= table->next_global) + if (!table->placeholder()) + table->table->query_id= 0; } @@ -1875,51 +2735,183 @@ void relink_tables_for_derived(THD *thd) lock_tables() thd Thread handler tables Tables to lock - count umber of opened tables + count Number of opened tables + need_reopen Out parameter which if TRUE indicates that some + tables were dropped or altered during this call + and therefore invoker should reopen tables and + try to lock them once again (in this case + lock_tables() will also return error). NOTES You can't call lock_tables twice, as this would break the dead-lock-free handling thr_lock gives us. You most always get all needed locks at once. + If query for which we are calling this function marked as requring + prelocking, this function will do implicit LOCK TABLES and change + thd::prelocked_mode accordingly. + RETURN VALUES 0 ok -1 Error */ -int lock_tables(THD *thd, TABLE_LIST *tables, uint count) +int lock_tables(THD *thd, TABLE_LIST *tables, uint count, bool *need_reopen) { TABLE_LIST *table; + + DBUG_ENTER("lock_tables"); + /* + We can't meet statement requiring prelocking if we already + in prelocked mode. + */ + DBUG_ASSERT(!thd->prelocked_mode || !thd->lex->requires_prelocking()); + /* + If statement requires prelocking then it has non-empty table list. + So it is safe to shortcut. 
+ */ + DBUG_ASSERT(!thd->lex->requires_prelocking() || tables); + + *need_reopen= FALSE; + if (!tables) - return 0; + DBUG_RETURN(0); - if (!thd->locked_tables) + /* + We need this extra check for thd->prelocked_mode because we want to avoid + attempts to lock tables in substatements. Checking for thd->locked_tables + is not enough in some situations. For example for SP containing + "drop table t3; create temporary t3 ..; insert into t3 ...;" + thd->locked_tables may be 0 after drop tables, and without this extra + check insert will try to lock temporary table t3, that will lead + to memory leak... + */ + if (!thd->locked_tables && !thd->prelocked_mode) { DBUG_ASSERT(thd->lock == 0); // You must lock everything at once TABLE **start,**ptr; - if (!(ptr=start=(TABLE**) sql_alloc(sizeof(TABLE*)*count))) - return -1; - for (table = tables ; table ; table=table->next) + + if (!(ptr=start=(TABLE**) thd->alloc(sizeof(TABLE*)*count))) + DBUG_RETURN(-1); + for (table= tables; table; table= table->next_global) { - if (!table->derived) + if (!table->placeholder()) *(ptr++)= table->table; } - if (! (thd->lock= mysql_lock_tables(thd, start, (uint) (ptr - start), 0))) - return -1; /* purecov: inspected */ + + /* We have to emulate LOCK TABLES if we are statement needs prelocking. */ + if (thd->lex->requires_prelocking()) + { + thd->in_lock_tables=1; + thd->options|= OPTION_TABLE_LOCK; + } + + if (! (thd->lock= mysql_lock_tables(thd, start, (uint) (ptr - start), + MYSQL_LOCK_NOTIFY_IF_NEED_REOPEN, + need_reopen))) + { + if (thd->lex->requires_prelocking()) + { + thd->options&= ~(ulong) (OPTION_TABLE_LOCK); + thd->in_lock_tables=0; + } + DBUG_RETURN(-1); + } + if (thd->lex->requires_prelocking() && + thd->lex->sql_command != SQLCOM_LOCK_TABLES) + { + TABLE_LIST *first_not_own= thd->lex->first_not_own_table(); + /* + We just have done implicit LOCK TABLES, and now we have + to emulate first open_and_lock_tables() after it. 
+ + Note that "LOCK TABLES" can also be marked as requiring prelocking + (e.g. if one locks view which uses functions). We should not emulate + such open_and_lock_tables() in this case. We also should not set + THD::prelocked_mode or first close_thread_tables() call will do + "UNLOCK TABLES". + */ + thd->locked_tables= thd->lock; + thd->lock= 0; + thd->in_lock_tables=0; + + for (table= tables; table != first_not_own; table= table->next_global) + { + if (!table->placeholder()) + { + table->table->query_id= thd->query_id; + if (check_lock_and_start_stmt(thd, table->table, table->lock_type)) + { + ha_rollback_stmt(thd); + mysql_unlock_tables(thd, thd->locked_tables); + thd->locked_tables= 0; + thd->options&= ~(ulong) (OPTION_TABLE_LOCK); + DBUG_RETURN(-1); + } + } + } + /* + Let us mark all tables which don't belong to the statement itself, + and was marked as occupied during open_tables() as free for reuse. + */ + mark_real_tables_as_free_for_reuse(first_not_own); + DBUG_PRINT("info",("prelocked_mode= PRELOCKED")); + thd->prelocked_mode= PRELOCKED; + } } else { - for (table = tables ; table ; table=table->next) + TABLE_LIST *first_not_own= thd->lex->first_not_own_table(); + for (table= tables; table != first_not_own; table= table->next_global) { - if (!table->derived && + if (!table->placeholder() && check_lock_and_start_stmt(thd, table->table, table->lock_type)) { ha_rollback_stmt(thd); - return -1; + DBUG_RETURN(-1); } } + /* + If we are under explicit LOCK TABLES and our statement requires + prelocking, we should mark all "additional" tables as free for use + and enter prelocked mode. + */ + if (thd->lex->requires_prelocking()) + { + mark_real_tables_as_free_for_reuse(first_not_own); + DBUG_PRINT("info", ("thd->prelocked_mode= PRELOCKED_UNDER_LOCK_TABLES")); + thd->prelocked_mode= PRELOCKED_UNDER_LOCK_TABLES; + } } - return 0; + DBUG_RETURN(0); +} + + +/* + Prepare statement for reopening of tables and recalculation of set of + prelocked tables. 
+ + SYNOPSIS + close_tables_for_reopen() + thd in Thread context + tables in/out List of tables which we were trying to open and lock + +*/ + +void close_tables_for_reopen(THD *thd, TABLE_LIST **tables) +{ + /* + If table list consists only from tables from prelocking set, table list + for new attempt should be empty, so we have to update list's root pointer. + */ + if (thd->lex->first_not_own_table() == *tables) + *tables= 0; + thd->lex->chop_off_not_own_tables(); + sp_remove_not_own_routines(thd->lex); + for (TABLE_LIST *tmp= *tables; tmp; tmp= tmp->next_global) + tmp->table= 0; + mark_used_tables_as_free_for_reuse(thd, thd->temporary_tables); + close_thread_tables(thd); } @@ -1933,6 +2925,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, const char *table_name, bool link_in_list) { TABLE *tmp_table; + TABLE_SHARE *share; DBUG_ENTER("open_temporary_table"); /* @@ -1947,7 +2940,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, MYF(MY_WME)))) DBUG_RETURN(0); /* purecov: inspected */ - if (openfrm(path, table_name, + if (openfrm(thd, path, table_name, (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, ha_open_options, @@ -1957,21 +2950,22 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, DBUG_RETURN(0); } + share= tmp_table->s; tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked - tmp_table->in_use= thd; - tmp_table->tmp_table = (tmp_table->file->has_transactions() ? 
- TRANSACTIONAL_TMP_TABLE : TMP_TABLE); - tmp_table->table_cache_key=(char*) (tmp_table+1); - tmp_table->key_length= (uint) (strmov((tmp_table->real_name= - strmov(tmp_table->table_cache_key,db) - +1), table_name) - - tmp_table->table_cache_key)+1; - int4store(tmp_table->table_cache_key + tmp_table->key_length, - thd->server_id); - tmp_table->key_length += 4; - int4store(tmp_table->table_cache_key + tmp_table->key_length, + share->tmp_table= (tmp_table->file->has_transactions() ? + TRANSACTIONAL_TMP_TABLE : TMP_TABLE); + share->table_cache_key= (char*) (tmp_table+1); + share->db= share->table_cache_key; + share->key_length= (uint) (strmov(((char*) (share->table_name= + strmov(share->table_cache_key, + db)+1)), + table_name) - + share->table_cache_key) +1; + int4store(share->table_cache_key + share->key_length, thd->server_id); + share->key_length+= 4; + int4store(share->table_cache_key + share->key_length, thd->variables.pseudo_thread_id); - tmp_table->key_length += 4; + share->key_length+= 4; if (link_in_list) { @@ -1980,6 +2974,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, if (thd->slave_thread) slave_open_temp_tables++; } + tmp_table->pos_in_table_list= 0; DBUG_RETURN(tmp_table); } @@ -1994,7 +2989,7 @@ bool rm_temporary_table(enum db_type base, char *path) if (my_delete(path,MYF(0))) error=1; /* purecov: inspected */ *fn_ext(path)='\0'; // remove extension - handler *file=get_new_handler((TABLE*) 0, base); + handler *file= get_new_handler((TABLE*) 0, current_thd->mem_root, base); if (file && file->delete_table(path)) { error=1; @@ -2007,31 +3002,276 @@ bool rm_temporary_table(enum db_type base, char *path) /***************************************************************************** -** find field in list or tables. if field is unqualifed and unique, -** return unique field +* The following find_field_in_XXX procedures implement the core of the +* name resolution functionality. 
The entry point to resolve a column name in a +* list of tables is 'find_field_in_tables'. It calls 'find_field_in_table_ref' +* for each table reference. In turn, depending on the type of table reference, +* 'find_field_in_table_ref' calls one of the 'find_field_in_XXX' procedures +* below specific for the type of table reference. ******************************************************************************/ +/* Special Field pointers as return values of find_field_in_XXX functions. */ +Field *not_found_field= (Field*) 0x1; +Field *view_ref_found= (Field*) 0x2; + #define WRONG_GRANT (Field*) -1 -Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, - bool check_grants, bool allow_rowid, - uint *cached_field_index_ptr) +static void update_field_dependencies(THD *thd, Field *field, TABLE *table) +{ + if (thd->set_query_id) + { + if (field->query_id != thd->query_id) + { + field->query_id= thd->query_id; + table->used_fields++; + table->used_keys.intersect(field->part_of_key); + } + else + thd->dupp_field= field; + } +} + + +/* + Find a field by name in a view that uses merge algorithm. 
+ + SYNOPSIS + find_field_in_view() + thd thread handler + table_list view to search for 'name' + name name of field + length length of name + item_name name of item if it will be created (VIEW) + ref expression substituted in VIEW should be passed + using this reference (return view_ref_found) + register_tree_change TRUE if ref is not stack variable and we + need register changes in item tree + + RETURN + 0 field is not found + view_ref_found found value in VIEW (real result is in *ref) + # pointer to field - only for schema table fields +*/ + +static Field * +find_field_in_view(THD *thd, TABLE_LIST *table_list, + const char *name, uint length, + const char *item_name, Item **ref, + bool register_tree_change) +{ + DBUG_ENTER("find_field_in_view"); + DBUG_PRINT("enter", + ("view: '%s', field name: '%s', item name: '%s', ref 0x%lx", + table_list->alias, name, item_name, (ulong) ref)); + Field_iterator_view field_it; + field_it.set(table_list); + Query_arena *arena, backup; + + DBUG_ASSERT(table_list->schema_table_reformed || + (ref != 0 && table_list->view != 0)); + for (; !field_it.end_of_fields(); field_it.next()) + { + if (!my_strcasecmp(system_charset_info, field_it.name(), name)) + { + // in PS use own arena or data will be freed after prepare + if (register_tree_change) + arena= thd->activate_stmt_arena_if_needed(&backup); + /* + create_item() may, or may not create a new Item, depending on + the column reference. See create_view_field() for details. + */ + Item *item= field_it.create_item(thd); + if (register_tree_change && arena) + thd->restore_active_arena(arena, &backup); + + if (!item) + DBUG_RETURN(0); + /* + *ref != NULL means that *ref contains the item that we need to + replace. If the item was aliased by the user, set the alias to + the replacing item. + We need to set alias on both ref itself and on ref real item. 
+ */ + if (*ref && !(*ref)->is_autogenerated_name) + { + item->set_name((*ref)->name, (*ref)->name_length, + system_charset_info); + item->real_item()->set_name((*ref)->name, (*ref)->name_length, + system_charset_info); + } + if (register_tree_change) + thd->change_item_tree(ref, item); + else + *ref= item; + DBUG_RETURN((Field*) view_ref_found); + } + } + DBUG_RETURN(0); +} + + +/* + Find field by name in a NATURAL/USING join table reference. + + SYNOPSIS + find_field_in_natural_join() + thd [in] thread handler + table_ref [in] table reference to search + name [in] name of field + length [in] length of name + ref [in/out] if 'name' is resolved to a view field, ref is + set to point to the found view field + register_tree_change [in] TRUE if ref is not stack variable and we + need register changes in item tree + actual_table [out] the original table reference where the field + belongs - differs from 'table_list' only for + NATURAL/USING joins + + DESCRIPTION + Search for a field among the result fields of a NATURAL/USING join. + Notice that this procedure is called only for non-qualified field + names. In the case of qualified fields, we search directly the base + tables of a natural join. 
+ + RETURN + NULL if the field was not found + WRONG_GRANT if no access rights to the found field + # Pointer to the found Field +*/ + +static Field * +find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name, + uint length, Item **ref, bool register_tree_change, + TABLE_LIST **actual_table) +{ + List_iterator_fast<Natural_join_column> + field_it(*(table_ref->join_columns)); + Natural_join_column *nj_col, *curr_nj_col; + Field *found_field; + Query_arena *arena, backup; + DBUG_ENTER("find_field_in_natural_join"); + DBUG_PRINT("enter", ("field name: '%s', ref 0x%lx", + name, (ulong) ref)); + DBUG_ASSERT(table_ref->is_natural_join && table_ref->join_columns); + DBUG_ASSERT(*actual_table == NULL); + + LINT_INIT(found_field); + + for (nj_col= NULL, curr_nj_col= field_it++; curr_nj_col; + curr_nj_col= field_it++) + { + if (!my_strcasecmp(system_charset_info, curr_nj_col->name(), name)) + { + if (nj_col) + { + my_error(ER_NON_UNIQ_ERROR, MYF(0), name, thd->where); + DBUG_RETURN(NULL); + } + nj_col= curr_nj_col; + } + } + if (!nj_col) + DBUG_RETURN(NULL); + + if (nj_col->view_field) + { + Item *item; + LINT_INIT(arena); + if (register_tree_change) + arena= thd->activate_stmt_arena_if_needed(&backup); + /* + create_item() may, or may not create a new Item, depending on the + column reference. See create_view_field() for details. + */ + item= nj_col->create_item(thd); + /* + *ref != NULL means that *ref contains the item that we need to + replace. If the item was aliased by the user, set the alias to + the replacing item. + We need to set alias on both ref itself and on ref real item. 
+ */ + if (*ref && !(*ref)->is_autogenerated_name) + { + item->set_name((*ref)->name, (*ref)->name_length, + system_charset_info); + item->real_item()->set_name((*ref)->name, (*ref)->name_length, + system_charset_info); + } + if (register_tree_change && arena) + thd->restore_active_arena(arena, &backup); + + if (!item) + DBUG_RETURN(NULL); + DBUG_ASSERT(nj_col->table_field == NULL); + if (nj_col->table_ref->schema_table_reformed) + { + /* + Translation table items are always Item_fields and fixed + already('mysql_schema_table' function). So we can return + ->field. It is used only for 'show & where' commands. + */ + DBUG_RETURN(((Item_field*) (nj_col->view_field->item))->field); + } + if (register_tree_change) + thd->change_item_tree(ref, item); + else + *ref= item; + found_field= (Field*) view_ref_found; + } + else + { + /* This is a base table. */ + DBUG_ASSERT(nj_col->view_field == NULL); + DBUG_ASSERT(nj_col->table_ref->table == nj_col->table_field->table); + found_field= nj_col->table_field; + update_field_dependencies(thd, found_field, nj_col->table_ref->table); + } + + *actual_table= nj_col->table_ref; + + DBUG_RETURN(found_field); +} + + +/* + Find field by name in a base table or a view with temp table algorithm. + + SYNOPSIS + find_field_in_table() + thd thread handler + table table where to search for the field + name name of field + length length of name + allow_rowid do allow finding of "_rowid" field? 
+ cached_field_index_ptr cached position in field list (used to speedup + lookup for fields in prepared tables) + + RETURN + 0 field is not found + # pointer to field +*/ + +Field * +find_field_in_table(THD *thd, TABLE *table, const char *name, uint length, + bool allow_rowid, uint *cached_field_index_ptr) { Field **field_ptr, *field; uint cached_field_index= *cached_field_index_ptr; + DBUG_ENTER("find_field_in_table"); + DBUG_PRINT("enter", ("table: '%s', field name: '%s'", table->alias, name)); /* We assume here that table->field < NO_CACHED_FIELD_INDEX = UINT_MAX */ - if (cached_field_index < table->fields && - !my_strcasecmp(system_charset_info, + if (cached_field_index < table->s->fields && + !my_strcasecmp(system_charset_info, table->field[cached_field_index]->field_name, name)) field_ptr= table->field + cached_field_index; - else if (table->name_hash.records) - field_ptr= (Field**)hash_search(&table->name_hash,(byte*) name, - length); + else if (table->s->name_hash.records) + field_ptr= (Field**) hash_search(&table->s->name_hash, (byte*) name, + length); else { if (!(field_ptr= table->field)) - return (Field *)0; + DBUG_RETURN((Field *)0); for (; *field_ptr; ++field_ptr) if (!my_strcasecmp(system_charset_info, (*field_ptr)->field_name, name)) break; @@ -2047,25 +3287,163 @@ Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, if (!allow_rowid || my_strcasecmp(system_charset_info, name, "_rowid") || !(field=table->rowid_field)) - return (Field*) 0; + DBUG_RETURN((Field*) 0); } - if (thd->set_query_id) + update_field_dependencies(thd, field, table); + + DBUG_RETURN(field); +} + + +/* + Find field in a table reference. 
+ + SYNOPSIS + find_field_in_table_ref() + thd [in] thread handler + table_list [in] table reference to search + name [in] name of field + length [in] field length of name + item_name [in] name of item if it will be created (VIEW) + db_name [in] optional database name that qualifies the + table_name [in] optional table name that qualifies the field + ref [in/out] if 'name' is resolved to a view field, ref + is set to point to the found view field + check_privileges [in] check privileges + allow_rowid [in] do allow finding of "_rowid" field? + cached_field_index_ptr [in] cached position in field list (used to + speedup lookup for fields in prepared tables) + register_tree_change [in] TRUE if ref is not stack variable and we + need register changes in item tree + actual_table [out] the original table reference where the field + belongs - differs from 'table_list' only for + NATURAL_USING joins. + + DESCRIPTION + Find a field in a table reference depending on the type of table + reference. There are three types of table references with respect + to the representation of their result columns: + - an array of Field_translator objects for MERGE views and some + information_schema tables, + - an array of Field objects (and possibly a name hash) for stored + tables, + - a list of Natural_join_column objects for NATURAL/USING joins. + This procedure detects the type of the table reference 'table_list' + and calls the corresponding search routine. 
+ + RETURN + 0 field is not found + view_ref_found found value in VIEW (real result is in *ref) + # pointer to field +*/ + +Field * +find_field_in_table_ref(THD *thd, TABLE_LIST *table_list, + const char *name, uint length, + const char *item_name, const char *db_name, + const char *table_name, Item **ref, + bool check_privileges, bool allow_rowid, + uint *cached_field_index_ptr, + bool register_tree_change, TABLE_LIST **actual_table) +{ + Field *fld; + DBUG_ENTER("find_field_in_table_ref"); + DBUG_PRINT("enter", + ("table: '%s' field name: '%s' item name: '%s' ref 0x%lx", + table_list->alias, name, item_name, (ulong) ref)); + + /* + Check that the table and database that qualify the current field name + are the same as the table reference we are going to search for the field. + + Exclude from the test below nested joins because the columns in a + nested join generally originate from different tables. Nested joins + also have no table name, except when a nested join is a merge view + or an information schema table. + + We include explicitly table references with a 'field_translation' table, + because if there are views over natural joins we don't want to search + inside the view, but we want to search directly in the view columns + which are represented as a 'field_translation'. + + TODO: Ensure that table_name, db_name and tables->db always points to + something ! + */ + if (/* Exclude nested joins. */ + (!table_list->nested_join || + /* Include merge views and information schema tables. */ + table_list->field_translation) && + /* + Test if the field qualifiers match the table reference we plan + to search. 
+ */ + table_name && table_name[0] && + (my_strcasecmp(table_alias_charset, table_list->alias, table_name) || + (db_name && db_name[0] && table_list->db && table_list->db[0] && + strcmp(db_name, table_list->db)))) + DBUG_RETURN(0); + + *actual_table= NULL; + + if (table_list->field_translation) { - if (field->query_id != thd->query_id) + /* 'table_list' is a view or an information schema table. */ + if ((fld= find_field_in_view(thd, table_list, name, length, item_name, ref, + register_tree_change))) + *actual_table= table_list; + } + else if (!table_list->nested_join) + { + /* 'table_list' is a stored table. */ + DBUG_ASSERT(table_list->table); + if ((fld= find_field_in_table(thd, table_list->table, name, length, + allow_rowid, + cached_field_index_ptr))) + *actual_table= table_list; + } + else + { + /* + 'table_list' is a NATURAL/USING join, or an operand of such join that + is a nested join itself. + + If the field name we search for is qualified, then search for the field + in the table references used by NATURAL/USING the join. + */ + if (table_name && table_name[0]) { - field->query_id=thd->query_id; - table->used_fields++; - table->used_keys.intersect(field->part_of_key); + List_iterator<TABLE_LIST> it(table_list->nested_join->join_list); + TABLE_LIST *table; + while ((table= it++)) + { + if ((fld= find_field_in_table_ref(thd, table, name, length, item_name, + db_name, table_name, ref, + check_privileges, allow_rowid, + cached_field_index_ptr, + register_tree_change, actual_table))) + DBUG_RETURN(fld); + } + DBUG_RETURN(0); } - else - thd->dupp_field=field; + /* + Non-qualified field, search directly in the result columns of the + natural join. The condition of the outer IF is true for the top-most + natural join, thus if the field is not qualified, we will search + directly the top-most NATURAL/USING join. 
+ */ + fld= find_field_in_natural_join(thd, table_list, name, length, ref, + register_tree_change, actual_table); } + #ifndef NO_EMBEDDED_ACCESS_CHECKS - if (check_grants && check_grant_column(thd,table,name,length)) - return WRONG_GRANT; + /* Check if there are sufficient access rights to the found field. */ + if (fld && check_privileges && + check_column_grant_in_table_ref(thd, *actual_table, name, length)) + fld= WRONG_GRANT; #endif - return field; + + DBUG_RETURN(fld); } @@ -2074,58 +3452,106 @@ Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, SYNOPSIS find_field_in_tables() - thd Pointer to current thread structure - item Field item that should be found - tables Tables for scanning - where Table where field found will be returned via - this parameter - report_error If FALSE then do not report error if item not found - and return not_found_field + thd pointer to current thread structure + item field item that should be found + first_table list of tables to be searched for item + last_table end of the list of tables to search for item. If NULL + then search to the end of the list 'first_table'. + ref if 'item' is resolved to a view field, ref is set to + point to the found view field + report_error Degree of error reporting: + - IGNORE_ERRORS then do not report any error + - IGNORE_EXCEPT_NON_UNIQUE report only non-unique + fields, suppress all other errors + - REPORT_EXCEPT_NON_UNIQUE report all other errors + except when non-unique fields were found + - REPORT_ALL_ERRORS + check_privileges need to check privileges + register_tree_change TRUE if ref is not a stack variable and we + to need register changes in item tree RETURN VALUES - 0 Field is not found or field is not unique- error - message is reported - not_found_field Function was called with report_error == FALSE and - field was not found. no error message reported. 
- found field + 0 If error: the found field is not unique, or there are + no sufficient access priviliges for the found field, + or the field is qualified with non-existing table. + not_found_field The function was called with report_error == + (IGNORE_ERRORS || IGNORE_EXCEPT_NON_UNIQUE) and a + field was not found. + view_ref_found View field is found, item passed through ref parameter + found field If a item was resolved to some field */ -// Special Field pointer for find_field_in_tables returning -const Field *not_found_field= (Field*) 0x1; - Field * -find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, - TABLE_LIST **where, bool report_error) +find_field_in_tables(THD *thd, Item_ident *item, + TABLE_LIST *first_table, TABLE_LIST *last_table, + Item **ref, find_item_error_report_type report_error, + bool check_privileges, bool register_tree_change) { Field *found=0; - const char *db=item->db_name; - const char *table_name=item->table_name; - const char *name=item->field_name; + const char *db= item->db_name; + const char *table_name= item->table_name; + const char *name= item->field_name; uint length=(uint) strlen(name); char name_buff[NAME_LEN+1]; + TABLE_LIST *cur_table= first_table; + TABLE_LIST *actual_table; bool allow_rowid; + if (!table_name || !table_name[0]) + { + table_name= 0; // For easier test + db= 0; + } + + allow_rowid= table_name || (cur_table && !cur_table->next_local); + if (item->cached_table) { /* - This shortcut is used by prepared statements. We assuming that - TABLE_LIST *tables is not changed during query execution (which - is true for all queries except RENAME but luckily RENAME doesn't + This shortcut is used by prepared statements. We assume that + TABLE_LIST *first_table is not changed during query execution (which + is true for all queries except RENAME but luckily RENAME doesn't use fields...) so we can rely on reusing pointer to its member. 
- With this optimisation we also miss case when addition of one more - field makes some prepared query ambiguous and so erronous, but we + With this optimization we also miss case when addition of one more + field makes some prepared query ambiguous and so erroneous, but we accept this trade off. */ - found= find_field_in_table(thd, item->cached_table->table, name, length, - test(item->cached_table-> - table->grant.want_privilege), - 1, &(item->cached_field_index)); - + TABLE_LIST *table_ref= item->cached_table; + /* + The condition (table_ref->view == NULL) ensures that we will call + find_field_in_table even in the case of information schema tables + when table_ref->field_translation != NULL. + */ + if (table_ref->table && !table_ref->view) + found= find_field_in_table(thd, table_ref->table, name, length, + TRUE, &(item->cached_field_index)); + else + found= find_field_in_table_ref(thd, table_ref, name, length, item->name, + NULL, NULL, ref, check_privileges, + TRUE, &(item->cached_field_index), + register_tree_change, + &actual_table); if (found) { - (*where)= tables; if (found == WRONG_GRANT) - return (Field*) 0; + return (Field*) 0; + + /* + Only views fields should be marked as dependent, not an underlying + fields. + */ + if (!table_ref->belong_to_view) + { + SELECT_LEX *current_sel= thd->lex->current_select; + SELECT_LEX *last_select= table_ref->select_lex; + /* + If the field was an outer referencee, mark all selects using this + sub query as dependent on the outer query + */ + if (current_sel != last_select) + mark_select_range_as_dependent(thd, last_select, current_sel, + found, *ref, item); + } return found; } } @@ -2133,7 +3559,7 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, if (db && lower_case_table_names) { /* - convert database to lower case for comparision. + convert database to lower case for comparison. 
We can't do this in Item_field as this would change the 'name' of the item which may be used in the select list */ @@ -2142,99 +3568,81 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, db= name_buff; } - if (table_name && table_name[0]) - { /* Qualified field */ - bool found_table=0; - for (; tables ; tables=tables->next) - { - if (!my_strcasecmp(table_alias_charset, tables->alias, table_name) && - (!db || !tables->db || !tables->db[0] || !strcmp(db,tables->db))) - { - found_table=1; - Field *find=find_field_in_table(thd,tables->table,name,length, - test(tables->table->grant. - want_privilege), - 1, &(item->cached_field_index)); - if (find) - { - (*where)= item->cached_table= tables; - if (!tables->cacheable_table) - item->cached_table= 0; - if (find == WRONG_GRANT) - return (Field*) 0; - if (db || !thd->where) - return find; - if (found) - { - my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0), - item->full_name(),thd->where); - return (Field*) 0; - } - found=find; - } - } - } - if (found) - return found; - if (!found_table && report_error) - { - char buff[NAME_LEN*2+1]; - if (db && db[0]) - { - strxnmov(buff,sizeof(buff)-1,db,".",table_name,NullS); - table_name=buff; - } - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - table_name, thd->where); - } - else - if (report_error) - my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), - item->full_name(),thd->where); - else - return (Field*) not_found_field; - return (Field*) 0; - } - allow_rowid= tables && !tables->next; // Only one table - for (; tables ; tables=tables->next) - { - if (!tables->table) - { - if (report_error) - my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), - item->full_name(),thd->where); - return (Field*) not_found_field; - } + if (last_table) + last_table= last_table->next_name_resolution_table; - Field *field=find_field_in_table(thd,tables->table,name,length, - test(tables->table->grant.want_privilege), - 
allow_rowid, &(item->cached_field_index)); - if (field) + for (; cur_table != last_table ; + cur_table= cur_table->next_name_resolution_table) + { + Field *cur_field= find_field_in_table_ref(thd, cur_table, name, length, + item->name, db, table_name, ref, + check_privileges, allow_rowid, + &(item->cached_field_index), + register_tree_change, + &actual_table); + if (cur_field) { - if (field == WRONG_GRANT) + if (cur_field == WRONG_GRANT) return (Field*) 0; - (*where)= item->cached_table= tables; - if (!tables->cacheable_table) - item->cached_table= 0; + + /* + Store the original table of the field, which may be different from + cur_table in the case of NATURAL/USING join. + */ + item->cached_table= (!actual_table->cacheable_table || found) ? + 0 : actual_table; + + DBUG_ASSERT(thd->where); + /* + If we found a fully qualified field we return it directly as it can't + have duplicates. + */ + if (db) + return cur_field; + if (found) { - if (!thd->where) // Returns first found - break; - my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0), - name,thd->where); + if (report_error == REPORT_ALL_ERRORS || + report_error == IGNORE_EXCEPT_NON_UNIQUE) + my_error(ER_NON_UNIQ_ERROR, MYF(0), + table_name ? item->full_name() : name, thd->where); return (Field*) 0; } - found= field; + found= cur_field; } } + if (found) return found; - if (report_error) - my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), - MYF(0), item->full_name(), thd->where); + + /* + If the field was qualified and there were no tables to search, issue + an error that an unknown table was given. The situation is detected + as follows: if there were no tables we wouldn't go through the loop + and cur_table wouldn't be updated by the loop increment part, so it + will be equal to the first table. 
+ */ + if (table_name && (cur_table == first_table) && + (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE)) + { + char buff[NAME_LEN*2+1]; + if (db && db[0]) + { + strxnmov(buff,sizeof(buff)-1,db,".",table_name,NullS); + table_name=buff; + } + my_error(ER_UNKNOWN_TABLE, MYF(0), table_name, thd->where); + } else - return (Field*) not_found_field; - return (Field*) 0; + { + if (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE) + my_error(ER_BAD_FIELD_ERROR, MYF(0), item->full_name(), thd->where); + else + found= not_found_field; + } + return found; } @@ -2255,10 +3663,13 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, return not_found_item, report other errors, return 0 IGNORE_ERRORS Do not report errors, return 0 if error - unaliased Set to true if item is field which was found - by original field name and not by its alias - in item list. Set to false otherwise. - + resolution Set to the resolution type if the item is found + (it says whether the item is resolved + against an alias name, + or as a field name without alias, + or as a field hidden by alias, + or ignoring alias) + RETURN VALUES 0 Item is not found or item is not unique, error message is reported @@ -2268,13 +3679,14 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, found field */ -// Special Item pointer for find_item_in_list returning -const Item **not_found_item= (const Item**) 0x1; +/* Special Item pointer to serve as a return value from find_item_in_list(). 
*/ +Item **not_found_item= (Item**) 0x1; Item ** find_item_in_list(Item *find, List<Item> &items, uint *counter, - find_item_error_report_type report_error, bool *unaliased) + find_item_error_report_type report_error, + enum_resolution_type *resolution) { List_iterator<Item> li(items); Item **found=0, **found_unaliased= 0, *item; @@ -2288,9 +3700,9 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, */ bool is_ref_by_name= 0; uint unaliased_counter; + LINT_INIT(unaliased_counter); // Dependent on found_unaliased - LINT_INIT(unaliased_counter); - *unaliased= FALSE; + *resolution= NOT_RESOLVED; is_ref_by_name= (find->type() == Item::FIELD_ITEM || find->type() == Item::REF_ITEM); @@ -2303,9 +3715,9 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, for (uint i= 0; (item=li++); i++) { - if (field_name && item->type() == Item::FIELD_ITEM) + if (field_name && item->real_item()->type() == Item::FIELD_ITEM) { - Item_field *item_field= (Item_field*) item; + Item_ident *item_field= (Item_ident*) item; /* In case of group_concat() with ORDER BY condition in the QUERY @@ -2351,84 +3763,98 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, unaliased names only and will have duplicate error anyway. */ if (report_error != IGNORE_ERRORS) - my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), - MYF(0), find->full_name(), current_thd->where); + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find->full_name(), current_thd->where); return (Item**) 0; } found_unaliased= li.ref(); unaliased_counter= i; + *resolution= RESOLVED_IGNORING_ALIAS; if (db_name) break; // Perfect match } } - else if (!my_strcasecmp(system_charset_info, item_field->name, - field_name)) - { - /* - If table name was not given we should scan through aliases - (or non-aliased fields) first. We are also checking unaliased - name of the field in then next else-if, to be able to find - instantly field (hidden by alias) if no suitable alias (or - non-aliased field) was found. 
- */ - if (found) - { - if ((*found)->eq(item, 0)) - continue; // Same field twice - if (report_error != IGNORE_ERRORS) - my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), - MYF(0), find->full_name(), current_thd->where); - return (Item**) 0; - } - found= li.ref(); - *counter= i; - } - else if (!my_strcasecmp(system_charset_info, item_field->field_name, - field_name)) + else { - /* - We will use un-aliased field or react on such ambiguities only if - we won't be able to find aliased field. - Again if we have ambiguity with field outside of select list - we should prefer fields from select list. - */ - if (found_unaliased) + int fname_cmp= my_strcasecmp(system_charset_info, + item_field->field_name, + field_name); + if (!my_strcasecmp(system_charset_info, + item_field->name,field_name)) { - if ((*found_unaliased)->eq(item, 0)) - continue; // Same field twice - found_unaliased_non_uniq= 1; + /* + If table name was not given we should scan through aliases + and non-aliased fields first. We are also checking unaliased + name of the field in then next else-if, to be able to find + instantly field (hidden by alias) if no suitable alias or + non-aliased field was found. + */ + if (found) + { + if ((*found)->eq(item, 0)) + continue; // Same field twice + if (report_error != IGNORE_ERRORS) + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find->full_name(), current_thd->where); + return (Item**) 0; + } + found= li.ref(); + *counter= i; + *resolution= fname_cmp ? RESOLVED_AGAINST_ALIAS: + RESOLVED_WITH_NO_ALIAS; } - else + else if (!fname_cmp) { + /* + We will use non-aliased field or react on such ambiguities only if + we won't be able to find aliased field. + Again if we have ambiguity with field outside of select list + we should prefer fields from select list. 
+ */ + if (found_unaliased) + { + if ((*found_unaliased)->eq(item, 0)) + continue; // Same field twice + found_unaliased_non_uniq= 1; + } found_unaliased= li.ref(); unaliased_counter= i; } } } - else if (!table_name && (item->eq(find,0) || - is_ref_by_name && find->name && item->name && - !my_strcasecmp(system_charset_info, - item->name,find->name))) - { - found= li.ref(); - *counter= i; - break; - } + else if (!table_name) + { + if (is_ref_by_name && find->name && item->name && + !my_strcasecmp(system_charset_info,item->name,find->name)) + { + found= li.ref(); + *counter= i; + *resolution= RESOLVED_AGAINST_ALIAS; + break; + } + else if (find->eq(item,0)) + { + found= li.ref(); + *counter= i; + *resolution= RESOLVED_IGNORING_ALIAS; + break; + } + } } if (!found) { if (found_unaliased_non_uniq) { if (report_error != IGNORE_ERRORS) - my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), MYF(0), - find->full_name(), current_thd->where); + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find->full_name(), current_thd->where); return (Item **) 0; } if (found_unaliased) { found= found_unaliased; *counter= unaliased_counter; - *unaliased= TRUE; + *resolution= RESOLVED_BEHIND_ALIAS; } } if (found) @@ -2436,14 +3862,689 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, if (report_error != REPORT_EXCEPT_NOT_FOUND) { if (report_error == REPORT_ALL_ERRORS) - my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), - find->full_name(), current_thd->where); + my_error(ER_BAD_FIELD_ERROR, MYF(0), + find->full_name(), current_thd->where); return (Item **) 0; } else return (Item **) not_found_item; } + +/* + Test if a string is a member of a list of strings. + + SYNOPSIS + test_if_string_in_list() + find the string to look for + str_list a list of strings to be searched + + DESCRIPTION + Sequentially search a list of strings for a string, and test whether + the list contains the same string. 
+ + RETURN + TRUE if find is in str_list + FALSE otherwise +*/ + +static bool +test_if_string_in_list(const char *find, List<String> *str_list) +{ + List_iterator<String> str_list_it(*str_list); + String *curr_str; + size_t find_length= strlen(find); + while ((curr_str= str_list_it++)) + { + if (find_length != curr_str->length()) + continue; + if (!my_strcasecmp(system_charset_info, find, curr_str->ptr())) + return TRUE; + } + return FALSE; +} + + +/* + Create a new name resolution context for an item so that it is + being resolved in a specific table reference. + + SYNOPSIS + set_new_item_local_context() + thd pointer to current thread + item item for which new context is created and set + table_ref table ref where an item showld be resolved + + DESCRIPTION + Create a new name resolution context for an item, so that the item + is resolved only the supplied 'table_ref'. + + RETURN + FALSE if all OK + TRUE otherwise +*/ + +static bool +set_new_item_local_context(THD *thd, Item_ident *item, TABLE_LIST *table_ref) +{ + Name_resolution_context *context; + if (!(context= new (thd->mem_root) Name_resolution_context)) + return TRUE; + context->init(); + context->first_name_resolution_table= + context->last_name_resolution_table= table_ref; + item->context= context; + return FALSE; +} + + +/* + Find and mark the common columns of two table references. + + SYNOPSIS + mark_common_columns() + thd [in] current thread + table_ref_1 [in] the first (left) join operand + table_ref_2 [in] the second (right) join operand + using_fields [in] if the join is JOIN...USING - the join columns, + if NATURAL join, then NULL + found_using_fields [out] number of fields from the USING clause that were + found among the common fields + + DESCRIPTION + The procedure finds the common columns of two relations (either + tables or intermediate join results), and adds an equi-join condition + to the ON clause of 'table_ref_2' for each pair of matching columns. 
+ If some of table_ref_XXX represents a base table or view, then we + create new 'Natural_join_column' instances for each column + reference and store them in the 'join_columns' of the table + reference. + + IMPLEMENTATION + The procedure assumes that store_natural_using_join_columns() was + called for the previous level of NATURAL/USING joins. + + RETURN + TRUE error when some common column is non-unique, or out of memory + FALSE OK +*/ + +static bool +mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2, + List<String> *using_fields, uint *found_using_fields) +{ + Field_iterator_table_ref it_1, it_2; + Natural_join_column *nj_col_1, *nj_col_2; + Query_arena *arena, backup; + bool result= TRUE; + bool first_outer_loop= TRUE; + /* + Leaf table references to which new natural join columns are added + if the leaves are != NULL. + */ + TABLE_LIST *leaf_1= (table_ref_1->nested_join && + !table_ref_1->is_natural_join) ? + NULL : table_ref_1; + TABLE_LIST *leaf_2= (table_ref_2->nested_join && + !table_ref_2->is_natural_join) ? + NULL : table_ref_2; + + DBUG_ENTER("mark_common_columns"); + DBUG_PRINT("info", ("operand_1: %s operand_2: %s", + table_ref_1->alias, table_ref_2->alias)); + + *found_using_fields= 0; + arena= thd->activate_stmt_arena_if_needed(&backup); + + for (it_1.set(table_ref_1); !it_1.end_of_fields(); it_1.next()) + { + bool found= FALSE; + const char *field_name_1; + /* true if field_name_1 is a member of using_fields */ + bool is_using_column_1; + if (!(nj_col_1= it_1.get_or_create_column_ref(leaf_1))) + goto err; + field_name_1= nj_col_1->name(); + is_using_column_1= using_fields && + test_if_string_in_list(field_name_1, using_fields); + DBUG_PRINT ("info", ("field_name_1=%s.%s", + nj_col_1->table_name() ? nj_col_1->table_name() : "", + field_name_1)); + + /* + Find a field with the same name in table_ref_2. 
+ + Note that for the second loop, it_2.set() will iterate over + table_ref_2->join_columns and not generate any new elements or + lists. + */ + nj_col_2= NULL; + for (it_2.set(table_ref_2); !it_2.end_of_fields(); it_2.next()) + { + Natural_join_column *cur_nj_col_2; + const char *cur_field_name_2; + if (!(cur_nj_col_2= it_2.get_or_create_column_ref(leaf_2))) + goto err; + cur_field_name_2= cur_nj_col_2->name(); + DBUG_PRINT ("info", ("cur_field_name_2=%s.%s", + cur_nj_col_2->table_name() ? + cur_nj_col_2->table_name() : "", + cur_field_name_2)); + + /* + Compare the two columns and check for duplicate common fields. + A common field is duplicate either if it was already found in + table_ref_2 (then found == TRUE), or if a field in table_ref_2 + was already matched by some previous field in table_ref_1 + (then cur_nj_col_2->is_common == TRUE). + Note that it is too early to check the columns outside of the + USING list for ambiguity because they are not actually "referenced" + here. These columns must be checked only on unqualified reference + by name (e.g. in SELECT list). + */ + if (!my_strcasecmp(system_charset_info, field_name_1, cur_field_name_2)) + { + DBUG_PRINT ("info", ("match c1.is_common=%d", nj_col_1->is_common)); + if (cur_nj_col_2->is_common || + (found && (!using_fields || is_using_column_1))) + { + my_error(ER_NON_UNIQ_ERROR, MYF(0), field_name_1, thd->where); + goto err; + } + nj_col_2= cur_nj_col_2; + found= TRUE; + } + } + if (first_outer_loop && leaf_2) + { + /* + Make sure that the next inner loop "knows" that all columns + are materialized already. + */ + leaf_2->is_join_columns_complete= TRUE; + first_outer_loop= FALSE; + } + if (!found) + continue; // No matching field + + /* + field_1 and field_2 have the same names. Check if they are in the USING + clause (if present), mark them as common fields, and add a new + equi-join condition to the ON clause. 
+ */ + if (nj_col_2 && (!using_fields ||is_using_column_1)) + { + Item *item_1= nj_col_1->create_item(thd); + Item *item_2= nj_col_2->create_item(thd); + Field *field_1= nj_col_1->field(); + Field *field_2= nj_col_2->field(); + Item_ident *item_ident_1, *item_ident_2; + Item_func_eq *eq_cond; + + if (!item_1 || !item_2) + goto err; // out of memory + + /* + The following assert checks that the two created items are of + type Item_ident. + */ + DBUG_ASSERT(!thd->lex->current_select->no_wrap_view_item); + /* + In the case of no_wrap_view_item == 0, the created items must be + of sub-classes of Item_ident. + */ + DBUG_ASSERT(item_1->type() == Item::FIELD_ITEM || + item_1->type() == Item::REF_ITEM); + DBUG_ASSERT(item_2->type() == Item::FIELD_ITEM || + item_2->type() == Item::REF_ITEM); + + /* + We need to cast item_1,2 to Item_ident, because we need to hook name + resolution contexts specific to each item. + */ + item_ident_1= (Item_ident*) item_1; + item_ident_2= (Item_ident*) item_2; + /* + Create and hook special name resolution contexts to each item in the + new join condition . We need this to both speed-up subsequent name + resolution of these items, and to enable proper name resolution of + the items during the execute phase of PS. + */ + if (set_new_item_local_context(thd, item_ident_1, nj_col_1->table_ref) || + set_new_item_local_context(thd, item_ident_2, nj_col_2->table_ref)) + goto err; + + if (!(eq_cond= new Item_func_eq(item_ident_1, item_ident_2))) + goto err; /* Out of memory. */ + + /* + Add the new equi-join condition to the ON clause. Notice that + fix_fields() is applied to all ON conditions in setup_conds() + so we don't do it here. + */ + add_join_on((table_ref_1->outer_join & JOIN_TYPE_RIGHT ? + table_ref_1 : table_ref_2), + eq_cond); + + nj_col_1->is_common= nj_col_2->is_common= TRUE; + DBUG_PRINT ("info", ("%s.%s and %s.%s are common", + nj_col_1->table_name() ? + nj_col_1->table_name() : "", + nj_col_1->name(), + nj_col_2->table_name() ? 
+ nj_col_2->table_name() : "", + nj_col_2->name())); + + if (field_1) + { + /* Mark field_1 used for table cache. */ + field_1->query_id= thd->query_id; + nj_col_1->table_ref->table->used_keys.intersect(field_1->part_of_key); + } + if (field_2) + { + /* Mark field_2 used for table cache. */ + field_2->query_id= thd->query_id; + nj_col_2->table_ref->table->used_keys.intersect(field_2->part_of_key); + } + + if (using_fields != NULL) + ++(*found_using_fields); + } + } + if (leaf_1) + leaf_1->is_join_columns_complete= TRUE; + + /* + Everything is OK. + Notice that at this point there may be some column names in the USING + clause that are not among the common columns. This is an SQL error and + we check for this error in store_natural_using_join_columns() when + (found_using_fields < length(join_using_fields)). + */ + result= FALSE; + +err: + if (arena) + thd->restore_active_arena(arena, &backup); + DBUG_RETURN(result); +} + + + +/* + Materialize and store the row type of NATURAL/USING join. + + SYNOPSIS + store_natural_using_join_columns() + thd current thread + natural_using_join the table reference of the NATURAL/USING join + table_ref_1 the first (left) operand (of a NATURAL/USING join). + table_ref_2 the second (right) operand (of a NATURAL/USING join). + using_fields if the join is JOIN...USING - the join columns, + if NATURAL join, then NULL + found_using_fields number of fields from the USING clause that were + found among the common fields + + DESCRIPTION + Iterate over the columns of both join operands and sort and store + all columns into the 'join_columns' list of natural_using_join + where the list is formed by three parts: + part1: The coalesced columns of table_ref_1 and table_ref_2, + sorted according to the column order of the first table. + part2: The other columns of the first table, in the order in + which they were defined in CREATE TABLE. + part3: The other columns of the second table, in the order in + which they were defined in CREATE TABLE. 
+ Time complexity - O(N1+N2), where Ni = length(table_ref_i). + + IMPLEMENTATION + The procedure assumes that mark_common_columns() has been called + for the join that is being processed. + + RETURN + TRUE error: Some common column is ambiguous + FALSE OK +*/ + +static bool +store_natural_using_join_columns(THD *thd, TABLE_LIST *natural_using_join, + TABLE_LIST *table_ref_1, + TABLE_LIST *table_ref_2, + List<String> *using_fields, + uint found_using_fields) +{ + Field_iterator_table_ref it_1, it_2; + Natural_join_column *nj_col_1, *nj_col_2; + Query_arena *arena, backup; + bool result= TRUE; + List<Natural_join_column> *non_join_columns; + DBUG_ENTER("store_natural_using_join_columns"); + + DBUG_ASSERT(!natural_using_join->join_columns); + + arena= thd->activate_stmt_arena_if_needed(&backup); + + if (!(non_join_columns= new List<Natural_join_column>) || + !(natural_using_join->join_columns= new List<Natural_join_column>)) + goto err; + + /* Append the columns of the first join operand. */ + for (it_1.set(table_ref_1); !it_1.end_of_fields(); it_1.next()) + { + nj_col_1= it_1.get_natural_column_ref(); + if (nj_col_1->is_common) + { + natural_using_join->join_columns->push_back(nj_col_1); + /* Reset the common columns for the next call to mark_common_columns. */ + nj_col_1->is_common= FALSE; + } + else + non_join_columns->push_back(nj_col_1); + } + + /* + Check that all columns in the USING clause are among the common + columns. If this is not the case, report the first one that was + not found in an error. 
+ */ + if (using_fields && found_using_fields < using_fields->elements) + { + String *using_field_name; + List_iterator_fast<String> using_fields_it(*using_fields); + while ((using_field_name= using_fields_it++)) + { + const char *using_field_name_ptr= using_field_name->c_ptr(); + List_iterator_fast<Natural_join_column> + it(*(natural_using_join->join_columns)); + Natural_join_column *common_field; + + for (;;) + { + /* If reached the end of fields, and none was found, report error. */ + if (!(common_field= it++)) + { + my_error(ER_BAD_FIELD_ERROR, MYF(0), using_field_name_ptr, + current_thd->where); + goto err; + } + if (!my_strcasecmp(system_charset_info, + common_field->name(), using_field_name_ptr)) + break; // Found match + } + } + } + + /* Append the non-equi-join columns of the second join operand. */ + for (it_2.set(table_ref_2); !it_2.end_of_fields(); it_2.next()) + { + nj_col_2= it_2.get_natural_column_ref(); + if (!nj_col_2->is_common) + non_join_columns->push_back(nj_col_2); + else + { + /* Reset the common columns for the next call to mark_common_columns. */ + nj_col_2->is_common= FALSE; + } + } + + if (non_join_columns->elements > 0) + natural_using_join->join_columns->concat(non_join_columns); + natural_using_join->is_join_columns_complete= TRUE; + + result= FALSE; + +err: + if (arena) + thd->restore_active_arena(arena, &backup); + DBUG_RETURN(result); +} + + +/* + Precompute and store the row types of the top-most NATURAL/USING joins. 
+ + SYNOPSIS + store_top_level_join_columns() + thd current thread + table_ref nested join or table in a FROM clause + left_neighbor neighbor table reference to the left of table_ref at the + same level in the join tree + right_neighbor neighbor table reference to the right of table_ref at the + same level in the join tree + + DESCRIPTION + The procedure performs a post-order traversal of a nested join tree + and materializes the row types of NATURAL/USING joins in a + bottom-up manner until it reaches the TABLE_LIST elements that + represent the top-most NATURAL/USING joins. The procedure should be + applied to each element of SELECT_LEX::top_join_list (i.e. to each + top-level element of the FROM clause). + + IMPLEMENTATION + Notice that the table references in the list nested_join->join_list + are in reverse order, thus when we iterate over it, we are moving + from the right to the left in the FROM clause. + + RETURN + TRUE Error + FALSE OK +*/ + +static bool +store_top_level_join_columns(THD *thd, TABLE_LIST *table_ref, + TABLE_LIST *left_neighbor, + TABLE_LIST *right_neighbor) +{ + Query_arena *arena, backup; + bool result= TRUE; + + DBUG_ENTER("store_top_level_join_columns"); + + arena= thd->activate_stmt_arena_if_needed(&backup); + + /* Call the procedure recursively for each nested table reference. */ + if (table_ref->nested_join) + { + List_iterator_fast<TABLE_LIST> nested_it(table_ref->nested_join->join_list); + TABLE_LIST *same_level_left_neighbor= nested_it++; + TABLE_LIST *same_level_right_neighbor= NULL; + /* Left/right-most neighbors, possibly at higher levels in the join tree. */ + TABLE_LIST *real_left_neighbor, *real_right_neighbor; + + while (same_level_left_neighbor) + { + TABLE_LIST *cur_table_ref= same_level_left_neighbor; + same_level_left_neighbor= nested_it++; + /* + The order of RIGHT JOIN operands is reversed in 'join list' to + transform it into a LEFT JOIN. 
However, in this procedure we need + the join operands in their lexical order, so below we reverse the + join operands. Notice that this happens only in the first loop, + and not in the second one, as in the second loop + same_level_left_neighbor == NULL. + This is the correct behavior, because the second loop sets + cur_table_ref reference correctly after the join operands are + swapped in the first loop. + */ + if (same_level_left_neighbor && + cur_table_ref->outer_join & JOIN_TYPE_RIGHT) + { + /* This can happen only for JOIN ... ON. */ + DBUG_ASSERT(table_ref->nested_join->join_list.elements == 2); + swap_variables(TABLE_LIST*, same_level_left_neighbor, cur_table_ref); + } + + /* + Pick the parent's left and right neighbors if there are no immediate + neighbors at the same level. + */ + real_left_neighbor= (same_level_left_neighbor) ? + same_level_left_neighbor : left_neighbor; + real_right_neighbor= (same_level_right_neighbor) ? + same_level_right_neighbor : right_neighbor; + + if (cur_table_ref->nested_join && + store_top_level_join_columns(thd, cur_table_ref, + real_left_neighbor, real_right_neighbor)) + goto err; + same_level_right_neighbor= cur_table_ref; + } + } + + /* + If this is a NATURAL/USING join, materialize its result columns and + convert to a JOIN ... ON. + */ + if (table_ref->is_natural_join) + { + DBUG_ASSERT(table_ref->nested_join && + table_ref->nested_join->join_list.elements == 2); + List_iterator_fast<TABLE_LIST> operand_it(table_ref->nested_join->join_list); + /* + Notice that the order of join operands depends on whether table_ref + represents a LEFT or a RIGHT join. In a RIGHT join, the operands are + in inverted order. + */ + TABLE_LIST *table_ref_2= operand_it++; /* Second NATURAL join operand.*/ + TABLE_LIST *table_ref_1= operand_it++; /* First NATURAL join operand. 
*/ + List<String> *using_fields= table_ref->join_using_fields; + uint found_using_fields; + + /* + The two join operands were interchanged in the parser, change the order + back for 'mark_common_columns'. + */ + if (table_ref_2->outer_join & JOIN_TYPE_RIGHT) + swap_variables(TABLE_LIST*, table_ref_1, table_ref_2); + if (mark_common_columns(thd, table_ref_1, table_ref_2, + using_fields, &found_using_fields)) + goto err; + + /* + Swap the join operands back, so that we pick the columns of the second + one as the coalesced columns. In this way the coalesced columns are the + same as of an equivalent LEFT JOIN. + */ + if (table_ref_1->outer_join & JOIN_TYPE_RIGHT) + swap_variables(TABLE_LIST*, table_ref_1, table_ref_2); + if (store_natural_using_join_columns(thd, table_ref, table_ref_1, + table_ref_2, using_fields, + found_using_fields)) + goto err; + + /* + Change NATURAL JOIN to JOIN ... ON. We do this for both operands + because either one of them or the other is the one with the + natural join flag because RIGHT joins are transformed into LEFT, + and the two tables may be reordered. + */ + table_ref_1->natural_join= table_ref_2->natural_join= NULL; + + /* Add a TRUE condition to outer joins that have no common columns. */ + if (table_ref_2->outer_join && + !table_ref_1->on_expr && !table_ref_2->on_expr) + table_ref_2->on_expr= new Item_int((longlong) 1,1); /* Always true. */ + + /* Change this table reference to become a leaf for name resolution. */ + if (left_neighbor) + { + TABLE_LIST *last_leaf_on_the_left; + last_leaf_on_the_left= left_neighbor->last_leaf_for_name_resolution(); + last_leaf_on_the_left->next_name_resolution_table= table_ref; + } + if (right_neighbor) + { + TABLE_LIST *first_leaf_on_the_right; + first_leaf_on_the_right= right_neighbor->first_leaf_for_name_resolution(); + table_ref->next_name_resolution_table= first_leaf_on_the_right; + } + else + table_ref->next_name_resolution_table= NULL; + } + result= FALSE; /* All is OK. 
*/ + +err: + if (arena) + thd->restore_active_arena(arena, &backup); + DBUG_RETURN(result); +} + + +/* + Compute and store the row types of the top-most NATURAL/USING joins + in a FROM clause. + + SYNOPSIS + setup_natural_join_row_types() + thd current thread + from_clause list of top-level table references in a FROM clause + + DESCRIPTION + Apply the procedure 'store_top_level_join_columns' to each of the + top-level table referencs of the FROM clause. Adjust the list of tables + for name resolution - context->first_name_resolution_table to the + top-most, lef-most NATURAL/USING join. + + IMPLEMENTATION + Notice that the table references in 'from_clause' are in reverse + order, thus when we iterate over it, we are moving from the right + to the left in the FROM clause. + + RETURN + TRUE Error + FALSE OK +*/ +static bool setup_natural_join_row_types(THD *thd, + List<TABLE_LIST> *from_clause, + Name_resolution_context *context) +{ + thd->where= "from clause"; + if (from_clause->elements == 0) + return FALSE; /* We come here in the case of UNIONs. */ + + List_iterator_fast<TABLE_LIST> table_ref_it(*from_clause); + TABLE_LIST *table_ref; /* Current table reference. */ + /* Table reference to the left of the current. */ + TABLE_LIST *left_neighbor; + /* Table reference to the right of the current. */ + TABLE_LIST *right_neighbor= NULL; + + /* Note that tables in the list are in reversed order */ + for (left_neighbor= table_ref_it++; left_neighbor ; ) + { + table_ref= left_neighbor; + left_neighbor= table_ref_it++; + /* For stored procedures do not redo work if already done. 
*/ + if (context->select_lex->first_execution) + { + if (store_top_level_join_columns(thd, table_ref, + left_neighbor, right_neighbor)) + return TRUE; + if (left_neighbor) + { + TABLE_LIST *first_leaf_on_the_right; + first_leaf_on_the_right= table_ref->first_leaf_for_name_resolution(); + left_neighbor->next_name_resolution_table= first_leaf_on_the_right; + } + } + right_neighbor= table_ref; + } + + /* + Store the top-most, left-most NATURAL/USING join, so that we start + the search from that one instead of context->table_list. At this point + right_neighbor points to the left-most top-level table reference in the + FROM clause. + */ + DBUG_ASSERT(right_neighbor); + context->first_name_resolution_table= + right_neighbor->first_leaf_for_name_resolution(); + + return FALSE; +} + + /**************************************************************************** ** Expand all '*' in given fields ****************************************************************************/ @@ -2452,27 +4553,29 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, List<Item> *sum_func_list, uint wild_num) { - DBUG_ENTER("setup_wild"); if (!wild_num) - DBUG_RETURN(0); + return(0); - reg2 Item *item; + Item *item; List_iterator<Item> it(fields); - Item_arena *arena, backup; + Query_arena *arena, backup; + DBUG_ENTER("setup_wild"); + /* - If we are in preparing prepared statement phase then we have change - temporary mem_root to statement mem root to save changes of SELECT list + Don't use arena if we are not in prepared statements or stored procedures + For PS/SP we have to use arena to remember the changes */ - arena= thd->change_arena_if_needed(&backup); + arena= thd->activate_stmt_arena_if_needed(&backup); while (wild_num && (item= it++)) - { + { if (item->type() == Item::FIELD_ITEM && ((Item_field*) item)->field_name && ((Item_field*) item)->field_name[0] == '*' && !((Item_field*) item)->field) { uint elem= fields.elements; + bool any_privileges= ((Item_field *) 
item)->any_privileges; Item_subselect *subsel= thd->lex->current_select->master_unit()->item; if (subsel && subsel->substype() == Item_subselect::EXISTS_SUBS) @@ -2482,13 +4585,16 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, Item_int do not need fix_fields() because it is basic constant. */ - it.replace(new Item_int("Not_used", (longlong) 1, 21)); + it.replace(new Item_int("Not_used", (longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); } - else if (insert_fields(thd,tables,((Item_field*) item)->db_name, - ((Item_field*) item)->table_name, &it)) + else if (insert_fields(thd, ((Item_field*) item)->context, + ((Item_field*) item)->db_name, + ((Item_field*) item)->table_name, &it, + any_privileges)) { - if (arena) - thd->restore_backup_item_arena(arena, &backup); + if (arena) + thd->restore_active_arena(arena, &backup); DBUG_RETURN(-1); } if (sum_func_list) @@ -2504,7 +4610,14 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, } } if (arena) - thd->restore_backup_item_arena(arena, &backup); + { + /* make * substituting permanent */ + SELECT_LEX *select_lex= thd->lex->current_select; + select_lex->with_wild= 0; + select_lex->item_list= fields; + + thd->restore_active_arena(arena, &backup); + } DBUG_RETURN(0); } @@ -2512,18 +4625,21 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, ** Check that all given fields exists and fill struct with current data ****************************************************************************/ -int setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, - List<Item> &fields, bool set_query_id, - List<Item> *sum_func_list, bool allow_sum_func) +bool setup_fields(THD *thd, Item **ref_pointer_array, + List<Item> &fields, bool set_query_id, + List<Item> *sum_func_list, bool allow_sum_func) { reg2 Item *item; + bool save_set_query_id= thd->set_query_id; + nesting_map save_allow_sum_func= thd->lex->allow_sum_func; List_iterator<Item> it(fields); bool save_is_item_list_lookup; 
DBUG_ENTER("setup_fields"); thd->set_query_id=set_query_id; - thd->allow_sum_func= allow_sum_func; - thd->where="field list"; + if (allow_sum_func) + thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; + thd->where= THD::DEFAULT_WHERE; save_is_item_list_lookup= thd->lex->current_select->is_item_list_lookup; thd->lex->current_select->is_item_list_lookup= 0; @@ -2542,57 +4658,128 @@ int setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, bzero(ref_pointer_array, sizeof(Item *) * fields.elements); Item **ref= ref_pointer_array; + thd->lex->current_select->cur_pos_in_select_list= 0; while ((item= it++)) { - if (!item->fixed && item->fix_fields(thd, tables, it.ref()) || + if (!item->fixed && item->fix_fields(thd, it.ref()) || (item= *(it.ref()))->check_cols(1)) { thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; - DBUG_RETURN(-1); /* purecov: inspected */ + thd->lex->allow_sum_func= save_allow_sum_func; + thd->set_query_id= save_set_query_id; + DBUG_RETURN(TRUE); /* purecov: inspected */ } if (ref) *(ref++)= item; if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM && sum_func_list) item->split_sum_func(thd, ref_pointer_array, *sum_func_list); - thd->used_tables|=item->used_tables(); + thd->used_tables|= item->used_tables(); + thd->lex->current_select->cur_pos_in_select_list++; } thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; + thd->lex->current_select->cur_pos_in_select_list= UNDEF_POS; + + thd->lex->allow_sum_func= save_allow_sum_func; + thd->set_query_id= save_set_query_id; DBUG_RETURN(test(thd->net.report_error)); } /* + make list of leaves of join table tree + + SYNOPSIS + make_leaves_list() + list pointer to pointer on list first element + tables table list + + RETURN pointer on pointer to next_leaf of last element +*/ + +TABLE_LIST **make_leaves_list(TABLE_LIST **list, TABLE_LIST *tables) +{ + for (TABLE_LIST *table= tables; table; table= table->next_local) + { + if 
(table->merge_underlying_list) + { + DBUG_ASSERT(table->view && + table->effective_algorithm == VIEW_ALGORITHM_MERGE); + list= make_leaves_list(list, table->merge_underlying_list); + } + else + { + *list= table; + list= &table->next_leaf; + } + } + return list; +} + +/* prepare tables SYNOPSIS setup_tables() - tables table list + thd Thread handler + context name resolution contest to setup table list there + from_clause Top-level list of table references in the FROM clause + tables Table list (select_lex->table_list) + conds Condition of current SELECT (can be changed by VIEW) + leaves List of join table leaves list (select_lex->leaf_tables) + refresh It is onle refresh for subquery + select_insert It is SELECT ... INSERT command + NOTE + Check also that the 'used keys' and 'ignored keys' exists and set up the + table structure accordingly. + Create a list of leaf tables. For queries with NATURAL/USING JOINs, + compute the row types of the top most natural/using join table references + and link these into a list of table references for name resolution. - NOTE - Remap table numbers if INSERT ... SELECT - Check also that the 'used keys' and 'ignored keys' exists and set up the - table structure accordingly - - This has to be called for all tables that are used by items, as otherwise - table->map is not set and all Item_field will be regarded as const items. + This has to be called for all tables that are used by items, as otherwise + table->map is not set and all Item_field will be regarded as const items. 
- RETURN - 0 ok; In this case *map will includes the choosed index - 1 error + RETURN + FALSE ok; In this case *map will includes the chosen index + TRUE error */ -bool setup_tables(TABLE_LIST *tables) +bool setup_tables(THD *thd, Name_resolution_context *context, + List<TABLE_LIST> *from_clause, TABLE_LIST *tables, + Item **conds, TABLE_LIST **leaves, bool select_insert) { + uint tablenr= 0; DBUG_ENTER("setup_tables"); - uint tablenr=0; - for (TABLE_LIST *table_list=tables ; table_list ; - table_list=table_list->next,tablenr++) + + DBUG_ASSERT ((select_insert && !tables->next_name_resolution_table) || !tables || + (context->table_list && context->first_name_resolution_table)); + /* + this is used for INSERT ... SELECT. + For select we setup tables except first (and its underlying tables) + */ + TABLE_LIST *first_select_table= (select_insert ? + tables->next_local: + 0); + if (!(*leaves)) + make_leaves_list(leaves, tables); + + TABLE_LIST *table_list; + for (table_list= *leaves; + table_list; + table_list= table_list->next_leaf, tablenr++) { TABLE *table= table_list->table; + table->pos_in_table_list= table_list; + if (first_select_table && + table_list->top_table() == first_select_table) + { + /* new counting for SELECT of INSERT ... 
SELECT command */ + first_select_table= 0; + tablenr= 0; + } setup_table_map(table, table_list, tablenr); - table->used_keys= table->keys_for_keyread; + table->used_keys= table->s->keys_for_keyread; if (table_list->use_index) { key_map map; @@ -2616,11 +4803,94 @@ bool setup_tables(TABLE_LIST *tables) my_error(ER_TOO_MANY_TABLES,MYF(0),MAX_TABLES); DBUG_RETURN(1); } + for (table_list= tables; + table_list; + table_list= table_list->next_local) + { + if (table_list->merge_underlying_list) + { + DBUG_ASSERT(table_list->view && + table_list->effective_algorithm == VIEW_ALGORITHM_MERGE); + Query_arena *arena= thd->stmt_arena, backup; + bool res; + if (arena->is_conventional()) + arena= 0; // For easier test + else + thd->set_n_backup_active_arena(arena, &backup); + res= table_list->setup_underlying(thd); + if (arena) + thd->restore_active_arena(arena, &backup); + if (res) + DBUG_RETURN(1); + } + } + + /* Precompute and store the row types of NATURAL/USING joins. */ + if (setup_natural_join_row_types(thd, from_clause, context)) + DBUG_RETURN(1); + DBUG_RETURN(0); } /* + prepare tables and check access for the view tables + + SYNOPSIS + setup_tables_and_check_view_access() + thd Thread handler + context name resolution contest to setup table list there + from_clause Top-level list of table references in the FROM clause + tables Table list (select_lex->table_list) + conds Condition of current SELECT (can be changed by VIEW) + leaves List of join table leaves list (select_lex->leaf_tables) + refresh It is onle refresh for subquery + select_insert It is SELECT ... 
INSERT command + want_access what access is needed + + NOTE + a wrapper for check_tables that will also check the resulting + table leaves list for access to all the tables that belong to a view + + RETURN + FALSE ok; In this case *map will include the chosen index + TRUE error +*/ +bool setup_tables_and_check_access(THD *thd, + Name_resolution_context *context, + List<TABLE_LIST> *from_clause, + TABLE_LIST *tables, + Item **conds, TABLE_LIST **leaves, + bool select_insert, + ulong want_access_first, + ulong want_access) +{ + TABLE_LIST *leaves_tmp = NULL; + bool first_table= true; + + if (setup_tables (thd, context, from_clause, tables, conds, + &leaves_tmp, select_insert)) + return TRUE; + + if (leaves) + *leaves = leaves_tmp; + + for (; leaves_tmp; leaves_tmp= leaves_tmp->next_leaf) + { + if (leaves_tmp->belong_to_view && + check_single_table_access(thd, first_table ? want_access_first : + want_access, leaves_tmp)) + { + tables->hide_view_error(thd); + return TRUE; + } + first_table= false; + } + return FALSE; +} + + +/* Create a key_map from a list of index names SYNOPSIS @@ -2644,11 +4914,13 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, map->clear_all(); while ((name=it++)) { - if ((pos= find_type(&table->keynames, name->ptr(), name->length(), 1)) <= - 0) + if (table->s->keynames.type_names == 0 || + (pos= find_type(&table->s->keynames, name->ptr(), + name->length(), 1)) <= + 0) { my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), name->c_ptr(), - table->real_name); + table->pos_in_table_list->alias); map->set_all(); return 1; } @@ -2658,18 +4930,34 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, } -/**************************************************************************** - This just drops in all fields instead of current '*' field - Returns pointer to last inserted field if ok -****************************************************************************/ +/* + Drops in all fields instead of current '*' field + + SYNOPSIS + 
insert_fields() + thd Thread handler + context Context for name resolution + db_name Database name in case of 'database_name.table_name.*' + table_name Table name in case of 'table_name.*' + it Pointer to '*' + any_privileges 0 If we should ensure that we have SELECT privileges + for all columns + 1 If any privilege is ok + RETURN + 0 ok 'it' is updated to point at last inserted + 1 error. Error message is generated but not sent to client +*/ bool -insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, - const char *table_name, List_iterator<Item> *it) +insert_fields(THD *thd, Name_resolution_context *context, const char *db_name, + const char *table_name, List_iterator<Item> *it, + bool any_privileges) { + Field_iterator_table_ref field_iterator; + bool found; char name_buff[NAME_LEN+1]; - uint found; DBUG_ENTER("insert_fields"); + DBUG_PRINT("arena", ("stmt arena: 0x%lx", (ulong)thd->stmt_arena)); if (db_name && lower_case_table_names) { @@ -2683,238 +4971,290 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, db_name= name_buff; } + found= FALSE; - found=0; - for (; tables ; tables=tables->next) + /* + If table names are qualified, then loop over all tables used in the query, + else treat natural joins as leaves and do not iterate over their underlying + tables. + */ + for (TABLE_LIST *tables= (table_name ? context->table_list : + context->first_name_resolution_table); + tables; + tables= (table_name ? 
tables->next_local : + tables->next_name_resolution_table) + ) { - TABLE *table=tables->table; - if (!table_name || (!my_strcasecmp(table_alias_charset, table_name, - tables->alias) && - (!db_name || !strcmp(tables->db,db_name)))) - { + Field *field; + TABLE *table= tables->table; + + DBUG_ASSERT(tables->is_leaf_for_name_resolution()); + + if (table_name && my_strcasecmp(table_alias_charset, table_name, + tables->alias) || + (db_name && strcmp(tables->db,db_name))) + continue; + #ifndef NO_EMBEDDED_ACCESS_CHECKS - /* Ensure that we have access right to all columns */ - if (!(table->grant.privilege & SELECT_ACL) && - check_grant_all_columns(thd,SELECT_ACL,table)) - DBUG_RETURN(-1); + /* Ensure that we have access rights to all fields to be inserted. */ + if (!((table && (table->grant.privilege & SELECT_ACL) || + tables->view && (tables->grant.privilege & SELECT_ACL))) && + !any_privileges) + { + field_iterator.set(tables); + if (check_grant_all_columns(thd, SELECT_ACL, field_iterator.grant(), + field_iterator.db_name(), + field_iterator.table_name(), + &field_iterator)) + DBUG_RETURN(TRUE); + } #endif - Field **ptr=table->field,*field; - TABLE *natural_join_table= 0; - thd->used_tables|=table->map; - if (!table->outer_join && - tables->natural_join && - !tables->natural_join->table->outer_join) - natural_join_table= tables->natural_join->table; - while ((field = *ptr++)) + /* + Update the tables used in the query based on the referenced fields. For + views and natural joins this update is performed inside the loop below. + */ + if (table) + thd->used_tables|= table->map; + + /* + Initialize a generic field iterator for the current table reference. + Notice that it is guaranteed that this iterator will iterate over the + fields of a single table reference, because 'tables' is a leaf (for + name resolution purposes). 
+ */ + field_iterator.set(tables); + + for (; !field_iterator.end_of_fields(); field_iterator.next()) + { + Item *item; + + if (!(item= field_iterator.create_item(thd))) + DBUG_RETURN(TRUE); + + if (!found) { - uint not_used_field_index= NO_CACHED_FIELD_INDEX; - /* Skip duplicate field names if NATURAL JOIN is used */ - if (!natural_join_table || - !find_field_in_table(thd, natural_join_table, field->field_name, - strlen(field->field_name), 0, 0, - ¬_used_field_index)) + found= TRUE; + it->replace(item); /* Replace '*' with the first found item. */ + } + else + it->after(item); /* Add 'item' to the SELECT list. */ + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* + Set privilege information for the fields of newly created views. + We have that (any_priviliges == TRUE) if and only if we are creating + a view. In the time of view creation we can't use the MERGE algorithm, + therefore if 'tables' is itself a view, it is represented by a + temporary table. Thus in this case we can be sure that 'item' is an + Item_field. + */ + if (any_privileges) + { + DBUG_ASSERT(tables->field_translation == NULL && table || + tables->is_natural_join); + DBUG_ASSERT(item->type() == Item::FIELD_ITEM); + Item_field *fld= (Item_field*) item; + const char *field_table_name= field_iterator.table_name(); + + if (!tables->schema_table && + !(fld->have_privileges= + (get_column_grant(thd, field_iterator.grant(), + field_iterator.db_name(), + field_table_name, fld->field_name) & + VIEW_ANY_ACL))) { - Item_field *item= new Item_field(thd, field); - if (!found++) - (void) it->replace(item); // Replace '*' - else - it->after(item); + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), "ANY", + thd->security_ctx->priv_user, + thd->security_ctx->host_or_ip, + fld->field_name, field_table_name); + DBUG_RETURN(TRUE); } - /* - Mark if field used before in this select. 
- Used by 'insert' to verify if a field name is used twice - */ - if (field->query_id == thd->query_id) - thd->dupp_field=field; - field->query_id=thd->query_id; - table->used_keys.intersect(field->part_of_key); } - /* All fields are used */ - table->used_fields=table->fields; +#endif + + if ((field= field_iterator.field())) + { + /* + Mark if field used before in this select. + Used by 'insert' to verify if a field name is used twice. + */ + if (field->query_id == thd->query_id) + thd->dupp_field= field; + field->query_id= thd->query_id; + + if (table) + table->used_keys.intersect(field->part_of_key); + + if (tables->is_natural_join) + { + TABLE *field_table; + /* + In this case we are sure that the column ref will not be created + because it was already created and stored with the natural join. + */ + Natural_join_column *nj_col; + if (!(nj_col= field_iterator.get_natural_column_ref())) + DBUG_RETURN(TRUE); + DBUG_ASSERT(nj_col->table_field); + field_table= nj_col->table_ref->table; + if (field_table) + { + thd->used_tables|= field_table->map; + field_table->used_keys.intersect(field->part_of_key); + field_table->used_fields++; + } + } + } + else + { + thd->used_tables|= item->used_tables(); + item->walk(&Item::reset_query_id_processor, + (byte *)(&thd->query_id)); + } } + /* + In case of stored tables, all fields are considered as used, + while in the case of views, the fields considered as used are the + ones marked in setup_tables during fix_fields of view columns. + For NATURAL joins, used_tables is updated in the IF above. 
+ */ + if (table) + table->used_fields= table->s->fields; } - if (!found) - { - if (!table_name) - my_error(ER_NO_TABLES_USED,MYF(0)); - else - my_error(ER_BAD_TABLE_ERROR,MYF(0),table_name); - } - DBUG_RETURN(!found); + if (found) + DBUG_RETURN(FALSE); + + /* + TODO: in the case when we skipped all columns because there was a + qualified '*', and all columns were coalesced, we have to give a more + meaningful message than ER_BAD_TABLE_ERROR. + */ + if (!table_name) + my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0)); + else + my_error(ER_BAD_TABLE_ERROR, MYF(0), table_name); + + DBUG_RETURN(TRUE); } /* -** Fix all conditions and outer join expressions + Fix all conditions and outer join expressions. + + SYNOPSIS + setup_conds() + thd thread handler + tables list of tables for name resolving (select_lex->table_list) + leaves list of leaves of join table tree (select_lex->leaf_tables) + conds WHERE clause + + DESCRIPTION + TODO + + RETURN + TRUE if some error occured (e.g. out of memory) + FALSE if all is OK */ -int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds) +int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, + COND **conds) { - table_map not_null_tables= 0; - Item_arena *arena= 0, backup; - bool save_is_item_list_lookup= thd->lex->current_select->is_item_list_lookup; - thd->lex->current_select->is_item_list_lookup= 0; + SELECT_LEX *select_lex= thd->lex->current_select; + Query_arena *arena= thd->stmt_arena, backup; + TABLE_LIST *table= NULL; // For HP compilers + /* + it_is_update set to TRUE when tables of primary SELECT_LEX (SELECT_LEX + which belong to LEX, i.e. 
most up SELECT) will be updated by + INSERT/UPDATE/LOAD + NOTE: using this condition helps to prevent call of prepare_check_option() + from subquery of VIEW, because tables of subquery belongs to VIEW + (see condition before prepare_check_option() call) + */ + bool it_is_update= (select_lex == &thd->lex->select_lex) && + thd->lex->which_check_option_applicable(); + bool save_is_item_list_lookup= select_lex->is_item_list_lookup; + select_lex->is_item_list_lookup= 0; DBUG_ENTER("setup_conds"); + if (select_lex->conds_processed_with_permanent_arena || + arena->is_conventional()) + arena= 0; // For easier test + thd->set_query_id=1; - thd->lex->current_select->cond_count= 0; + select_lex->cond_count= 0; + select_lex->between_count= 0; + + for (table= tables; table; table= table->next_local) + { + if (table->prepare_where(thd, conds, FALSE)) + goto err_no_arena; + } + if (*conds) { thd->where="where clause"; - if (!(*conds)->fixed && (*conds)->fix_fields(thd, tables, conds) || + if (!(*conds)->fixed && (*conds)->fix_fields(thd, conds) || (*conds)->check_cols(1)) - { - thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; - DBUG_RETURN(1); - } - not_null_tables= (*conds)->not_null_tables(); + goto err_no_arena; } - - /* Check if we are using outer joins */ - for (TABLE_LIST *table=tables ; table ; table=table->next) + /* + Apply fix_fields() to all ON clauses at all levels of nesting, + including the ones inside view definitions. + */ + for (table= leaves; table; table= table->next_leaf) { - if (table->on_expr) + TABLE_LIST *embedded; /* The table at the current level of nesting. */ + TABLE_LIST *embedding= table; /* The parent nested table reference. 
*/ + do { - /* Make a join an a expression */ - thd->where="on clause"; - - if (!table->on_expr->fixed && - table->on_expr->fix_fields(thd, tables, &table->on_expr) || - table->on_expr->check_cols(1)) - { - thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; - DBUG_RETURN(1); - } - thd->lex->current_select->cond_count++; - - /* - If it's a normal join or a LEFT JOIN which can be optimized away - add the ON/USING expression to the WHERE - */ - if (!table->outer_join || - ((table->table->map & not_null_tables) && - !(specialflag & SPECIAL_NO_NEW_FUNC))) + embedded= embedding; + if (embedded->on_expr) { - table->outer_join= 0; - arena= thd->change_arena_if_needed(&backup); - *conds= and_conds(*conds, table->on_expr); - table->on_expr=0; - if (arena) - { - thd->restore_backup_item_arena(arena, &backup); - arena= 0; // Safety if goto err - } - if ((*conds) && !(*conds)->fixed && - (*conds)->fix_fields(thd, tables, conds)) - { - thd->lex->current_select->is_item_list_lookup= - save_is_item_list_lookup; - DBUG_RETURN(1); - } + /* Make a join an a expression */ + thd->where="on clause"; + if (!embedded->on_expr->fixed && + embedded->on_expr->fix_fields(thd, &embedded->on_expr) || + embedded->on_expr->check_cols(1)) + goto err_no_arena; + select_lex->cond_count++; } + embedding= embedded->embedding; } - if (table->natural_join) - { - arena= thd->change_arena_if_needed(&backup); - /* Make a join of all fields with have the same name */ - TABLE *t1= table->table; - TABLE *t2= table->natural_join->table; - Item_cond_and *cond_and= new Item_cond_and(); - if (!cond_and) // If not out of memory - goto err; - cond_and->top_level_item(); - - Field **t1_field, *t2_field; - for (t1_field= t1->field; (*t1_field); t1_field++) - { - const char *t1_field_name= (*t1_field)->field_name; - uint not_used_field_index= NO_CACHED_FIELD_INDEX; + while (embedding && + embedding->nested_join->join_list.head() == embedded); - if ((t2_field= find_field_in_table(thd, t2, 
t1_field_name, - strlen(t1_field_name), 0, 0, - ¬_used_field_index))) - { - Item_func_eq *tmp=new Item_func_eq(new Item_field(thd, *t1_field), - new Item_field(thd, t2_field)); - if (!tmp) - goto err; - /* Mark field used for table cache */ - (*t1_field)->query_id= t2_field->query_id= thd->query_id; - cond_and->list.push_back(tmp); - t1->used_keys.intersect((*t1_field)->part_of_key); - t2->used_keys.intersect(t2_field->part_of_key); - } - } - thd->lex->current_select->cond_count+= cond_and->list.elements; - - // to prevent natural join processing during PS re-execution - table->natural_join= 0; - - if (cond_and->list.elements) - { - if (!table->outer_join) // Not left join - { - *conds= and_conds(*conds, cond_and); - // fix_fields() should be made with temporary memory pool - if (arena) - thd->restore_backup_item_arena(arena, &backup); - if (*conds && !(*conds)->fixed) - { - if (!(*conds)->fixed && - (*conds)->fix_fields(thd, tables, conds)) - { - thd->lex->current_select->is_item_list_lookup= - save_is_item_list_lookup; - DBUG_RETURN(1); - } - } - } - else - { - table->on_expr= and_conds(table->on_expr, cond_and); - // fix_fields() should be made with temporary memory pool - if (arena) - thd->restore_backup_item_arena(arena, &backup); - if (table->on_expr && !table->on_expr->fixed) - { - if (!table->on_expr->fixed && - table->on_expr->fix_fields(thd, tables, &table->on_expr)) - { - thd->lex->current_select->is_item_list_lookup= - save_is_item_list_lookup; - DBUG_RETURN(1); - } - } - } - } - else if (arena) + /* process CHECK OPTION */ + if (it_is_update) + { + TABLE_LIST *view= table->top_table(); + if (view->effective_with_check) { - thd->restore_backup_item_arena(arena, &backup); - arena= 0; // Safety if goto err + if (view->prepare_check_option(thd)) + goto err_no_arena; + thd->change_item_tree(&table->check_option, view->check_option); } } } - if (thd->current_arena->is_stmt_prepare()) + if (!thd->stmt_arena->is_conventional()) { /* We are in prepared 
statement preparation code => we should store WHERE clause changing for next executions. - We do this ON -> WHERE transformation only once per PS statement. + We do this ON -> WHERE transformation only once per PS/SP statement. */ - thd->lex->current_select->where= *conds; + select_lex->where= *conds; + select_lex->conds_processed_with_permanent_arena= 1; } thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; DBUG_RETURN(test(thd->net.report_error)); -err: - thd->lex->current_select->is_item_list_lookup= save_is_item_list_lookup; - if (arena) - thd->restore_backup_item_arena(arena, &backup); +err_no_arena: + select_lex->is_item_list_lookup= save_is_item_list_lookup; DBUG_RETURN(1); } @@ -2924,46 +5264,210 @@ err: ** Returns : 1 if some field has wrong type ******************************************************************************/ -int -fill_record(List<Item> &fields,List<Item> &values, bool ignore_errors) + +/* + Fill fields with given items. + + SYNOPSIS + fill_record() + thd thread handler + fields Item_fields list to be filled + values values to fill with + ignore_errors TRUE if we should ignore errors + + NOTE + fill_record() may set table->auto_increment_field_not_null and a + caller should make sure that it is reset after their last call to this + function. + + RETURN + FALSE OK + TRUE error occured +*/ + +static bool +fill_record(THD * thd, List<Item> &fields, List<Item> &values, + bool ignore_errors) { List_iterator_fast<Item> f(fields),v(values); - Item *value; + Item *value, *fld; Item_field *field; + TABLE *table= 0; DBUG_ENTER("fill_record"); - while ((field=(Item_field*) f++)) + /* + Reset the table->auto_increment_field_not_null as it is valid for + only one row. + */ + if (fields.elements) { + /* + On INSERT or UPDATE fields are checked to be from the same table, + thus we safely can take table from the first field. 
+ */ + fld= (Item_field*)f++; + if (!(field= fld->filed_for_view_update())) + { + my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name); + goto err; + } + table= field->field->table; + table->auto_increment_field_not_null= FALSE; + f.rewind(); + } + while ((fld= f++)) + { + if (!(field= fld->filed_for_view_update())) + { + my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name); + goto err; + } value=v++; Field *rfield= field->field; - TABLE *table= rfield->table; + table= rfield->table; if (rfield == table->next_number_field) table->auto_increment_field_not_null= TRUE; if ((value->save_in_field(rfield, 0) < 0) && !ignore_errors) - DBUG_RETURN(1); + { + my_message(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), MYF(0)); + goto err; + } } - DBUG_RETURN(0); + DBUG_RETURN(thd->net.report_error); +err: + if (table) + table->auto_increment_field_not_null= FALSE; + DBUG_RETURN(TRUE); } -int -fill_record(Field **ptr,List<Item> &values, bool ignore_errors) +/* + Fill fields in list with values from the list of items and invoke + before triggers. + + SYNOPSIS + fill_record_n_invoke_before_triggers() + thd thread context + fields Item_fields list to be filled + values values to fill with + ignore_errors TRUE if we should ignore errors + triggers object holding list of triggers to be invoked + event event type for triggers to be invoked + + NOTE + This function assumes that fields which values will be set and triggers + to be invoked belong to the same table, and that TABLE::record[0] and + record[1] buffers correspond to new and old versions of row respectively. 
+ + RETURN + FALSE OK + TRUE error occured +*/ + +bool +fill_record_n_invoke_before_triggers(THD *thd, List<Item> &fields, + List<Item> &values, bool ignore_errors, + Table_triggers_list *triggers, + enum trg_event_type event) +{ + return (fill_record(thd, fields, values, ignore_errors) || + triggers && triggers->process_triggers(thd, event, + TRG_ACTION_BEFORE, TRUE)); +} + + +/* + Fill field buffer with values from Field list + + SYNOPSIS + fill_record() + thd thread handler + ptr pointer on pointer to record + values list of fields + ignore_errors TRUE if we should ignore errors + + NOTE + fill_record() may set table->auto_increment_field_not_null and a + caller should make sure that it is reset after their last call to this + function. + + RETURN + FALSE OK + TRUE error occured +*/ + +bool +fill_record(THD *thd, Field **ptr, List<Item> &values, bool ignore_errors) { List_iterator_fast<Item> v(values); Item *value; + TABLE *table= 0; DBUG_ENTER("fill_record"); Field *field; + /* + Reset the table->auto_increment_field_not_null as it is valid for + only one row. + */ + if (*ptr) + { + /* + On INSERT or UPDATE fields are checked to be from the same table, + thus we safely can take table from the first field. + */ + table= (*ptr)->table; + table->auto_increment_field_not_null= FALSE; + } while ((field = *ptr++)) { value=v++; - TABLE *table= field->table; + table= field->table; if (field == table->next_number_field) table->auto_increment_field_not_null= TRUE; - if ((value->save_in_field(field, 0) < 0) && !ignore_errors) - DBUG_RETURN(1); + if (value->save_in_field(field, 0) == -1) + goto err; } - DBUG_RETURN(0); + DBUG_RETURN(thd->net.report_error); + +err: + if (table) + table->auto_increment_field_not_null= FALSE; + DBUG_RETURN(TRUE); +} + + +/* + Fill fields in array with values from the list of items and invoke + before triggers. 
+ + SYNOPSIS + fill_record_n_invoke_before_triggers() + thd thread context + ptr NULL-ended array of fields to be filled + values values to fill with + ignore_errors TRUE if we should ignore errors + triggers object holding list of triggers to be invoked + event event type for triggers to be invoked + + NOTE + This function assumes that fields which values will be set and triggers + to be invoked belong to the same table, and that TABLE::record[0] and + record[1] buffers correspond to new and old versions of row respectively. + + RETURN + FALSE OK + TRUE error occured +*/ + +bool +fill_record_n_invoke_before_triggers(THD *thd, Field **ptr, + List<Item> &values, bool ignore_errors, + Table_triggers_list *triggers, + enum trg_event_type event) +{ + return (fill_record(thd, ptr, values, ignore_errors) || + triggers && triggers->process_triggers(thd, event, + TRG_ACTION_BEFORE, TRUE)); } @@ -2979,6 +5483,7 @@ my_bool mysql_rm_tmp_tables(void) if (!(thd= new THD)) DBUG_RETURN(1); + thd->thread_stack= (char*) &thd; thd->store_globals(); for (i=0; i<=mysql_tmpdir_list.max; i++) @@ -3008,7 +5513,7 @@ my_bool mysql_rm_tmp_tables(void) if (!bcmp(reg_ext, ext, ext_len)) { TABLE tmp_table; - if (!openfrm(filePath, "tmp_table", (uint) 0, + if (!openfrm(thd, filePath, "tmp_table", (uint) 0, READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, 0, &tmp_table)) { @@ -3041,34 +5546,31 @@ my_bool mysql_rm_tmp_tables(void) *****************************************************************************/ /* -** Invalidate any cache entries that are for some DB -** We can't use hash_delete when looping hash_elements. We mark them first -** and afterwards delete those marked unused. + Invalidate any cache entries that are for some DB + + SYNOPSIS + remove_db_from_cache() + db Database name. This will be in lower case if + lower_case_table_name is set + + NOTE: + We can't use hash_delete when looping hash_elements. We mark them first + and afterwards delete those marked unused. 
*/ void remove_db_from_cache(const char *db) { - char name_buff[NAME_LEN+1]; - if (db && lower_case_table_names) - { - /* - convert database to lower case for comparision. - */ - strmake(name_buff, db, sizeof(name_buff)-1); - my_casedn_str(files_charset_info, name_buff); - db= name_buff; - } for (uint idx=0 ; idx < open_cache.records ; idx++) { TABLE *table=(TABLE*) hash_element(&open_cache,idx); - if (!strcmp(table->table_cache_key,db)) + if (!strcmp(table->s->db, db)) { - table->version=0L; /* Free when thread is ready */ + table->s->version= 0L; /* Free when thread is ready */ if (!table->in_use) relink_unused(table); } } - while (unused_tables && !unused_tables->version) + while (unused_tables && !unused_tables->s->version) VOID(hash_delete(&open_cache,(byte*) unused_tables)); } @@ -3109,7 +5611,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, TABLE *table; bool result=0, signalled= 0; DBUG_ENTER("remove_table_from_cache"); - + DBUG_PRINT("enter", ("Table: '%s.%s' flags: %u", db, table_name, flags)); key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1; for (;;) @@ -3124,7 +5626,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, &state)) { THD *in_use; - table->version=0L; /* Free when thread is ready */ + table->s->version=0L; /* Free when thread is ready */ if (!(in_use=table->in_use)) { DBUG_PRINT("info",("Table was not in use")); @@ -3134,12 +5636,15 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, { in_use->some_tables_deleted=1; if (table->db_stat) + { + DBUG_PRINT("info", ("Found another active instance of the table")); result=1; + } /* Kill delayed insert threads */ if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) && ! 
in_use->killed) { - in_use->killed=1; + in_use->killed= THD::KILL_CONNECTION; pthread_mutex_lock(&in_use->mysys_var->mutex); if (in_use->mysys_var->current_cond) { @@ -3165,10 +5670,16 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, else result= result || (flags & RTFC_OWNED_BY_THD_FLAG); } - while (unused_tables && !unused_tables->version) + while (unused_tables && !unused_tables->s->version) VOID(hash_delete(&open_cache,(byte*) unused_tables)); if (result && (flags & RTFC_WAIT_OTHER_THREAD_FLAG)) { + /* + Signal any thread waiting for tables to be freed to + reopen their tables + */ + broadcast_refresh(); + DBUG_PRINT("info", ("Waiting for refresh signal")); if (!(flags & RTFC_CHECK_KILLED_FLAG) || !thd->killed) { dropping_tables++; @@ -3235,3 +5746,70 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order) } return 0; } + + +/* + open new .frm format table + + SYNOPSIS + open_new_frm() + THD thread handler + path path to .frm + alias alias for table + db database + table_name name of table + db_stat open flags (for example HA_OPEN_KEYFILE|HA_OPEN_RNDFILE..) + can be 0 (example in ha_example_table) + prgflag READ_ALL etc.. + ha_open_flags HA_OPEN_ABORT_IF_LOCKED etc.. 
+ outparam result table + table_desc TABLE_LIST descriptor + mem_root temporary MEM_ROOT for parsing +*/ + +static bool +open_new_frm(THD *thd, const char *path, const char *alias, + const char *db, const char *table_name, + uint db_stat, uint prgflag, + uint ha_open_flags, TABLE *outparam, TABLE_LIST *table_desc, + MEM_ROOT *mem_root) +{ + LEX_STRING pathstr; + File_parser *parser; + DBUG_ENTER("open_new_frm"); + + pathstr.str= (char*) path; + pathstr.length= strlen(path); + + if ((parser= sql_parse_prepare(&pathstr, mem_root, 1))) + { + if (is_equal(&view_type, parser->type())) + { + if (table_desc == 0 || table_desc->required_type == FRMTYPE_TABLE) + { + my_error(ER_WRONG_OBJECT, MYF(0), db, table_name, "BASE TABLE"); + goto err; + } + if (mysql_make_view(thd, parser, table_desc, + (prgflag & OPEN_VIEW_NO_PARSE))) + goto err; + } + else + { + /* only VIEWs are supported now */ + my_error(ER_FRM_UNKNOWN_TYPE, MYF(0), path, parser->type()->str); + goto err; + } + DBUG_RETURN(0); + } + +err: + bzero(outparam, sizeof(TABLE)); // do not run repair + DBUG_RETURN(1); +} + + +bool is_equal(const LEX_STRING *a, const LEX_STRING *b) +{ + return a->length == b->length && !strncmp(a->str, b->str, a->length); +} diff --git a/sql/sql_bitmap.h b/sql/sql_bitmap.h index 2fd603d9381..3b507d64df5 100644 --- a/sql/sql_bitmap.h +++ b/sql/sql_bitmap.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -28,7 +27,7 @@ template <uint default_width> class Bitmap uchar buffer[(default_width+7)/8]; public: Bitmap() { init(); } - Bitmap(Bitmap& from) { *this=from; } + Bitmap(const Bitmap& from) { *this=from; } explicit Bitmap(uint prefix_to_set) { init(prefix_to_set); } void init() { bitmap_init(&map, buffer, default_width, 0); } void init(uint prefix_to_set) { init(); set_prefix(prefix_to_set); } @@ -51,6 +50,14 @@ public: bitmap_init(&map2, (uchar *)&map2buff, sizeof(ulonglong)*8, 0); bitmap_intersect(&map, &map2); } + /* Use highest bit for all bits above sizeof(ulonglong)*8. */ + void intersect_extended(ulonglong map2buff) + { + intersect(map2buff); + if (map.bitmap_size > sizeof(ulonglong)) + bitmap_set_above(&map, sizeof(ulonglong), + test(map2buff & (LL(1) << (sizeof(ulonglong) * 8 - 1)))); + } void subtract(Bitmap& map2) { bitmap_subtract(&map, &map2.map); } void merge(Bitmap& map2) { bitmap_union(&map, &map2.map); } my_bool is_set(uint n) const { return bitmap_is_set(&map, n); } @@ -61,18 +68,17 @@ public: my_bool operator==(const Bitmap& map2) const { return bitmap_cmp(&map, &map2.map); } char *print(char *buf) const { - char *s=buf; int i; - for (i=sizeof(buffer)-1; i>=0 ; i--) - { - if ((*s=_dig_vec_upper[buffer[i] >> 4]) != '0') - break; - if ((*s=_dig_vec_upper[buffer[i] & 15]) != '0') - break; - } - for (s++, i-- ; i>=0 ; i--) + char *s=buf; + const uchar *e=buffer, *b=e+sizeof(buffer)-1; + while (!*b && b>e) + b--; + if ((*s=_dig_vec_upper[*b >> 4]) != '0') + s++; + *s++=_dig_vec_upper[*b & 15]; + while (--b>=e) { - *s++=_dig_vec_upper[buffer[i] >> 4]; - *s++=_dig_vec_upper[buffer[i] & 15]; + *s++=_dig_vec_upper[*b >> 4]; + *s++=_dig_vec_upper[*b & 15]; } *s=0; return buf; @@ -82,7 +88,7 @@ public: if (sizeof(buffer) >= 8) return uint8korr(buffer); DBUG_ASSERT(sizeof(buffer) >= 4); - uint4korr(buffer); + return (ulonglong) 
uint4korr(buffer); } }; @@ -117,6 +123,7 @@ public: void clear_all() { map=(ulonglong)0; } void intersect(Bitmap<64>& map2) { map&= map2.map; } void intersect(ulonglong map2) { map&= map2; } + void intersect_extended(ulonglong map2) { map&= map2; } void subtract(Bitmap<64>& map2) { map&= ~map2.map; } void merge(Bitmap<64>& map2) { map|= map2.map; } my_bool is_set(uint n) const { return test(map & (((ulonglong)1) << n)); } diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index fc03e03dee7..d06ac7824fd 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -278,7 +277,6 @@ TODO list: - Move MRG_MYISAM table type processing to handlers, something like: tables_used->table->file->register_used_filenames(callback, first_argument); - - Make derived tables cachable. - QC improvement suggested by Monty: - Add a counter in open_table() for how many MERGE (ISAM or MyISAM) tables are cached in the table cache. 
@@ -311,7 +309,7 @@ TODO list: #include "emb_qcache.h" #endif -#if defined(EXTRA_DEBUG) && !defined(DBUG_OFF) +#if !defined(EXTRA_DBUG) && !defined(DBUG_OFF) #define MUTEX_LOCK(M) { DBUG_PRINT("lock", ("mutex lock 0x%lx", (ulong)(M))); \ pthread_mutex_lock(M);} #define MUTEX_UNLOCK(M) {DBUG_PRINT("lock", ("mutex unlock 0x%lx",\ @@ -528,7 +526,8 @@ void Query_cache_query::init_n_lock() my_rwlock_init(&lock, NULL); lock_writing(); DBUG_PRINT("qcache", ("inited & locked query for block 0x%lx", - ((byte*) this)-ALIGN_SIZE(sizeof(Query_cache_block)))); + (long) (((byte*) this) - + ALIGN_SIZE(sizeof(Query_cache_block))))); DBUG_VOID_RETURN; } @@ -537,7 +536,8 @@ void Query_cache_query::unlock_n_destroy() { DBUG_ENTER("Query_cache_query::unlock_n_destroy"); DBUG_PRINT("qcache", ("destroyed & unlocked query for block 0x%lx", - ((byte*)this)-ALIGN_SIZE(sizeof(Query_cache_block)))); + (long) (((byte*) this) - + ALIGN_SIZE(sizeof(Query_cache_block))))); /* The following call is not needed on system where one can destroy an active semaphore @@ -566,21 +566,62 @@ byte *query_cache_query_get_key(const byte *record, uint *length, *****************************************************************************/ /* + Note on double-check locking (DCL) usage. + + Below, in query_cache_insert(), query_cache_abort() and + query_cache_end_of_result() we use what is called double-check + locking (DCL) for NET::query_cache_query. I.e. we test it first + without a lock, and, if positive, test again under the lock. + + This means that if we see 'NET::query_cache_query == 0' without a + lock we will skip the operation. But this is safe here: when we + started to cache a query, we called Query_cache::store_query(), and + NET::query_cache_query was set to non-zero in this thread (and the + thread always sees results of its memory operations, mutex or not). 
+ If later we see 'NET::query_cache_query == 0' without locking a + mutex, that may only mean that some other thread have reset it by + invalidating the query. Skipping the operation in this case is the + right thing to do, as NET::query_cache_query won't get non-zero for + this query again. + + See also comments in Query_cache::store_query() and + Query_cache::send_result_to_client(). + + NOTE, however, that double-check locking is not applicable in + 'invalidate' functions, as we may erroneously skip invalidation, + because the thread doing invalidation may never see non-zero + NET::query_cache_query. +*/ + + +void query_cache_init_query(NET *net) +{ + /* + It is safe to initialize 'NET::query_cache_query' without a lock + here, because before it will be accessed from different threads it + will be set in this thread under a lock, and access from the same + thread is always safe. + */ + net->query_cache_query= 0; +} + + +/* Insert the packet into the query cache. - This should only be called if net->query_cache_query != 0 */ void query_cache_insert(NET *net, const char *packet, ulong length) { DBUG_ENTER("query_cache_insert"); + /* See the comment on double-check locking usage above. 
*/ + if (net->query_cache_query == 0) + DBUG_VOID_RETURN; + STRUCT_LOCK(&query_cache.structure_guard_mutex); - /* - It is very unlikely that following condition is TRUE (it is possible - only if other thread is resizing cache), so we check it only after guard - mutex lock - */ - if (unlikely(query_cache.query_cache_size == 0)) + + if (unlikely(query_cache.query_cache_size == 0 || + query_cache.flush_in_progress)) { STRUCT_UNLOCK(&query_cache.structure_guard_mutex); DBUG_VOID_RETURN; @@ -615,11 +656,12 @@ void query_cache_insert(NET *net, const char *packet, ulong length) DBUG_VOID_RETURN; } header->result(result); + header->last_pkt_nr= net->pkt_nr; BLOCK_UNLOCK_WR(query_block); + DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0);); } else STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0);); DBUG_VOID_RETURN; } @@ -628,95 +670,95 @@ void query_cache_abort(NET *net) { DBUG_ENTER("query_cache_abort"); - if (net->query_cache_query != 0) // Quick check on unlocked structure + /* See the comment on double-check locking usage above. 
*/ + if (net->query_cache_query == 0) + DBUG_VOID_RETURN; + + STRUCT_LOCK(&query_cache.structure_guard_mutex); + + if (unlikely(query_cache.query_cache_size == 0 || + query_cache.flush_in_progress)) { - STRUCT_LOCK(&query_cache.structure_guard_mutex); - /* - It is very unlikely that following condition is TRUE (it is possible - only if other thread is resizing cache), so we check it only after guard - mutex lock - */ - if (unlikely(query_cache.query_cache_size == 0)) - { - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - DBUG_VOID_RETURN; - } + STRUCT_UNLOCK(&query_cache.structure_guard_mutex); + DBUG_VOID_RETURN; + } - Query_cache_block *query_block = ((Query_cache_block*) - net->query_cache_query); - if (query_block) // Test if changed by other thread - { - DUMP(&query_cache); - BLOCK_LOCK_WR(query_block); - // The following call will remove the lock on query_block - query_cache.free_query(query_block); - } - net->query_cache_query=0; + Query_cache_block *query_block= ((Query_cache_block*) + net->query_cache_query); + if (query_block) // Test if changed by other thread + { + DUMP(&query_cache); + BLOCK_LOCK_WR(query_block); + // The following call will remove the lock on query_block + query_cache.free_query(query_block); + net->query_cache_query= 0; DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1);); - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); } + + STRUCT_UNLOCK(&query_cache.structure_guard_mutex); + DBUG_VOID_RETURN; } void query_cache_end_of_result(THD *thd) { + Query_cache_block *query_block; DBUG_ENTER("query_cache_end_of_result"); - if (thd->net.query_cache_query != 0) // Quick check on unlocked structure - { + /* See the comment on double-check locking usage above. 
*/ + if (thd->net.query_cache_query == 0) + DBUG_VOID_RETURN; + #ifdef EMBEDDED_LIBRARY - query_cache_insert(&thd->net, (char*)thd, - emb_count_querycache_size(thd)); + query_cache_insert(&thd->net, (char*)thd, + emb_count_querycache_size(thd)); #endif - STRUCT_LOCK(&query_cache.structure_guard_mutex); - /* - It is very unlikely that following condition is TRUE (it is possible - only if other thread is resizing cache), so we check it only after guard - mutex lock - */ - if (unlikely(query_cache.query_cache_size == 0)) - { - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - DBUG_VOID_RETURN; - } - Query_cache_block *query_block = ((Query_cache_block*) - thd->net.query_cache_query); - if (query_block) - { - DUMP(&query_cache); - BLOCK_LOCK_WR(query_block); - Query_cache_query *header = query_block->query(); - Query_cache_block *last_result_block = header->result()->prev; - ulong allign_size = ALIGN_SIZE(last_result_block->used); - ulong len = max(query_cache.min_allocation_unit, allign_size); - if (last_result_block->length >= query_cache.min_allocation_unit + len) - query_cache.split_block(last_result_block,len); - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); + STRUCT_LOCK(&query_cache.structure_guard_mutex); + + if (unlikely(query_cache.query_cache_size == 0 || + query_cache.flush_in_progress)) + goto end; + + query_block= ((Query_cache_block*) thd->net.query_cache_query); + if (query_block) + { + DUMP(&query_cache); + BLOCK_LOCK_WR(query_block); + Query_cache_query *header= query_block->query(); + Query_cache_block *last_result_block= header->result()->prev; + ulong allign_size= ALIGN_SIZE(last_result_block->used); + ulong len= max(query_cache.min_allocation_unit, allign_size); + if (last_result_block->length >= query_cache.min_allocation_unit + len) + query_cache.split_block(last_result_block,len); #ifndef DBUG_OFF - if (header->result() == 0) - { - DBUG_PRINT("error", ("end of data whith no result. 
query '%s'", - header->query())); - query_cache.wreck(__LINE__, ""); - DBUG_VOID_RETURN; - } -#endif - header->found_rows(current_thd->limit_found_rows); - header->result()->type = Query_cache_block::RESULT; - header->writer(0); - BLOCK_UNLOCK_WR(query_block); - } - else + if (header->result() == 0) { - // Cache was flushed or resized and query was deleted => do nothing - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); + DBUG_PRINT("error", ("end of data whith no result. query '%s'", + header->query())); + query_cache.wreck(__LINE__, ""); + + /* + We do not need call of BLOCK_UNLOCK_WR(query_block); here because + query_cache.wreck() switched query cache off but left content + untouched for investigation (it is debugging method). + */ + goto end; } - thd->net.query_cache_query=0; - DBUG_EXECUTE("check_querycache",query_cache.check_integrity(0);); +#endif + header->found_rows(current_thd->limit_found_rows); + header->result()->type= Query_cache_block::RESULT; + header->writer(0); + thd->net.query_cache_query= 0; + BLOCK_UNLOCK_WR(query_block); + DBUG_EXECUTE("check_querycache",query_cache.check_integrity(1);); + } + +end: + STRUCT_UNLOCK(&query_cache.structure_guard_mutex); DBUG_VOID_RETURN; } @@ -762,8 +804,7 @@ ulong Query_cache::resize(ulong query_cache_size_arg) query_cache_size_arg)); DBUG_ASSERT(initialized); STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0) - free_cache(); + free_cache(); query_cache_size= query_cache_size_arg; ::query_cache_size= init_cache(); STRUCT_UNLOCK(&structure_guard_mutex); @@ -784,7 +825,15 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) TABLE_COUNTER_TYPE local_tables; ulong tot_length; DBUG_ENTER("Query_cache::store_query"); - if (query_cache_size == 0 || thd->locked_tables) + /* + Testing 'query_cache_size' without a lock here is safe: the thing + we may loose is that the query won't be cached, but we save on + mutex locking in the case when query cache is disabled or the + query is 
uncachable. + + See also a note on double-check locking usage above. + */ + if (thd->locked_tables || query_cache_size == 0) DBUG_VOID_RETURN; uint8 tables_type= 0; @@ -796,10 +845,12 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) Query_cache_query_flags flags; // fill all gaps between fields with 0 to get repeatable key bzero(&flags, QUERY_CACHE_FLAGS_SIZE); - flags.client_long_flag= (thd->client_capabilities & CLIENT_LONG_FLAG ? - 1 : 0); - flags.client_protocol_41= (thd->client_capabilities & CLIENT_PROTOCOL_41 ? - 1 : 0); + flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG); + flags.client_protocol_41= test(thd->client_capabilities & + CLIENT_PROTOCOL_41); + flags.more_results_exists= test(thd->server_status & + SERVER_MORE_RESULTS_EXISTS); + flags.pkt_nr= net->pkt_nr; flags.character_set_client_num= thd->variables.character_set_client->number; flags.character_set_results_num= @@ -812,11 +863,31 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) flags.time_zone= thd->variables.time_zone; flags.sql_mode= thd->variables.sql_mode; flags.max_sort_length= thd->variables.max_sort_length; + flags.lc_time_names= thd->variables.lc_time_names; flags.group_concat_max_len= thd->variables.group_concat_max_len; - flags.lc_time_names= thd->variables.lc_time_names; - STRUCT_LOCK(&structure_guard_mutex); + DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, pkt_nr: %d, \ +CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \ +sql mode: 0x%lx, sort len: %lu, conncat len: %lu", + (int)flags.client_long_flag, + (int)flags.client_protocol_41, + (int)flags.more_results_exists, + flags.pkt_nr, + flags.character_set_client_num, + flags.character_set_results_num, + flags.collation_connection_num, + (ulong) flags.limit, + (ulong) flags.time_zone, + flags.sql_mode, + flags.max_sort_length, + flags.group_concat_max_len)); + /* + Make InnoDB to release the adaptive hash index latch before + acquiring the query 
cache mutex. + */ + ha_release_temporary_latches(thd); - if (query_cache_size == 0) + STRUCT_LOCK(&structure_guard_mutex); + if (query_cache_size == 0 || flush_in_progress) { STRUCT_UNLOCK(&structure_guard_mutex); DBUG_VOID_RETURN; @@ -834,7 +905,7 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) if (thd->db_length) { memcpy(thd->query+thd->query_length+1, thd->db, thd->db_length); - DBUG_PRINT("qcache", ("database : %s length %u", + DBUG_PRINT("qcache", ("database: %s length: %u", thd->db, thd->db_length)); } else @@ -890,11 +961,12 @@ void Query_cache::store_query(THD *thd, TABLE_LIST *tables_used) double_linked_list_simple_include(query_block, &queries_blocks); inserts++; queries_in_cache++; - STRUCT_UNLOCK(&structure_guard_mutex); - net->query_cache_query= (gptr) query_block; header->writer(net); header->tables_type(tables_type); + + STRUCT_UNLOCK(&structure_guard_mutex); + // init_n_lock make query block locked BLOCK_UNLOCK_WR(query_block); } @@ -931,25 +1003,33 @@ end: 0 The query was cached and user was sent the result. -1 The query was cached but we didn't have rights to use it. No error is sent to the client yet. + + NOTE + This method requires that sql points to allocated memory of size: + tot_length= query_length + thd->db_length + 1 + QUERY_CACHE_FLAGS_SIZE; */ int Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) { + ulonglong engine_data; Query_cache_query *query; Query_cache_block *first_result_block, *result_block; Query_cache_block_table *block_table, *block_table_end; ulong tot_length; Query_cache_query_flags flags; - bool check_tables; DBUG_ENTER("Query_cache::send_result_to_client"); - if (query_cache_size == 0 || thd->locked_tables || - thd->variables.query_cache_type == 0) - goto err; + /* + Testing 'query_cache_size' without a lock here is safe: the thing + we may loose is that the query won't be served from cache, but we + save on mutex locking in the case when query cache is disabled. 
- /* Check that we haven't forgot to reset the query cache variables */ - DBUG_ASSERT(thd->net.query_cache_query == 0); + See also a note on double-check locking usage above. + */ + if (thd->locked_tables || thd->variables.query_cache_type == 0 || + query_cache_size == 0) + goto err; if (!thd->lex->safe_to_cache_query) { @@ -969,10 +1049,15 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) /* Test if the query is a SELECT (pre-space is removed in dispatch_command) + + First '/' looks like comment before command it is not + frequently appeared in real life, consequently we can + check all such queries, too. */ - if (my_toupper(system_charset_info, sql[i]) != 'S' || - my_toupper(system_charset_info, sql[i + 1]) != 'E' || - my_toupper(system_charset_info, sql[i + 2]) != 'L') + if ((my_toupper(system_charset_info, sql[i]) != 'S' || + my_toupper(system_charset_info, sql[i + 1]) != 'E' || + my_toupper(system_charset_info, sql[i + 2]) != 'L') && + sql[i] != '/') { DBUG_PRINT("qcache", ("The statement is not a SELECT; Not cached")); goto err; @@ -980,18 +1065,22 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) } STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size == 0) + if (query_cache_size == 0 || flush_in_progress) { DBUG_PRINT("qcache", ("query cache disabled")); goto err_unlock; } + + /* Check that we haven't forgot to reset the query cache variables */ + DBUG_ASSERT(thd->net.query_cache_query == 0); + Query_cache_block *query_block; tot_length= query_length + thd->db_length + 1 + QUERY_CACHE_FLAGS_SIZE; if (thd->db_length) { memcpy(sql+query_length+1, thd->db, thd->db_length); - DBUG_PRINT("qcache", ("database: '%s' length %u", + DBUG_PRINT("qcache", ("database: '%s' length: %u", thd->db, thd->db_length)); } else @@ -1001,10 +1090,12 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) // fill all gaps between fields with 0 to get repeatable key bzero(&flags, 
QUERY_CACHE_FLAGS_SIZE); - flags.client_long_flag= (thd->client_capabilities & CLIENT_LONG_FLAG ? - 1 : 0); - flags.client_protocol_41= (thd->client_capabilities & CLIENT_PROTOCOL_41 ? - 1 : 0); + flags.client_long_flag= test(thd->client_capabilities & CLIENT_LONG_FLAG); + flags.client_protocol_41= test(thd->client_capabilities & + CLIENT_PROTOCOL_41); + flags.more_results_exists= test(thd->server_status & + SERVER_MORE_RESULTS_EXISTS); + flags.pkt_nr= thd->net.pkt_nr; flags.character_set_client_num= thd->variables.character_set_client->number; flags.character_set_results_num= (thd->variables.character_set_results ? @@ -1016,7 +1107,22 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) flags.sql_mode= thd->variables.sql_mode; flags.max_sort_length= thd->variables.max_sort_length; flags.group_concat_max_len= thd->variables.group_concat_max_len; - flags.lc_time_names= thd->variables.lc_time_names; + flags.lc_time_names= thd->variables.lc_time_names; + DBUG_PRINT("qcache", ("long %d, 4.1: %d, more results %d, pkt_nr: %d, \ +CS client: %u, CS result: %u, CS conn: %u, limit: %lu, TZ: 0x%lx, \ +sql mode: 0x%lx, sort len: %lu, conncat len: %lu", + (int)flags.client_long_flag, + (int)flags.client_protocol_41, + (int)flags.more_results_exists, + flags.pkt_nr, + flags.character_set_client_num, + flags.character_set_results_num, + flags.collation_connection_num, + (ulong) flags.limit, + (ulong) flags.time_zone, + flags.sql_mode, + flags.max_sort_length, + flags.group_concat_max_len)); memcpy((void *)(sql + (tot_length - QUERY_CACHE_FLAGS_SIZE)), &flags, QUERY_CACHE_FLAGS_SIZE); query_block = (Query_cache_block *) hash_search(&queries, (byte*) sql, @@ -1055,7 +1161,6 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) goto err_unlock; } - check_tables= query->tables_type() & HA_CACHE_TBL_ASKTRANSACT; // Check access; block_table= query_block->table(0); block_table_end= block_table+query_block->n_tables; @@ -1063,7 +1168,6 @@ 
Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) { TABLE_LIST table_list; TABLE *tmptable; - Query_cache_table *table = block_table->parent; /* @@ -1074,8 +1178,9 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) */ for (tmptable= thd->temporary_tables; tmptable ; tmptable= tmptable->next) { - if (tmptable->key_length - TMP_TABLE_KEY_EXTRA == table->key_length() && - !memcmp(tmptable->table_cache_key, table->data(), + if (tmptable->s->key_length - TMP_TABLE_KEY_EXTRA == + table->key_length() && + !memcmp(tmptable->s->table_cache_key, table->data(), table->key_length())) { DBUG_PRINT("qcache", @@ -1095,7 +1200,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) bzero((char*) &table_list,sizeof(table_list)); table_list.db = table->db(); - table_list.alias= table_list.real_name= table->table(); + table_list.alias= table_list.table_name= table->table(); #ifndef NO_EMBEDDED_ACCESS_CHECKS if (check_table_access(thd,SELECT_ACL,&table_list,1)) { @@ -1116,19 +1221,30 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) goto err_unlock; // Parse query } #endif /*!NO_EMBEDDED_ACCESS_CHECKS*/ - if (check_tables && !ha_caching_allowed(thd, table->db(), - table->key_length(), - table->type())) + engine_data= table->engine_data(); + if (table->callback() && + !(*table->callback())(thd, table->db(), + table->key_length(), + &engine_data)) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", table_list.db, table_list.alias)); BLOCK_UNLOCK_RD(query_block); - thd->lex->safe_to_cache_query= 0; // Don't try to cache this + if (engine_data != table->engine_data()) + { + DBUG_PRINT("qcache", + ("Handler require invalidation queries of %s.%s %lu-%lu", + table_list.db, table_list.alias, + (ulong) engine_data, (ulong) table->engine_data())); + invalidate_table((byte *) table->db(), table->key_length()); + } + else + thd->lex->safe_to_cache_query= 0; // Don't try to cache 
this goto err_unlock; // Parse query } else - DBUG_PRINT("qcache", ("handler allow caching (%d) %s,%s", - check_tables, table_list.db, table_list.alias)); + DBUG_PRINT("qcache", ("handler allow caching %s,%s", + table_list.db, table_list.alias)); } move_to_query_list_end(query_block); hits++; @@ -1140,10 +1256,10 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) #ifndef EMBEDDED_LIBRARY do { - DBUG_PRINT("qcache", ("Results (len %lu, used %lu, headers %lu)", + DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %lu)", result_block->length, result_block->used, - result_block->headers_len()+ - ALIGN_SIZE(sizeof(Query_cache_result)))); + (ulong) (result_block->headers_len()+ + ALIGN_SIZE(sizeof(Query_cache_result))))); Query_cache_result *result = result_block->result(); if (net_real_write(&thd->net, result->data(), @@ -1152,6 +1268,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) ALIGN_SIZE(sizeof(Query_cache_result)))) break; // Client aborted result_block = result_block->next; + thd->net.pkt_nr= query->last_pkt_nr; // Keep packet number updated } while (result_block != first_result_block); #else { @@ -1162,6 +1279,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) #endif /*!EMBEDDED_LIBRARY*/ thd->limit_found_rows = query->found_rows(); + thd->status_var.last_query_cost= 0.0; BLOCK_UNLOCK_RD(query_block); DBUG_RETURN(1); // Result sent to client @@ -1181,51 +1299,49 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used, my_bool using_transactions) { DBUG_ENTER("Query_cache::invalidate (table list)"); - if (query_cache_size > 0) + STRUCT_LOCK(&structure_guard_mutex); + if (query_cache_size > 0 && !flush_in_progress) { - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0) - { - DUMP(this); + DUMP(this); - using_transactions = using_transactions && - (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); - for (; tables_used; 
tables_used=tables_used->next) - { - DBUG_ASSERT(!using_transactions || tables_used->table!=0); - if (tables_used->derived) - continue; - if (using_transactions && - (tables_used->table->file->table_cache_type() == - HA_CACHE_TBL_TRANSACT)) - /* - Tables_used->table can't be 0 in transaction. - Only 'drop' invalidate not opened table, but 'drop' - force transaction finish. - */ - thd->add_changed_table(tables_used->table); - else - invalidate_table(tables_used); - } + using_transactions= using_transactions && + (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); + for (; tables_used; tables_used= tables_used->next_local) + { + DBUG_ASSERT(!using_transactions || tables_used->table!=0); + if (tables_used->derived) + continue; + if (using_transactions && + (tables_used->table->file->table_cache_type() == + HA_CACHE_TBL_TRANSACT)) + /* + Tables_used->table can't be 0 in transaction. + Only 'drop' invalidate not opened table, but 'drop' + force transaction finish. + */ + thd->add_changed_table(tables_used->table); + else + invalidate_table(tables_used); } - STRUCT_UNLOCK(&structure_guard_mutex); } + STRUCT_UNLOCK(&structure_guard_mutex); + DBUG_VOID_RETURN; } void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used) { DBUG_ENTER("Query_cache::invalidate (changed table list)"); - if (query_cache_size > 0 && tables_used) + if (tables_used) { STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0) + if (query_cache_size > 0 && !flush_in_progress) { DUMP(this); - for (; tables_used; tables_used=tables_used->next) + for (; tables_used; tables_used= tables_used->next) { invalidate_table((byte*) tables_used->key, tables_used->key_length); - DBUG_PRINT("qcache", (" db %s, table %s", tables_used->key, + DBUG_PRINT("qcache", ("db: %s table: %s", tables_used->key, tables_used->key+ strlen(tables_used->key)+1)); } @@ -1249,15 +1365,16 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used) void Query_cache::invalidate_locked_for_write(TABLE_LIST 
*tables_used) { DBUG_ENTER("Query_cache::invalidate_locked_for_write"); - if (query_cache_size > 0 && tables_used) + if (tables_used) { STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0) + if (query_cache_size > 0 && !flush_in_progress) { DUMP(this); - for (; tables_used; tables_used= tables_used->next) + for (; tables_used; tables_used= tables_used->next_local) { - if (tables_used->lock_type & (TL_WRITE_LOW_PRIORITY | TL_WRITE)) + if (tables_used->lock_type & (TL_WRITE_LOW_PRIORITY | TL_WRITE) && + tables_used->table) invalidate_table(tables_used->table); } } @@ -1275,21 +1392,19 @@ void Query_cache::invalidate(THD *thd, TABLE *table, { DBUG_ENTER("Query_cache::invalidate (table)"); - if (query_cache_size > 0) + STRUCT_LOCK(&structure_guard_mutex); + if (query_cache_size > 0 && !flush_in_progress) { - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0) - { - using_transactions = using_transactions && - (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); - if (using_transactions && - (table->file->table_cache_type() == HA_CACHE_TBL_TRANSACT)) - thd->add_changed_table(table); - else - invalidate_table(table); - } - STRUCT_UNLOCK(&structure_guard_mutex); + using_transactions= using_transactions && + (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); + if (using_transactions && + (table->file->table_cache_type() == HA_CACHE_TBL_TRANSACT)) + thd->add_changed_table(table); + else + invalidate_table(table); } + STRUCT_UNLOCK(&structure_guard_mutex); + DBUG_VOID_RETURN; } @@ -1298,20 +1413,18 @@ void Query_cache::invalidate(THD *thd, const char *key, uint32 key_length, { DBUG_ENTER("Query_cache::invalidate (key)"); - if (query_cache_size > 0) + STRUCT_LOCK(&structure_guard_mutex); + if (query_cache_size > 0 && !flush_in_progress) { - using_transactions = using_transactions && + using_transactions= using_transactions && (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); if (using_transactions) // used for innodb => 
has_transactions() is TRUE thd->add_changed_table(key, key_length); else - { - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0) - invalidate_table((byte*)key, key_length); - STRUCT_UNLOCK(&structure_guard_mutex); - } + invalidate_table((byte*)key, key_length); } + STRUCT_UNLOCK(&structure_guard_mutex); + DBUG_VOID_RETURN; } @@ -1322,38 +1435,36 @@ void Query_cache::invalidate(THD *thd, const char *key, uint32 key_length, void Query_cache::invalidate(char *db) { DBUG_ENTER("Query_cache::invalidate (db)"); - if (query_cache_size > 0) + STRUCT_LOCK(&structure_guard_mutex); + if (query_cache_size > 0 && !flush_in_progress) { - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0) - { - DUMP(this); + DUMP(this); restart_search: - if (tables_blocks) + if (tables_blocks) + { + Query_cache_block *curr= tables_blocks; + Query_cache_block *next; + do { - Query_cache_block *curr= tables_blocks; - Query_cache_block *next; - do - { - next= curr->next; - if (strcmp(db, (char*)(curr->table()->db())) == 0) - invalidate_table(curr); - /* - invalidate_table can freed block on which point 'next' (if - table of this block used only in queries which was deleted - by invalidate_table). As far as we do not allocate new blocks - and mark all headers of freed blocks as 'FREE' (even if they are - merged with other blocks) we can just test type of block - to be sure that block is not deleted - */ - if (next->type == Query_cache_block::FREE) - goto restart_search; - curr= next; - } while (curr != tables_blocks); - } + next= curr->next; + if (strcmp(db, (char*)(curr->table()->db())) == 0) + invalidate_table(curr); + /* + invalidate_table can freed block on which point 'next' (if + table of this block used only in queries which was deleted + by invalidate_table). 
As far as we do not allocate new blocks + and mark all headers of freed blocks as 'FREE' (even if they are + merged with other blocks) we can just test type of block + to be sure that block is not deleted + */ + if (next->type == Query_cache_block::FREE) + goto restart_search; + curr= next; + } while (curr != tables_blocks); } - STRUCT_UNLOCK(&structure_guard_mutex); } + STRUCT_UNLOCK(&structure_guard_mutex); + DBUG_VOID_RETURN; } @@ -1361,23 +1472,22 @@ void Query_cache::invalidate(char *db) void Query_cache::invalidate_by_MyISAM_filename(const char *filename) { DBUG_ENTER("Query_cache::invalidate_by_MyISAM_filename"); - if (query_cache_size > 0) + + STRUCT_LOCK(&structure_guard_mutex); + if (query_cache_size > 0 && !flush_in_progress) { /* Calculate the key outside the lock to make the lock shorter */ char key[MAX_DBKEY_LENGTH]; uint32 db_length; uint key_length= filename_2_table_key(key, filename, &db_length); - STRUCT_LOCK(&structure_guard_mutex); - if (query_cache_size > 0) // Safety if cache removed - { - Query_cache_block *table_block; - if ((table_block = (Query_cache_block*) hash_search(&tables, - (byte*) key, - key_length))) - invalidate_table(table_block); - } - STRUCT_UNLOCK(&structure_guard_mutex); + Query_cache_block *table_block; + if ((table_block = (Query_cache_block*) hash_search(&tables, + (byte*) key, + key_length))) + invalidate_table(table_block); } + STRUCT_UNLOCK(&structure_guard_mutex); + DBUG_VOID_RETURN; } @@ -1422,7 +1532,12 @@ void Query_cache::destroy() } else { + /* Underlying code expects the lock. 
*/ + STRUCT_LOCK(&structure_guard_mutex); free_cache(); + STRUCT_UNLOCK(&structure_guard_mutex); + + pthread_cond_destroy(&COND_flush_finished); pthread_mutex_destroy(&structure_guard_mutex); initialized = 0; } @@ -1438,6 +1553,8 @@ void Query_cache::init() { DBUG_ENTER("Query_cache::init"); pthread_mutex_init(&structure_guard_mutex,MY_MUTEX_INIT_FAST); + pthread_cond_init(&COND_flush_finished, NULL); + flush_in_progress= FALSE; initialized = 1; DBUG_VOID_RETURN; } @@ -1633,12 +1750,33 @@ void Query_cache::make_disabled() } +/* + free_cache() - free all resources allocated by the cache. + + SYNOPSIS + free_cache() + + DESCRIPTION + This function frees all resources allocated by the cache. You + have to call init_cache() before using the cache again. +*/ + void Query_cache::free_cache() { DBUG_ENTER("Query_cache::free_cache"); if (query_cache_size > 0) - { flush_cache(); + /* + There may be two free_cache() calls in progress, because we + release 'structure_guard_mutex' in flush_cache(). When the second + flush_cache() wakes up from the wait on 'COND_flush_finished', the + first call to free_cache() has done its job. So we have to test + 'query_cache_size > 0' the second time to see if the cache wasn't + reset by other thread, or if it was reset and was re-enabled then. + If the cache was reset, then we have nothing to do here. + */ + if (query_cache_size > 0) + { #ifndef DBUG_OFF if (bins[0].free_blocks == 0) { @@ -1667,17 +1805,57 @@ void Query_cache::free_cache() Free block data *****************************************************************************/ + /* - The following assumes we have a lock on the cache + flush_cache() - flush the cache. + + SYNOPSIS + flush_cache() + + DESCRIPTION + This function will flush cache contents. It assumes we have + 'structure_guard_mutex' locked. The function sets the + flush_in_progress flag and releases the lock, so other threads may + proceed skipping the cache as if it is disabled. 
Concurrent + flushes are performed in turn. + + After flush_cache() call, the cache is flushed, all the freed + memory is accumulated in bin[0], and the 'structure_guard_mutex' + is locked. However, since we could release the mutex during + execution, the rest of the cache state could have been changed, + and should not be relied on. */ void Query_cache::flush_cache() { + /* + If there is flush in progress, wait for it to finish, and then do + our flush. This is necessary because something could be added to + the cache before we acquire the lock again, and some code (like + Query_cache::free_cache()) depends on the fact that after the + flush the cache is empty. + */ + while (flush_in_progress) + pthread_cond_wait(&COND_flush_finished, &structure_guard_mutex); + + /* + Setting 'flush_in_progress' will prevent other threads from using + the cache while we are in the middle of the flush, and we release + the lock so that other threads won't block. + */ + flush_in_progress= TRUE; + STRUCT_UNLOCK(&structure_guard_mutex); + + my_hash_reset(&queries); while (queries_blocks != 0) { BLOCK_LOCK_WR(queries_blocks); - free_query(queries_blocks); + free_query_internal(queries_blocks); } + + STRUCT_LOCK(&structure_guard_mutex); + flush_in_progress= FALSE; + pthread_cond_signal(&COND_flush_finished); } /* @@ -1723,36 +1901,48 @@ my_bool Query_cache::free_old_query() DBUG_RETURN(1); // Nothing to remove } + /* - Free query from query cache. - query_block must be locked for writing. - This function will remove (and destroy) the lock for the query. + free_query_internal() - free query from query cache. + + SYNOPSIS + free_query_internal() + query_block Query_cache_block representing the query + + DESCRIPTION + This function will remove the query from a cache, and place its + memory blocks to the list of free blocks. 'query_block' must be + locked for writing, this function will release (and destroy) this + lock. 
+ + NOTE + 'query_block' should be removed from 'queries' hash _before_ + calling this method, as the lock will be destroyed here. */ -void Query_cache::free_query(Query_cache_block *query_block) +void Query_cache::free_query_internal(Query_cache_block *query_block) { - DBUG_ENTER("Query_cache::free_query"); + DBUG_ENTER("Query_cache::free_query_internal"); DBUG_PRINT("qcache", ("free query 0x%lx %lu bytes result", (ulong) query_block, query_block->query()->length() )); queries_in_cache--; - hash_delete(&queries,(byte *) query_block); - Query_cache_query *query = query_block->query(); + Query_cache_query *query= query_block->query(); if (query->writer() != 0) { /* Tell MySQL that this query should not be cached anymore */ - query->writer()->query_cache_query = 0; + query->writer()->query_cache_query= 0; query->writer(0); } double_linked_list_exclude(query_block, &queries_blocks); - Query_cache_block_table *table=query_block->table(0); + Query_cache_block_table *table= query_block->table(0); - for (TABLE_COUNTER_TYPE i=0; i < query_block->n_tables; i++) + for (TABLE_COUNTER_TYPE i= 0; i < query_block->n_tables; i++) unlink_table(table++); - Query_cache_block *result_block = query->result(); + Query_cache_block *result_block= query->result(); /* The following is true when query destruction was called and no results @@ -1766,11 +1956,11 @@ void Query_cache::free_query(Query_cache_block *query_block) refused++; inserts--; } - Query_cache_block *block = result_block; + Query_cache_block *block= result_block; do { - Query_cache_block *current = block; - block = block->next; + Query_cache_block *current= block; + block= block->next; free_memory_block(current); } while (block != result_block); } @@ -1787,6 +1977,32 @@ void Query_cache::free_query(Query_cache_block *query_block) DBUG_VOID_RETURN; } + +/* + free_query() - free query from query cache. 
+ + SYNOPSIS + free_query() + query_block Query_cache_block representing the query + + DESCRIPTION + This function will remove 'query_block' from 'queries' hash, and + then call free_query_internal(), which see. +*/ + +void Query_cache::free_query(Query_cache_block *query_block) +{ + DBUG_ENTER("Query_cache::free_query"); + DBUG_PRINT("qcache", ("free query 0x%lx %lu bytes result", + (ulong) query_block, + query_block->query()->length() )); + + hash_delete(&queries,(byte *) query_block); + free_query_internal(query_block); + + DBUG_VOID_RETURN; +} + /***************************************************************************** Query data creation *****************************************************************************/ @@ -1833,7 +2049,7 @@ Query_cache::append_result_data(Query_cache_block **current_block, { DBUG_ENTER("Query_cache::append_result_data"); DBUG_PRINT("qcache", ("append %lu bytes to 0x%lx query", - data_len, query_block)); + data_len, (long) query_block)); if (query_block->query()->add(data_len) > query_cache_limit) { @@ -2057,7 +2273,7 @@ my_bool Query_cache::allocate_data_chain(Query_cache_block **result_block, */ data_len= len - new_block->length; prev_block= new_block; - } while(1); + } while (1); DBUG_RETURN(TRUE); } @@ -2080,7 +2296,7 @@ void Query_cache::invalidate_table(TABLE_LIST *table_list) uint key_length; Query_cache_block *table_block; key_length=(uint) (strmov(strmov(key,table_list->db)+1, - table_list->real_name) -key)+ 1; + table_list->table_name) -key)+ 1; // We don't store temporary tables => no key_length+=4 ... 
if ((table_block = (Query_cache_block*) @@ -2091,7 +2307,7 @@ void Query_cache::invalidate_table(TABLE_LIST *table_list) void Query_cache::invalidate_table(TABLE *table) { - invalidate_table((byte*) table->table_cache_key, table->key_length); + invalidate_table((byte*) table->s->table_cache_key, table->s->key_length); } void Query_cache::invalidate_table(byte * key, uint32 key_length) @@ -2113,6 +2329,107 @@ void Query_cache::invalidate_table(Query_cache_block *table_block) } } + +/* + Register given table list begining with given position in tables table of + block + + SYNOPSIS + Query_cache::register_tables_from_list + tables_used given table list + counter number current position in table of tables of block + block_table pointer to current position in tables table of block + + RETURN + 0 error + number of next position of table entry in table of tables of block +*/ + +TABLE_COUNTER_TYPE +Query_cache::register_tables_from_list(TABLE_LIST *tables_used, + TABLE_COUNTER_TYPE counter, + Query_cache_block_table *block_table) +{ + TABLE_COUNTER_TYPE n; + DBUG_ENTER("Query_cache::register_tables_from_list"); + for (n= counter; + tables_used; + tables_used= tables_used->next_global, n++, block_table++) + { + if (tables_used->derived && !tables_used->view) + { + DBUG_PRINT("qcache", ("derived table skipped")); + n--; + block_table--; + continue; + } + block_table->n= n; + if (tables_used->view) + { + char key[MAX_DBKEY_LENGTH]; + uint key_length; + DBUG_PRINT("qcache", ("view: %s db: %s", + tables_used->view_name.str, + tables_used->view_db.str)); + key_length= (uint) (strmov(strmov(key, tables_used->view_db.str) + 1, + tables_used->view_name.str) - key) + 1; + /* + There are not callback function for for VIEWs + */ + if (!insert_table(key_length, key, block_table, + tables_used->view_db.length + 1, + HA_CACHE_TBL_NONTRANSACT, 0, 0)) + DBUG_RETURN(0); + /* + We do not need to register view tables here because they are already + present in the global list. 
+ */ + } + else + { + DBUG_PRINT("qcache", + ("table %s, db %s, openinfo at 0x%lx, keylen %u, key at 0x%lx", + tables_used->table->s->table_name, + tables_used->table->s->table_cache_key, + (ulong) tables_used->table, + tables_used->table->s->key_length, + (ulong) tables_used->table->s->table_cache_key)); + if (!insert_table(tables_used->table->s->key_length, + tables_used->table->s->table_cache_key, block_table, + tables_used->db_length, + tables_used->table->file->table_cache_type(), + tables_used->callback_func, + tables_used->engine_data)) + DBUG_RETURN(0); + + if (tables_used->table->s->db_type == DB_TYPE_MRG_MYISAM) + { + ha_myisammrg *handler = (ha_myisammrg *) tables_used->table->file; + MYRG_INFO *file = handler->myrg_info(); + for (MYRG_TABLE *table = file->open_tables; + table != file->end_table ; + table++) + { + char key[MAX_DBKEY_LENGTH]; + uint32 db_length; + uint key_length= filename_2_table_key(key, table->table->filename, + &db_length); + (++block_table)->n= ++n; + /* + There are not callback function for for MyISAM, and engine data + */ + if (!insert_table(key_length, key, block_table, + db_length, + tables_used->table->file->table_cache_type(), + 0, 0)) + DBUG_RETURN(0); + } + } + } + } + DBUG_RETURN(n - counter); +} + /* Store all used tables @@ -2134,51 +2451,9 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, Query_cache_block_table *block_table = block->table(0); - for (n=0; tables_used; tables_used=tables_used->next, n++, block_table++) - { - if (tables_used->derived) - { - DBUG_PRINT("qcache", ("derived table skipped")); - n--; - block_table--; - continue; - } - DBUG_PRINT("qcache", - ("table %s, db %s, openinfo at 0x%lx, keylen %u, key at 0x%lx", - tables_used->real_name, tables_used->db, - (ulong) tables_used->table, - tables_used->table->key_length, - (ulong) tables_used->table->table_cache_key)); - block_table->n=n; - if (!insert_table(tables_used->table->key_length, - tables_used->table->table_cache_key, 
block_table, - tables_used->db_length, - tables_used->table->file->table_cache_type())) - break; - - if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM) - { - ha_myisammrg *handler = (ha_myisammrg *) tables_used->table->file; - MYRG_INFO *file = handler->myrg_info(); - for (MYRG_TABLE *table = file->open_tables; - table != file->end_table ; - table++) - { - char key[MAX_DBKEY_LENGTH]; - uint32 db_length; - uint key_length= filename_2_table_key(key, table->table->filename, - &db_length); - (++block_table)->n= ++n; - if (!insert_table(key_length, key, block_table, - db_length, - tables_used->table->file->table_cache_type())) - goto err; - } - } - } + n= register_tables_from_list(tables_used, 0, block_table); -err: - if (tables_used) + if (n) { DBUG_PRINT("qcache", ("failed at table %d", (int) n)); /* Unlink the tables we allocated above */ @@ -2187,7 +2462,7 @@ err: tmp++) unlink_table(tmp); } - return (tables_used == 0); + return (n); } /* @@ -2198,7 +2473,9 @@ err: my_bool Query_cache::insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length, uint8 cache_type) + uint32 db_length, uint8 cache_type, + qc_engine_callback callback, + ulonglong engine_data) { DBUG_ENTER("Query_cache::insert_table"); DBUG_PRINT("qcache", ("insert table node 0x%lx, len %d", @@ -2208,6 +2485,23 @@ Query_cache::insert_table(uint key_len, char *key, hash_search(&tables, (byte*) key, key_len)); + if (table_block && + table_block->table()->engine_data() != engine_data) + { + DBUG_PRINT("qcache", + ("Handler require invalidation queries of %s.%s %lu-%lu", + table_block->table()->db(), + table_block->table()->table(), + (ulong) engine_data, + (ulong) table_block->table()->engine_data())); + /* + as far as we delete all queries with this table, table block will be + deleted, too + */ + invalidate_table(table_block); + table_block= 0; + } + if (table_block == 0) { DBUG_PRINT("qcache", ("new table block from 0x%lx (%u)", @@ -2238,6 +2532,8 @@ 
Query_cache::insert_table(uint key_len, char *key, header->table(db + db_length + 1); header->key_length(key_len); header->type(cache_type); + header->callback(callback); + header->engine_data(engine_data); } Query_cache_block_table *list_root = table_block->table(0); @@ -2290,12 +2586,8 @@ Query_cache::allocate_block(ulong len, my_bool not_less, ulong min, if (!under_guard) { STRUCT_LOCK(&structure_guard_mutex); - /* - It is very unlikely that following condition is TRUE (it is possible - only if other thread is resizing cache), so we check it only after - guard mutex lock - */ - if (unlikely(query_cache.query_cache_size == 0)) + + if (unlikely(query_cache.query_cache_size == 0 || flush_in_progress)) { STRUCT_UNLOCK(&structure_guard_mutex); DBUG_RETURN(0); @@ -2676,66 +2968,101 @@ void Query_cache::double_linked_list_join(Query_cache_block *head_tail, *****************************************************************************/ /* - If query is cacheable return number tables in query - (query without tables are not cached) + Collect information about table types, check that tables are cachable and + count them + + SYNOPSIS + process_and_count_tables() + tables_used table list for processing + tables_type pointer to variable for table types collection + + RETURN + 0 error + >0 number of tables */ -TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len, - char *query, - LEX *lex, - TABLE_LIST *tables_used, - uint8 *tables_type) +static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used, + uint8 *tables_type) { + DBUG_ENTER("process_and_count_tables"); TABLE_COUNTER_TYPE table_count = 0; - DBUG_ENTER("Query_cache::is_cacheable"); - - if (lex->sql_command == SQLCOM_SELECT && - (thd->variables.query_cache_type == 1 || - (thd->variables.query_cache_type == 2 && (lex->select_lex.options & - OPTION_TO_QUERY_CACHE))) && - lex->safe_to_cache_query) + for (; tables_used; tables_used= tables_used->next_global) { - DBUG_PRINT("qcache", 
("options %lx %lx, type %u", - OPTION_TO_QUERY_CACHE, - lex->select_lex.options, - (int) thd->variables.query_cache_type)); - - for (; tables_used; tables_used= tables_used->next) + table_count++; + if (tables_used->view) + { + DBUG_PRINT("qcache", ("view: %s db: %s", + tables_used->view_name.str, + tables_used->view_db.str)); + *tables_type|= HA_CACHE_TBL_NONTRANSACT; + } + else { - table_count++; DBUG_PRINT("qcache", ("table %s, db %s, type %u", - tables_used->real_name, - tables_used->db, tables_used->table->db_type)); + tables_used->table->s->table_name, + tables_used->table->s->table_cache_key, + tables_used->table->s->db_type)); + if (tables_used->derived) + { + table_count--; + DBUG_PRINT("qcache", ("derived table skipped")); + continue; + } *tables_type|= tables_used->table->file->table_cache_type(); /* - table_alias_charset used here because it depends of - lower_case_table_names variable + table_alias_charset used here because it depends of + lower_case_table_names variable */ - if ((tables_used->table->tmp_table != NO_TMP_TABLE && - !tables_used->derived) || - (*tables_type & HA_CACHE_TBL_NOCACHE) || - (tables_used->db_length == 5 && - my_strnncoll(table_alias_charset, (uchar*)tables_used->db, 6, - (uchar*)"mysql",6) == 0)) + if (tables_used->table->s->tmp_table != NO_TMP_TABLE || + (*tables_type & HA_CACHE_TBL_NOCACHE) || + (tables_used->db_length == 5 && + my_strnncoll(table_alias_charset, + (uchar*)tables_used->table->s->table_cache_key, 6, + (uchar*)"mysql",6) == 0)) { - DBUG_PRINT("qcache", - ("select not cacheable: temporary, system or \ -other non-cacheable table(s)")); - DBUG_RETURN(0); - } - if (tables_used->derived) - { - table_count--; - DBUG_PRINT("qcache", ("derived table skipped")); + DBUG_PRINT("qcache", + ("select not cacheable: temporary, system or \ + other non-cacheable table(s)")); + DBUG_RETURN(0); } - else if (tables_used->table->db_type == DB_TYPE_MRG_MYISAM) + if (tables_used->table->s->db_type == DB_TYPE_MRG_MYISAM) { - 
ha_myisammrg *handler = (ha_myisammrg *)tables_used->table->file; - MYRG_INFO *file = handler->myrg_info(); - table_count+= (file->end_table - file->open_tables); + ha_myisammrg *handler = (ha_myisammrg *)tables_used->table->file; + MYRG_INFO *file = handler->myrg_info(); + table_count+= (file->end_table - file->open_tables); } } + } + DBUG_RETURN(table_count); +} + + +/* + If query is cacheable return number tables in query + (query without tables are not cached) +*/ + +TABLE_COUNTER_TYPE +Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex, + TABLE_LIST *tables_used, uint8 *tables_type) +{ + TABLE_COUNTER_TYPE table_count; + DBUG_ENTER("Query_cache::is_cacheable"); + + if (lex->sql_command == SQLCOM_SELECT && + (thd->variables.query_cache_type == 1 || + (thd->variables.query_cache_type == 2 && (lex->select_lex.options & + OPTION_TO_QUERY_CACHE))) && + lex->safe_to_cache_query) + { + DBUG_PRINT("qcache", ("options: %lx %lx type: %u", + (long) OPTION_TO_QUERY_CACHE, + (long) lex->select_lex.options, + (int) thd->variables.query_cache_type)); + + if (!(table_count= process_and_count_tables(tables_used, tables_type))) + DBUG_RETURN(0); if ((thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && ((*tables_type)&HA_CACHE_TBL_TRANSACT)) @@ -2748,10 +3075,10 @@ other non-cacheable table(s)")); } DBUG_PRINT("qcache", - ("not interesting query: %d or not cacheable, options %lx %lx, type %u", + ("not interesting query: %d or not cacheable, options %lx %lx type: %u", (int) lex->sql_command, - OPTION_TO_QUERY_CACHE, - lex->select_lex.options, + (long) OPTION_TO_QUERY_CACHE, + (long) lex->select_lex.options, (int) thd->variables.query_cache_type)); DBUG_RETURN(0); } @@ -2773,12 +3100,16 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, { DBUG_ENTER("Query_cache::ask_handler_allowance"); - for (; tables_used; tables_used= tables_used->next) + for (; tables_used; tables_used= tables_used->next_global) { - TABLE *table= tables_used->table; - if 
(!ha_caching_allowed(thd, table->table_cache_key, - table->key_length, - table->file->table_cache_type())) + TABLE *table; + if (!(table= tables_used->table)) + continue; + handler *handler= table->file; + if (!handler->register_query_cache_table(thd, table->s->table_cache_key, + table->s->key_length, + &tables_used->callback_func, + &tables_used->engine_data)) { DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s", tables_used->db, tables_used->alias)); @@ -2797,13 +3128,10 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, void Query_cache::pack_cache() { DBUG_ENTER("Query_cache::pack_cache"); + STRUCT_LOCK(&structure_guard_mutex); - /* - It is very unlikely that following condition is TRUE (it is possible - only if other thread is resizing cache), so we check it only after - guard mutex lock - */ - if (unlikely(query_cache_size == 0)) + + if (unlikely(query_cache_size == 0 || flush_in_progress)) { STRUCT_UNLOCK(&structure_guard_mutex); DBUG_VOID_RETURN; @@ -3118,7 +3446,7 @@ my_bool Query_cache::join_results(ulong join_limit) DBUG_ENTER("Query_cache::join_results"); STRUCT_LOCK(&structure_guard_mutex); - if (queries_blocks != 0) + if (queries_blocks != 0 && !flush_in_progress) { DBUG_ASSERT(query_cache_size > 0); Query_cache_block *block = queries_blocks; @@ -3213,7 +3541,7 @@ uint Query_cache::filename_2_table_key (char *key, const char *path, #if defined(DBUG_OFF) && !defined(USE_QUERY_CACHE_INTEGRITY_CHECK) -void wreck(uint line, const char *message) {} +void wreck(uint line, const char *message) { query_cache_size = 0; } void bins_dump() {} void cache_dump() {} void queries_dump() {} @@ -3225,6 +3553,17 @@ my_bool in_blocks(Query_cache_block * point) { return 0; } #else + +/* + Debug method which switch query cache off but left content for + investigation. 
+ + SYNOPSIS + Query_cache::wreck() + line line of the wreck() call + message message for logging +*/ + void Query_cache::wreck(uint line, const char *message) { THD *thd=current_thd; @@ -3236,7 +3575,7 @@ void Query_cache::wreck(uint line, const char *message) DBUG_PRINT("warning", ("%5d QUERY CACHE WRECK => DISABLED",line)); DBUG_PRINT("warning", ("==================================")); if (thd) - thd->killed = 1; + thd->killed= THD::KILL_CONNECTION; cache_dump(); /* check_integrity(0); */ /* Can't call it here because of locks */ bins_dump(); @@ -3339,7 +3678,8 @@ void Query_cache::queries_dump() DBUG_PRINT("qcache", ("F:%u C:%u L:%lu T:'%s' (%u) '%s' '%s'", flags.client_long_flag, flags.character_set_client_num, - (ulong)flags.limit, flags.time_zone->get_name(), + (ulong)flags.limit, + flags.time_zone->get_name()->ptr(), len, str, strend(str)+1)); DBUG_PRINT("qcache", ("-b- 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx", (ulong) block, (ulong) block->next, (ulong) block->prev, @@ -3405,31 +3745,23 @@ void Query_cache::tables_dump() } -my_bool Query_cache::check_integrity(bool not_locked) +my_bool Query_cache::check_integrity(bool locked) { my_bool result = 0; uint i; DBUG_ENTER("check_integrity"); - if (query_cache_size == 0) + if (!locked) + STRUCT_LOCK(&structure_guard_mutex); + + if (unlikely(query_cache_size == 0 || flush_in_progress)) { + if (!locked) + STRUCT_UNLOCK(&query_cache.structure_guard_mutex); + DBUG_PRINT("qcache", ("Query Cache not initialized")); DBUG_RETURN(0); } - if (!not_locked) - { - STRUCT_LOCK(&structure_guard_mutex); - /* - It is very unlikely that following condition is TRUE (it is possible - only if other thread is resizing cache), so we check it only after - guard mutex lock - */ - if (unlikely(query_cache_size == 0)) - { - STRUCT_UNLOCK(&query_cache.structure_guard_mutex); - DBUG_RETURN(0); - } - } if (hash_check(&queries)) { @@ -3455,8 +3787,8 @@ my_bool Query_cache::check_integrity(bool not_locked) (((long)first_block) % (long)ALIGN_SIZE(1))) { 
DBUG_PRINT("error", - ("block 0x%lx do not aligned by %d", (ulong) block, - ALIGN_SIZE(1))); + ("block 0x%lx do not aligned by %d", (long) block, + (int) ALIGN_SIZE(1))); result = 1; } // Check memory allocation @@ -3567,9 +3899,8 @@ my_bool Query_cache::check_integrity(bool not_locked) break; } default: - DBUG_PRINT("error", - ("block 0x%lx have incorrect type %u", - block, block->type)); + DBUG_PRINT("error", ("block 0x%lx have incorrect type %u", + (long) block, block->type)); result = 1; } @@ -3667,14 +3998,14 @@ my_bool Query_cache::check_integrity(bool not_locked) } while (block != bins[i].free_blocks); if (count != bins[i].number) { - DBUG_PRINT("error", ("bin[%d].number is %d, but bin have %d blocks", - bins[i].number, count)); + DBUG_PRINT("error", ("bins[%d].number = %d, but bin have %d blocks", + i, bins[i].number, count)); result = 1; } } } DBUG_ASSERT(result == 0); - if (!not_locked) + if (!locked) STRUCT_UNLOCK(&structure_guard_mutex); DBUG_RETURN(result); } diff --git a/sql/sql_cache.h b/sql/sql_cache.h index b0a045a8aad..bc00f7ea629 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2001-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -69,6 +68,7 @@ class Query_cache; struct Query_cache_block_table { + Query_cache_block_table() {} /* Remove gcc warning */ TABLE_COUNTER_TYPE n; // numbr in table (from 0) Query_cache_block_table *next, *prev; Query_cache_table *parent; @@ -78,6 +78,7 @@ struct Query_cache_block_table struct Query_cache_block { + Query_cache_block() {} /* Remove gcc warning */ enum block_type {FREE, QUERY, RESULT, RES_CONT, RES_BEG, RES_INCOMPLETE, TABLE, INCOMPLETE}; @@ -112,6 +113,7 @@ struct Query_cache_query NET *wri; ulong len; uint8 tbls_type; + unsigned int last_pkt_nr; inline void init_n_lock(); void unlock_n_destroy(); @@ -125,7 +127,7 @@ struct Query_cache_query inline void tables_type(uint8 type) { tbls_type= type; } inline ulong length() { return len; } inline ulong add(ulong packet_len) { return(len+= packet_len); } - inline void length(ulong length) { len= length; } + inline void length(ulong length_arg) { len= length_arg; } inline gptr query() { return (gptr)(((byte*)this)+ @@ -142,17 +144,26 @@ struct Query_cache_query struct Query_cache_table { + Query_cache_table() {} /* Remove gcc warning */ char *tbl; uint32 key_len; uint8 table_type; + /* unique for every engine reference */ + qc_engine_callback callback_func; + /* data need by some engines */ + ulonglong engine_data_buff; inline char *db() { return (char *) data(); } inline char *table() { return tbl; } - inline void table(char *table) { tbl= table; } + inline void table(char *table_arg) { tbl= table_arg; } inline uint32 key_length() { return key_len; } inline void key_length(uint32 len) { key_len= len; } inline uint8 type() { return table_type; } inline void type(uint8 t) { table_type= t; } + inline qc_engine_callback callback() { return callback_func; } + inline void callback(qc_engine_callback fn){ callback_func= fn; } + inline ulonglong engine_data() { return engine_data_buff; } 
+ inline void engine_data(ulonglong data_arg){ engine_data_buff= data_arg; } inline gptr data() { return (gptr)(((byte*)this)+ @@ -162,6 +173,7 @@ struct Query_cache_table struct Query_cache_result { + Query_cache_result() {} /* Remove gcc warning */ Query_cache_block *query; inline gptr data() @@ -182,12 +194,12 @@ extern "C" byte *query_cache_table_get_key(const byte *record, uint *length, my_bool not_used); } -void query_cache_insert(NET *thd, const char *packet, ulong length); extern "C" void query_cache_invalidate_by_MyISAM_filename(const char* filename); struct Query_cache_memory_bin { + Query_cache_memory_bin() {} /* Remove gcc warning */ #ifndef DBUG_OFF ulong size; #endif @@ -206,6 +218,7 @@ struct Query_cache_memory_bin struct Query_cache_memory_bin_step { + Query_cache_memory_bin_step() {} /* Remove gcc warning */ ulong size; ulong increment; uint idx; @@ -226,6 +239,12 @@ public: ulong free_memory, queries_in_cache, hits, inserts, refused, free_memory_blocks, total_blocks, lowmem_prunes; +private: + pthread_cond_t COND_flush_finished; + bool flush_in_progress; + + void free_query_internal(Query_cache_block *point); + protected: /* The following mutex is locked when searching or changing global @@ -234,6 +253,12 @@ protected: LOCK SEQUENCE (to prevent deadlocks): 1. structure_guard_mutex 2. query block (for operation inside query (query block/results)) + + Thread doing cache flush releases the mutex once it sets + flush_in_progress flag, so other threads may bypass the cache as + if it is disabled, not waiting for reset to finish. The exception + is other threads that were going to do cache flush---they'll wait + till the end of a flush operation. 
*/ pthread_mutex_t structure_guard_mutex; byte *cache; // cache memory @@ -276,12 +301,18 @@ protected: void invalidate_table(TABLE *table); void invalidate_table(byte *key, uint32 key_length); void invalidate_table(Query_cache_block *table_block); + TABLE_COUNTER_TYPE + register_tables_from_list(TABLE_LIST *tables_used, + TABLE_COUNTER_TYPE counter, + Query_cache_block_table *block_table); my_bool register_all_tables(Query_cache_block *block, TABLE_LIST *tables_used, TABLE_COUNTER_TYPE tables); my_bool insert_table(uint key_len, char *key, Query_cache_block_table *node, - uint32 db_length, uint8 cache_type); + uint32 db_length, uint8 cache_type, + qc_engine_callback callback, + ulonglong engine_data); void unlink_table(Query_cache_block_table *node); Query_cache_block *get_free_block (ulong len, my_bool not_less, ulong min); @@ -337,6 +368,7 @@ protected: If query is cacheable return number tables in query (query without tables not cached) */ + static TABLE_COUNTER_TYPE is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex, TABLE_LIST *tables_used, uint8 *tables_type); @@ -389,13 +421,14 @@ protected: void destroy(); + friend void query_cache_init_query(NET *net); friend void query_cache_insert(NET *net, const char *packet, ulong length); friend void query_cache_end_of_result(THD *thd); friend void query_cache_abort(NET *net); /* The following functions are only used when debugging - We don't protect these with ifndef DEBUG_OFF to not have to recompile + We don't protect these with ifndef DBUG_OFF to not have to recompile everything if we want to add checks of the cache at some places. 
*/ void wreck(uint line, const char *message); @@ -414,6 +447,8 @@ protected: extern Query_cache query_cache; extern TYPELIB query_cache_type_typelib; +void query_cache_init_query(NET *net); +void query_cache_insert(NET *net, const char *packet, ulong length); void query_cache_end_of_result(THD *thd); void query_cache_abort(NET *net); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index b187d29021a..8dea9383f34 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -35,18 +34,24 @@ #endif #include <mysys_err.h> +#include "sp_rcontext.h" +#include "sp_cache.h" + /* The following is used to initialise Table_ident with a internal table name */ char internal_table_name[2]= "*"; +char empty_c_string[1]= {0}; /* used for not defined db */ + +const char * const THD::DEFAULT_WHERE= "field list"; /***************************************************************************** ** Instansiate templates *****************************************************************************/ -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION /* Used templates */ template class List<Key>; template class List_iterator<Key>; @@ -154,26 +159,48 @@ bool foreign_key_prefix(Key *a, Key *b) ** Thread specific functions ****************************************************************************/ -THD::THD() - :user_time(0), global_read_lock(0), is_fatal_error(0), - last_insert_id_used(0), - insert_id_used(0), rand_used(0), 
time_zone_used(0), - in_lock_tables(0), bootstrap(0) +Open_tables_state::Open_tables_state(ulong version_arg) + :version(version_arg) { - current_arena= this; - host= user= priv_user= db= ip=0; - host_or_ip= "connecting host"; + reset_open_tables_state(); +} + + + +THD::THD() + :Statement(&main_lex, &main_mem_root, CONVENTIONAL_EXECUTION, + /* statement id */ 0), + Open_tables_state(refresh_version), + lock_id(&main_lock_id), + user_time(0), in_sub_stmt(0), global_read_lock(0), is_fatal_error(0), + rand_used(0), time_zone_used(0), + last_insert_id_used(0), last_insert_id_used_bin_log(0), insert_id_used(0), + clear_next_insert_id(0), in_lock_tables(0), bootstrap(0), + derived_tables_processing(FALSE), spcont(NULL) +{ + ulong tmp; + + /* + Pass nominal parameters to init_alloc_root only to ensure that + the destructor works OK in case of an error. The main_mem_root + will be re-initialized in init_for_queries(). + */ + init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0); + stmt_arena= this; + thread_stack= 0; + db= 0; + catalog= (char*)"std"; // the only catalog we have for now + main_security_ctx.init(); + security_ctx= &main_security_ctx; locked=some_tables_deleted=no_errors=password= 0; - killed=0; query_start_used= 0; count_cuted_fields= CHECK_FIELD_IGNORE; - db_length= col_access= 0; + killed= NOT_KILLED; + db_length= col_access=0; query_error= tmp_table_used= 0; next_insert_id=last_insert_id=0; - open_tables= temporary_tables= handler_tables= derived_tables= 0; hash_clear(&handler_tables_hash); tmp_table=0; - lock=locked_tables=0; used_tables=0; cuted_fields= sent_row_count= 0L; limit_found_rows= 0; @@ -190,53 +217,54 @@ THD::THD() query_id= 0; warn_id= 0; db_charset= global_system_variables.collation_database; + bzero(ha_data, sizeof(ha_data)); mysys_var=0; + binlog_evt_union.do_union= FALSE; #ifndef DBUG_OFF dbug_sentry=THD_SENTRY_MAGIC; #endif -#ifndef EMBEDDED_LIBRARY +#ifndef EMBEDDED_LIBRARY net.vio=0; #endif - net.last_error[0]=0; // If error on 
boot client_capabilities= 0; // minimalistic client + net.last_error[0]=0; // If error on boot +#ifdef HAVE_QUERY_CACHE + query_cache_init_query(&net); // If error on boot +#endif ull=0; - system_thread=cleanup_done=0; + system_thread= cleanup_done= abort_on_warning= no_warnings_for_error= 0; peer_port= 0; // For SHOW PROCESSLIST - transaction.changed_tables = 0; #ifdef __WIN__ real_id = 0; #endif #ifdef SIGNAL_WITH_VIO_CLOSE active_vio = 0; -#endif +#endif pthread_mutex_init(&LOCK_delete, MY_MUTEX_INIT_FAST); /* Variables with default values */ proc_info="login"; - where="field list"; + where= THD::DEFAULT_WHERE; server_id = ::server_id; slave_net = 0; command=COM_CONNECT; -#ifndef NO_EMBEDDED_ACCESS_CHECKS - db_access=NO_ACCESS; -#endif - version=refresh_version; // For boot *scramble= '\0'; init(); /* Initialize sub structures */ init_sql_alloc(&warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE); user_connect=(USER_CONN *)0; - hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0, + hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0, (hash_get_key) get_var_key, - (hash_free_key) free_user_var,0); + (hash_free_key) free_user_var, 0); + + sp_proc_cache= NULL; + sp_func_cache= NULL; /* For user vars replication*/ if (opt_bin_log) my_init_dynamic_array(&user_var_events, - sizeof(BINLOG_USER_VAR_EVENT *), - 16, - 16); + sizeof(BINLOG_USER_VAR_EVENT *), 16, 16); else bzero((char*) &user_var_events, sizeof(user_var_events)); @@ -246,27 +274,43 @@ THD::THD() protocol_prep.init(this); tablespace_op=FALSE; -#ifdef USING_TRANSACTIONS - bzero((char*) &transaction,sizeof(transaction)); + tmp= sql_rnd_with_mutex(); + randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::global_query_id); + substitute_null_with_insert_id = FALSE; + thr_lock_info_init(&lock_info); /* safety: will be reset after start */ + thr_lock_owner_init(&main_lock_id, &lock_info); + + m_internal_handler= NULL; +} + + +void 
THD::push_internal_handler(Internal_error_handler *handler) +{ /* - Binlog is always open (if needed) before a THD is created (including - bootstrap). + TODO: The current implementation is limited to 1 handler at a time only. + THD and sp_rcontext need to be modified to use a common handler stack. */ - if (opt_using_transactions && mysql_bin_log.is_open()) - { - if (open_cached_file(&transaction.trans_log, - mysql_tmpdir, LOG_PREFIX, binlog_cache_size, - MYF(MY_WME))) - killed=1; - transaction.trans_log.end_of_file= max_binlog_cache_size; - } -#endif - init_sql_alloc(&transaction.mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0); + DBUG_ASSERT(m_internal_handler == NULL); + m_internal_handler= handler; +} + + +bool THD::handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level level) +{ + if (m_internal_handler) { - ulong tmp=sql_rnd_with_mutex(); - randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::query_id); + return m_internal_handler->handle_error(sql_errno, level, this); } - substitute_null_with_insert_id = FALSE; + + return FALSE; // 'FALSE', as per coding style +} + + +void THD::pop_internal_handler() +{ + DBUG_ASSERT(m_internal_handler != NULL); + m_internal_handler= NULL; } @@ -284,11 +328,10 @@ void THD::init(void) variables.date_format); variables.datetime_format= date_time_format_copy((THD*) 0, variables.datetime_format); -#ifdef HAVE_NDBCLUSTER_DB - variables.ndb_use_transactions= 1; -#endif pthread_mutex_unlock(&LOCK_global_system_variables); server_status= SERVER_STATUS_AUTOCOMMIT; + if (variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) + server_status|= SERVER_STATUS_NO_BACKSLASH_ESCAPES; options= thd_startup_options; open_options=ha_open_options; update_lock_default= (variables.low_priority_updates ? 
@@ -299,6 +342,7 @@ void THD::init(void) bzero((char*) warn_count, sizeof(warn_count)); total_warn_count= 0; update_charset(); + bzero((char *) &status_var, sizeof(status_var)); variables.lc_time_names = &my_locale_en_US; } @@ -311,13 +355,18 @@ void THD::init(void) void THD::init_for_queries() { + set_time(); ha_enable_transaction(this,TRUE); reset_root_defaults(mem_root, variables.query_alloc_block_size, variables.query_prealloc_size); +#ifdef USING_TRANSACTIONS reset_root_defaults(&transaction.mem_root, variables.trans_alloc_block_size, variables.trans_prealloc_size); +#endif + transaction.xid_state.xid.null(); + transaction.xid_state.in_thd=1; } @@ -338,9 +387,11 @@ void THD::change_user(void) cleanup_done= 0; init(); stmt_map.reset(); - hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0, + hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0, (hash_get_key) get_var_key, (hash_free_key) free_user_var, 0); + sp_cache_clear(&sp_proc_cache); + sp_cache_clear(&sp_func_cache); } @@ -349,7 +400,16 @@ void THD::change_user(void) void THD::cleanup(void) { DBUG_ENTER("THD::cleanup"); - ha_rollback(this); +#ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE + if (transaction.xid_state.xa_state == XA_PREPARED) + { +#error xid_state in the cache should be replaced by the allocated value + } +#endif + { + ha_rollback(this); + xid_cache_delete(&transaction.xid_state); + } if (locked_tables) { lock=locked_tables; locked_tables=0; @@ -364,6 +424,10 @@ void THD::cleanup(void) my_free((char*) variables.time_format, MYF(MY_ALLOW_ZERO_PTR)); my_free((char*) variables.date_format, MYF(MY_ALLOW_ZERO_PTR)); my_free((char*) variables.datetime_format, MYF(MY_ALLOW_ZERO_PTR)); + + sp_cache_clear(&sp_proc_cache); + sp_cache_clear(&sp_func_cache); + if (global_read_lock) unlock_global_read_lock(this); if (ull) @@ -373,6 +437,7 @@ void THD::cleanup(void) pthread_mutex_unlock(&LOCK_user_locks); ull= 0; } + cleanup_done=1; DBUG_VOID_RETURN; } @@ -385,57 +450,79 @@ 
THD::~THD() /* Ensure that no one is using THD */ pthread_mutex_lock(&LOCK_delete); pthread_mutex_unlock(&LOCK_delete); + add_to_status(&global_status_var, &status_var); /* Close connection */ -#ifndef EMBEDDED_LIBRARY +#ifndef EMBEDDED_LIBRARY if (net.vio) { vio_delete(net.vio); - net_end(&net); + net_end(&net); } #endif + stmt_map.reset(); /* close all prepared statements */ + DBUG_ASSERT(lock_info.n_cursors == 0); if (!cleanup_done) cleanup(); -#ifdef USING_TRANSACTIONS - if (opt_using_transactions) - { - close_cached_file(&transaction.trans_log); - ha_close_connection(this); - } -#endif - DBUG_PRINT("info", ("freeing host")); - if (host != my_localhost) // If not pointer to constant - safeFree(host); - if (user != delayed_user) - safeFree(user); - safeFree(ip); + ha_close_connection(this); + + DBUG_PRINT("info", ("freeing security context")); + main_security_ctx.destroy(); safeFree(db); free_root(&warn_root,MYF(0)); +#ifdef USING_TRANSACTIONS free_root(&transaction.mem_root,MYF(0)); +#endif mysys_var=0; // Safety (shouldn't be needed) pthread_mutex_destroy(&LOCK_delete); #ifndef DBUG_OFF - dbug_sentry = THD_SENTRY_GONE; + dbug_sentry= THD_SENTRY_GONE; #endif - /* Reset stmt_backup.mem_root to not double-free memory from thd.mem_root */ - clear_alloc_root(&stmt_backup.main_mem_root); + free_root(&main_mem_root, MYF(0)); DBUG_VOID_RETURN; } -void THD::awake(bool prepare_to_die) +/* + Add all status variables to another status variable array + + SYNOPSIS + add_to_status() + to_var add to this array + from_var from this array + + NOTES + This function assumes that all variables are long/ulong. 
+ If this assumption will change, then we have to explictely add + the other variables after the while loop +*/ + +void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var) +{ + ulong *end= (ulong*) ((byte*) to_var + + offsetof(STATUS_VAR, last_system_status_var) + + sizeof(ulong)); + ulong *to= (ulong*) to_var, *from= (ulong*) from_var; + + while (to != end) + *(to++)+= *(from++); +} + + +void THD::awake(THD::killed_state state_to_set) { THD_CHECK_SENTRY(this); safe_mutex_assert_owner(&LOCK_delete); - thr_alarm_kill(real_id); - if (prepare_to_die) - killed = 1; + killed= state_to_set; + if (state_to_set != THD::KILL_QUERY) + { + thr_alarm_kill(real_id); #ifdef SIGNAL_WITH_VIO_CLOSE - else close_active_vio(); #endif + } if (mysys_var) { pthread_mutex_lock(&mysys_var->mutex); @@ -477,6 +564,12 @@ void THD::awake(bool prepare_to_die) bool THD::store_globals() { + /* + Assert that thread_stack is initialized: it's necessary to be able + to track stack overrun. + */ + DBUG_ASSERT(this->thread_stack); + if (my_pthread_setspecific_ptr(THR_THD, this) || my_pthread_setspecific_ptr(THR_MALLOC, &mem_root)) return 1; @@ -487,11 +580,47 @@ bool THD::store_globals() if this is the slave SQL thread. */ variables.pseudo_thread_id= thread_id; + /* + We have to call thr_lock_info_init() again here as THD may have been + created in another thread + */ + thr_lock_info_init(&lock_info); return 0; } /* + Cleanup after query. + + SYNOPSIS + THD::cleanup_after_query() + + DESCRIPTION + This function is used to reset thread data to its default state. + + NOTE + This function is not suitable for setting thread data to some + non-default values, as there is only one replication thread, so + different master threads may overwrite data of each other on + slave. 
+*/ + +void THD::cleanup_after_query() +{ + last_insert_id_used= FALSE; + if (clear_next_insert_id) + { + clear_next_insert_id= 0; + next_insert_id= 0; + } + /* Free Items that were created during this execution */ + free_items(); + /* Reset where. */ + where= THD::DEFAULT_WHERE; +} + + +/* Convert a string to another character set SYNOPSIS @@ -574,6 +703,9 @@ void THD::update_charset() charset_is_collation_connection= !String::needs_conversion(0,charset(),variables.collation_connection, ¬_used); + charset_is_character_set_filesystem= + !String::needs_conversion(0, charset(), + variables.character_set_filesystem, ¬_used); } @@ -598,7 +730,7 @@ void THD::add_changed_table(TABLE *table) DBUG_ASSERT((options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && table->file->has_transactions()); - add_changed_table(table->table_cache_key, table->key_length); + add_changed_table(table->s->table_cache_key, table->s->key_length); DBUG_VOID_RETURN; } @@ -616,7 +748,7 @@ void THD::add_changed_table(const char *key, long key_length) { list_include(prev_changed, curr, changed_table_dup(key, key_length)); DBUG_PRINT("info", - ("key_length %u %u", key_length, (*prev_changed)->key_length)); + ("key_length %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } else if (cmp == 0) @@ -626,7 +758,7 @@ void THD::add_changed_table(const char *key, long key_length) { list_include(prev_changed, curr, changed_table_dup(key, key_length)); DBUG_PRINT("info", - ("key_length %u %u", key_length, + ("key_length %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } @@ -638,7 +770,7 @@ void THD::add_changed_table(const char *key, long key_length) } } *prev_changed = changed_table_dup(key, key_length); - DBUG_PRINT("info", ("key_length %u %u", key_length, + DBUG_PRINT("info", ("key_length %ld %u", key_length, (*prev_changed)->key_length)); DBUG_VOID_RETURN; } @@ -652,8 +784,8 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length) if (!new_table) 
{ my_error(EE_OUTOFMEMORY, MYF(ME_BELL), - ALIGN_SIZE(sizeof(TABLE_LIST)) + key_length + 1); - killed= 1; + ALIGN_SIZE(sizeof(TABLE_LIST)) + key_length + 1); + killed= KILL_CONNECTION; return 0; } @@ -682,8 +814,8 @@ int THD::send_explain_fields(select_result *result) item->maybe_null=1; field_list.push_back(item=new Item_empty_string("key", NAME_LEN, cs)); item->maybe_null=1; - field_list.push_back(item=new Item_return_int("key_len",3, - MYSQL_TYPE_LONGLONG)); + field_list.push_back(item=new Item_empty_string("key_len", + NAME_LEN*MAX_KEY)); item->maybe_null=1; field_list.push_back(item=new Item_empty_string("ref", NAME_LEN*MAX_REF_PARTS, cs)); @@ -692,7 +824,8 @@ int THD::send_explain_fields(select_result *result) MYSQL_TYPE_LONGLONG)); item->maybe_null= 1; field_list.push_back(new Item_empty_string("Extra", 255, cs)); - return (result->send_fields(field_list,1)); + return (result->send_fields(field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)); } #ifdef SIGNAL_WITH_VIO_CLOSE @@ -726,7 +859,7 @@ struct Item_change_record: public ilink /* Register an item tree tree transformation, performed by the query optimizer. We need a pointer to runtime_memroot because it may be != - thd->mem_root (due to possible set_n_backup_item_arena called for thd). + thd->mem_root (due to possible set_n_backup_active_arena called for thd). */ void THD::nocheck_register_item_tree_change(Item **place, Item *old_value, @@ -741,7 +874,10 @@ void THD::nocheck_register_item_tree_change(Item **place, Item *old_value, void *change_mem= alloc_root(runtime_memroot, sizeof(*change)); if (change_mem == 0) { - fatal_error(); + /* + OOM, thd->fatal_error() is called by the error handler of the + memroot. Just return. 
+ */ return; } change= new (change_mem) Item_change_record; @@ -755,10 +891,13 @@ void THD::rollback_item_tree_changes() { I_List_iterator<Item_change_record> it(change_list); Item_change_record *change; + DBUG_ENTER("rollback_item_tree_changes"); + while ((change= it++)) *change->place= change->old_value; /* We can forget about changes memory: it's allocated in runtime memroot */ change_list.empty(); + DBUG_VOID_RETURN; } @@ -773,7 +912,7 @@ select_result::select_result() void select_result::send_error(uint errcode,const char *err) { - ::send_error(thd, errcode, err); + my_message(errcode, err, MYF(0)); } @@ -782,6 +921,13 @@ void select_result::cleanup() /* do nothing */ } +bool select_result::check_simple_select() const +{ + my_error(ER_SP_BAD_CURSOR_QUERY, MYF(0)); + return TRUE; +} + + static String default_line_term("\n",default_charset_info); static String default_escaped("\\",default_charset_info); static String default_field_term("\t",default_charset_info); @@ -793,13 +939,39 @@ sql_exchange::sql_exchange(char *name,bool flag) enclosed= line_start= &my_empty_string; line_term= &default_line_term; escaped= &default_escaped; + cs= NULL; } -bool select_send::send_fields(List<Item> &list,uint flag) +bool select_send::send_fields(List<Item> &list, uint flags) { - return thd->protocol->send_fields(&list,flag); + bool res; + if (!(res= thd->protocol->send_fields(&list, flags))) + status= 1; + return res; +} + +void select_send::abort() +{ + DBUG_ENTER("select_send::abort"); + if (status && thd->spcont && + thd->spcont->find_handler(thd->net.last_errno, + MYSQL_ERROR::WARN_LEVEL_ERROR)) + { + /* + Executing stored procedure without a handler. 
+ Here we should actually send an error to the client, + but as an error will break a multiple result set, the only thing we + can do for now is to nicely end the current data set and remembering + the error so that the calling routine will abort + */ + thd->net.report_error= 0; + send_eof(); + thd->net.report_error= 1; // Abort SP + } + DBUG_VOID_RETURN; } + /* Send data to client. Returns 0 if ok */ bool select_send::send_data(List<Item> &items) @@ -810,15 +982,12 @@ bool select_send::send_data(List<Item> &items) return 0; } -#ifdef HAVE_INNOBASE_DB /* We may be passing the control from mysqld to the client: release the InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved by thd */ - if (thd->transaction.all.innobase_tid) ha_release_temporary_latches(thd); -#endif List_iterator_fast<Item> li(items); Protocol *protocol= thd->protocol; @@ -842,18 +1011,16 @@ bool select_send::send_data(List<Item> &items) DBUG_RETURN(0); if (!thd->net.report_error) DBUG_RETURN(protocol->write()); + protocol->remove_last_row(); DBUG_RETURN(1); } bool select_send::send_eof() { -#ifdef HAVE_INNOBASE_DB /* We may be passing the control from mysqld to the client: release the InnoDB adaptive hash S-latch to avoid thread deadlocks if it was reserved by thd */ - if (thd->transaction.all.innobase_tid) ha_release_temporary_latches(thd); -#endif /* Unlock tables before sending packet to gain some speed */ if (thd->lock) @@ -864,6 +1031,7 @@ bool select_send::send_eof() if (!thd->net.report_error) { ::send_eof(thd); + status= 0; return 0; } else @@ -877,7 +1045,7 @@ bool select_send::send_eof() void select_to_file::send_error(uint errcode,const char *err) { - ::send_error(thd,errcode,err); + my_message(errcode, err, MYF(0)); if (file > 0) { (void) end_io_cache(&cache); @@ -954,7 +1122,7 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange, IO_CACHE *cache) { File file; - uint option= MY_UNPACK_FILENAME; + uint option= MY_UNPACK_FILENAME | 
MY_RELATIVE_PATH; #ifdef DONT_ALLOW_FULL_LOAD_DATA_PATHS option|= MY_REPLACE_DIR; // Force use of db directory @@ -967,7 +1135,15 @@ static File create_file(THD *thd, char *path, sql_exchange *exchange, } else (void) fn_format(path, exchange->file_name, mysql_real_data_home, "", option); - + + if (opt_secure_file_priv && + strncmp(opt_secure_file_priv, path, strlen(opt_secure_file_priv))) + { + /* Write only allowed to dir or subdir specified by secure_file_priv */ + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv"); + return -1; + } + if (!access(path, F_OK)) { my_error(ER_FILE_EXISTS_ERROR, MYF(0), exchange->file_name); @@ -1199,7 +1375,7 @@ bool select_dump::send_data(List<Item> &items) } if (row_count++ > 1) { - my_error(ER_TOO_MANY_ROWS, MYF(0)); + my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0)); goto err; } while ((item=li++)) @@ -1212,7 +1388,7 @@ bool select_dump::send_data(List<Item> &items) } else if (my_b_write(&cache,(byte*) res->ptr(),res->length())) { - my_error(ER_ERROR_ON_WRITE,MYF(0), path, my_errno); + my_error(ER_ERROR_ON_WRITE, MYF(0), path, my_errno); goto err; } } @@ -1288,6 +1464,9 @@ bool select_max_min_finder_subselect::send_data(List<Item> &items) case STRING_RESULT: op= &select_max_min_finder_subselect::cmp_str; break; + case DECIMAL_RESULT: + op= &select_max_min_finder_subselect::cmp_decimal; + break; case ROW_RESULT: // This case should never be choosen DBUG_ASSERT(0); @@ -1303,36 +1482,48 @@ bool select_max_min_finder_subselect::send_data(List<Item> &items) bool select_max_min_finder_subselect::cmp_real() { - Item *maxmin= ((Item_singlerow_subselect *)item)->el(0); - double val1= cache->val(), val2= maxmin->val(); + Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0); + double val1= cache->val_real(), val2= maxmin->val_real(); if (fmax) return (cache->null_value && !maxmin->null_value) || (!cache->null_value && !maxmin->null_value && val1 > val2); - else - return (maxmin->null_value && 
!cache->null_value) || - (!cache->null_value && !maxmin->null_value && - val1 < val2); + return (maxmin->null_value && !cache->null_value) || + (!cache->null_value && !maxmin->null_value && + val1 < val2); } bool select_max_min_finder_subselect::cmp_int() { - Item *maxmin= ((Item_singlerow_subselect *)item)->el(0); + Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0); longlong val1= cache->val_int(), val2= maxmin->val_int(); if (fmax) return (cache->null_value && !maxmin->null_value) || (!cache->null_value && !maxmin->null_value && val1 > val2); - else - return (maxmin->null_value && !cache->null_value) || + return (maxmin->null_value && !cache->null_value) || + (!cache->null_value && !maxmin->null_value && + val1 < val2); +} + +bool select_max_min_finder_subselect::cmp_decimal() +{ + Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0); + my_decimal cval, *cvalue= cache->val_decimal(&cval); + my_decimal mval, *mvalue= maxmin->val_decimal(&mval); + if (fmax) + return (cache->null_value && !maxmin->null_value) || (!cache->null_value && !maxmin->null_value && - val1 < val2); + my_decimal_cmp(cvalue, mvalue) > 0) ; + return (maxmin->null_value && !cache->null_value) || + (!cache->null_value && !maxmin->null_value && + my_decimal_cmp(cvalue,mvalue) < 0); } bool select_max_min_finder_subselect::cmp_str() { String *val1, *val2, buf1, buf2; - Item *maxmin= ((Item_singlerow_subselect *)item)->el(0); + Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0); /* as far as both operand is Item_cache buf1 & buf2 will not be used, but added for safety @@ -1343,10 +1534,9 @@ bool select_max_min_finder_subselect::cmp_str() return (cache->null_value && !maxmin->null_value) || (!cache->null_value && !maxmin->null_value && sortcmp(val1, val2, cache->collation.collation) > 0) ; - else - return (maxmin->null_value && !cache->null_value) || - (!cache->null_value && !maxmin->null_value && - sortcmp(val1, val2, cache->collation.collation) < 0); + 
return (maxmin->null_value && !cache->null_value) || + (!cache->null_value && !maxmin->null_value && + sortcmp(val1, val2, cache->collation.collation) < 0); } bool select_exists_subselect::send_data(List<Item> &items) @@ -1374,107 +1564,81 @@ int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u) if (var_list.elements != list.elements) { - my_error(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT, MYF(0)); + my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT, + ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT), MYF(0)); return 1; - } + } return 0; } -void select_dumpvar::cleanup() +bool select_dumpvar::check_simple_select() const { - row_count= 0; + my_error(ER_SP_BAD_CURSOR_SELECT, MYF(0)); + return TRUE; } -/* - Create arena for already constructed THD. +void select_dumpvar::cleanup() +{ + row_count= 0; +} - SYNOPSYS - Item_arena() - thd - thread for which arena is created - DESCRIPTION - Create arena for already existing THD using its variables as parameters - for memory root initialization. -*/ -Item_arena::Item_arena(THD* thd) - :free_list(0), mem_root(&main_mem_root), - state(INITIALIZED) +Query_arena::Type Query_arena::type() const { - init_sql_alloc(&main_mem_root, - thd->variables.query_alloc_block_size, - thd->variables.query_prealloc_size); + DBUG_ASSERT(0); /* Should never be called */ + return STATEMENT; } -/* - Create arena and optionally initialize memory root. - - SYNOPSYS - Item_arena() - init_mem_root - whenever we need to initialize memory root +void Query_arena::free_items() +{ + Item *next; + DBUG_ENTER("Query_arena::free_items"); + /* This works because items are allocated with sql_alloc() */ + for (; free_list; free_list= next) + { + next= free_list->next; + free_list->delete_self(); + } + /* Postcondition: free_list is 0 */ + DBUG_VOID_RETURN; +} - DESCRIPTION - Create arena and optionally initialize memory root with minimal - possible parameters. 
- NOTE - We use this constructor when arena is part of THD, but reinitialize - its memory root in THD::init_for_queries() before execution of real - statements. -*/ -Item_arena::Item_arena(bool init_mem_root) - :free_list(0), mem_root(&main_mem_root), - state(CONVENTIONAL_EXECUTION) +void Query_arena::set_query_arena(Query_arena *set) { - if (init_mem_root) - init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0); + mem_root= set->mem_root; + free_list= set->free_list; + state= set->state; } -Item_arena::Type Item_arena::type() const +void Query_arena::cleanup_stmt() { - DBUG_ASSERT("Item_arena::type()" == "abstract"); - return STATEMENT; + DBUG_ASSERT("Query_arena::cleanup_stmt()" == "not implemented"); } - /* Statement functions */ -Statement::Statement(THD *thd) - :Item_arena(thd), - id(++thd->statement_id_counter), +Statement::Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg, + enum enum_state state_arg, ulong id_arg) + :Query_arena(mem_root_arg, state_arg), + id(id_arg), set_query_id(1), - allow_sum_func(0), - lex(&main_lex), + lex(lex_arg), query(0), - query_length(0) + query_length(0), + cursor(0) { name.str= NULL; } -/* - This constructor is called when statement is a subobject of THD: - Some variables are initialized in THD::init due to locking problems - This statement object will be used to -*/ -Statement::Statement() - :Item_arena((bool)TRUE), - id(0), - set_query_id(1), - allow_sum_func(0), /* initialized later */ - lex(&main_lex), - query(0), /* these two are set */ - query_length(0) /* in alloc_query() */ -{ -} - - -Item_arena::Type Statement::type() const +Query_arena::Type Statement::type() const { return STATEMENT; } @@ -1484,36 +1648,40 @@ void Statement::set_statement(Statement *stmt) { id= stmt->id; set_query_id= stmt->set_query_id; - allow_sum_func= stmt->allow_sum_func; lex= stmt->lex; query= stmt->query; query_length= stmt->query_length; + cursor= stmt->cursor; } void Statement::set_n_backup_statement(Statement *stmt, Statement *backup) 
{ + DBUG_ENTER("Statement::set_n_backup_statement"); backup->set_statement(this); set_statement(stmt); + DBUG_VOID_RETURN; } void Statement::restore_backup_statement(Statement *stmt, Statement *backup) { + DBUG_ENTER("Statement::restore_backup_statement"); stmt->set_statement(this); set_statement(backup); + DBUG_VOID_RETURN; } void THD::end_statement() { - /* Cleanup SQL processing state to resuse this statement in next query. */ + /* Cleanup SQL processing state to reuse this statement in next query. */ lex_end(lex); delete lex->result; lex->result= 0; - free_items(free_list); - free_list= 0; + /* Note that free_list is freed in cleanup_after_query() */ + /* Don't free mem_root, as mem_root is freed in the end of dispatch_command (once for any command). @@ -1521,43 +1689,34 @@ void THD::end_statement() } -void Item_arena::set_n_backup_item_arena(Item_arena *set, Item_arena *backup) +void THD::set_n_backup_active_arena(Query_arena *set, Query_arena *backup) { - DBUG_ENTER("Item_arena::set_n_backup_item_arena"); - backup->set_item_arena(this); - set_item_arena(set); + DBUG_ENTER("THD::set_n_backup_active_arena"); + DBUG_ASSERT(backup->is_backup_arena == FALSE); + + backup->set_query_arena(this); + set_query_arena(set); +#ifndef DBUG_OFF + backup->is_backup_arena= TRUE; +#endif DBUG_VOID_RETURN; } -void Item_arena::restore_backup_item_arena(Item_arena *set, Item_arena *backup) +void THD::restore_active_arena(Query_arena *set, Query_arena *backup) { - DBUG_ENTER("Item_arena::restore_backup_item_arena"); - set->set_item_arena(this); - set_item_arena(backup); -#ifdef NOT_NEEDED_NOW - /* - Reset backup mem_root to avoid its freeing. - Since Item_arena's mem_root is freed only when it is part of Statement - we need this only if we use some Statement's arena as backup storage. - But we do this only with THD::stmt_backup and this Statement is specially - handled in this respect. So this code is not really needed now. 
- */ - clear_alloc_root(&backup->mem_root); + DBUG_ENTER("THD::restore_active_arena"); + DBUG_ASSERT(backup->is_backup_arena); + set->set_query_arena(this); + set_query_arena(backup); +#ifndef DBUG_OFF + backup->is_backup_arena= FALSE; #endif DBUG_VOID_RETURN; } -void Item_arena::set_item_arena(Item_arena *set) -{ - mem_root= set->mem_root; - free_list= set->free_list; - state= set->state; -} - Statement::~Statement() { - free_root(&main_mem_root, MYF(0)); } C_MODE_START @@ -1601,6 +1760,7 @@ Statement_map::Statement_map() : NULL,MYF(0)); } + /* Insert a new statement to the thread-local statement map. @@ -1634,20 +1794,10 @@ int Statement_map::insert(THD *thd, Statement *statement) my_error(ER_OUT_OF_RESOURCES, MYF(0)); goto err_st_hash; } - if (statement->name.str) + if (statement->name.str && my_hash_insert(&names_hash, (byte*) statement)) { - /* - If there is a statement with the same name, remove it. It is ok to - remove old and fail to insert new one at the same time. - */ - Statement *old_stmt; - if ((old_stmt= find_by_name(&statement->name))) - erase(old_stmt); - if (my_hash_insert(&names_hash, (byte*) statement)) - { - my_error(ER_OUT_OF_RESOURCES, MYF(0)); - goto err_names_hash; - } + my_error(ER_OUT_OF_RESOURCES, MYF(0)); + goto err_names_hash; } pthread_mutex_lock(&LOCK_prepared_stmt_count); /* @@ -1660,7 +1810,8 @@ int Statement_map::insert(THD *thd, Statement *statement) if (prepared_stmt_count >= max_prepared_stmt_count) { pthread_mutex_unlock(&LOCK_prepared_stmt_count); - my_error(ER_UNKNOWN_ERROR, MYF(0)); + my_error(ER_MAX_PREPARED_STMT_COUNT_REACHED, MYF(0), + max_prepared_stmt_count); goto err_max; } prepared_stmt_count++; @@ -1675,19 +1826,27 @@ err_max: err_names_hash: hash_delete(&st_hash, (byte*) statement); err_st_hash: - send_error(thd); return 1; } +void Statement_map::close_transient_cursors() +{ +#ifdef TO_BE_IMPLEMENTED + Statement *stmt; + while ((stmt= transient_cursor_list.head())) + stmt->close_cursor(); /* deletes itself from the 
list */ +#endif +} + + void Statement_map::erase(Statement *statement) { if (statement == last_found_statement) last_found_statement= 0; if (statement->name.str) - { hash_delete(&names_hash, (byte *) statement); - } + hash_delete(&st_hash, (byte *) statement); pthread_mutex_lock(&LOCK_prepared_stmt_count); DBUG_ASSERT(prepared_stmt_count > 0); @@ -1720,16 +1879,15 @@ Statement_map::~Statement_map() hash_free(&names_hash); hash_free(&st_hash); - } bool select_dumpvar::send_data(List<Item> &items) { - List_iterator_fast<LEX_STRING> var_li(var_list); + List_iterator_fast<my_var> var_li(var_list); List_iterator<Item> it(items); Item *item; - LEX_STRING *ls; - DBUG_ENTER("send_data"); + my_var *mv; + DBUG_ENTER("select_dumpvar::send_data"); if (unit->offset_limit_cnt) { // using limit offset,count @@ -1738,32 +1896,34 @@ bool select_dumpvar::send_data(List<Item> &items) } if (row_count++) { - my_error(ER_TOO_MANY_ROWS, MYF(0)); + my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0)); DBUG_RETURN(1); } - while ((ls= var_li++) && (item= it++)) + while ((mv= var_li++) && (item= it++)) { - Item_func_set_user_var *suv= new Item_func_set_user_var(*ls, item); - suv->fix_fields(thd, (TABLE_LIST *) thd->lex->select_lex.table_list.first, - 0); - suv->check(); - suv->update(); + if (mv->local) + { + if (thd->spcont->set_variable(thd, mv->offset, &item)) + DBUG_RETURN(1); + } + else + { + Item_func_set_user_var *suv= new Item_func_set_user_var(mv->s, item); + suv->fix_fields(thd, 0); + suv->check(0); + suv->update(); + } } DBUG_RETURN(0); } bool select_dumpvar::send_eof() { - if (row_count) - { - ::send_ok(thd,row_count); - return 0; - } - else - { - my_error(ER_EMPTY_QUERY,MYF(0)); - return 1; - } + if (! 
row_count) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA)); + ::send_ok(thd,row_count); + return 0; } /**************************************************************************** @@ -1777,5 +1937,300 @@ void TMP_TABLE_PARAM::init() field_count= sum_func_count= func_count= hidden_field_count= 0; group_parts= group_length= group_null_parts= 0; quick_group= 1; + table_charset= 0; + precomputed_group_by= 0; DBUG_VOID_RETURN; } + + +void thd_increment_bytes_sent(ulong length) +{ + THD *thd=current_thd; + if (likely(thd != 0)) + { /* current_thd==0 when close_connection() calls net_send_error() */ + thd->status_var.bytes_sent+= length; + } +} + + +void thd_increment_bytes_received(ulong length) +{ + current_thd->status_var.bytes_received+= length; +} + + +void thd_increment_net_big_packet_count(ulong length) +{ + current_thd->status_var.net_big_packet_count+= length; +} + + +void THD::set_status_var_init() +{ + bzero((char*) &status_var, sizeof(status_var)); +} + + +void Security_context::init() +{ + host= user= priv_user= ip= 0; + host_or_ip= "connecting host"; + priv_host[0]= '\0'; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + db_access= NO_ACCESS; +#endif +} + + +void Security_context::destroy() +{ + // If not pointer to constant + if (host != my_localhost) + safeFree(host); + if (user != delayed_user) + safeFree(user); + safeFree(ip); +} + + +void Security_context::skip_grants() +{ + /* privileges for the user are unknown everything is allowed */ + host_or_ip= (char *)""; + master_access= ~NO_ACCESS; + priv_user= (char *)""; + *priv_host= '\0'; +} + + +/**************************************************************************** + Handling of open and locked tables states. + + This is used when we want to open/lock (and then close) some tables when + we already have a set of tables open and locked. We use these methods for + access to mysql.proc table to find definitions of stored routines. 
+****************************************************************************/ + +void THD::reset_n_backup_open_tables_state(Open_tables_state *backup) +{ + DBUG_ENTER("reset_n_backup_open_tables_state"); + backup->set_open_tables_state(this); + reset_open_tables_state(); + DBUG_VOID_RETURN; +} + + +void THD::restore_backup_open_tables_state(Open_tables_state *backup) +{ + DBUG_ENTER("restore_backup_open_tables_state"); + /* + Before we will throw away current open tables state we want + to be sure that it was properly cleaned up. + */ + DBUG_ASSERT(open_tables == 0 && temporary_tables == 0 && + handler_tables == 0 && derived_tables == 0 && + lock == 0 && locked_tables == 0 && + prelocked_mode == NON_PRELOCKED); + set_open_tables_state(backup); + DBUG_VOID_RETURN; +} + + + +/**************************************************************************** + Handling of statement states in functions and triggers. + + This is used to ensure that the function/trigger gets a clean state + to work with and does not cause any side effects of the calling statement. + + It also allows most stored functions and triggers to replicate even + if they are used items that would normally be stored in the binary + replication (like last_insert_id() etc...) + + The following things is done + - Disable binary logging for the duration of the statement + - Disable multi-result-sets for the duration of the statement + - Value of last_insert_id() is saved and restored + - Value set by 'SET INSERT_ID=#' is reset and restored + - Value for found_rows() is reset and restored + - examined_row_count is added to the total + - cuted_fields is added to the total + - new savepoint level is created and destroyed + + NOTES: + Seed for random() is saved for the first! usage of RAND() + We reset examined_row_count and cuted_fields and add these to the + result to ensure that if we have a bug that would reset these within + a function, we are not loosing any rows from the main statement. 
+ + We do not reset value of last_insert_id(). +****************************************************************************/ + +void THD::reset_sub_statement_state(Sub_statement_state *backup, + uint new_state) +{ + backup->options= options; + backup->in_sub_stmt= in_sub_stmt; + backup->no_send_ok= net.no_send_ok; + backup->enable_slow_log= enable_slow_log; + backup->last_insert_id= last_insert_id; + backup->next_insert_id= next_insert_id; + backup->current_insert_id= current_insert_id; + backup->insert_id_used= insert_id_used; + backup->last_insert_id_used= last_insert_id_used; + backup->clear_next_insert_id= clear_next_insert_id; + backup->limit_found_rows= limit_found_rows; + backup->examined_row_count= examined_row_count; + backup->sent_row_count= sent_row_count; + backup->cuted_fields= cuted_fields; + backup->client_capabilities= client_capabilities; + backup->savepoints= transaction.savepoints; + + if (!lex->requires_prelocking() || is_update_query(lex->sql_command)) + options&= ~OPTION_BIN_LOG; + + if ((backup->options & OPTION_BIN_LOG) && is_update_query(lex->sql_command)) + mysql_bin_log.start_union_events(this, this->query_id); + + /* Disable result sets */ + client_capabilities &= ~CLIENT_MULTI_RESULTS; + in_sub_stmt|= new_state; + next_insert_id= 0; + insert_id_used= 0; + examined_row_count= 0; + sent_row_count= 0; + cuted_fields= 0; + transaction.savepoints= 0; + + /* Surpress OK packets in case if we will execute statements */ + net.no_send_ok= TRUE; +} + + +void THD::restore_sub_statement_state(Sub_statement_state *backup) +{ + /* + To save resources we want to release savepoints which were created + during execution of function or trigger before leaving their savepoint + level. It is enough to release first savepoint set on this level since + all later savepoints will be released automatically. 
+ */ + if (transaction.savepoints) + { + SAVEPOINT *sv; + for (sv= transaction.savepoints; sv->prev; sv= sv->prev) + {} + /* ha_release_savepoint() never returns error. */ + (void)ha_release_savepoint(this, sv); + } + transaction.savepoints= backup->savepoints; + options= backup->options; + in_sub_stmt= backup->in_sub_stmt; + net.no_send_ok= backup->no_send_ok; + enable_slow_log= backup->enable_slow_log; + last_insert_id= backup->last_insert_id; + next_insert_id= backup->next_insert_id; + current_insert_id= backup->current_insert_id; + insert_id_used= backup->insert_id_used; + last_insert_id_used= backup->last_insert_id_used; + clear_next_insert_id= backup->clear_next_insert_id; + limit_found_rows= backup->limit_found_rows; + sent_row_count= backup->sent_row_count; + client_capabilities= backup->client_capabilities; + + if ((options & OPTION_BIN_LOG) && is_update_query(lex->sql_command)) + mysql_bin_log.stop_union_events(this); + + /* + The following is added to the old values as we are interested in the + total complexity of the query + */ + examined_row_count+= backup->examined_row_count; + cuted_fields+= backup->cuted_fields; +} + + +/*************************************************************************** + Handling of XA id cacheing +***************************************************************************/ + +pthread_mutex_t LOCK_xid_cache; +HASH xid_cache; + +static byte *xid_get_hash_key(const byte *ptr,uint *length, + my_bool not_used __attribute__((unused))) +{ + *length=((XID_STATE*)ptr)->xid.key_length(); + return ((XID_STATE*)ptr)->xid.key(); +} + +static void xid_free_hash (void *ptr) +{ + if (!((XID_STATE*)ptr)->in_thd) + my_free((gptr)ptr, MYF(0)); +} + +bool xid_cache_init() +{ + pthread_mutex_init(&LOCK_xid_cache, MY_MUTEX_INIT_FAST); + return hash_init(&xid_cache, &my_charset_bin, 100, 0, 0, + xid_get_hash_key, xid_free_hash, 0) != 0; +} + +void xid_cache_free() +{ + if (hash_inited(&xid_cache)) + { + hash_free(&xid_cache); + 
pthread_mutex_destroy(&LOCK_xid_cache); + } +} + +XID_STATE *xid_cache_search(XID *xid) +{ + pthread_mutex_lock(&LOCK_xid_cache); + XID_STATE *res=(XID_STATE *)hash_search(&xid_cache, xid->key(), xid->key_length()); + pthread_mutex_unlock(&LOCK_xid_cache); + return res; +} + + +bool xid_cache_insert(XID *xid, enum xa_states xa_state) +{ + XID_STATE *xs; + my_bool res; + pthread_mutex_lock(&LOCK_xid_cache); + if (hash_search(&xid_cache, xid->key(), xid->key_length())) + res=0; + else if (!(xs=(XID_STATE *)my_malloc(sizeof(*xs), MYF(MY_WME)))) + res=1; + else + { + xs->xa_state=xa_state; + xs->xid.set(xid); + xs->in_thd=0; + res=my_hash_insert(&xid_cache, (byte*)xs); + } + pthread_mutex_unlock(&LOCK_xid_cache); + return res; +} + + +bool xid_cache_insert(XID_STATE *xid_state) +{ + pthread_mutex_lock(&LOCK_xid_cache); + DBUG_ASSERT(hash_search(&xid_cache, xid_state->xid.key(), + xid_state->xid.key_length())==0); + my_bool res=my_hash_insert(&xid_cache, (byte*)xid_state); + pthread_mutex_unlock(&LOCK_xid_cache); + return res; +} + + +void xid_cache_delete(XID_STATE *xid_state) +{ + pthread_mutex_lock(&LOCK_xid_cache); + hash_delete(&xid_cache, (byte *)xid_state); + pthread_mutex_unlock(&LOCK_xid_cache); +} + diff --git a/sql/sql_class.h b/sql/sql_class.h index db6f65cab55..99803802001 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -26,6 +25,9 @@ class Query_log_event; class Load_log_event; class Slave_log_event; +class Format_description_log_event; +class sp_rcontext; +class sp_cache; enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE }; enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME }; @@ -38,6 +40,100 @@ enum enum_check_fields { CHECK_FIELD_IGNORE, CHECK_FIELD_WARN, CHECK_FIELD_ERROR_FOR_NULL }; extern char internal_table_name[2]; +extern char empty_c_string[1]; +extern const char **errmesg; + +#define TC_LOG_PAGE_SIZE 8192 +#define TC_LOG_MIN_SIZE (3*TC_LOG_PAGE_SIZE) + +#define TC_HEURISTIC_RECOVER_COMMIT 1 +#define TC_HEURISTIC_RECOVER_ROLLBACK 2 +extern uint tc_heuristic_recover; + +/* + Transaction Coordinator log - a base abstract class + for two different implementations +*/ +class TC_LOG +{ + public: + int using_heuristic_recover(); + TC_LOG() {} + virtual ~TC_LOG() {} + + virtual int open(const char *opt_name)=0; + virtual void close()=0; + virtual int log_xid(THD *thd, my_xid xid)=0; + virtual void unlog(ulong cookie, my_xid xid)=0; +}; + +class TC_LOG_DUMMY: public TC_LOG // use it to disable the logging +{ +public: + TC_LOG_DUMMY() {} /* Remove gcc warning */ + int open(const char *opt_name) { return 0; } + void close() { } + int log_xid(THD *thd, my_xid xid) { return 1; } + void unlog(ulong cookie, my_xid xid) { } +}; + +#ifdef HAVE_MMAP +class TC_LOG_MMAP: public TC_LOG +{ + public: // only to keep Sun Forte on sol9x86 happy + typedef enum { + POOL, // page is in pool + ERROR, // last sync failed + DIRTY // new xids added since last sync + } PAGE_STATE; + + private: + typedef struct st_page { + struct st_page *next; // page a linked in a fifo queue + my_xid *start, *end; // usable area of a page + my_xid *ptr; // next xid will be written here + int size, free; // max and current number of free xid slots on the page 
+ int waiters; // number of waiters on condition + PAGE_STATE state; // see above + pthread_mutex_t lock; // to access page data or control structure + pthread_cond_t cond; // to wait for a sync + } PAGE; + + char logname[FN_REFLEN]; + File fd; + my_off_t file_length; + uint npages, inited; + uchar *data; + struct st_page *pages, *syncing, *active, *pool, *pool_last; + /* + note that, e.g. LOCK_active is only used to protect + 'active' pointer, to protect the content of the active page + one has to use active->lock. + Same for LOCK_pool and LOCK_sync + */ + pthread_mutex_t LOCK_active, LOCK_pool, LOCK_sync; + pthread_cond_t COND_pool, COND_active; + + public: + TC_LOG_MMAP(): inited(0) {} + int open(const char *opt_name); + void close(); + int log_xid(THD *thd, my_xid xid); + void unlog(ulong cookie, my_xid xid); + int recover(); + + private: + void get_active_from_pool(); + int sync(); + int overflow(); +}; +#else +#define TC_LOG_MMAP TC_LOG_DUMMY +#endif + +extern TC_LOG *tc_log; +extern TC_LOG_MMAP tc_log_mmap; +extern TC_LOG_DUMMY tc_log_dummy; /* log info errors */ #define LOG_INFO_EOF -1 @@ -75,13 +171,28 @@ typedef struct st_user_var_events uint charset_number; } BINLOG_USER_VAR_EVENT; +#define RP_LOCK_LOG_IS_ALREADY_LOCKED 1 +#define RP_FORCE_ROTATE 2 + class Log_event; -class MYSQL_LOG - { +/* + TODO split MYSQL_LOG into base MYSQL_LOG and + MYSQL_QUERY_LOG, MYSQL_SLOW_LOG, MYSQL_BIN_LOG + most of the code from MYSQL_LOG should be in the MYSQL_BIN_LOG + only (TC_LOG included) + + TODO use mmap instead of IO_CACHE for binlog + (mmap+fsync is two times faster than write+fsync) +*/ + +class MYSQL_LOG: public TC_LOG +{ private: /* LOCK_log and LOCK_index are inited by init_pthread_objects() */ pthread_mutex_t LOCK_log, LOCK_index; + pthread_mutex_t LOCK_prep_xids; + pthread_cond_t COND_prep_xids; pthread_cond_t update_cond; ulonglong bytes_written; time_t last_time,query_start; @@ -90,15 +201,7 @@ class MYSQL_LOG char *name; char time_buff[20],db[NAME_LEN+1]; 
char log_file_name[FN_REFLEN],index_file_name[FN_REFLEN]; - // current file sequence number for load data infile binary logging - uint file_id; - uint open_count; // For replication - volatile enum_log_type log_type; - enum cache_type io_cache_type; - bool write_error, inited; - bool need_start_event; - bool no_auto_events; // For relay binlog - /* + /* The max size before rotation (usable only if log_type == LOG_BIN: binary logs and relay logs). For a binlog, max_size should be max_binlog_size. @@ -106,14 +209,52 @@ class MYSQL_LOG max_binlog_size otherwise. max_size is set in init(), and dynamically changed (when one does SET GLOBAL MAX_BINLOG_SIZE|MAX_RELAY_LOG_SIZE) by fix_max_binlog_size and - fix_max_relay_log_size). + fix_max_relay_log_size). */ ulong max_size; + ulong prepared_xids; /* for tc log - number of xids to remember */ + volatile enum_log_type log_type; + enum cache_type io_cache_type; + // current file sequence number for load data infile binary logging + uint file_id; + uint open_count; // For replication + int readers_count; + bool write_error, inited; + bool need_start_event; + /* + no_auto_events means we don't want any of these automatic events : + Start/Rotate/Stop. That is, in 4.x when we rotate a relay log, we don't + want a Rotate_log event to be written to the relay log. When we start a + relay log etc. So in 4.x this is 1 for relay logs, 0 for binlogs. + In 5.0 it's 0 for relay logs too! + */ + bool no_auto_events; friend class Log_event; public: + /* + These describe the log's format. This is used only for relay logs. + _for_exec is used by the SQL thread, _for_queue by the I/O thread. It's + necessary to have 2 distinct objects, because the I/O thread may be reading + events in a different format from what the SQL thread is reading (consider + the case of a master which has been upgraded from 5.0 to 5.1 without doing + RESET MASTER, or from 4.x to 5.0). 
+ */ + Format_description_log_event *description_event_for_exec, + *description_event_for_queue; + MYSQL_LOG(); - ~MYSQL_LOG(); + /* + note that there's no destructor ~MYSQL_LOG() ! + The reason is that we don't want it to be automatically called + on exit() - but only during the correct shutdown process + */ + + int open(const char *opt_name); + void close(); + int log_xid(THD *thd, my_xid xid); + void unlog(ulong cookie, my_xid xid); + int recover(IO_CACHE *log, Format_description_log_event *fdle); void reset_bytes_written() { bytes_written = 0; @@ -122,7 +263,7 @@ public: { #ifndef DBUG_OFF char buf1[22],buf2[22]; -#endif +#endif DBUG_ENTER("harvest_bytes_written"); (*counter)+=bytes_written; DBUG_PRINT("info",("counter: %s bytes_written: %s", llstr(*counter,buf1), @@ -139,17 +280,40 @@ public: bool no_auto_events_arg, ulong max_size); void init_pthread_objects(); void cleanup(); - bool open(const char *log_name,enum_log_type log_type, - const char *new_name, const char *index_file_name_arg, + bool open(const char *log_name, + enum_log_type log_type, + const char *new_name, enum cache_type io_cache_type_arg, - bool no_auto_events_arg, ulong max_size); - void new_file(bool need_lock= 1); + bool no_auto_events_arg, ulong max_size, + bool null_created); + const char *generate_name(const char *log_name, const char *suffix, + bool strip_ext, char *buff); + /* simplified open_xxx wrappers for the gigantic open above */ + bool open_query_log(const char *log_name) + { + char buf[FN_REFLEN]; + return open(generate_name(log_name, ".log", 0, buf), + LOG_NORMAL, 0, WRITE_CACHE, 0, 0, 0); + } + bool open_slow_log(const char *log_name) + { + char buf[FN_REFLEN]; + return open(generate_name(log_name, "-slow.log", 0, buf), + LOG_NORMAL, 0, WRITE_CACHE, 0, 0, 0); + } + bool open_index_file(const char *index_file_name_arg, + const char *log_name); + void new_file(bool need_lock); bool write(THD *thd, enum enum_server_command command, const char *format, ...) 
ATTRIBUTE_FORMAT(printf, 4, 5); bool write(THD *thd, const char *query, uint query_length, time_t query_start=0); bool write(Log_event* event_info); // binary log write - bool write(THD *thd, IO_CACHE *cache, bool commit_or_rollback); + bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event); + + void start_union_events(THD *thd, query_id_t query_id_param); + void stop_union_events(THD *thd); + bool is_query_in_union(THD *thd, query_id_t query_id_param); /* v stands for vector @@ -157,20 +321,20 @@ public: */ bool appendv(const char* buf,uint len,...); bool append(Log_event* ev); - + int generate_new_name(char *new_name,const char *old_name); void make_log_name(char* buf, const char* log_ident); bool is_active(const char* log_file_name); int update_log_index(LOG_INFO* linfo, bool need_update_threads); - int purge_logs(const char *to_log, bool included, + void rotate_and_purge(uint flags); + bool flush_and_sync(); + int purge_logs(const char *to_log, bool included, bool need_mutex, bool need_update_threads, ulonglong *decrease_log_space); int purge_logs_before_date(time_t purge_time); - int purge_first_log(struct st_relay_log_info* rli, bool included); + int purge_first_log(struct st_relay_log_info* rli, bool included); bool reset_logs(THD* thd); void close(uint exiting); - bool cut_spurious_tail(); - void report_pos_in_innodb(); // iterating through the log index file int find_log_pos(LOG_INFO* linfo, const char* log_name, @@ -182,6 +346,7 @@ public: inline bool is_open() { return log_type != LOG_CLOSED; } inline char* get_index_fname() { return index_file_name;} inline char* get_log_fname() { return log_file_name; } + inline char* get_name() { return name; } inline pthread_mutex_t* get_log_lock() { return &LOCK_log; } inline IO_CACHE* get_log_file() { return &log_file; } @@ -191,21 +356,33 @@ public: inline uint32 get_open_count() { return open_count; } }; -/* character conversion tables */ - - +/* + The COPY_INFO structure is used by INSERT/REPLACE code. 
+ The schema of the row counting by the INSERT/INSERT ... ON DUPLICATE KEY + UPDATE code: + If a row is inserted then the copied variable is incremented. + If a row is updated by the INSERT ... ON DUPLICATE KEY UPDATE and the + new data differs from the old one then the copied and the updated + variables are incremented. + The touched variable is incremented if a row was touched by the update part + of the INSERT ... ON DUPLICATE KEY UPDATE no matter whether the row + was actually changed or not. +*/ typedef struct st_copy_info { - ha_rows records; - ha_rows deleted; - ha_rows updated; - ha_rows copied; + ha_rows records; /* Number of processed records */ + ha_rows deleted; /* Number of deleted records */ + ha_rows updated; /* Number of updated records */ + ha_rows copied; /* Number of copied records */ ha_rows error_count; + ha_rows touched; /* Number of touched records */ enum enum_duplicates handle_duplicates; int escape_char, last_errno; bool ignore; /* for INSERT ... UPDATE */ List<Item> *update_fields; List<Item> *update_values; + /* for VIEW ... 
WITH CHECK OPTION */ + TABLE_LIST *view; } COPY_INFO; @@ -317,27 +494,6 @@ public: }; -class MYSQL_ERROR: public Sql_alloc -{ -public: - enum enum_warning_level - { WARN_LEVEL_NOTE, WARN_LEVEL_WARN, WARN_LEVEL_ERROR, WARN_LEVEL_END}; - - uint code; - enum_warning_level level; - char *msg; - - MYSQL_ERROR(THD *thd, uint code_arg, enum_warning_level level_arg, - const char *msg_arg) - :code(code_arg), level(level_arg) - { - if (msg_arg) - set_msg(thd, msg_arg); - } - void set_msg(THD *thd, const char *msg_arg); -}; - - class delayed_insert; class select_result; @@ -350,18 +506,21 @@ struct system_variables { ulonglong myisam_max_extra_sort_file_size; ulonglong myisam_max_sort_file_size; + ulonglong max_heap_table_size; + ulonglong tmp_table_size; ha_rows select_limit; ha_rows max_join_size; + ulong auto_increment_increment, auto_increment_offset; ulong bulk_insert_buff_size; ulong join_buff_size; ulong long_query_time; ulong max_allowed_packet; ulong max_error_count; - ulong max_heap_table_size; ulong max_length_for_sort_data; ulong max_sort_length; ulong max_tmp_tables; ulong max_insert_delayed_threads; + ulong multi_range_count; ulong myisam_repair_threads; ulong myisam_sort_buff_size; ulong myisam_stats_method; @@ -371,16 +530,22 @@ struct system_variables ulong net_retry_count; ulong net_wait_timeout; ulong net_write_timeout; + ulong optimizer_prune_level; + ulong optimizer_search_depth; ulong preload_buff_size; ulong query_cache_type; ulong read_buff_size; ulong read_rnd_buff_size; + ulong div_precincrement; ulong sortbuff_size; ulong table_type; - ulong tmp_table_size; ulong tx_isolation; + ulong completion_type; /* Determines which non-standard SQL behaviour should be enabled */ ulong sql_mode; + ulong max_sp_recursion_depth; + /* check of key presence in updatable view */ + ulong updatable_views_with_limit; ulong default_week_format; ulong max_seeks_for_key; ulong range_alloc_block_size; @@ -399,13 +564,11 @@ struct system_variables my_bool 
low_priority_updates; my_bool new_mode; my_bool query_cache_wlock_invalidate; -#ifdef HAVE_REPLICATION - ulong sync_replication; - ulong sync_replication_slave_id; - ulong sync_replication_timeout; -#endif /* HAVE_REPLICATION */ + my_bool engine_condition_pushdown; + #ifdef HAVE_INNOBASE_DB my_bool innodb_table_locks; + my_bool innodb_support_xa; #endif /* HAVE_INNOBASE_DB */ #ifdef HAVE_NDBCLUSTER_DB ulong ndb_autoincrement_prefetch_sz; @@ -414,11 +577,12 @@ struct system_variables my_bool ndb_use_transactions; #endif /* HAVE_NDBCLUSTER_DB */ my_bool old_passwords; - + /* Only charset part of these variables is sensible */ - CHARSET_INFO *character_set_client; + CHARSET_INFO *character_set_filesystem; + CHARSET_INFO *character_set_client; CHARSET_INFO *character_set_results; - + /* Both charset and collation parts of these variables are important */ CHARSET_INFO *collation_server; CHARSET_INFO *collation_database; @@ -433,12 +597,94 @@ struct system_variables DATE_TIME_FORMAT *date_format; DATE_TIME_FORMAT *datetime_format; DATE_TIME_FORMAT *time_format; + my_bool sysdate_is_now; }; + +/* per thread status variables */ + +typedef struct system_status_var +{ + ulong bytes_received; + ulong bytes_sent; + ulong com_other; + ulong com_stat[(uint) SQLCOM_END]; + ulong created_tmp_disk_tables; + ulong created_tmp_tables; + ulong ha_commit_count; + ulong ha_delete_count; + ulong ha_read_first_count; + ulong ha_read_last_count; + ulong ha_read_key_count; + ulong ha_read_next_count; + ulong ha_read_prev_count; + ulong ha_read_rnd_count; + ulong ha_read_rnd_next_count; + ulong ha_rollback_count; + ulong ha_update_count; + ulong ha_write_count; + ulong ha_prepare_count; + ulong ha_discover_count; + ulong ha_savepoint_count; + ulong ha_savepoint_rollback_count; + + /* KEY_CACHE parts. 
These are copies of the original */ + ulong key_blocks_changed; + ulong key_blocks_used; + ulong key_cache_r_requests; + ulong key_cache_read; + ulong key_cache_w_requests; + ulong key_cache_write; + /* END OF KEY_CACHE parts */ + + ulong net_big_packet_count; + ulong opened_tables; + ulong select_full_join_count; + ulong select_full_range_join_count; + ulong select_range_count; + ulong select_range_check_count; + ulong select_scan_count; + ulong long_query_count; + ulong filesort_merge_passes; + ulong filesort_range_count; + ulong filesort_rows; + ulong filesort_scan_count; + /* Prepared statements and binary protocol */ + ulong com_stmt_prepare; + ulong com_stmt_execute; + ulong com_stmt_send_long_data; + ulong com_stmt_fetch; + ulong com_stmt_reset; + ulong com_stmt_close; + + /* + Status variables which it does not make sense to add to + global status variable counter + */ + double last_query_cost; +} STATUS_VAR; + +/* + This is used for 'SHOW STATUS'. It must be updated to the last ulong + variable in system_status_var which is makes sens to add to the global + counter +*/ + +#define last_system_status_var com_stmt_close + + void free_tmp_table(THD *thd, TABLE *entry); -class Item_arena +/* The following macro is to make init of Query_arena simpler */ +#ifndef DBUG_OFF +#define INIT_ARENA_DBUG_INFO is_backup_arena= 0 +#else +#define INIT_ARENA_DBUG_INFO +#endif + + +class Query_arena { public: /* @@ -446,14 +692,16 @@ public: itself to the list on creation (see Item::Item() for details)) */ Item *free_list; - MEM_ROOT main_mem_root; MEM_ROOT *mem_root; // Pointer to current memroot - enum enum_state +#ifndef DBUG_OFF + bool is_backup_arena; /* True if this arena is used for backup. 
*/ +#endif + enum enum_state { - INITIALIZED= 0, PREPARED= 1, EXECUTED= 3, CONVENTIONAL_EXECUTION= 2, - ERROR= -1 + INITIALIZED= 0, INITIALIZED_FOR_SP= 1, PREPARED= 2, + CONVENTIONAL_EXECUTION= 3, EXECUTED= 4, ERROR= -1 }; - + enum_state state; /* We build without RTTI, so dynamic_cast can't be used. */ @@ -462,30 +710,29 @@ public: STATEMENT, PREPARED_STATEMENT, STORED_PROCEDURE }; + Query_arena(MEM_ROOT *mem_root_arg, enum enum_state state_arg) : + free_list(0), mem_root(mem_root_arg), state(state_arg) + { INIT_ARENA_DBUG_INFO; } /* - This constructor is used only when Item_arena is created as - backup storage for another instance of Item_arena. + This constructor is used only when Query_arena is created as + backup storage for another instance of Query_arena. */ - Item_arena() {}; - /* - Create arena for already constructed THD using its variables as - parameters for memory root initialization. - */ - Item_arena(THD *thd); - /* - Create arena and optionally init memory root with minimal values. - Particularly used if Item_arena is part of Statement. 
- */ - Item_arena(bool init_mem_root); + Query_arena() { INIT_ARENA_DBUG_INFO; } + virtual Type type() const; - virtual ~Item_arena() {}; + virtual ~Query_arena() {}; - inline bool is_stmt_prepare() const { return (int)state < (int)PREPARED; } + inline bool is_stmt_prepare() const { return state == INITIALIZED; } + inline bool is_first_sp_execute() const + { return state == INITIALIZED_FOR_SP; } + inline bool is_stmt_prepare_or_first_sp_execute() const + { return (int)state < (int)PREPARED; } inline bool is_first_stmt_execute() const { return state == PREPARED; } inline bool is_stmt_execute() const { return state == PREPARED || state == EXECUTED; } - inline bool is_conventional_execution() const + inline bool is_conventional() const { return state == CONVENTIONAL_EXECUTION; } + inline gptr alloc(unsigned int size) { return alloc_root(mem_root,size); } inline gptr calloc(unsigned int size) { @@ -508,34 +755,37 @@ public: return ptr; } - void set_n_backup_item_arena(Item_arena *set, Item_arena *backup); - void restore_backup_item_arena(Item_arena *set, Item_arena *backup); - void set_item_arena(Item_arena *set); + void set_query_arena(Query_arena *set); + + void free_items(); + /* Close the active state associated with execution of this statement */ + virtual void cleanup_stmt(); }; -/* - State of a single command executed against this connection. +class Server_side_cursor; + +/** + @class Statement + @brief State of a single command executed against this connection. + One connection can contain a lot of simultaneously running statements, some of which could be: - prepared, that is, contain placeholders, - opened as cursors. We maintain 1 to 1 relationship between statement and cursor - if user wants to create another cursor for his - query, we create another statement for it. + query, we create another statement for it. 
To perform some action with statement we reset THD part to the state of that statement, do the action, and then save back modified state from THD to the statement. It will be changed in near future, and Statement will be used explicitly. */ -class Statement: public Item_arena +class Statement: public ilink, public Query_arena { Statement(const Statement &rhs); /* not implemented: */ Statement &operator=(const Statement &rhs); /* non-copyable */ public: - /* FIXME: must be private */ - LEX main_lex; - /* Uniquely identifies each statement object in thread scope; change during statement lifetime. FIXME: must be const @@ -543,23 +793,10 @@ public: ulong id; /* - - if set_query_id=1, we set field->query_id for all fields. In that case + - if set_query_id=1, we set field->query_id for all fields. In that case field list can not contain duplicates. */ bool set_query_id; - /* - This variable is used in post-parse stage to declare that sum-functions, - or functions which have sense only if GROUP BY is present, are allowed. - For example in queries - SELECT MIN(i) FROM foo - SELECT GROUP_CONCAT(a, b, MIN(i)) FROM ... GROUP BY ... - MIN(i) have no sense. - Though it's grammar-related issue, it's hard to catch it out during the - parse stage because GROUP BY clause goes in the end of query. This - variable is mainly used in setup_fields/fix_fields. - See item_sum.cc for details. - */ - bool allow_sum_func; LEX_STRING name; /* name for named prepared statements */ LEX *lex; // parse tree descriptor @@ -581,22 +818,21 @@ public: it. We will see the query_length field as either 0, or the right value for it. Assuming that the write and read of an n-bit memory field in an n-bit - computer is atomic, we can avoid races in the above way. + computer is atomic, we can avoid races in the above way. This printing is needed at least in SHOW PROCESSLIST and SHOW INNODB STATUS. 
*/ char *query; uint32 query_length; // current query length + Server_side_cursor *cursor; public: - /* - This constructor is called when statement is a subobject of THD: - some variables are initialized in THD::init due to locking problems - */ - Statement(); + /* This constructor is called for backup statements */ + Statement() {} - Statement(THD *thd); + Statement(LEX *lex_arg, MEM_ROOT *mem_root_arg, + enum enum_state state_arg, ulong id_arg); virtual ~Statement(); /* Assign execution context (note: not all members) of given stmt to self */ @@ -608,7 +844,7 @@ public: }; -/* +/** Container for all statements created/used in a connection. Statements in Statement_map have unique Statement::id (guaranteed by id assignment in Statement::Statement) @@ -646,6 +882,12 @@ public: } return last_found_statement; } + /* + Close all cursors of this connection that use tables of a storage + engine that has transaction-specific state and therefore can not + survive COMMIT or ROLLBACK. Currently all but MyISAM cursors are closed. 
+ */ + void close_transient_cursors(); void erase(Statement *statement); /* Erase all statements (calls Statement destructor) */ void reset(); @@ -653,11 +895,69 @@ public: private: HASH st_hash; HASH names_hash; + I_List<Statement> transient_cursor_list; Statement *last_found_statement; }; +struct st_savepoint { + struct st_savepoint *prev; + char *name; + uint length, nht; +}; + +enum xa_states {XA_NOTR=0, XA_ACTIVE, XA_IDLE, XA_PREPARED}; +extern const char *xa_state_names[]; + +typedef struct st_xid_state { + /* For now, this is only used to catch duplicated external xids */ + XID xid; // transaction identifier + enum xa_states xa_state; // used by external XA only + bool in_thd; +} XID_STATE; + +extern pthread_mutex_t LOCK_xid_cache; +extern HASH xid_cache; +bool xid_cache_init(void); +void xid_cache_free(void); +XID_STATE *xid_cache_search(XID *xid); +bool xid_cache_insert(XID *xid, enum xa_states xa_state); +bool xid_cache_insert(XID_STATE *xid_state); +void xid_cache_delete(XID_STATE *xid_state); + +/** + @class Security_context + @brief A set of THD members describing the current authenticated user. +*/ -/* +class Security_context { +public: + Security_context() {} /* Remove gcc warning */ + /* + host - host of the client + user - user of the client, set to NULL until the user has been read from + the connection + priv_user - The user privilege we are using. May be "" for anonymous user. + ip - client IP + */ + char *host, *user, *priv_user, *ip; + /* The host privilege we are using */ + char priv_host[MAX_HOSTNAME]; + /* points to host if host is available, otherwise points to ip */ + const char *host_or_ip; + ulong master_access; /* Global privileges from mysql.user */ + ulong db_access; /* Privileges for current db */ + + void init(); + void destroy(); + void skip_grants(); + inline char *priv_host_name() + { + return (*priv_host ? priv_host : (char *)"%"); + } +}; + + +/** A registry for item tree transformations performed during query optimization. 
We register only those changes which require a rollback to re-execute a prepared statement or stored procedure @@ -668,24 +968,199 @@ struct Item_change_record; typedef I_List<Item_change_record> Item_change_list; -/* +/** + Type of prelocked mode. + See comment for THD::prelocked_mode for complete description. +*/ + +enum prelocked_mode_type {NON_PRELOCKED= 0, PRELOCKED= 1, + PRELOCKED_UNDER_LOCK_TABLES= 2}; + + +/** + Class that holds information about tables which were opened and locked + by the thread. It is also used to save/restore this information in + push_open_tables_state()/pop_open_tables_state(). +*/ + +class Open_tables_state +{ +public: + /* + open_tables - list of regular tables in use by this thread + temporary_tables - list of temp tables in use by this thread + handler_tables - list of tables that were opened with HANDLER OPEN + and are still in use by this thread + */ + TABLE *open_tables, *temporary_tables, *handler_tables, *derived_tables; + /* + During a MySQL session, one can lock tables in two modes: automatic + or manual. In automatic mode all necessary tables are locked just before + statement execution, and all acquired locks are stored in 'lock' + member. Unlocking takes place automatically as well, when the + statement ends. + Manual mode comes into play when a user issues a 'LOCK TABLES' + statement. In this mode the user can only use the locked tables. + Trying to use any other tables will give an error. The locked tables are + stored in 'locked_tables' member. Manual locking is described in + the 'LOCK_TABLES' chapter of the MySQL manual. + See also lock_tables() for details. + */ + MYSQL_LOCK *lock; + /* + Tables that were locked with explicit or implicit LOCK TABLES. + (Implicit LOCK TABLES happens when we are prelocking tables for + execution of statement which uses stored routines. See description + THD::prelocked_mode for more info.) 
+ */ + MYSQL_LOCK *locked_tables; + /* + prelocked_mode_type enum and prelocked_mode member are used for + indicating whenever "prelocked mode" is on, and what type of + "prelocked mode" is it. + + Prelocked mode is used for execution of queries which explicitly + or implicitly (via views or triggers) use functions, thus may need + some additional tables (mentioned in query table list) for their + execution. + + First open_tables() call for such query will analyse all functions + used by it and add all additional tables to table its list. It will + also mark this query as requiring prelocking. After that lock_tables() + will issue implicit LOCK TABLES for the whole table list and change + thd::prelocked_mode to non-0. All queries called in functions invoked + by the main query will use prelocked tables. Non-0 prelocked_mode + will also surpress mentioned analysys in those queries thus saving + cycles. Prelocked mode will be turned off once close_thread_tables() + for the main query will be called. + + Note: Since not all "tables" present in table list are really locked + thd::prelocked_mode does not imply thd::locked_tables. + */ + prelocked_mode_type prelocked_mode; + ulong version; + uint current_tablenr; + + /* + This constructor serves for creation of Open_tables_state instances + which are used as backup storage. 
+ */ + Open_tables_state() {}; + + Open_tables_state(ulong version_arg); + + void set_open_tables_state(Open_tables_state *state) + { + *this= *state; + } + + void reset_open_tables_state() + { + open_tables= temporary_tables= handler_tables= derived_tables= 0; + lock= locked_tables= 0; + prelocked_mode= NON_PRELOCKED; + } +}; + +/** + @class Sub_statement_state + @brief Used to save context when executing a function or trigger +*/ + +/* Defines used for Sub_statement_state::in_sub_stmt */ + +#define SUB_STMT_TRIGGER 1 +#define SUB_STMT_FUNCTION 2 + + +class Sub_statement_state +{ +public: + ulonglong options; + ulonglong last_insert_id, next_insert_id, current_insert_id; + ulonglong limit_found_rows; + ha_rows cuted_fields, sent_row_count, examined_row_count; + ulong client_capabilities; + uint in_sub_stmt; + bool enable_slow_log, insert_id_used, clear_next_insert_id; + bool last_insert_id_used; + my_bool no_send_ok; + SAVEPOINT *savepoints; +}; + +/** + This class represents the interface for internal error handlers. + Internal error handlers are exception handlers used by the server + implementation. +*/ +class Internal_error_handler +{ +protected: + Internal_error_handler() {} + virtual ~Internal_error_handler() {} + +public: + /** + Handle an error condition. + This method can be implemented by a subclass to achieve any of the + following: + - mask an error internally, prevent exposing it to the user, + - mask an error and throw another one instead. + When this method returns true, the error condition is considered + 'handled', and will not be propagated to upper layers. + It is the responsability of the code installing an internal handler + to then check for trapped conditions, and implement logic to recover + from the anticipated conditions trapped during runtime. 
+ + This mechanism is similar to C++ try/throw/catch: + - 'try' correspond to <code>THD::push_internal_handler()</code>, + - 'throw' correspond to <code>my_error()</code>, + which invokes <code>my_message_sql()</code>, + - 'catch' correspond to checking how/if an internal handler was invoked, + before removing it from the exception stack with + <code>THD::pop_internal_handler()</code>. + + @param sql_errno the error number + @param level the error level + @param thd the calling thread + @return true if the error is handled + */ + virtual bool handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level level, + THD *thd) = 0; +}; + + +/** + @class THD For each client connection we create a separate thread with THD serving as a thread/connection descriptor */ -class THD :public ilink, - public Statement +class THD :public Statement, + public Open_tables_state { public: + /* + Constant for THD::where initialization in the beginning of every query. + + It's needed because we do not save/restore THD::where normally during + primary (non subselect) query execution. + */ + static const char * const DEFAULT_WHERE; + #ifdef EMBEDDED_LIBRARY struct st_mysql *mysql; - struct st_mysql_data *data; unsigned long client_stmt_id; unsigned long client_param_count; struct st_mysql_bind *client_params; char *extra_data; ulong extra_length; - String query_rest; + struct st_mysql_data *cur_data; + struct st_mysql_data *first_data; + struct st_mysql_data **data_tail; + void clear_data_list(); + struct st_mysql_data *alloc_new_dataset(); /* In embedded server it points to the statement that is processed in the current query. 
We store some results directly in statement @@ -704,14 +1179,14 @@ public: struct sockaddr_in remote; // client socket address struct rand_struct rand; // used for authentication struct system_variables variables; // Changeable local variables + struct system_status_var status_var; // Per thread statistic vars + THR_LOCK_INFO lock_info; // Locking info of this thread + THR_LOCK_OWNER main_lock_id; // To use for conventional queries + THR_LOCK_OWNER *lock_id; // If not main_lock_id, points to + // the lock_id of a cursor. pthread_mutex_t LOCK_delete; // Locked before thd is deleted /* all prepared statements and cursors of this connection */ - Statement_map stmt_map; - /* - keeps THD state while it is used for active statement - Note: we perform special cleanup for it in THD destructor. - */ - Statement stmt_backup; + Statement_map stmt_map; /* A pointer to the stack frame of handle_one_connection(), which is called first in the thread for handling a client @@ -719,15 +1194,20 @@ public: char *thread_stack; /* - host - host of the client - user - user of the client, set to NULL until the user has been read from - the connection - priv_user - The user privilege we are using. May be '' for anonymous user. db - currently selected database - ip - client IP + catalog - currently selected catalog + WARNING: some members of THD (currently 'db', 'catalog' and 'query') are + set and alloced by the slave SQL thread (for the THD of that thread); that + thread is (and must remain, for now) the only responsible for freeing these + 3 members. If you add members here, and you add code to set them in + replication, don't forget to free_them_and_set_them_to_0 in replication + properly. For details see the 'err:' label of the handle_slave_sql() + in sql/slave.cc. 
*/ - char *host,*user,*priv_user,*db,*ip; - char priv_host[MAX_HOSTNAME]; + char *db, *catalog; + Security_context main_security_ctx; + Security_context *security_ctx; + /* remote (peer) port */ uint16 peer_port; /* @@ -736,41 +1216,15 @@ public: a time-consuming piece that MySQL can get stuck in for a long time. */ const char *proc_info; - /* points to host if host is available, otherwise points to ip */ - const char *host_or_ip; ulong client_capabilities; /* What the client supports */ ulong max_client_packet_length; - ulong master_access; /* Global privileges from mysql.user */ - ulong db_access; /* Privileges for current db */ - /* - open_tables - list of regular tables in use by this thread - temporary_tables - list of temp tables in use by this thread - handler_tables - list of tables that were opened with HANDLER OPEN - and are still in use by this thread - */ - TABLE *open_tables,*temporary_tables, *handler_tables, *derived_tables; - /* - During a MySQL session, one can lock tables in two modes: automatic - or manual. In automatic mode all necessary tables are locked just before - statement execution, and all acquired locks are stored in 'lock' - member. Unlocking takes place automatically as well, when the - statement ends. - Manual mode comes into play when a user issues a 'LOCK TABLES' - statement. In this mode the user can only use the locked tables. - Trying to use any other tables will give an error. The locked tables are - stored in 'locked_tables' member. Manual locking is described in - the 'LOCK_TABLES' chapter of the MySQL manual. - See also lock_tables() for details. - */ - MYSQL_LOCK *lock; /* Current locks */ - MYSQL_LOCK *locked_tables; /* Tables locked with LOCK */ HASH handler_tables_hash; /* One thread can hold up to one named user-level lock. This variable points to a lock object if the lock is present. See item_func.cc and - chapter 'Miscellaneous functions', for functions GET_LOCK, RELEASE_LOCK. 
+ chapter 'Miscellaneous functions', for functions GET_LOCK, RELEASE_LOCK. */ User_level_lock *ull; #ifndef DBUG_OFF @@ -778,7 +1232,7 @@ public: #endif struct st_my_thread_var *mysys_var; /* - Type of current query: COM_PREPARE, COM_QUERY, etc. Set from + Type of current query: COM_STMT_PREPARE, COM_QUERY, etc. Set from first byte of the packet in do_command() */ enum enum_server_command command; @@ -794,16 +1248,18 @@ public: time_t connect_time,thr_create_time; // track down slow pthread_create thr_lock_type update_lock_default; delayed_insert *di; - my_bool tablespace_op; /* This is TRUE in DISCARD/IMPORT TABLESPACE */ + + /* <> 0 if we are inside of trigger or stored function. */ + uint in_sub_stmt; + + /* container for handler's private per-connection data */ + void *ha_data[MAX_HA]; struct st_transactions { - IO_CACHE trans_log; // Inited ONLY if binlog is open ! + SAVEPOINT *savepoints; THD_TRANS all; // Trans since BEGIN WORK THD_TRANS stmt; // Trans for current statement - uint bdb_lock_count; -#ifdef HAVE_NDBCLUSTER_DB - void* thd_ndb; -#endif - bool on; + bool on; // see ha_enable_transaction() + XID_STATE xid_state; /* Tables changed in transaction (that must be invalidated in query cache). List contain only transactional tables, that not invalidated in query @@ -813,8 +1269,21 @@ public: MEM_ROOT mem_root; // Transaction-life memory allocation pool void cleanup() { - changed_tables = 0; + changed_tables= 0; + savepoints= 0; +#ifdef USING_TRANSACTIONS free_root(&mem_root,MYF(MY_KEEP_PREALLOC)); +#endif + } + st_transactions() + { +#ifdef USING_TRANSACTIONS + bzero((char*)this, sizeof(*this)); + xid_state.xid.null(); + init_sql_alloc(&mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0); +#else + xid_state.xa_state= XA_NOTR; +#endif } } transaction; Field *dupp_field; @@ -828,19 +1297,31 @@ public: This is to track items changed during execution of a prepared statement/stored procedure. 
It's created by register_item_tree_change() in memory root of THD, and freed in - rollback_item_tree_changes(). For conventional execution it's always 0. + rollback_item_tree_changes(). For conventional execution it's always + empty. */ Item_change_list change_list; /* - Current prepared Item_arena if there one, or 0 + A permanent memory area of the statement. For conventional + execution, the parsed tree and execution runtime reside in the same + memory root. In this case stmt_arena points to THD. In case of + a prepared statement or a stored procedure statement, thd->mem_root + conventionally points to runtime memory, and thd->stmt_arena + points to the memory of the PS/SP, where the parsed tree of the + statement resides. Whenever you need to perform a permanent + transformation of a parsed tree, you should allocate new memory in + stmt_arena, to allow correct re-execution of PS/SP. + Note: in the parser, stmt_arena == thd, even for PS/SP. */ - Item_arena *current_arena; + Query_arena *stmt_arena; /* next_insert_id is set on SET INSERT_ID= #. This is used as the next generated auto_increment value in handler.cc */ ulonglong next_insert_id; + /* Remember last next_insert_id to reset it if something went wrong */ + ulonglong prev_insert_id; /* At the beginning of the statement last_insert_id holds the first @@ -865,12 +1346,19 @@ public: ulonglong current_insert_id; ulonglong limit_found_rows; + ulonglong options; /* Bitmap of states */ + longlong row_count_func; /* For the ROW_COUNT() function */ ha_rows cuted_fields, sent_row_count, examined_row_count; + /* + The set of those tables whose fields are referenced in all subqueries + of the query. + TODO: possibly this it is incorrect to have used tables in THD because + with more than one subquery, it is not clear what does the field mean. 
+ */ table_map used_tables; USER_CONN *user_connect; CHARSET_INFO *db_charset; - List<TABLE> temporary_tables_should_be_free; // list of temporary tables /* FIXME: this, and some other variables like 'count_cuted_fields' maybe should be statement/cursor local, that is, moved to Statement @@ -888,8 +1376,8 @@ public: from table are necessary for this select, to check if it's necessary to update auto-updatable fields (like auto_increment and timestamp). */ - ulong query_id; - ulong warn_id, version, options, thread_id, col_access; + query_id_t query_id, warn_id; + ulong thread_id, col_access; /* Statement id is thread-wide. This counter is used to generate ids */ ulong statement_id_counter; @@ -897,15 +1385,19 @@ public: ulong row_count; // Row counter, mainly for errors and warnings long dbug_thread_id; pthread_t real_id; - uint current_tablenr,tmp_table,global_read_lock; + uint tmp_table, global_read_lock; uint server_status,open_options,system_thread; - uint32 db_length; + uint db_length; uint select_number; //number of select (used for EXPLAIN) /* variables.transaction_isolation is reset to this after each commit */ enum_tx_isolation session_tx_isolation; enum_check_fields count_cuted_fields; - /* for user variables replication*/ - DYNAMIC_ARRAY user_var_events; + + DYNAMIC_ARRAY user_var_events; /* For user variables replication */ + MEM_ROOT *user_var_events_alloc; /* Allocate above array elements here */ + + enum killed_state { NOT_KILLED=0, KILL_BAD_DATA=1, KILL_CONNECTION=ER_SERVER_SHUTDOWN, KILL_QUERY=ER_QUERY_INTERRUPTED }; + killed_state volatile killed; /* scramble - random string sent to client on handshake */ char scramble[SCRAMBLE_LENGTH+1]; @@ -914,31 +1406,50 @@ public: bool locked, some_tables_deleted; bool last_cuted_field; bool no_errors, password, is_fatal_error; - bool query_start_used, rand_used; + bool query_start_used, rand_used, time_zone_used; /* last_insert_id_used is set when current statement calls - LAST_INSERT_ID() or reads 
@@LAST_INSERT_ID, so that binary log - LAST_INSERT_ID_EVENT be generated. + LAST_INSERT_ID() or reads @@LAST_INSERT_ID. */ bool last_insert_id_used; /* + last_insert_id_used is set when current statement or any stored + function called from this statement calls LAST_INSERT_ID() or + reads @@LAST_INSERT_ID, so that binary log LAST_INSERT_ID_EVENT be + generated. Required for statement-based binary log for issuing + "SET LAST_INSERT_ID= #" before "SELECT func()", if func() reads + LAST_INSERT_ID. + */ + bool last_insert_id_used_bin_log; + + /* insert_id_used is set when current statement updates THD::last_insert_id, so that binary log INSERT_ID_EVENT be generated. */ bool insert_id_used; + bool clear_next_insert_id; /* for IS NULL => = last_insert_id() fix in remove_eq_conds() */ bool substitute_null_with_insert_id; - bool time_zone_used; bool in_lock_tables; bool query_error, bootstrap, cleanup_done; bool tmp_table_used; bool charset_is_system_charset, charset_is_collation_connection; + bool charset_is_character_set_filesystem; bool enable_slow_log; /* enable slow log for current statement */ - my_bool volatile killed; + bool no_trans_update, abort_on_warning; + bool got_warning; /* Set on call to push_warning() */ + bool no_warnings_for_error; /* no warnings on call to my_error() */ + /* set during loop of derived table processing */ + bool derived_tables_processing; + my_bool tablespace_op; /* This is TRUE in DISCARD/IMPORT TABLESPACE */ + + sp_rcontext *spcont; // SP runtime context + sp_cache *sp_proc_cache; + sp_cache *sp_func_cache; /* If we do a purge of binary logs, log index info of the threads @@ -955,6 +1466,32 @@ public: ulong ulong_value; } sys_var_tmp; + struct { + /* + If true, mysql_bin_log::write(Log_event) call will not write events to + binlog, and maintain 2 below variables instead (use + mysql_bin_log.start_union_events to turn this on) + */ + bool do_union; + /* + If TRUE, at least one mysql_bin_log::write(Log_event) call has been + made after 
last mysql_bin_log.start_union_events() call. + */ + bool unioned_events; + /* + If TRUE, at least one mysql_bin_log::write(Log_event e), where + e.cache_stmt == TRUE call has been made after last + mysql_bin_log.start_union_events() call. + */ + bool unioned_events_trans; + + /* + 'queries' (actually SP statements) that run under inside this binlog + union have thd->query_id >= first_query_id. + */ + query_id_t first_query_id; + } binlog_evt_union; + THD(); ~THD(); @@ -966,11 +1503,12 @@ public: killing mysqld) where it's vital to not allocate excessive and not used memory. Note, that we still don't return error from init_for_queries(): if preallocation fails, we should notice that at the first call to - alloc_root. + alloc_root. */ void init_for_queries(); void change_user(void); void cleanup(void); + void cleanup_after_query(); bool store_globals(); #ifdef SIGNAL_WITH_VIO_CLOSE inline void set_active_vio(Vio* vio) @@ -986,18 +1524,18 @@ public: pthread_mutex_unlock(&LOCK_delete); } void close_active_vio(); -#endif - void awake(bool prepare_to_die); +#endif + void awake(THD::killed_state state_to_set); /* For enter_cond() / exit_cond() to work the mutex must be got before - enter_cond() (in 4.1 an assertion will soon ensure this); this mutex is - then released by exit_cond(). Use must be: - lock mutex; enter_cond(); your code; exit_cond(). + enter_cond(); this mutex is then released by exit_cond(). + Usage must be: lock mutex; enter_cond(); your code; exit_cond(). 
*/ inline const char* enter_cond(pthread_cond_t *cond, pthread_mutex_t* mutex, const char* msg) { const char* old_msg = proc_info; + safe_mutex_assert_owner(mutex); mysys_var->current_mutex = mutex; mysys_var->current_cond = cond; proc_info = msg; @@ -1032,19 +1570,21 @@ public: inline ulonglong found_rows(void) { return limit_found_rows; - } + } inline bool active_transaction() { -#ifdef USING_TRANSACTIONS - return (transaction.all.bdb_tid != 0 || - transaction.all.innodb_active_trans != 0 || - transaction.all.ndb_tid != 0); +#ifdef USING_TRANSACTIONS + return server_status & SERVER_STATUS_IN_TRANS; #else return 0; #endif } - inline gptr trans_alloc(unsigned int size) - { + inline bool fill_derived_tables() + { + return !stmt_arena->is_stmt_prepare() && !lex->only_view_structure(); + } + inline gptr trans_alloc(unsigned int size) + { return alloc_root(&transaction.mem_root,size); } @@ -1064,6 +1604,7 @@ public: net.last_error[0]= 0; net.last_errno= 0; net.report_error= 0; + query_error= 0; } inline bool vio_ok() const { return net.vio != 0; } #else @@ -1073,23 +1614,22 @@ public: inline void fatal_error() { is_fatal_error= 1; - net.report_error= 1; + net.report_error= 1; DBUG_PRINT("error",("Fatal error set")); } inline CHARSET_INFO *charset() { return variables.character_set_client; } void update_charset(); - inline Item_arena *change_arena_if_needed(Item_arena *backup) + inline Query_arena *activate_stmt_arena_if_needed(Query_arena *backup) { /* - use new arena if we are in a prepared statements and we have not - already changed to use this arena. + Use the persistent arena if we are in a prepared statement or a stored + procedure statement and we have not already changed to use this arena. 
*/ - if (current_arena->is_stmt_prepare() && - mem_root != ¤t_arena->main_mem_root) + if (!stmt_arena->is_conventional() && mem_root != stmt_arena->mem_root) { - set_n_backup_item_arena(current_arena, backup); - return current_arena; + set_n_backup_active_arena(stmt_arena, backup); + return stmt_arena; } return 0; } @@ -1097,7 +1637,7 @@ public: void change_item_tree(Item **place, Item *new_value) { /* TODO: check for OOM condition here */ - if (!current_arena->is_conventional_execution()) + if (!stmt_arena->is_conventional()) nocheck_register_item_tree_change(place, *place, mem_root); *place= new_value; } @@ -1110,13 +1650,124 @@ public: state after execution of a non-prepared SQL statement. */ void end_statement(); + inline int killed_errno() const + { + return killed != KILL_BAD_DATA ? killed : 0; + } + inline void send_kill_message() const + { + int err= killed_errno(); + if (err) + my_message(err, ER(err), MYF(0)); + } + /* return TRUE if we will abort query if we make a warning now */ + inline bool really_abort_on_warning() + { + return (abort_on_warning && + (!no_trans_update || + (variables.sql_mode & MODE_STRICT_ALL_TABLES))); + } + void set_status_var_init(); + bool is_context_analysis_only() + { return stmt_arena->is_stmt_prepare() || lex->view_prepare_mode; } + void reset_n_backup_open_tables_state(Open_tables_state *backup); + void restore_backup_open_tables_state(Open_tables_state *backup); + void reset_sub_statement_state(Sub_statement_state *backup, uint new_state); + void restore_sub_statement_state(Sub_statement_state *backup); + void set_n_backup_active_arena(Query_arena *set, Query_arena *backup); + void restore_active_arena(Query_arena *set, Query_arena *backup); + + /* + Initialize the current database from a NULL-terminated string with length + If we run out of memory, we free the current database and return TRUE. 
+ This way the user will notice the error as there will be no current + database selected (in addition to the error message set by malloc). + */ + bool set_db(const char *new_db, uint new_db_len) + { + /* Do not reallocate memory if current chunk is big enough. */ + if (db && new_db && db_length >= new_db_len) + memcpy(db, new_db, new_db_len+1); + else + { + x_free(db); + db= new_db ? my_strdup_with_length(new_db, new_db_len, MYF(MY_WME)) : + NULL; + } + db_length= db ? new_db_len : 0; + return new_db && !db; + } + void reset_db(char *new_db, uint new_db_len) + { + db= new_db; + db_length= new_db_len; + } + /* + Copy the current database to the argument. Use the current arena to + allocate memory for a deep copy: current database may be freed after + a statement is parsed but before it's executed. + */ + bool copy_db_to(char **p_db, uint *p_db_length) + { + if (db == NULL) + { + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + return TRUE; + } + *p_db= strmake(db, db_length); + if (p_db_length) + *p_db_length= db_length; + return FALSE; + } + +public: + /** + Add an internal error handler to the thread execution context. + @param handler the exception handler to add + */ + void push_internal_handler(Internal_error_handler *handler); + + /** + Handle an error condition. + @param sql_errno the error number + @param level the error level + @return true if the error is handled + */ + virtual bool handle_error(uint sql_errno, + MYSQL_ERROR::enum_warning_level level); + + /** + Remove the error handler last pushed. + */ + void pop_internal_handler(); + +private: + /** The current internal error handler for this thread, or NULL. */ + Internal_error_handler *m_internal_handler; + /** + The lex to hold the parsed tree of conventional (non-prepared) queries. + Whereas for prepared and stored procedure statements we use an own lex + instance for each new query, for conventional statements we reuse + the same lex. (@see mysql_parse for details). 
+ */ + LEX main_lex; + /** + This memory root is used for two purposes: + - for conventional queries, to allocate structures stored in main_lex + during parsing, and allocate runtime data (execution plan, etc.) + during execution. + - for prepared queries, only to allocate runtime data. The parsed + tree itself is reused between executions and thus is stored elsewhere. + */ + MEM_ROOT main_mem_root; }; + #define tmp_disable_binlog(A) \ - ulong save_options= (A)->options; \ - (A)->options&= ~OPTION_BIN_LOG; + {ulonglong tmp_disable_binlog__save_options= (A)->options; \ + (A)->options&= ~OPTION_BIN_LOG -#define reenable_binlog(A) (A)->options= save_options; +#define reenable_binlog(A) (A)->options= tmp_disable_binlog__save_options;} /* Flags for the THD::system_thread (bitmap) variable */ #define SYSTEM_THREAD_DELAYED_INSERT 1 @@ -1124,7 +1775,7 @@ public: #define SYSTEM_THREAD_SLAVE_SQL 4 /* - Used to hold information about file and file structure in exchainge + Used to hold information about file and file structure in exchange via non-DB file (...INTO OUTFILE..., ...LOAD DATA...) XXX: We never call destructor for objects of this class. 
*/ @@ -1137,6 +1788,7 @@ public: bool opt_enclosed; bool dumpfile; ulong skip_lines; + CHARSET_INFO *cs; sql_exchange(char *name,bool dumpfile_flag); }; @@ -1148,8 +1800,6 @@ public: class JOIN; -void send_error(THD *thd, uint sql_errno=0, const char *err=0); - class select_result :public Sql_alloc { protected: THD *thd; @@ -1162,6 +1812,7 @@ public: unit= u; return 0; } + virtual int prepare2(void) { return 0; } /* Because of peculiarities of prepared statements protocol we need to know number of columns in the result set (if @@ -1169,17 +1820,31 @@ public: */ virtual uint field_count(List<Item> &fields) const { return fields.elements; } - virtual bool send_fields(List<Item> &list,uint flag)=0; + virtual bool send_fields(List<Item> &list, uint flags)=0; virtual bool send_data(List<Item> &items)=0; virtual bool initialize_tables (JOIN *join=0) { return 0; } virtual void send_error(uint errcode,const char *err); virtual bool send_eof()=0; + /** + Check if this query returns a result set and therefore is allowed in + cursors and set an error message if it is not the case. + + @retval FALSE success + @retval TRUE error, an error message is set + */ + virtual bool check_simple_select() const; virtual void abort() {} /* Cleanup instance of this class for next execution of a prepared statement/stored procedure. 
*/ virtual void cleanup(); + void set_thd(THD *thd_arg) { thd= thd_arg; } +#ifdef EMBEDDED_LIBRARY + virtual void begin_dataset() {} +#else + void begin_dataset() {} +#endif }; @@ -1192,17 +1857,21 @@ public: class select_result_interceptor: public select_result { public: + select_result_interceptor() {} /* Remove gcc warning */ uint field_count(List<Item> &fields) const { return 0; } bool send_fields(List<Item> &fields, uint flag) { return FALSE; } }; class select_send :public select_result { + int status; public: - select_send() {} - bool send_fields(List<Item> &list,uint flag); + select_send() :status(0) {} + bool send_fields(List<Item> &list, uint flags); bool send_data(List<Item> &items); bool send_eof(); + virtual bool check_simple_select() const { return FALSE; } + void abort(); }; @@ -1246,40 +1915,20 @@ public: class select_insert :public select_result_interceptor { public: + TABLE_LIST *table_list; TABLE *table; List<Item> *fields; ulonglong last_insert_id; COPY_INFO info; - TABLE_LIST *insert_table_list; - TABLE_LIST *dup_table_list; + bool insert_into_view; - select_insert(TABLE *table_par, List<Item> *fields_par, - enum_duplicates duplic, bool ignore) - :table(table_par), fields(fields_par), last_insert_id(0), - insert_table_list(0), dup_table_list(0) - { - bzero((char*) &info,sizeof(info)); - info.ignore= ignore; - info.handle_duplicates=duplic; - } - select_insert(TABLE *table_par, - TABLE_LIST *insert_table_list_par, - TABLE_LIST *dup_table_list_par, - List<Item> *fields_par, + select_insert(TABLE_LIST *table_list_par, + TABLE *table_par, List<Item> *fields_par, List<Item> *update_fields, List<Item> *update_values, - enum_duplicates duplic, bool ignore) - :table(table_par), fields(fields_par), last_insert_id(0), - insert_table_list(insert_table_list_par), - dup_table_list(dup_table_list_par) - { - bzero((char*) &info,sizeof(info)); - info.ignore= ignore; - info.handle_duplicates= duplic; - info.update_fields= update_fields; - info.update_values= 
update_values; - } + enum_duplicates duplic, bool ignore); ~select_insert(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); + int prepare2(void); bool send_data(List<Item> &items); virtual void store_values(List<Item> &values); void send_error(uint errcode,const char *err); @@ -1291,24 +1940,23 @@ class select_insert :public select_result_interceptor { class select_create: public select_insert { ORDER *group; - const char *db; - const char *name; + TABLE_LIST *create_table; HA_CREATE_INFO *create_info; Alter_info *alter_info; MYSQL_LOCK *lock; Field **field; public: - select_create(const char *db_name, const char *table_name, + select_create(TABLE_LIST *table_arg, HA_CREATE_INFO *create_info_arg, Alter_info *alter_info_arg, List<Item> &select_fields, enum_duplicates duplic, bool ignore) - :select_insert(NULL, &select_fields, duplic, ignore), - db(db_name), name(table_name), + :select_insert(NULL, NULL, &select_fields, 0, 0, duplic, ignore), + create_table(table_arg), create_info(create_info_arg), alter_info(alter_info_arg), lock(0) - {} + {} int prepare(List<Item> &list, SELECT_LEX_UNIT *u); void store_values(List<Item> &values); void send_error(uint errcode,const char *err); @@ -1318,8 +1966,8 @@ public: #include <myisam.h> -/* - Param to create temporary tables when doing SELECT:s +/* + Param to create temporary tables when doing SELECT:s NOTE This structure is copied using memcpy as a part of JOIN. */ @@ -1347,12 +1995,22 @@ public: uint quick_group; bool using_indirect_summary_function; /* If >0 convert all blob fields to varchar(convert_blob_length) */ - uint convert_blob_length; + uint convert_blob_length; + CHARSET_INFO *table_charset; + bool schema_table; + /* + True if GROUP BY and its aggregate functions are already computed + by a table access method (e.g. by loose index scan). In this case + query execution should not perform aggregation and should treat + aggregate functions as normal functions. 
+ */ + bool precomputed_group_by; bool force_copy_fields; + TMP_TABLE_PARAM() :copy_field(0), group_parts(0), - group_length(0), group_null_parts(0), convert_blob_length(0), - force_copy_fields(0) + group_length(0), group_null_parts(0), convert_blob_length(0), + schema_table(0), precomputed_group_by(0), force_copy_fields(0) {} ~TMP_TABLE_PARAM() { @@ -1369,19 +2027,21 @@ public: } }; -class select_union :public select_result_interceptor { - public: - TABLE *table; - COPY_INFO info; +class select_union :public select_result_interceptor +{ TMP_TABLE_PARAM tmp_table_param; +public: + TABLE *table; - select_union(TABLE *table_par); - ~select_union(); + select_union() :table(0) {} int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); bool send_eof(); bool flush(); - void set_table(TABLE *tbl) { table= tbl; } + + bool create_result_table(THD *thd, List<Item> *column_types, + bool is_distinct, ulonglong options, + const char *alias); }; /* Base subselect interface class */ @@ -1399,7 +2059,9 @@ public: class select_singlerow_subselect :public select_subselect { public: - select_singlerow_subselect(Item_subselect *item):select_subselect(item){} + select_singlerow_subselect(Item_subselect *item_arg) + :select_subselect(item_arg) + {} bool send_data(List<Item> &items); }; @@ -1410,13 +2072,14 @@ class select_max_min_finder_subselect :public select_subselect bool (select_max_min_finder_subselect::*op)(); bool fmax; public: - select_max_min_finder_subselect(Item_subselect *item, bool mx) - :select_subselect(item), cache(0), fmax(mx) + select_max_min_finder_subselect(Item_subselect *item_arg, bool mx) + :select_subselect(item_arg), cache(0), fmax(mx) {} void cleanup(); bool send_data(List<Item> &items); bool cmp_real(); bool cmp_int(); + bool cmp_decimal(); bool cmp_str(); }; @@ -1424,7 +2087,8 @@ public: class select_exists_subselect :public select_subselect { public: - select_exists_subselect(Item_subselect *item):select_subselect(item){} + 
select_exists_subselect(Item_subselect *item_arg) + :select_subselect(item_arg){} bool send_data(List<Item> &items); }; @@ -1434,6 +2098,7 @@ typedef struct st_sort_field { Field *field; /* Field to sort */ Item *item; /* Item if not sorting fields */ uint length; /* Length of sort field */ + uint suffix_length; /* Length suffix (0-4) */ Item_result result_type; /* Type of item */ bool reverse; /* if descending sort */ bool need_strxnfrm; /* If we have to use strxnfrm() */ @@ -1452,7 +2117,7 @@ typedef struct st_sort_buffer { class Table_ident :public Sql_alloc { - public: +public: LEX_STRING db; LEX_STRING table; SELECT_LEX_UNIT *sel; @@ -1465,16 +2130,26 @@ class Table_ident :public Sql_alloc else db= db_arg; } - inline Table_ident(LEX_STRING table_arg) + inline Table_ident(LEX_STRING table_arg) :table(table_arg), sel((SELECT_LEX_UNIT *)0) { db.str=0; } - inline Table_ident(SELECT_LEX_UNIT *s) : sel(s) + /* + This constructor is used only for the case when we create a derived + table. A derived table has no name and doesn't belong to any database. + Later, if there was an alias specified for the table, it will be set + by add_table_to_list. 
+ */ + inline Table_ident(SELECT_LEX_UNIT *s) : sel(s) { /* We must have a table name here as this is used with add_table_to_list */ - db.str=0; table.str= internal_table_name; table.length=1; + db.str= empty_c_string; /* a subject to casedn_str */ + db.length= 0; + table.str= internal_table_name; + table.length=1; } + bool is_derived_table() const { return test(sel); } inline void change_db(char *db_name) { db.str= db_name; db.length= (uint) strlen(db_name); @@ -1485,24 +2160,34 @@ class Table_ident :public Sql_alloc class user_var_entry { public: + user_var_entry() {} /* Remove gcc warning */ LEX_STRING name; char *value; - ulong length, update_query_id, used_query_id; + ulong length; + query_id_t update_query_id, used_query_id; Item_result type; + bool unsigned_flag; - double val(my_bool *null_value); + double val_real(my_bool *null_value); longlong val_int(my_bool *null_value); String *val_str(my_bool *null_value, String *str, uint decimals); + my_decimal *val_decimal(my_bool *null_value, my_decimal *result); DTCollation collation; }; - -/* Class for unique (removing of duplicates) */ +/* + Unique -- class for unique (removing of duplicates). + Puts all values to the TREE. If the tree becomes too big, + it's dumped to the file. User can request sorted values, or + just iterate through them. In the last case tree merging is performed in + memory simultaneously with iteration, so it should be ~2-3x faster. 
+ */ class Unique :public Sql_alloc { DYNAMIC_ARRAY file_ptrs; - ulong max_elements, max_in_memory_size; + ulong max_elements; + ulonglong max_in_memory_size; IO_CACHE file; TREE tree; byte *record_pointers; @@ -1511,17 +2196,32 @@ class Unique :public Sql_alloc public: ulong elements; - Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, - uint size_arg, ulong max_in_memory_size_arg); + Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg, + uint size_arg, ulonglong max_in_memory_size_arg); ~Unique(); - inline bool unique_add(gptr ptr) + ulong elements_in_tree() { return tree.elements_in_tree; } + inline bool unique_add(void *ptr) { + DBUG_ENTER("unique_add"); + DBUG_PRINT("info", ("tree %u - %lu", tree.elements_in_tree, max_elements)); if (tree.elements_in_tree > max_elements && flush()) - return 1; - return !tree_insert(&tree, ptr, 0, tree.custom_arg); + DBUG_RETURN(1); + DBUG_RETURN(!tree_insert(&tree, ptr, 0, tree.custom_arg)); } bool get(TABLE *table); + static double get_use_cost(uint *buffer, uint nkeys, uint key_size, + ulonglong max_in_memory_size); + inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size, + ulonglong max_in_memory_size) + { + register ulonglong max_elems_in_tree= + (1 + max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size)); + return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree)); + } + + void reset(); + bool walk(tree_walk_action action, void *walk_action_arg); friend int unique_write_to_file(gptr key, element_count count, Unique *unique); friend int unique_write_to_ptrs(gptr key, element_count count, Unique *unique); @@ -1532,27 +2232,33 @@ class multi_delete :public select_result_interceptor { TABLE_LIST *delete_tables, *table_being_deleted; Unique **tempfiles; - THD *thd; ha_rows deleted, found; uint num_of_tables; int error; - bool do_delete, transactional_tables, log_delayed, normal_tables; + bool do_delete; + /* True if at least one table we delete from is transactional */ + bool 
transactional_tables; + /* True if at least one table we delete from is not transactional */ + bool normal_tables; + bool delete_while_scanning; + public: - multi_delete(THD *thd, TABLE_LIST *dt, uint num_of_tables); + multi_delete(TABLE_LIST *dt, uint num_of_tables); ~multi_delete(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); bool initialize_tables (JOIN *join); void send_error(uint errcode,const char *err); - int do_deletes (bool from_send_error); + int do_deletes(); bool send_eof(); }; class multi_update :public select_result_interceptor { - TABLE_LIST *all_tables, *update_tables, *table_being_updated; - THD *thd; + TABLE_LIST *all_tables; /* query/update command tables */ + TABLE_LIST *leaves; /* list of leves of join table tree */ + TABLE_LIST *update_tables, *table_being_updated; TABLE **tmp_tables, *main_table, *table_to_update; TMP_TABLE_PARAM *tmp_table_param; ha_rows updated, found; @@ -1561,11 +2267,15 @@ class multi_update :public select_result_interceptor uint table_count; Copy_field *copy_field; enum enum_duplicates handle_duplicates; - bool do_update, trans_safe, transactional_tables, log_delayed, ignore; + bool do_update, trans_safe; + /* True if the update operation has made a change in a transactional table */ + bool transactional_tables; + bool ignore; public: - multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> *fields, - List<Item> *values, enum_duplicates handle_duplicates, bool ignore); + multi_update(TABLE_LIST *ut, TABLE_LIST *leaves_list, + List<Item> *fields, List<Item> *values, + enum_duplicates handle_duplicates, bool ignore); ~multi_update(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); @@ -1575,15 +2285,38 @@ public: bool send_eof(); }; +class my_var : public Sql_alloc { +public: + LEX_STRING s; +#ifndef DBUG_OFF + /* + Routine to which this Item_splocal belongs. Used for checking if correct + runtime context is used for variable handling. 
+ */ + sp_head *sp; +#endif + bool local; + uint offset; + enum_field_types type; + my_var (LEX_STRING& j, bool i, uint o, enum_field_types t) + :s(j), local(i), offset(o), type(t) + {} + ~my_var() {} +}; class select_dumpvar :public select_result_interceptor { ha_rows row_count; public: - List<LEX_STRING> var_list; + List<my_var> var_list; select_dumpvar() { var_list.empty(); row_count= 0;} ~select_dumpvar() {} int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); bool send_eof(); + virtual bool check_simple_select() const; void cleanup(); }; + +/* Functions in sql_class.cc */ + +void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var); diff --git a/sql/sql_client.cc b/sql/sql_client.cc index 49d0d3087ad..d6f1183806e 100644 --- a/sql/sql_client.cc +++ b/sql/sql_client.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/sql_crypt.cc b/sql/sql_crypt.cc index f21a109e95d..367b9e38e56 100644 --- a/sql/sql_crypt.cc +++ b/sql/sql_crypt.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001, 2003, 2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/sql_crypt.h b/sql/sql_crypt.h index 25bc2d29e1d..f3db9adde25 100644 --- a/sql/sql_crypt.h +++ b/sql/sql_crypt.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001, 2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc new file mode 100644 index 00000000000..2e98da42be1 --- /dev/null +++ b/sql/sql_cursor.cc @@ -0,0 +1,669 @@ +/* Copyright (C) 2005-2006 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#ifdef USE_PRAGMA_IMPLEMENTATION +#pragma implementation /* gcc class implementation */ +#endif + +#include "mysql_priv.h" +#include "sql_cursor.h" +#include "sql_select.h" + +/**************************************************************************** + Declarations. 
+****************************************************************************/ + +/* + Sensitive_cursor -- a sensitive non-materialized server side + cursor An instance of this class cursor has its own runtime + state -- list of used items and memory root for runtime memory, + open and locked tables, change list for the changes of the + parsed tree. This state is freed when the cursor is closed. +*/ + +class Sensitive_cursor: public Server_side_cursor +{ + MEM_ROOT main_mem_root; + Query_arena *stmt_arena; + JOIN *join; + TABLE *open_tables; + MYSQL_LOCK *lock; + TABLE *derived_tables; + /* List of items created during execution */ + query_id_t query_id; + struct Engine_info + { + const handlerton *ht; + void *read_view; + }; + Engine_info ht_info[MAX_HA]; + Item_change_list change_list; + my_bool close_at_commit; + THR_LOCK_OWNER lock_id; +private: + /* bzero cursor state in THD */ + void reset_thd(THD *thd); +public: + Sensitive_cursor(THD *thd, select_result *result_arg); + + THR_LOCK_OWNER *get_lock_id() { return &lock_id; } + /* Save THD state into cursor */ + void post_open(THD *thd); + + virtual bool is_open() const { return join != 0; } + virtual int open(JOIN *join); + virtual void fetch(ulong num_rows); + virtual void close(); + virtual ~Sensitive_cursor(); +}; + + +/* + Materialized_cursor -- an insensitive materialized server-side + cursor. The result set of this cursor is saved in a temporary + table at open. The cursor itself is simply an interface for the + handler of the temporary table. 
+*/ + +class Materialized_cursor: public Server_side_cursor +{ + MEM_ROOT main_mem_root; + /* A fake unit to supply to select_send when fetching */ + SELECT_LEX_UNIT fake_unit; + TABLE *table; + List<Item> item_list; + ulong fetch_limit; + ulong fetch_count; +public: + Materialized_cursor(select_result *result, TABLE *table); + + virtual bool is_open() const { return table != 0; } + virtual int open(JOIN *join __attribute__((unused))); + virtual void fetch(ulong num_rows); + virtual void close(); + virtual ~Materialized_cursor(); +}; + + +/* + Select_materialize -- a mediator between a cursor query and the + protocol. In case we were not able to open a non-materialzed + cursor, it creates an internal temporary HEAP table, and insert + all rows into it. When the table reaches max_heap_table_size, + it's converted to a MyISAM table. Later this table is used to + create a Materialized_cursor. +*/ + +class Select_materialize: public select_union +{ + select_result *result; /* the result object of the caller (PS or SP) */ +public: + Select_materialize(select_result *result_arg) :result(result_arg) {} + virtual bool send_fields(List<Item> &list, uint flags); +}; + + +/**************************************************************************/ + +/* + Attempt to open a materialized or non-materialized cursor. + + SYNOPSIS + mysql_open_cursor() + thd thread handle + flags [in] create a materialized cursor or not + result [in] result class of the caller used as a destination + for the rows fetched from the cursor + pcursor [out] a pointer to store a pointer to cursor in + + RETURN VALUE + 0 the query has been successfully executed; in this + case pcursor may or may not contain + a pointer to an open cursor. + non-zero an error, 'pcursor' has been left intact. 
+*/ + +int mysql_open_cursor(THD *thd, uint flags, select_result *result, + Server_side_cursor **pcursor) +{ + Sensitive_cursor *sensitive_cursor; + select_result *save_result; + Select_materialize *result_materialize; + LEX *lex= thd->lex; + int rc; + + /* + The lifetime of the sensitive cursor is the same or less as the + lifetime of the runtime memory of the statement it's opened for. + */ + if (! (result_materialize= new (thd->mem_root) Select_materialize(result))) + return 1; + + if (! (sensitive_cursor= new (thd->mem_root) Sensitive_cursor(thd, result))) + { + delete result; + return 1; + } + + save_result= lex->result; + + lex->result= result_materialize; + if (! (flags & (uint) ALWAYS_MATERIALIZED_CURSOR)) + { + thd->lock_id= sensitive_cursor->get_lock_id(); + thd->cursor= sensitive_cursor; + } + + rc= mysql_execute_command(thd); + + lex->result= save_result; + thd->lock_id= &thd->main_lock_id; + thd->cursor= 0; + + /* + Possible options here: + - a sensitive cursor is open. In this case rc is 0 and + result_materialize->table is NULL, or + - a materialized cursor is open. In this case rc is 0 and + result_materialize->table is not NULL + - an error occured during materializaton. + result_materialize->table is not NULL, but rc != 0 + - successful completion of mysql_execute_command without + a cursor: rc is 0, result_materialize->table is NULL, + sensitive_cursor is not open. + This is possible if some command writes directly to the + network, bypassing select_result mechanism. An example of + such command is SHOW VARIABLES or SHOW STATUS. + */ + if (rc) + goto err_open; + + if (sensitive_cursor->is_open()) + { + DBUG_ASSERT(!result_materialize->table); + /* + It's safer if we grab THD state after mysql_execute_command + is finished and not in Sensitive_cursor::open(), because + currently the call to Sensitive_cursor::open is buried deep + in JOIN::exec of the top level join. 
+ */ + sensitive_cursor->post_open(thd); + *pcursor= sensitive_cursor; + goto end; + } + else if (result_materialize->table) + { + Materialized_cursor *materialized_cursor; + TABLE *table= result_materialize->table; + MEM_ROOT *mem_root= &table->mem_root; + + if (!(materialized_cursor= new (mem_root) + Materialized_cursor(result, table))) + { + rc= 1; + goto err_open; + } + + if ((rc= materialized_cursor->open(0))) + { + delete materialized_cursor; + goto err_open; + } + + *pcursor= materialized_cursor; + thd->stmt_arena->cleanup_stmt(); + goto end; + } + +err_open: + DBUG_ASSERT(! (sensitive_cursor && sensitive_cursor->is_open())); + delete sensitive_cursor; + if (result_materialize->table) + free_tmp_table(thd, result_materialize->table); +end: + delete result_materialize; + return rc; +} + +/**************************************************************************** + Server_side_cursor +****************************************************************************/ + +Server_side_cursor::~Server_side_cursor() +{ +} + + +void Server_side_cursor::operator delete(void *ptr, size_t size) +{ + Server_side_cursor *cursor= (Server_side_cursor*) ptr; + MEM_ROOT own_root= *cursor->mem_root; + + DBUG_ENTER("Server_side_cursor::operator delete"); + TRASH(ptr, size); + /* + If this cursor has never been opened mem_root is empty. Otherwise + mem_root points to the memory the cursor object was allocated in. + In this case it's important to call free_root last, and free a copy + instead of *mem_root to avoid writing into freed memory. 
+ */ + free_root(&own_root, MYF(0)); + DBUG_VOID_RETURN; +} + +/**************************************************************************** + Sensitive_cursor +****************************************************************************/ + +Sensitive_cursor::Sensitive_cursor(THD *thd, select_result *result_arg) + :Server_side_cursor(&main_mem_root, result_arg), + stmt_arena(0), + join(0), + close_at_commit(FALSE) +{ + /* We will overwrite it at open anyway. */ + init_sql_alloc(&main_mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0); + thr_lock_owner_init(&lock_id, &thd->lock_info); + bzero((void*) ht_info, sizeof(ht_info)); +} + + +void +Sensitive_cursor::post_open(THD *thd) +{ + Engine_info *info; + /* + We need to save and reset thd->mem_root, otherwise it'll be + freed later in mysql_parse. + + We can't just change thd->mem_root here as we want to keep the + things that are already allocated in thd->mem_root for + Sensitive_cursor::fetch() + */ + *mem_root= *thd->mem_root; + stmt_arena= thd->stmt_arena; + state= stmt_arena->state; + /* Allocate a new memory root for thd */ + init_sql_alloc(thd->mem_root, + thd->variables.query_alloc_block_size, + thd->variables.query_prealloc_size); + + /* + Save tables and zero THD pointers to prevent table close in + close_thread_tables. 
+ */ + derived_tables= thd->derived_tables; + open_tables= thd->open_tables; + lock= thd->lock; + query_id= thd->query_id; + free_list= thd->free_list; + change_list= thd->change_list; + reset_thd(thd); + /* Now we have an active cursor and can cause a deadlock */ + thd->lock_info.n_cursors++; + + close_at_commit= FALSE; /* reset in case we're reusing the cursor */ + info= &ht_info[0]; + for (handlerton **pht= thd->transaction.stmt.ht; *pht; pht++) + { + const handlerton *ht= *pht; + close_at_commit|= test(ht->flags & HTON_CLOSE_CURSORS_AT_COMMIT); + if (ht->create_cursor_read_view) + { + info->ht= ht; + info->read_view= (ht->create_cursor_read_view)(); + ++info; + } + } + /* + XXX: thd->locked_tables is not changed. + What problems can we have with it if cursor is open? + TODO: must be fixed because of the prelocked mode. + */ +} + + +void +Sensitive_cursor::reset_thd(THD *thd) +{ + thd->derived_tables= 0; + thd->open_tables= 0; + thd->lock= 0; + thd->free_list= 0; + thd->change_list.empty(); +} + + +int +Sensitive_cursor::open(JOIN *join_arg) +{ + join= join_arg; + THD *thd= join->thd; + /* First non-constant table */ + JOIN_TAB *join_tab= join->join_tab + join->const_tables; + DBUG_ENTER("Sensitive_cursor::open"); + + join->change_result(result); + /* + Send fields description to the client; server_status is sent + in 'EOF' packet, which follows send_fields(). + We don't simply use SEND_EOF flag of send_fields because we also + want to flush the network buffer, which is done only in a standalone + send_eof(). + */ + result->send_fields(*join->fields, Protocol::SEND_NUM_ROWS); + thd->server_status|= SERVER_STATUS_CURSOR_EXISTS; + result->send_eof(); + thd->server_status&= ~SERVER_STATUS_CURSOR_EXISTS; + + /* Prepare JOIN for reading rows. 
*/ + join->tmp_table= 0; + join->join_tab[join->tables-1].next_select= setup_end_select_func(join); + join->send_records= 0; + join->fetch_limit= join->unit->offset_limit_cnt; + + /* Disable JOIN CACHE as it is not working with cursors yet */ + for (JOIN_TAB *tab= join_tab; + tab != join->join_tab + join->tables - 1; + tab++) + { + if (tab->next_select == sub_select_cache) + tab->next_select= sub_select; + } + + DBUG_ASSERT(join_tab->table->reginfo.not_exists_optimize == 0); + DBUG_ASSERT(join_tab->not_used_in_distinct == 0); + /* + null_row is set only if row not found and it's outer join: should never + happen for the first table in join_tab list + */ + DBUG_ASSERT(join_tab->table->null_row == 0); + DBUG_RETURN(0); +} + + +/* + SYNOPSIS + Sensitive_cursor::fetch() + num_rows fetch up to this number of rows (maybe less) + + DESCRIPTION + Fetch next num_rows rows from the cursor and send them to the client + + Precondition: + Sensitive_cursor is open + + RETURN VALUES: + none, this function will send OK to the clinet or set an error + message in THD +*/ + +void +Sensitive_cursor::fetch(ulong num_rows) +{ + THD *thd= join->thd; + JOIN_TAB *join_tab= join->join_tab + join->const_tables; + enum_nested_loop_state error= NESTED_LOOP_OK; + Query_arena backup_arena; + Engine_info *info; + DBUG_ENTER("Sensitive_cursor::fetch"); + DBUG_PRINT("enter",("rows: %lu", num_rows)); + + DBUG_ASSERT(thd->derived_tables == 0 && thd->open_tables == 0 && + thd->lock == 0); + + thd->derived_tables= derived_tables; + thd->open_tables= open_tables; + thd->lock= lock; + thd->query_id= query_id; + thd->change_list= change_list; + /* save references to memory allocated during fetch */ + thd->set_n_backup_active_arena(this, &backup_arena); + + for (info= ht_info; info->read_view ; info++) + (info->ht->set_cursor_read_view)(info->read_view); + + join->fetch_limit+= num_rows; + + error= sub_select(join, join_tab, 0); + if (error == NESTED_LOOP_OK || error == NESTED_LOOP_NO_MORE_ROWS) + error= 
sub_select(join,join_tab,1); + if (error == NESTED_LOOP_QUERY_LIMIT) + error= NESTED_LOOP_OK; /* select_limit used */ + if (error == NESTED_LOOP_CURSOR_LIMIT) + join->resume_nested_loop= TRUE; + + ha_release_temporary_latches(thd); + + /* Grab free_list here to correctly free it in close */ + thd->restore_active_arena(this, &backup_arena); + + change_list= thd->change_list; + reset_thd(thd); + + for (info= ht_info; info->read_view; info++) + (info->ht->set_cursor_read_view)(0); + + if (error == NESTED_LOOP_CURSOR_LIMIT) + { + /* Fetch limit worked, possibly more rows are there */ + thd->server_status|= SERVER_STATUS_CURSOR_EXISTS; + result->send_eof(); + thd->server_status&= ~SERVER_STATUS_CURSOR_EXISTS; + } + else + { + close(); + if (error == NESTED_LOOP_OK) + { + thd->server_status|= SERVER_STATUS_LAST_ROW_SENT; + result->send_eof(); + thd->server_status&= ~SERVER_STATUS_LAST_ROW_SENT; + } + else if (error != NESTED_LOOP_KILLED) + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + } + DBUG_VOID_RETURN; +} + + +void +Sensitive_cursor::close() +{ + THD *thd= join->thd; + DBUG_ENTER("Sensitive_cursor::close"); + + for (Engine_info *info= ht_info; info->read_view; info++) + { + (info->ht->close_cursor_read_view)(info->read_view); + info->read_view= 0; + info->ht= 0; + } + + thd->change_list= change_list; + { + /* + XXX: Another hack: we need to set THD state as if in a fetch to be + able to call stmt close. 
+ */ + DBUG_ASSERT(lock || open_tables || derived_tables); + + TABLE *tmp_derived_tables= thd->derived_tables; + MYSQL_LOCK *tmp_lock= thd->lock; + + thd->open_tables= open_tables; + thd->derived_tables= derived_tables; + thd->lock= lock; + + /* Is expected to at least close tables and empty thd->change_list */ + stmt_arena->cleanup_stmt(); + + thd->open_tables= tmp_derived_tables; + thd->derived_tables= tmp_derived_tables; + thd->lock= tmp_lock; + } + thd->lock_info.n_cursors--; /* Decrease the number of active cursors */ + join= 0; + stmt_arena= 0; + free_items(); + change_list.empty(); + DBUG_VOID_RETURN; +} + + +Sensitive_cursor::~Sensitive_cursor() +{ + if (is_open()) + close(); +} + +/*************************************************************************** + Materialized_cursor +****************************************************************************/ + +Materialized_cursor::Materialized_cursor(select_result *result_arg, + TABLE *table_arg) + :Server_side_cursor(&table_arg->mem_root, result_arg), + table(table_arg), + fetch_limit(0), + fetch_count(0) +{ + fake_unit.init_query(); + fake_unit.thd= table->in_use; +} + + +int Materialized_cursor::open(JOIN *join __attribute__((unused))) +{ + THD *thd= fake_unit.thd; + int rc; + Query_arena backup_arena; + + thd->set_n_backup_active_arena(this, &backup_arena); + /* Create a list of fields and start sequential scan */ + rc= (table->fill_item_list(&item_list) || + result->prepare(item_list, &fake_unit) || + table->file->ha_rnd_init(TRUE)); + thd->restore_active_arena(this, &backup_arena); + if (rc == 0) + { + /* + Now send the result set metadata to the client. We need to + do it here, as in Select_materialize::send_fields the items + for column types are not yet created (send_fields requires + a list of items). 
The new types may differ from the original + ones sent at prepare if some of them were altered by MySQL + HEAP tables mechanism -- used when create_tmp_field_from_item + may alter the original column type. + + We can't simply supply SEND_EOF flag to send_fields, because + send_fields doesn't flush the network buffer. + */ + rc= result->send_fields(item_list, Protocol::SEND_NUM_ROWS); + thd->server_status|= SERVER_STATUS_CURSOR_EXISTS; + result->send_eof(); + thd->server_status&= ~SERVER_STATUS_CURSOR_EXISTS; + } + return rc; +} + + +/* + Fetch up to the given number of rows from a materialized cursor. + + DESCRIPTION + Precondition: the cursor is open. + + If the cursor points after the last row, the fetch will automatically + close the cursor and not send any data (except the 'EOF' packet + with SERVER_STATUS_LAST_ROW_SENT). This is an extra round trip + and probably should be improved to return + SERVER_STATUS_LAST_ROW_SENT along with the last row. + + RETURN VALUE + none, in case of success the row is sent to the client, otherwise + an error message is set in THD +*/ + +void Materialized_cursor::fetch(ulong num_rows) +{ + THD *thd= table->in_use; + + int res= 0; + result->begin_dataset(); + for (fetch_limit+= num_rows; fetch_count < fetch_limit; fetch_count++) + { + if ((res= table->file->rnd_next(table->record[0]))) + break; + /* Send data only if the read was successful. 
*/ + result->send_data(item_list); + } + + switch (res) { + case 0: + thd->server_status|= SERVER_STATUS_CURSOR_EXISTS; + result->send_eof(); + thd->server_status&= ~SERVER_STATUS_CURSOR_EXISTS; + break; + case HA_ERR_END_OF_FILE: + thd->server_status|= SERVER_STATUS_LAST_ROW_SENT; + result->send_eof(); + thd->server_status&= ~SERVER_STATUS_LAST_ROW_SENT; + close(); + break; + default: + table->file->print_error(res, MYF(0)); + close(); + break; + } +} + + +void Materialized_cursor::close() +{ + /* Free item_list items */ + free_items(); + (void) table->file->ha_rnd_end(); + /* + We need to grab table->mem_root to prevent free_tmp_table from freeing: + the cursor object was allocated in this memory. + */ + main_mem_root= table->mem_root; + mem_root= &main_mem_root; + clear_alloc_root(&table->mem_root); + free_tmp_table(table->in_use, table); + table= 0; +} + + +Materialized_cursor::~Materialized_cursor() +{ + if (is_open()) + close(); +} + + +/*************************************************************************** + Select_materialize +****************************************************************************/ + +bool Select_materialize::send_fields(List<Item> &list, uint flags) +{ + DBUG_ASSERT(table == 0); + if (create_result_table(unit->thd, unit->get_unit_column_types(), + FALSE, thd->options | TMP_TABLE_ALL_COLUMNS, "")) + return TRUE; + return FALSE; +} + diff --git a/sql/sql_cursor.h b/sql/sql_cursor.h new file mode 100644 index 00000000000..6edd6b24b36 --- /dev/null +++ b/sql/sql_cursor.h @@ -0,0 +1,65 @@ +/* Copyright (C) 2005 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _sql_cursor_h_ +#define _sql_cursor_h_ + +#ifdef USE_PRAGMA_INTERFACE +#pragma interface /* gcc class interface */ +#endif + +/* + Declarations for implementation of server side cursors. Only + read-only non-scrollable cursors are currently implemented. +*/ + +/* + Server_side_cursor -- an interface for materialized and + sensitive (non-materialized) implementation of cursors. All + cursors are self-contained (created in their own memory root). + For that reason they must be deleted only using a pointer to + Server_side_cursor, not to its base class. +*/ + +class Server_side_cursor: protected Query_arena, public Sql_alloc +{ +protected: + /* Row destination used for fetch */ + select_result *result; +public: + Server_side_cursor(MEM_ROOT *mem_root_arg, select_result *result_arg) + :Query_arena(mem_root_arg, INITIALIZED), result(result_arg) + {} + + virtual bool is_open() const= 0; + + virtual int open(JOIN *top_level_join)= 0; + virtual void fetch(ulong num_rows)= 0; + virtual void close()= 0; + virtual ~Server_side_cursor(); + + static void operator delete(void *ptr, size_t size); +}; + + +int mysql_open_cursor(THD *thd, uint flags, + select_result *result, + Server_side_cursor **res); + +/* Possible values for flags */ + +enum { ANY_CURSOR= 1, ALWAYS_MATERIALIZED_CURSOR= 2 }; + +#endif /* _sql_cusor_h_ */ diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 035a0b22a6b..f95ed8b6fc9 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
+ the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -19,6 +18,7 @@ #include "mysql_priv.h" #include <mysys_err.h> +#include "sp.h" #include <my_dir.h> #include <m_ctype.h> #ifdef __WIN__ @@ -35,6 +35,8 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, const char *path, uint level, TABLE_LIST **dropped_tables); +static long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path); +static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error); /* Database options hash */ static HASH dboptions; static my_bool dboptions_init= 0; @@ -284,7 +286,7 @@ static bool write_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create) } -/* +/* Load database options file load_db_opt() @@ -292,7 +294,6 @@ static bool write_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create) create Where to store the read options DESCRIPTION - For now, only default-character-set is read. 
RETURN VALUES 0 File found @@ -310,73 +311,121 @@ bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create) bzero((char*) create,sizeof(*create)); create->default_table_charset= thd->variables.collation_server; - + /* Check if options for this database are already in the hash */ if (!get_dbopt(path, create)) - DBUG_RETURN(0); - + DBUG_RETURN(0); + /* Otherwise, load options from the .opt file */ - if ((file=my_open(path, O_RDONLY | O_SHARE, MYF(0))) >= 0) - { - IO_CACHE cache; - init_io_cache(&cache, file, IO_SIZE, READ_CACHE, 0, 0, MYF(0)); + if ((file=my_open(path, O_RDONLY | O_SHARE, MYF(0))) < 0) + goto err1; - while ((int) (nbytes= my_b_gets(&cache, (char*) buf, sizeof(buf))) > 0) + IO_CACHE cache; + if (init_io_cache(&cache, file, IO_SIZE, READ_CACHE, 0, 0, MYF(0))) + goto err2; + + while ((int) (nbytes= my_b_gets(&cache, (char*) buf, sizeof(buf))) > 0) + { + char *pos= buf+nbytes-1; + /* Remove end space and control characters */ + while (pos > buf && !my_isgraph(&my_charset_latin1, pos[-1])) + pos--; + *pos=0; + if ((pos= strchr(buf, '='))) { - char *pos= buf+nbytes-1; - /* Remove end space and control characters */ - while (pos > buf && !my_isgraph(&my_charset_latin1, pos[-1])) - pos--; - *pos=0; - if ((pos= strchr(buf, '='))) + if (!strncmp(buf,"default-character-set", (pos-buf))) + { + /* + Try character set name, and if it fails + try collation name, probably it's an old + 4.1.0 db.opt file, which didn't have + separate default-character-set and + default-collation commands. 
+ */ + if (!(create->default_table_charset= + get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(0))) && + !(create->default_table_charset= + get_charset_by_name(pos+1, MYF(0)))) + { + sql_print_error("Error while loading database options: '%s':",path); + sql_print_error(ER(ER_UNKNOWN_CHARACTER_SET),pos+1); + create->default_table_charset= default_charset_info; + } + } + else if (!strncmp(buf,"default-collation", (pos-buf))) { - if (!strncmp(buf,"default-character-set", (pos-buf))) - { - /* - Try character set name, and if it fails - try collation name, probably it's an old - 4.1.0 db.opt file, which didn't have - separate default-character-set and - default-collation commands. - */ - if (!(create->default_table_charset= - get_charset_by_csname(pos+1, MY_CS_PRIMARY, MYF(0))) && - !(create->default_table_charset= - get_charset_by_name(pos+1, MYF(0)))) - { - sql_print_error("Error while loading database options: '%s':",path); - sql_print_error(ER(ER_UNKNOWN_CHARACTER_SET),pos+1); - create->default_table_charset= default_charset_info; - } - } - else if (!strncmp(buf,"default-collation", (pos-buf))) - { - if (!(create->default_table_charset= get_charset_by_name(pos+1, - MYF(0)))) - { - sql_print_error("Error while loading database options: '%s':",path); - sql_print_error(ER(ER_UNKNOWN_COLLATION),pos+1); - create->default_table_charset= default_charset_info; - } - } + if (!(create->default_table_charset= get_charset_by_name(pos+1, + MYF(0)))) + { + sql_print_error("Error while loading database options: '%s':",path); + sql_print_error(ER(ER_UNKNOWN_COLLATION),pos+1); + create->default_table_charset= default_charset_info; + } } } - end_io_cache(&cache); - my_close(file,MYF(0)); - /* - Put the loaded value into the hash. - Note that another thread could've added the same - entry to the hash after we called get_dbopt(), - but it's not an error, as put_dbopt() takes this - possibility into account. 
- */ - error= put_dbopt(path, create); } + /* + Put the loaded value into the hash. + Note that another thread could've added the same + entry to the hash after we called get_dbopt(), + but it's not an error, as put_dbopt() takes this + possibility into account. + */ + error= put_dbopt(path, create); + + end_io_cache(&cache); +err2: + my_close(file,MYF(0)); +err1: DBUG_RETURN(error); } /* + Retrieve database options by name. Load database options file or fetch from + cache. + + SYNOPSIS + load_db_opt_by_name() + db_name Database name + db_create_info Where to store the database options + + DESCRIPTION + load_db_opt_by_name() is a shortcut for load_db_opt(). + + NOTE + Although load_db_opt_by_name() (and load_db_opt()) returns status of + the operation, it is useless usually and should be ignored. The problem + is that there are 1) system databases ("mysql") and 2) virtual + databases ("information_schema"), which do not contain options file. + So, load_db_opt[_by_name]() returns FALSE for these databases, but this + is not an error. + + load_db_opt[_by_name]() clears db_create_info structure in any case, so + even on failure it contains valid data. So, common use case is just + call load_db_opt[_by_name]() without checking return value and use + db_create_info right after that. + + RETURN VALUES (read NOTE!) + FALSE Success + TRUE Failed to retrieve options +*/ + +bool load_db_opt_by_name(THD *thd, const char *db_name, + HA_CREATE_INFO *db_create_info) +{ + char db_opt_path[FN_REFLEN]; + + strxnmov(db_opt_path, sizeof (db_opt_path) - 1, mysql_data_home, "/", + db_name, "/", MY_DB_OPT_FILE, NullS); + + unpack_filename(db_opt_path, db_opt_path); + + return load_db_opt(thd, db_opt_path, db_create_info); +} + + +/* Create a database SYNOPSIS @@ -395,13 +444,13 @@ bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create) (The 'silent' flags turns off 1 and 3.) 
RETURN VALUES - 0 ok - -1 Error + FALSE ok + TRUE Error */ -int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, - bool silent) +bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, + bool silent) { char path[FN_REFLEN+16]; long result= 1; @@ -410,16 +459,34 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, uint create_options= create_info ? create_info->options : 0; uint path_len; DBUG_ENTER("mysql_create_db"); - - VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); - /* do not create database if another thread is holding read lock */ + /* do not create 'information_schema' db */ + if (!my_strcasecmp(system_charset_info, db, information_schema_name.str)) + { + my_error(ER_DB_CREATE_EXISTS, MYF(0), db); + DBUG_RETURN(-1); + } + + /* + Do not create database if another thread is holding read lock. + Wait for global read lock before acquiring LOCK_mysql_create_db. + After wait_if_global_read_lock() we have protection against another + global read lock. If we would acquire LOCK_mysql_create_db first, + another thread could step in and get the global read lock before we + reach wait_if_global_read_lock(). If this thread tries the same as we + (admin a db), it would then go and wait on LOCK_mysql_create_db... + Furthermore wait_if_global_read_lock() checks if the current thread + has the global read lock and refuses the operation with + ER_CANT_UPDATE_WITH_READLOCK if applicable. 
+ */ if (wait_if_global_read_lock(thd, 0, 1)) { error= -1; goto exit2; } + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); + /* Check directory */ strxmov(path, mysql_data_home, "/", db, NullS); path_len= unpack_dirname(path,path); // Convert if not unix @@ -429,7 +496,7 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, { if (!(create_options & HA_LEX_CREATE_IF_NOT_EXISTS)) { - my_error(ER_DB_CREATE_EXISTS,MYF(0),db); + my_error(ER_DB_CREATE_EXISTS, MYF(0), db); error= -1; goto exit; } @@ -444,12 +511,12 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, { if (my_errno != ENOENT) { - my_error(EE_STAT, MYF(0),path,my_errno); + my_error(EE_STAT, MYF(0), path, my_errno); goto exit; } if (my_mkdir(path,0777,MYF(0)) < 0) { - my_error(ER_CANT_CREATE_DB,MYF(0),db,my_errno); + my_error(ER_CANT_CREATE_DB, MYF(0), db, my_errno); error= -1; goto exit; } @@ -492,7 +559,6 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, query= thd->query; query_length= thd->query_length; } - mysql_update_log.write(thd, query, query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, query, query_length, 0, @@ -518,34 +584,46 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, qinfo.db = db; qinfo.db_len = strlen(db); + /* These DDL methods and logging protected with LOCK_mysql_create_db */ mysql_bin_log.write(&qinfo); } send_ok(thd, result); } exit: + VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); start_waiting_global_read_lock(thd); exit2: - VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); DBUG_RETURN(error); } /* db-name is already validated when we come here */ -int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) +bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) { char path[FN_REFLEN+16]; long result=1; int error= 0; DBUG_ENTER("mysql_alter_db"); - VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); - - /* do not alter database if another 
thread is holding read lock */ + /* + Do not alter database if another thread is holding read lock. + Wait for global read lock before acquiring LOCK_mysql_create_db. + After wait_if_global_read_lock() we have protection against another + global read lock. If we would acquire LOCK_mysql_create_db first, + another thread could step in and get the global read lock before we + reach wait_if_global_read_lock(). If this thread tries the same as we + (admin a db), it would then go and wait on LOCK_mysql_create_db... + Furthermore wait_if_global_read_lock() checks if the current thread + has the global read lock and refuses the operation with + ER_CANT_UPDATE_WITH_READLOCK if applicable. + */ if ((error=wait_if_global_read_lock(thd,0,1))) goto exit2; + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); + /* Check directory */ strxmov(path, mysql_data_home, "/", db, "/", MY_DB_OPT_FILE, NullS); fn_format(path, path, "", "", MYF(MY_UNPACK_FILENAME)); @@ -564,7 +642,6 @@ int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) thd->variables.collation_database= thd->db_charset; } - mysql_update_log.write(thd,thd->query, thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, 0, @@ -579,15 +656,16 @@ int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) qinfo.db_len = strlen(db); thd->clear_error(); + /* These DDL methods and logging protected with LOCK_mysql_create_db */ mysql_bin_log.write(&qinfo); } send_ok(thd, result); exit: + VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); start_waiting_global_read_lock(thd); exit2: - VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); - DBUG_RETURN(error ? 
-1 : 0); /* -1 to delegate send_error() */ + DBUG_RETURN(error); } @@ -598,34 +676,46 @@ exit2: mysql_rm_db() thd Thread handle db Database name in the case given by user - It's already validated when we come here + It's already validated and set to lower case + (if needed) when we come here if_exists Don't give error if database doesn't exists silent Don't generate errors RETURN - 0 ok (Database dropped) - -1 Error generated + FALSE ok (Database dropped) + ERROR Error */ -int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) +bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) { long deleted=0; int error= 0; - char path[FN_REFLEN+16], tmp_db[NAME_LEN+1]; + char path[FN_REFLEN+16]; MY_DIR *dirp; uint length; TABLE_LIST* dropped_tables= 0; DBUG_ENTER("mysql_rm_db"); - VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); - - /* do not drop database if another thread is holding read lock */ + /* + Do not drop database if another thread is holding read lock. + Wait for global read lock before acquiring LOCK_mysql_create_db. + After wait_if_global_read_lock() we have protection against another + global read lock. If we would acquire LOCK_mysql_create_db first, + another thread could step in and get the global read lock before we + reach wait_if_global_read_lock(). If this thread tries the same as we + (admin a db), it would then go and wait on LOCK_mysql_create_db... + Furthermore wait_if_global_read_lock() checks if the current thread + has the global read lock and refuses the operation with + ER_CANT_UPDATE_WITH_READLOCK if applicable. 
+ */ if (wait_if_global_read_lock(thd, 0, 1)) { error= -1; goto exit2; } + VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); + (void) sprintf(path,"%s/%s",mysql_data_home,db); length= unpack_dirname(path,path); // Convert if not unix strmov(path+length, MY_DB_OPT_FILE); // Append db option file name @@ -638,7 +728,7 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) if (!if_exists) { error= -1; - my_error(ER_DB_DROP_EXISTS,MYF(0),db); + my_error(ER_DB_DROP_EXISTS, MYF(0), db); goto exit; } else @@ -661,13 +751,6 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) error = 0; } } - if (lower_case_table_names) - { - /* Convert database to lower case */ - strmov(tmp_db, db); - my_casedn_str(files_charset_info, tmp_db); - db= tmp_db; - } if (!silent && deleted>=0) { const char *query; @@ -684,7 +767,6 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) query =thd->query; query_length= thd->query_length; } - mysql_update_log.write(thd, query, query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, query, query_length, 0, @@ -698,6 +780,7 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) qinfo.db_len = strlen(db); thd->clear_error(); + /* These DDL methods and logging protected with LOCK_mysql_create_db */ mysql_bin_log.write(&qinfo); } thd->server_status|= SERVER_STATUS_DB_DROPPED; @@ -716,63 +799,45 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) query_end= query + MAX_DROP_TABLE_Q_LEN; db_len= strlen(db); - for (tbl= dropped_tables; tbl; tbl= tbl->next) + for (tbl= dropped_tables; tbl; tbl= tbl->next_local) { uint tbl_name_len; /* 3 for the quotes and the comma*/ - tbl_name_len= strlen(tbl->real_name) + 3; + tbl_name_len= strlen(tbl->table_name) + 3; if (query_pos + tbl_name_len + 1 >= query_end) { + /* These DDL methods and logging protected with LOCK_mysql_create_db */ write_to_binlog(thd, query, query_pos -1 - query, db, db_len); query_pos= query_data_start; } *query_pos++ = 
'`'; - query_pos= strmov(query_pos,tbl->real_name); + query_pos= strmov(query_pos,tbl->table_name); *query_pos++ = '`'; *query_pos++ = ','; } if (query_pos != query_data_start) { + /* These DDL methods and logging protected with LOCK_mysql_create_db */ write_to_binlog(thd, query, query_pos -1 - query, db, db_len); } } exit: - start_waiting_global_read_lock(thd); + (void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */ /* - If this database was the client's selected database, we silently change the - client's selected database to nothing (to have an empty SELECT DATABASE() - in the future). For this we free() thd->db and set it to 0. But we don't do - free() for the slave thread. Indeed, doing a x_free() on it leads to nasty - problems (i.e. long painful debugging) because in this thread, thd->db is - the same as data_buf and db of the Query_log_event which is dropping the - database. So if you free() thd->db, you're freeing data_buf. You set - thd->db to 0 but not data_buf (thd->db and data_buf are two distinct - pointers which point to the same place). Then in ~Query_log_event(), we - have 'if (data_buf) free(data_buf)' data_buf is !=0 so this makes a - DOUBLE free(). - Side effects of this double free() are, randomly (depends on the machine), - when the slave is replicating a DROP DATABASE: - - garbage characters in the error message: - "Error 'Can't drop database 'test2'; database doesn't exist' on query - 'h4zI©'" - - segfault - - hang in "free(vio)" (yes!) in the I/O or SQL slave threads (so slave - server hangs at shutdown etc). + If this database was the client's selected database, we silently + change the client's selected database to nothing (to have an empty + SELECT DATABASE() in the future). For this we free() thd->db and set + it to 0. 
*/ if (thd->db && !strcmp(thd->db, db)) - { - if (!(thd->slave_thread)) /* a slave thread will free it itself */ - x_free(thd->db); - thd->db= 0; - thd->db_length= 0; - } -exit2: + thd->set_db(NULL, 0); VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); - + start_waiting_global_read_lock(thd); +exit2: DBUG_RETURN(error); } @@ -810,9 +875,9 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, continue; /* Check if file is a raid directory */ - if ((my_isdigit(&my_charset_latin1, file->name[0]) || + if ((my_isdigit(system_charset_info, file->name[0]) || (file->name[0] >= 'a' && file->name[0] <= 'f')) && - (my_isdigit(&my_charset_latin1, file->name[1]) || + (my_isdigit(system_charset_info, file->name[1]) || (file->name[1] >= 'a' && file->name[1] <= 'f')) && !file->name[2] && !level) { @@ -838,6 +903,24 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, found_other_files++; continue; } + else if (file->name[0] == 'a' && file->name[1] == 'r' && + file->name[2] == 'c' && file->name[3] == '\0') + { + /* .frm archive */ + char newpath[FN_REFLEN]; + MY_DIR *new_dirp; + strxmov(newpath, org_path, "/", "arc", NullS); + (void) unpack_filename(newpath, newpath); + if ((new_dirp = my_dir(newpath, MYF(MY_DONT_SORT)))) + { + DBUG_PRINT("my",("Archive subdir found: %s", newpath)); + if ((mysql_rm_arc_files(thd, new_dirp, newpath)) < 0) + goto err; + continue; + } + found_other_files++; + continue; + } extension= fn_ext(file->name); if (find_type(extension, &deletable_extentions,1+2) <= 0) { @@ -856,11 +939,11 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, if (!table_list) goto err; table_list->db= (char*) (table_list+1); - strmov(table_list->real_name= strmov(table_list->db,db)+1, file->name); - table_list->alias= table_list->real_name; // If lower_case_table_names=2 + strmov(table_list->table_name= strmov(table_list->db,db)+1, file->name); + table_list->alias= table_list->table_name; // If 
lower_case_table_names=2 /* Link into list */ (*tot_list_next)= table_list; - tot_list_next= &table_list->next; + tot_list_next= &table_list->next_local; deleted++; } else @@ -900,44 +983,141 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, } else { - char tmp_path[FN_REFLEN], *pos; - char *path= tmp_path; - unpack_filename(tmp_path,org_path); + /* Don't give errors if we can't delete 'RAID' directory */ + if (rm_dir_w_symlink(org_path, level == 0)) + DBUG_RETURN(-1); + } + + DBUG_RETURN(deleted); + +err: + my_dirend(dirp); + DBUG_RETURN(-1); +} + + +/* + Remove directory with symlink + + SYNOPSIS + rm_dir_w_symlink() + org_path path of derictory + send_error send errors + RETURN + 0 OK + 1 ERROR +*/ + +static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error) +{ + char tmp_path[FN_REFLEN], *pos; + char *path= tmp_path; + DBUG_ENTER("rm_dir_w_symlink"); + unpack_filename(tmp_path, org_path); #ifdef HAVE_READLINK - int error; - - /* Remove end FN_LIBCHAR as this causes problem on Linux in readlink */ - pos=strend(path); - if (pos > path && pos[-1] == FN_LIBCHAR) - *--pos=0; + int error; + char tmp2_path[FN_REFLEN]; - if ((error=my_readlink(filePath, path, MYF(MY_WME))) < 0) - DBUG_RETURN(-1); - if (!error) + /* Remove end FN_LIBCHAR as this causes problem on Linux in readlink */ + pos= strend(path); + if (pos > path && pos[-1] == FN_LIBCHAR) + *--pos=0; + + if ((error= my_readlink(tmp2_path, path, MYF(MY_WME))) < 0) + DBUG_RETURN(1); + if (!error) + { + if (my_delete(path, MYF(send_error ? MY_WME : 0))) { - if (my_delete(path,MYF(!level ? 
MY_WME : 0))) - { - /* Don't give errors if we can't delete 'RAID' directory */ - if (level) - DBUG_RETURN(deleted); - DBUG_RETURN(-1); - } - /* Delete directory symbolic link pointed at */ - path= filePath; + DBUG_RETURN(send_error); } + /* Delete directory symbolic link pointed at */ + path= tmp2_path; + } #endif - /* Remove last FN_LIBCHAR to not cause a problem on OS/2 */ - pos=strend(path); + /* Remove last FN_LIBCHAR to not cause a problem on OS/2 */ + pos= strend(path); - if (pos > path && pos[-1] == FN_LIBCHAR) - *--pos=0; - /* Don't give errors if we can't delete 'RAID' directory */ - if (rmdir(path) < 0 && !level) + if (pos > path && pos[-1] == FN_LIBCHAR) + *--pos=0; + if (rmdir(path) < 0 && send_error) + { + my_error(ER_DB_DROP_RMDIR, MYF(0), path, errno); + DBUG_RETURN(1); + } + DBUG_RETURN(0); +} + + +/* + Remove .frm archives from directory + + SYNOPSIS + thd thread handler + dirp list of files in archive directory + db data base name + org_path path of archive directory + + RETURN + > 0 number of removed files + -1 error +*/ +static long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, + const char *org_path) +{ + long deleted= 0; + ulong found_other_files= 0; + char filePath[FN_REFLEN]; + DBUG_ENTER("mysql_rm_arc_files"); + DBUG_PRINT("enter", ("path: %s", org_path)); + + for (uint idx=0 ; + idx < (uint) dirp->number_off_files && !thd->killed ; + idx++) + { + FILEINFO *file=dirp->dir_entry+idx; + char *extension, *revision; + DBUG_PRINT("info",("Examining: %s", file->name)); + + /* skiping . and .. */ + if (file->name[0] == '.' && (!file->name[1] || + (file->name[1] == '.' && !file->name[2]))) + continue; + + extension= fn_ext(file->name); + if (extension[0] != '.' 
|| + extension[1] != 'f' || extension[2] != 'r' || + extension[3] != 'm' || extension[4] != '-') { - my_error(ER_DB_DROP_RMDIR, MYF(0), path, errno); - DBUG_RETURN(-1); + found_other_files++; + continue; + } + revision= extension+5; + while (*revision && my_isdigit(system_charset_info, *revision)) + revision++; + if (*revision) + { + found_other_files++; + continue; + } + strxmov(filePath, org_path, "/", file->name, NullS); + if (my_delete_with_symlink(filePath,MYF(MY_WME))) + { + goto err; } } + if (thd->killed) + goto err; + + my_dirend(dirp); + + /* + If the directory is a symbolic link, remove the link first, then + remove the directory the symbolic link pointed at + */ + if (!found_other_files && + rm_dir_w_symlink(org_path, 0)) + DBUG_RETURN(-1); DBUG_RETURN(deleted); err: @@ -947,94 +1127,184 @@ err: /* - Change default database. + Change the current database. SYNOPSIS mysql_change_db() - thd Thread handler - name Databasename + thd thread handle + name database name + no_access_check if TRUE, don't do access check. In this + case name may be "" DESCRIPTION - Becasue the database name may have been given directly from the - communication packet (in case of 'connect' or 'COM_INIT_DB') - we have to do end space removal in this function. + Check that the database name corresponds to a valid and + existent database, check access rights (unless called with + no_access_check), and set the current database. This function + is called to change the current database upon user request + (COM_CHANGE_DB command) or temporarily, to execute a stored + routine. NOTES - Do as little as possible in this function, as it is not called for the - replication slave SQL thread (for that thread, setting of thd->db is done - in ::exec_event() methods of log_event.cc). + This function is not the only way to switch the database that + is currently employed. When the replication slave thread + switches the database before executing a query, it calls + thd->set_db directly. 
However, if the query, in turn, uses + a stored routine, the stored routine will use this function, + even if it's run on the slave. + + This function allocates the name of the database on the system + heap: this is necessary to be able to uniformly change the + database from any module of the server. Up to 5.0 different + modules were using different memory to store the name of the + database, and this led to memory corruption: a stack pointer + set by Stored Procedures was used by replication after the + stack address was long gone. + + This function does not send anything, including error + messages, to the client. If that should be sent to the client, + call net_send_error after this function. RETURN VALUES - 0 ok + 0 OK 1 error */ -bool mysql_change_db(THD *thd, const char *name) +bool mysql_change_db(THD *thd, const char *name, bool no_access_check) { - int length, db_length; - char *dbname=my_strdup((char*) name,MYF(MY_WME)); - char path[FN_REFLEN]; - HA_CREATE_INFO create; + int db_length; + char *db_name; + bool system_db= 0; #ifndef NO_EMBEDDED_ACCESS_CHECKS ulong db_access; + Security_context *sctx= thd->security_ctx; + LINT_INIT(db_access); #endif DBUG_ENTER("mysql_change_db"); + DBUG_PRINT("enter",("name: '%s'",name)); - if (!dbname || !(db_length= strlen(dbname))) + if (name == NULL || name[0] == '\0' && no_access_check == FALSE) { - x_free(dbname); /* purecov: inspected */ - send_error(thd,ER_NO_DB_ERROR); /* purecov: inspected */ + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); DBUG_RETURN(1); /* purecov: inspected */ } - if (check_db_name(dbname)) + else if (name[0] == '\0') { - net_printf(thd, ER_WRONG_DB_NAME, dbname); - x_free(dbname); + /* Called from SP to restore the original database, which was NULL */ + DBUG_ASSERT(no_access_check); + system_db= 1; + db_name= NULL; + db_length= 0; + goto end; + } + /* + Now we need to make a copy because check_db_name requires a + non-constant argument. TODO: fix check_db_name. 
+ */ + if ((db_name= my_strdup(name, MYF(MY_WME))) == NULL) + DBUG_RETURN(1); /* the error is set */ + db_length= strlen(db_name); + if (check_db_name(db_name)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), db_name); + my_free(db_name, MYF(0)); DBUG_RETURN(1); } - DBUG_PRINT("info",("Use database: %s", dbname)); + DBUG_PRINT("info",("Use database: %s", db_name)); + if (!my_strcasecmp(system_charset_info, db_name, information_schema_name.str)) + { + system_db= 1; #ifndef NO_EMBEDDED_ACCESS_CHECKS - if (test_all_bits(thd->master_access,DB_ACLS)) - db_access=DB_ACLS; - else - db_access= (acl_get(thd->host,thd->ip, thd->priv_user,dbname,0) | - thd->master_access); - if (!(db_access & DB_ACLS) && (!grant_option || check_grant_db(thd,dbname))) + db_access= SELECT_ACL; +#endif + goto end; + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (!no_access_check) { - net_printf(thd,ER_DBACCESS_DENIED_ERROR, - thd->priv_user, - thd->priv_host, - dbname); - mysql_log.write(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR), - thd->priv_user, - thd->priv_host, - dbname); - my_free(dbname,MYF(0)); - DBUG_RETURN(1); + if (test_all_bits(sctx->master_access, DB_ACLS)) + db_access=DB_ACLS; + else + db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name, 0) | + sctx->master_access); + if (!(db_access & DB_ACLS) && (!grant_option || + check_grant_db(thd,db_name))) + { + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + sctx->priv_user, + sctx->priv_host, + db_name); + mysql_log.write(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR), + sctx->priv_user, sctx->priv_host, db_name); + my_free(db_name, MYF(0)); + DBUG_RETURN(1); + } } #endif - (void) sprintf(path,"%s/%s",mysql_data_home,dbname); - length=unpack_dirname(path,path); // Convert if not unix - if (length && path[length-1] == FN_LIBCHAR) - path[length-1]=0; // remove ending '\' - if (my_access(path,F_OK)) + + if (check_db_dir_existence(db_name)) { - net_printf(thd,ER_BAD_DB_ERROR,dbname); - my_free(dbname,MYF(0)); + my_error(ER_BAD_DB_ERROR, 
MYF(0), db_name); + my_free(db_name, MYF(0)); DBUG_RETURN(1); } - send_ok(thd); + +end: x_free(thd->db); - thd->db=dbname; // THD::~THD will free this - thd->db_length=db_length; + DBUG_ASSERT(db_name == NULL || db_name[0] != '\0'); + thd->reset_db(db_name, db_length); // THD::~THD will free this #ifndef NO_EMBEDDED_ACCESS_CHECKS - thd->db_access=db_access; + if (!no_access_check) + sctx->db_access= db_access; #endif - strmov(path+unpack_dirname(path,path), MY_DB_OPT_FILE); - load_db_opt(thd, path, &create); - thd->db_charset= create.default_table_charset ? - create.default_table_charset : - thd->variables.collation_server; - thd->variables.collation_database= thd->db_charset; + if (system_db) + { + thd->db_charset= system_charset_info; + thd->variables.collation_database= system_charset_info; + } + else + { + HA_CREATE_INFO create; + + load_db_opt_by_name(thd, db_name, &create); + + thd->db_charset= create.default_table_charset ? + create.default_table_charset : + thd->variables.collation_server; + thd->variables.collation_database= thd->db_charset; + } DBUG_RETURN(0); } + + +/* + Check if there is directory for the database name. + + SYNOPSIS + check_db_dir_existence() + db_name database name + + RETURN VALUES + FALSE There is directory for the specified database name. + TRUE The directory does not exist. +*/ + +bool check_db_dir_existence(const char *db_name) +{ + char db_dir_path[FN_REFLEN]; + uint db_dir_path_len; + + strxnmov(db_dir_path, sizeof (db_dir_path) - 1, mysql_data_home, "/", + db_name, NullS); + + db_dir_path_len= unpack_dirname(db_dir_path, db_dir_path); + + /* Remove trailing '/' or '\' if exists. */ + + if (db_dir_path_len && db_dir_path[db_dir_path_len - 1] == FN_LIBCHAR) + db_dir_path[db_dir_path_len - 1]= 0; + + /* Check access. 
*/ + + return my_access(db_dir_path, F_OK); +} diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index b84b2f7eef4..e653324d9bf 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -14,35 +13,41 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - /* Delete of records and truncate of tables. Multi-table deletes were introduced by Monty and Sinisa */ - - #include "mysql_priv.h" #include "ha_innodb.h" #include "sql_select.h" +#include "sp_head.h" +#include "sql_trigger.h" -int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, - SQL_LIST *order, ha_rows limit, ulong options) +bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, + SQL_LIST *order, ha_rows limit, ulonglong options, + bool reset_auto_increment) { int error; TABLE *table; SQL_SELECT *select=0; READ_RECORD info; - bool using_limit=limit != HA_POS_ERROR; - bool transactional_table, log_delayed, safe_update, const_cond; + bool using_limit=limit != HA_POS_ERROR; + bool transactional_table, safe_update, const_cond; ha_rows deleted; uint usable_index= MAX_KEY; + SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_delete"); - if ((open_and_lock_tables(thd, table_list))) - DBUG_RETURN(-1); - table= table_list->table; + if (open_and_lock_tables(thd, table_list)) + DBUG_RETURN(TRUE); + if (!(table= table_list->table)) + { + my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0), + table_list->view_db.str, table_list->view_name.str); + DBUG_RETURN(TRUE); + } error= 
table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); if (error) { @@ -52,22 +57,53 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, thd->proc_info="init"; table->map=1; - if ((error= mysql_prepare_delete(thd, table_list, &conds))) - DBUG_RETURN(error); + if (mysql_prepare_delete(thd, table_list, &conds)) + DBUG_RETURN(TRUE); + + /* check ORDER BY even if it can be ignored */ + if (order && order->elements) + { + TABLE_LIST tables; + List<Item> fields; + List<Item> all_fields; + + bzero((char*) &tables,sizeof(tables)); + tables.table = table; + tables.alias = table_list->alias; + + if (select_lex->setup_ref_array(thd, order->elements) || + setup_order(thd, select_lex->ref_pointer_array, &tables, + fields, all_fields, (ORDER*) order->first)) + { + delete select; + free_underlaid_joins(thd, &thd->lex->select_lex); + DBUG_RETURN(TRUE); + } + } const_cond= (!conds || conds->const_item()); safe_update=test(thd->options & OPTION_SAFE_UPDATES); if (safe_update && const_cond) { - send_error(thd,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE); - DBUG_RETURN(1); + my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, + ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0)); + DBUG_RETURN(TRUE); } - thd->lex->select_lex.no_error= thd->lex->ignore; + select_lex->no_error= thd->lex->ignore; - /* Test if the user wants to delete all rows */ + /* + Test if the user wants to delete all rows and deletion doesn't have + any side-effects (because of triggers), so we can use optimized + handler::delete_all_rows() method. + We implement fast TRUNCATE for InnoDB even if triggers are present. + TRUNCATE ignores triggers. 
+ */ if (!using_limit && const_cond && (!conds || conds->val_int()) && - !(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE))) + !(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE)) && + (thd->lex->sql_command == SQLCOM_TRUNCATE || + !(table->triggers && table->triggers->has_delete_triggers())) + ) { deleted= table->file->records; if (!(error=table->file->delete_all_rows())) @@ -94,14 +130,22 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, table->used_keys.clear_all(); table->quick_keys.clear_all(); // Can't use 'only index' - select=make_select(table,0,0,conds,&error); + select=make_select(table, 0, 0, conds, 0, &error); if (error) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); if ((select && select->check_quick(thd, safe_update, limit)) || !limit) { delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); + free_underlaid_joins(thd, select_lex); + thd->row_count_func= 0; send_ok(thd,0L); + + /* + We don't need to call reset_auto_increment in this case, because + mysql_truncate always gives a NULL conds argument, hence we never + get here. 
+ */ + DBUG_RETURN(0); // Nothing to delete } @@ -112,9 +156,10 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (safe_update && !using_limit) { delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); - send_error(thd,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE); - DBUG_RETURN(1); + free_underlaid_joins(thd, select_lex); + my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, + ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0)); + DBUG_RETURN(TRUE); } } if (options & OPTION_QUICK) @@ -122,25 +167,9 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (order && order->elements) { - uint length; + uint length= 0; SORT_FIELD *sortorder; - TABLE_LIST tables; - List<Item> fields; - List<Item> all_fields; ha_rows examined_rows; - - bzero((char*) &tables,sizeof(tables)); - tables.table = table; - tables.alias = table_list->alias; - - if (thd->lex->select_lex.setup_ref_array(thd, order->elements) || - setup_order(thd, thd->lex->select_lex.ref_pointer_array, &tables, - fields, all_fields, (ORDER*) order->first)) - { - delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); - DBUG_RETURN(-1); // This will force out message - } if ((!select || table->quick_keys.is_clear_all()) && limit != HA_POS_ERROR) usable_index= get_index_for_order(table, (ORDER*)(order->first), limit); @@ -150,42 +179,71 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, table->sort.io_cache= (IO_CACHE *) my_malloc(sizeof(IO_CACHE), MYF(MY_FAE | MY_ZEROFILL)); - if ( !(sortorder=make_unireg_sortorder((ORDER*) order->first, &length)) || + if (!(sortorder= make_unireg_sortorder((ORDER*) order->first, + &length, NULL)) || (table->sort.found_records = filesort(thd, table, sortorder, length, - select, HA_POS_ERROR, - &examined_rows)) + select, HA_POS_ERROR, + &examined_rows)) == HA_POS_ERROR) { delete select; free_underlaid_joins(thd, &thd->lex->select_lex); - DBUG_RETURN(-1); // This will force out message + DBUG_RETURN(TRUE); } /* Filesort has already 
found and selected the rows we want to delete, so we don't need the where clause */ delete select; + free_underlaid_joins(thd, select_lex); select= 0; } } + /* If quick select is used, initialize it before retrieving rows. */ + if (select && select->quick && select->quick->reset()) + { + delete select; + free_underlaid_joins(thd, select_lex); + DBUG_RETURN(TRUE); + } if (usable_index==MAX_KEY) init_read_record(&info,thd,table,select,1,1); else init_read_record_idx(&info, thd, table, 1, usable_index); deleted=0L; - init_ftfuncs(thd, &thd->lex->select_lex, 1); + init_ftfuncs(thd, select_lex, 1); thd->proc_info="updating"; + + if (table->triggers) + table->triggers->mark_fields_used(thd, TRG_EVENT_DELETE); + while (!(error=info.read_record(&info)) && !thd->killed && !thd->net.report_error) { // thd->net.report_error is tested to disallow delete row on error if (!(select && select->skip_record())&& !thd->net.report_error ) { + + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_BEFORE, FALSE)) + { + error= 1; + break; + } + if (!(error=table->file->delete_row(table->record[0]))) { deleted++; + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_AFTER, FALSE)) + { + error= 1; + break; + } if (!--limit && using_limit) { error= -1; @@ -218,6 +276,21 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, if (options & OPTION_QUICK) (void) table->file->extra(HA_EXTRA_NORMAL); + if (reset_auto_increment && (error < 0)) + { + /* + We're really doing a truncate and need to reset the table's + auto-increment counter. + */ + int error2= table->file->reset_auto_increment(0); + + if (error2 && (error2 != HA_ERR_WRONG_COMMAND)) + { + table->file->print_error(error2, MYF(0)); + error= 1; + } + } + cleanup: /* Invalidate the table in the query cache if something changed. 
This must @@ -230,31 +303,22 @@ cleanup: delete select; transactional_table= table->file->has_transactions(); - log_delayed= (transactional_table || table->tmp_table); - /* - We write to the binary log even if we deleted no row, because maybe the - user is using this command to ensure that a table is clean on master *and - on slave*. Think of the case of a user having played separately with the - master's table and slave's table and wanting to take a fresh identical - start now. - error < 0 means "really no error". error <= 0 means "maybe some error". - */ - if ((deleted || (error < 0)) && (error <= 0 || !transactional_table)) + /* See similar binlogging code in sql_update.cc, for comments */ + if ((error < 0) || (deleted && !transactional_table)) { - mysql_update_log.write(thd,thd->query, thd->query_length); if (mysql_bin_log.is_open()) { - if (error <= 0) + if (error < 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - log_delayed, FALSE); + transactional_table, FALSE); if (mysql_bin_log.write(&qinfo) && transactional_table) error=1; } - if (!log_delayed) + if (!transactional_table) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } - free_underlaid_joins(thd, &thd->lex->select_lex); + free_underlaid_joins(thd, select_lex); if (transactional_table) { if (ha_autocommit_or_rollback(thd,error >= 0)) @@ -266,15 +330,13 @@ cleanup: mysql_unlock_tables(thd, thd->lock); thd->lock=0; } - if ((error >= 0 || thd->net.report_error) && - (!thd->lex->ignore || thd->is_fatal_error)) - send_error(thd,thd->killed ? 
ER_SERVER_SHUTDOWN: 0); - else + if (error < 0 || (thd->lex->ignore && !thd->is_fatal_error)) { + thd->row_count_func= deleted; send_ok(thd,deleted); - DBUG_PRINT("info",("%d records deleted",deleted)); + DBUG_PRINT("info",("%ld records deleted",(long) deleted)); } - DBUG_RETURN(0); + DBUG_RETURN(error >= 0 || thd->net.report_error); } @@ -284,31 +346,49 @@ cleanup: SYNOPSIS mysql_prepare_delete() thd - thread handler - table_list - global table list + table_list - global/local table list conds - conditions RETURN VALUE - 0 - OK - 1 - error (message is sent to user) - -1 - error (message is not sent to user) + FALSE OK + TRUE error */ -int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) +bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) { - TABLE_LIST *delete_table_list= ((TABLE_LIST*) thd->lex-> - select_lex.table_list.first); + Item *fake_conds= 0; + SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_prepare_delete"); - - thd->allow_sum_func= 0; - if (setup_conds(thd, delete_table_list, conds) || - setup_ftfuncs(&thd->lex->select_lex)) - DBUG_RETURN(-1); - if (find_real_table_in_list(table_list->next, - table_list->db, table_list->real_name)) + List<Item> all_fields; + + thd->lex->allow_sum_func= 0; + if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context, + &thd->lex->select_lex.top_join_list, + table_list, conds, + &select_lex->leaf_tables, FALSE, + DELETE_ACL, SELECT_ACL) || + setup_conds(thd, table_list, select_lex->leaf_tables, conds) || + setup_ftfuncs(select_lex)) + DBUG_RETURN(TRUE); + if (!table_list->updatable || check_key_in_view(thd, table_list)) { - my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); - DBUG_RETURN(-1); + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE"); + DBUG_RETURN(TRUE); } - DBUG_RETURN(0); + { + TABLE_LIST *duplicate; + if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0))) + { + 
update_non_unique_table_error(table_list, "DELETE", duplicate); + DBUG_RETURN(TRUE); + } + } + + if (select_lex->inner_refs_list.elements && + fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array)) + DBUG_RETURN(-1); + + select_lex->fix_prepare_information(thd, conds, &fake_conds); + DBUG_RETURN(FALSE); } @@ -318,19 +398,99 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) #define MEM_STRIP_BUF_SIZE current_thd->variables.sortbuff_size -extern "C" int refposcmp2(void* arg, const void *a,const void *b) +extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b) { - /* arg is a pointer to file->ref_length */ - return memcmp(a,b, *(int*) arg); + handler *file= (handler*)arg; + return file->cmp_ref((const byte*)a, (const byte*)b); } -multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt, - uint num_of_tables_arg) - : delete_tables(dt), thd(thd_arg), deleted(0), found(0), +/* + make delete specific preparation and checks after opening tables + + SYNOPSIS + mysql_multi_delete_prepare() + thd thread handler + + RETURN + FALSE OK + TRUE Error +*/ + +bool mysql_multi_delete_prepare(THD *thd) +{ + LEX *lex= thd->lex; + TABLE_LIST *aux_tables= (TABLE_LIST *)lex->auxiliary_table_list.first; + TABLE_LIST *target_tbl; + DBUG_ENTER("mysql_multi_delete_prepare"); + + /* + setup_tables() need for VIEWs. JOIN::prepare() will not do it second + time. 
+ + lex->query_tables also point on local list of DELETE SELECT_LEX + */ + if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context, + &thd->lex->select_lex.top_join_list, + lex->query_tables, &lex->select_lex.where, + &lex->select_lex.leaf_tables, FALSE, + DELETE_ACL, SELECT_ACL)) + DBUG_RETURN(TRUE); + + + /* + Multi-delete can't be constructed over-union => we always have + single SELECT on top and have to check underlying SELECTs of it + */ + lex->select_lex.exclude_from_table_unique_test= TRUE; + /* Fix tables-to-be-deleted-from list to point at opened tables */ + for (target_tbl= (TABLE_LIST*) aux_tables; + target_tbl; + target_tbl= target_tbl->next_local) + { + if (!(target_tbl->table= target_tbl->correspondent_table->table)) + { + DBUG_ASSERT(target_tbl->correspondent_table->view && + target_tbl->correspondent_table->merge_underlying_list && + target_tbl->correspondent_table->merge_underlying_list-> + next_local); + my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0), + target_tbl->correspondent_table->view_db.str, + target_tbl->correspondent_table->view_name.str); + DBUG_RETURN(TRUE); + } + + if (!target_tbl->correspondent_table->updatable || + check_key_in_view(thd, target_tbl->correspondent_table)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), + target_tbl->table_name, "DELETE"); + DBUG_RETURN(TRUE); + } + /* + Check that table from which we delete is not used somewhere + inside subqueries/view. 
+ */ + { + TABLE_LIST *duplicate; + if ((duplicate= unique_table(thd, target_tbl->correspondent_table, + lex->query_tables, 0))) + { + update_non_unique_table_error(target_tbl->correspondent_table, + "DELETE", duplicate); + DBUG_RETURN(TRUE); + } + } + } + DBUG_RETURN(FALSE); +} + + +multi_delete::multi_delete(TABLE_LIST *dt, uint num_of_tables_arg) + : delete_tables(dt), deleted(0), found(0), num_of_tables(num_of_tables_arg), error(0), - do_delete(0), transactional_tables(0), log_delayed(0), normal_tables(0) + do_delete(0), transactional_tables(0), normal_tables(0) { - tempfiles = (Unique **) sql_calloc(sizeof(Unique *) * (num_of_tables-1)); + tempfiles= (Unique **) sql_calloc(sizeof(Unique *) * num_of_tables); } @@ -356,10 +516,11 @@ multi_delete::initialize_tables(JOIN *join) DBUG_RETURN(1); table_map tables_to_delete_from=0; - for (walk= delete_tables ; walk ; walk=walk->next) + for (walk= delete_tables; walk; walk= walk->next_local) tables_to_delete_from|= walk->table->map; walk= delete_tables; + delete_while_scanning= 1; for (JOIN_TAB *tab=join->join_tab, *end=join->join_tab+join->tables; tab < end; tab++) @@ -368,27 +529,42 @@ multi_delete::initialize_tables(JOIN *join) { /* We are going to delete from this table */ TABLE *tbl=walk->table=tab->table; - walk=walk->next; + walk= walk->next_local; /* Don't use KEYREAD optimization on this table */ tbl->no_keyread=1; /* Don't use record cache */ tbl->no_cache= 1; tbl->used_keys.clear_all(); if (tbl->file->has_transactions()) - log_delayed= transactional_tables= 1; - else if (tbl->tmp_table != NO_TMP_TABLE) - log_delayed= 1; + transactional_tables= 1; else normal_tables= 1; + if (tbl->triggers) + tbl->triggers->mark_fields_used(thd, TRG_EVENT_DELETE); + } + else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) && + walk == delete_tables) + { + /* + We are not deleting from the table we are scanning. 
In this + case send_data() shouldn't delete any rows a we may touch + the rows in the deleted table many times + */ + delete_while_scanning= 0; } } walk= delete_tables; tempfiles_ptr= tempfiles; - for (walk=walk->next ; walk ; walk=walk->next) + if (delete_while_scanning) + { + table_being_deleted= delete_tables; + walk= walk->next_local; + } + for (;walk ;walk= walk->next_local) { TABLE *table=walk->table; - *tempfiles_ptr++= new Unique (refposcmp2, - (void *) &table->file->ref_length, + *tempfiles_ptr++= new Unique (refpos_order_cmp, + (void *) table->file, table->file->ref_length, MEM_STRIP_BUF_SIZE); } @@ -399,16 +575,16 @@ multi_delete::initialize_tables(JOIN *join) multi_delete::~multi_delete() { - for (table_being_deleted=delete_tables ; - table_being_deleted ; - table_being_deleted=table_being_deleted->next) + for (table_being_deleted= delete_tables; + table_being_deleted; + table_being_deleted= table_being_deleted->next_local) { - TABLE *t=table_being_deleted->table; - free_io_cache(t); // Alloced by unique - t->no_keyread=0; + TABLE *table= table_being_deleted->table; + free_io_cache(table); // Alloced by unique + table->no_keyread=0; } - for (uint counter= 0; counter < num_of_tables-1; counter++) + for (uint counter= 0; counter < num_of_tables; counter++) { if (tempfiles[counter]) delete tempfiles[counter]; @@ -418,14 +594,15 @@ multi_delete::~multi_delete() bool multi_delete::send_data(List<Item> &values) { - int secure_counter= -1; + int secure_counter= delete_while_scanning ? 
-1 : 0; + TABLE_LIST *del_table; DBUG_ENTER("multi_delete::send_data"); - for (table_being_deleted=delete_tables ; - table_being_deleted ; - table_being_deleted=table_being_deleted->next, secure_counter++) + for (del_table= delete_tables; + del_table; + del_table= del_table->next_local, secure_counter++) { - TABLE *table=table_being_deleted->table; + TABLE *table= del_table->table; /* Check if we are using outer join and we didn't find the row */ if (table->status & (STATUS_NULL_ROW | STATUS_DELETED)) @@ -436,11 +613,22 @@ bool multi_delete::send_data(List<Item> &values) if (secure_counter < 0) { - /* If this is the table we are scanning */ + /* We are scanning the current table */ + DBUG_ASSERT(del_table == table_being_deleted); + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_BEFORE, FALSE)) + DBUG_RETURN(1); table->status|= STATUS_DELETED; if (!(error=table->file->delete_row(table->record[0]))) + { deleted++; - else if (!table_being_deleted->next || table_being_deleted->table->file->has_transactions()) + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_AFTER, FALSE)) + DBUG_RETURN(1); + } + else { table->file->print_error(error,MYF(0)); DBUG_RETURN(1); @@ -451,7 +639,7 @@ bool multi_delete::send_data(List<Item> &values) error=tempfiles[secure_counter]->unique_add((char*) table->file->ref); if (error) { - error=-1; + error= 1; // Fatal error DBUG_RETURN(1); } } @@ -465,7 +653,7 @@ void multi_delete::send_error(uint errcode,const char *err) DBUG_ENTER("multi_delete::send_error"); /* First send error what ever it is ... 
*/ - ::send_error(thd,errcode,err); + my_message(errcode, err, MYF(0)); /* If nothing deleted return */ if (!deleted) @@ -474,22 +662,24 @@ void multi_delete::send_error(uint errcode,const char *err) /* Something already deleted so we have to invalidate cache */ query_cache_invalidate3(thd, delete_tables, 1); - /* Below can happen when thread is killed early ... */ - if (!table_being_deleted) - table_being_deleted=delete_tables; - /* If rows from the first table only has been deleted and it is transactional, just do rollback. The same if all tables are transactional, regardless of where we are. In all other cases do attempt deletes ... */ - if ((table_being_deleted->table->file->has_transactions() && - table_being_deleted == delete_tables) || !normal_tables) + if ((table_being_deleted == delete_tables && + table_being_deleted->table->file->has_transactions()) || + !normal_tables) ha_rollback_stmt(thd); else if (do_delete) { - VOID(do_deletes(1)); + /* + We have to execute the recorded do_deletes() and write info into the + error log + */ + error= 1; + send_eof(); } DBUG_VOID_RETURN; } @@ -502,28 +692,21 @@ void multi_delete::send_error(uint errcode,const char *err) 1 error */ -int multi_delete::do_deletes(bool from_send_error) +int multi_delete::do_deletes() { int local_error= 0, counter= 0; DBUG_ENTER("do_deletes"); + DBUG_ASSERT(do_delete); - if (from_send_error) - { - /* Found out table number for 'table_being_deleted*/ - for (TABLE_LIST *aux=delete_tables; - aux != table_being_deleted; - aux=aux->next) - counter++; - } - else - table_being_deleted = delete_tables; - - do_delete= 0; + do_delete= 0; // Mark called if (!found) DBUG_RETURN(0); - for (table_being_deleted=table_being_deleted->next; - table_being_deleted ; - table_being_deleted=table_being_deleted->next, counter++) + + table_being_deleted= (delete_while_scanning ? 
delete_tables->next_local : + delete_tables); + + for (; table_being_deleted; + table_being_deleted= table_being_deleted->next_local, counter++) { TABLE *table = table_being_deleted->table; if (tempfiles[counter]->get(table)) @@ -541,12 +724,26 @@ int multi_delete::do_deletes(bool from_send_error) info.ignore_not_found_rows= 1; while (!(local_error=info.read_record(&info)) && !thd->killed) { + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_BEFORE, FALSE)) + { + local_error= 1; + break; + } if ((local_error=table->file->delete_row(table->record[0]))) { table->file->print_error(local_error,MYF(0)); break; } deleted++; + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_AFTER, FALSE)) + { + local_error= 1; + break; + } } end_read_record(&info); if (thd->killed && !local_error) @@ -570,7 +767,10 @@ bool multi_delete::send_eof() thd->proc_info="deleting from reference tables"; /* Does deletes for the last n - 1 tables, returns 0 if ok */ - int local_error= do_deletes(0); // returns 0 if success + int local_error= do_deletes(); // returns 0 if success + + /* compute a total error to know if something failed */ + local_error= local_error || error; /* reset used flags */ thd->proc_info="end"; @@ -580,40 +780,34 @@ bool multi_delete::send_eof() ha_autocommit_... */ if (deleted) + { query_cache_invalidate3(thd, delete_tables, 1); + } - /* - Write the SQL statement to the binlog if we deleted - rows and we succeeded, or also in an error case when there - was a non-transaction-safe table involved, since - modifications in it cannot be rolled back. - Note that if we deleted nothing we don't write to the binlog (TODO: - fix this). 
- */ - if (deleted && (error <= 0 || normal_tables)) + if ((local_error == 0) || (deleted && normal_tables)) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { - if (error <= 0) + if (local_error == 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - log_delayed, FALSE); + transactional_tables, FALSE); if (mysql_bin_log.write(&qinfo) && !normal_tables) local_error=1; // Log write failed: roll back the SQL statement } - if (!log_delayed) + if (!transactional_tables) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } - /* Commit or rollback the current SQL statement */ + /* Commit or rollback the current SQL statement */ if (transactional_tables) if (ha_autocommit_or_rollback(thd,local_error > 0)) local_error=1; - if (local_error) - ::send_error(thd); - else + if (!local_error) + { + thd->row_count_func= deleted; ::send_ok(thd, deleted); + } return 0; } @@ -634,25 +828,25 @@ bool multi_delete::send_eof() - If we want to have a name lock on the table on exit without errors. 
*/ -int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) +bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) { HA_CREATE_INFO create_info; char path[FN_REFLEN]; TABLE **table_ptr; - int error; + bool error; DBUG_ENTER("mysql_truncate"); bzero((char*) &create_info,sizeof(create_info)); /* If it is a temporary table, close and regenerate it */ if (!dont_send_ok && (table_ptr=find_temporary_table(thd,table_list->db, - table_list->real_name))) + table_list->table_name))) { TABLE *table= *table_ptr; table->file->info(HA_STATUS_AUTO | HA_STATUS_NO_LOCK); - db_type table_type=table->db_type; - if (!ha_supports_generate(table_type)) + db_type table_type= table->s->db_type; + if (!ha_check_storage_engine_flag(table_type, HTON_CAN_RECREATE)) goto trunc_by_del; - strmov(path,table->path); + strmov(path, table->s->path); *table_ptr= table->next; // Unlink table from list close_temporary(table,0); if (thd->slave_thread) @@ -661,49 +855,49 @@ int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) ha_create_table(path, &create_info,1); // We don't need to call invalidate() because this table is not in cache if ((error= (int) !(open_temporary_table(thd, path, table_list->db, - table_list->real_name, 1)))) + table_list->table_name, 1)))) (void) rm_temporary_table(table_type, path); /* If we return here we will not have logged the truncation to the bin log and we will not send_ok() to the client. 
*/ - goto end; + goto end; } (void) sprintf(path,"%s/%s/%s%s",mysql_data_home,table_list->db, - table_list->real_name,reg_ext); + table_list->table_name,reg_ext); fn_format(path, path, "", "", MY_UNPACK_FILENAME); if (!dont_send_ok) { db_type table_type; - if ((table_type=get_table_type(path)) == DB_TYPE_UNKNOWN) + mysql_frm_type(thd, path, &table_type); + if (table_type == DB_TYPE_UNKNOWN) { - my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db, - table_list->real_name); - DBUG_RETURN(-1); + my_error(ER_NO_SUCH_TABLE, MYF(0), + table_list->db, table_list->table_name); + DBUG_RETURN(TRUE); } - if (!ha_supports_generate(table_type)) + if (!ha_check_storage_engine_flag(table_type, HTON_CAN_RECREATE)) goto trunc_by_del; if (lock_and_wait_for_table_name(thd, table_list)) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } *fn_ext(path)=0; // Remove the .frm extension - error= ha_create_table(path,&create_info,1) ? -1 : 0; - query_cache_invalidate3(thd, table_list, 0); + error= ha_create_table(path,&create_info,1); + query_cache_invalidate3(thd, table_list, 0); end: if (!dont_send_ok) { if (!error) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - thd->tmp_table, FALSE); + 0, FALSE); mysql_bin_log.write(&qinfo); } send_ok(thd); // This should return record count @@ -718,16 +912,17 @@ end: unlock_table_name(thd, table_list); VOID(pthread_mutex_unlock(&LOCK_open)); } - DBUG_RETURN(error ? 
-1 : 0); + DBUG_RETURN(error); trunc_by_del: /* Probably InnoDB table */ - ulong save_options= thd->options; + ulonglong save_options= thd->options; table_list->lock_type= TL_WRITE; thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT); ha_enable_transaction(thd, FALSE); + mysql_init_select(thd->lex); error= mysql_delete(thd, table_list, (COND*) 0, (SQL_LIST*) 0, - HA_POS_ERROR, 0); + HA_POS_ERROR, LL(0), TRUE); ha_enable_transaction(thd, TRUE); thd->options= save_options; DBUG_RETURN(error); diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index e9f9b432c21..cd46f3bcc0e 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -24,42 +23,38 @@ #include "mysql_priv.h" #include "sql_select.h" -static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *s, - TABLE_LIST *t); + /* - Resolve derived tables in all queries + Call given derived table processor (preparing or filling tables) SYNOPSIS mysql_handle_derived() lex LEX for this thread + processor procedure of derived table processing RETURN - 0 ok - -1 Error - 1 Error and error message given + FALSE OK + TRUE Error */ -int -mysql_handle_derived(LEX *lex) +bool +mysql_handle_derived(LEX *lex, bool (*processor)(THD*, LEX*, TABLE_LIST*)) { + bool res= FALSE; if (lex->derived_tables) { + lex->thd->derived_tables_processing= TRUE; for (SELECT_LEX *sl= lex->all_selects_list; sl; sl= sl->next_select_in_list()) { for (TABLE_LIST *cursor= sl->get_table_list(); cursor; - cursor= cursor->next) + cursor= cursor->next_local) { - int res; - if (cursor->derived && 
(res=mysql_derived(lex->thd, lex, - cursor->derived, - cursor))) - { - return res; - } + if ((res= (*processor)(lex->thd, lex, cursor))) + goto out; } if (lex->describe) { @@ -72,25 +67,23 @@ mysql_handle_derived(LEX *lex) } } } - return 0; +out: + lex->thd->derived_tables_processing= FALSE; + return res; } /* - Resolve derived tables in all queries + Create temporary table structure (but do not fill it) SYNOPSIS - mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, TABLE_LIST *t) + mysql_derived_prepare() thd Thread handle lex LEX for this thread - unit node that contains all SELECT's for derived tables - t TABLE_LIST for the upper SELECT + orig_table_list TABLE_LIST for the upper SELECT IMPLEMENTATION - Derived table is resolved with temporary table. It is created based on the - queries defined. After temporary table is created, if this is not EXPLAIN, - then the entire unit / node is deleted. unit is deleted if UNION is used - for derived table and node is deleted is it is a simple SELECT. + Derived table is resolved with temporary table. After table creation, the above TABLE_LIST is updated with a new table. 
@@ -101,81 +94,163 @@ mysql_handle_derived(LEX *lex) close_thread_tables() RETURN - 0 ok - 1 Error - -1 Error and error message given -*/ - + FALSE OK + TRUE Error +*/ -static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, - TABLE_LIST *org_table_list) +bool mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *orig_table_list) { - SELECT_LEX *first_select= unit->first_select(); - TABLE *table; - int res; - select_union *derived_result; - bool is_union= first_select->next_select() && - first_select->next_select()->linkage == UNION_TYPE; - SELECT_LEX *save_current_select= lex->current_select; - DBUG_ENTER("mysql_derived"); - - if (!(derived_result= new select_union(0))) - DBUG_RETURN(1); // out of memory - - // st_select_lex_unit::prepare correctly work for single select - if ((res= unit->prepare(thd, derived_result, 0, org_table_list->alias))) - goto exit; - - - derived_result->tmp_table_param.init(); - derived_result->tmp_table_param.field_count= unit->types.elements; - /* - Temp table is created so that it hounours if UNION without ALL is to be - processed - - As 'distinct' parameter we always pass FALSE (0), because underlying - query will control distinct condition by itself. Correct test of - distinct underlying query will be is_union && - !unit->union_distinct->next_select() (i.e. it is union and last distinct - SELECT is last SELECT of UNION). 
- */ - if (!(table= create_tmp_table(thd, &derived_result->tmp_table_param, - unit->types, (ORDER*) 0, - FALSE, 1, - (first_select->options | thd->options | - TMP_TABLE_ALL_COLUMNS), - HA_POS_ERROR, - org_table_list->alias))) + SELECT_LEX_UNIT *unit= orig_table_list->derived; + ulonglong create_options; + DBUG_ENTER("mysql_derived_prepare"); + bool res= FALSE; + if (unit) { - res= -1; - goto exit; + SELECT_LEX *first_select= unit->first_select(); + TABLE *table= 0; + select_union *derived_result; + + /* prevent name resolving out of derived table */ + for (SELECT_LEX *sl= first_select; sl; sl= sl->next_select()) + sl->context.outer_context= 0; + + if (!(derived_result= new select_union)) + DBUG_RETURN(TRUE); // out of memory + + // st_select_lex_unit::prepare correctly work for single select + if ((res= unit->prepare(thd, derived_result, 0))) + goto exit; + + if ((res= check_duplicate_names(unit->types, 0))) + goto exit; + + create_options= (first_select->options | thd->options | + TMP_TABLE_ALL_COLUMNS); + /* + Temp table is created so that it hounours if UNION without ALL is to be + processed + + As 'distinct' parameter we always pass FALSE (0), because underlying + query will control distinct condition by itself. Correct test of + distinct underlying query will be is_union && + !unit->union_distinct->next_select() (i.e. it is union and last distinct + SELECT is last SELECT of UNION). 
+ */ + if ((res= derived_result->create_result_table(thd, &unit->types, FALSE, + create_options, + orig_table_list->alias))) + goto exit; + + table= derived_result->table; + +exit: + /* Hide "Unknown column" or "Unknown function" error */ + if (orig_table_list->view) + { + if (thd->net.last_errno == ER_BAD_FIELD_ERROR || + thd->net.last_errno == ER_SP_DOES_NOT_EXIST) + { + thd->clear_error(); + my_error(ER_VIEW_INVALID, MYF(0), orig_table_list->db, + orig_table_list->table_name); + } + } + + /* + if it is preparation PS only or commands that need only VIEW structure + then we do not need real data and we can skip execution (and parameters + is not defined, too) + */ + if (res) + { + if (table) + free_tmp_table(thd, table); + delete derived_result; + } + else + { + if (!thd->fill_derived_tables()) + { + delete derived_result; + derived_result= NULL; + } + orig_table_list->derived_result= derived_result; + orig_table_list->table= table; + orig_table_list->table_name= (char*) table->s->table_name; + orig_table_list->table_name_length= strlen((char*)table->s->table_name); + table->derived_select_number= first_select->select_number; + table->s->tmp_table= TMP_TABLE; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (orig_table_list->referencing_view) + table->grant= orig_table_list->grant; + else + table->grant.privilege= SELECT_ACL; +#endif + orig_table_list->db= (char *)""; + orig_table_list->db_length= 0; + // Force read of table stats in the optimizer + table->file->info(HA_STATUS_VARIABLE); + /* Add new temporary table to list of open derived tables */ + table->next= thd->derived_tables; + thd->derived_tables= table; + } } - derived_result->set_table(table); + else if (orig_table_list->merge_underlying_list) + orig_table_list->set_underlying_merge(); + DBUG_RETURN(res); +} + - /* - if it is preparation PS only then we do not need real data and we - can skip execution (and parameters is not defined, too) - */ - if (! 
thd->current_arena->is_stmt_prepare()) +/* + fill derived table + + SYNOPSIS + mysql_derived_filling() + thd Thread handle + lex LEX for this thread + unit node that contains all SELECT's for derived tables + orig_table_list TABLE_LIST for the upper SELECT + + IMPLEMENTATION + Derived table is resolved with temporary table. It is created based on the + queries defined. After temporary table is filled, if this is not EXPLAIN, + then the entire unit / node is deleted. unit is deleted if UNION is used + for derived table and node is deleted is it is a simple SELECT. + If you use this function, make sure it's not called at prepare. + Due to evaluation of LIMIT clause it can not be used at prepared stage. + + RETURN + FALSE OK + TRUE Error +*/ + +bool mysql_derived_filling(THD *thd, LEX *lex, TABLE_LIST *orig_table_list) +{ + TABLE *table= orig_table_list->table; + SELECT_LEX_UNIT *unit= orig_table_list->derived; + bool res= FALSE; + + /*check that table creation pass without problem and it is derived table */ + if (table && unit) { + SELECT_LEX *first_select= unit->first_select(); + select_union *derived_result= orig_table_list->derived_result; + SELECT_LEX *save_current_select= lex->current_select; + bool is_union= first_select->next_select() && + first_select->next_select()->linkage == UNION_TYPE; if (is_union) { // execute union without clean up - if (!(res= unit->prepare(thd, derived_result, SELECT_NO_UNLOCK, ""))) - res= unit->exec(); + res= unit->exec(); } else { - unit->offset_limit_cnt= first_select->offset_limit; - unit->select_limit_cnt= first_select->select_limit+ - first_select->offset_limit; - if (unit->select_limit_cnt < first_select->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; + unit->set_limit(first_select); if (unit->select_limit_cnt == HA_POS_ERROR) first_select->options&= ~OPTION_FOUND_ROWS; lex->current_select= first_select; - res= mysql_select(thd, &first_select->ref_pointer_array, + res= mysql_select(thd, 
&first_select->ref_pointer_array, (TABLE_LIST*) first_select->table_list.first, first_select->with_wild, first_select->item_list, first_select->where, @@ -188,54 +263,22 @@ static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, SELECT_NO_UNLOCK), derived_result, unit, first_select); } - } - if (!res) - { - /* - Here we entirely fix both TABLE_LIST and list of SELECT's as - there were no derived tables - */ - if (derived_result->flush()) - res= 1; - else + if (!res) { - org_table_list->real_name= table->real_name; - org_table_list->table= table; - if (org_table_list->table_list) - { - org_table_list->table_list->real_name= table->real_name; - org_table_list->table_list->table= table; - } - table->derived_select_number= first_select->select_number; - table->tmp_table= TMP_TABLE; -#ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.privilege= SELECT_ACL; -#endif - org_table_list->db= (char *)""; - // Force read of table stats in the optimizer - table->file->info(HA_STATUS_VARIABLE); - } + /* + Here we entirely fix both TABLE_LIST and list of SELECT's as + there were no derived tables + */ + if (derived_result->flush()) + res= TRUE; - if (!lex->describe) - unit->cleanup(); - if (res) - free_tmp_table(thd, table); - else - { - /* Add new temporary table to list of open derived tables */ - table->next= thd->derived_tables; - thd->derived_tables= table; + if (!lex->describe) + unit->cleanup(); } + else + unit->cleanup(); + lex->current_select= save_current_select; } - else - { - free_tmp_table(thd, table); - unit->cleanup(); - } - -exit: - delete derived_result; - lex->current_select= save_current_select; - DBUG_RETURN(res); + return res; } diff --git a/sql/sql_do.cc b/sql/sql_do.cc index af72632199f..2330339db8e 100644 --- a/sql/sql_do.cc +++ b/sql/sql_do.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the 
terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -19,17 +18,17 @@ #include "mysql_priv.h" -int mysql_do(THD *thd, List<Item> &values) +bool mysql_do(THD *thd, List<Item> &values) { List_iterator<Item> li(values); Item *value; DBUG_ENTER("mysql_do"); - if (setup_fields(thd, 0, 0, values, 0, 0, 0)) - DBUG_RETURN(-1); + if (setup_fields(thd, 0, values, 0, 0, 0)) + DBUG_RETURN(TRUE); while ((value = li++)) value->val_int(); free_underlaid_joins(thd, &thd->lex->select_lex); thd->clear_error(); // DO always is OK send_ok(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } diff --git a/sql/sql_error.cc b/sql/sql_error.cc index b24d15b6e3b..61a7581908c 100644 --- a/sql/sql_error.cc +++ b/sql/sql_error.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -43,6 +42,7 @@ This file contains the implementation of error and warnings related ***********************************************************************/ #include "mysql_priv.h" +#include "sp_rcontext.h" /* Store a new message in an error object @@ -63,6 +63,7 @@ void MYSQL_ERROR::set_msg(THD *thd, const char *msg_arg) SYNOPSIS mysql_reset_errors() thd Thread handle + force Reset warnings even if it has been done before IMPLEMENTATION Don't reset warnings if this has already been called for this query. 
@@ -70,14 +71,16 @@ void MYSQL_ERROR::set_msg(THD *thd, const char *msg_arg) in which case push_warnings() has already called this function. */ -void mysql_reset_errors(THD *thd) +void mysql_reset_errors(THD *thd, bool force) { DBUG_ENTER("mysql_reset_errors"); - if (thd->query_id != thd->warn_id) + if (thd->query_id != thd->warn_id || force) { thd->warn_id= thd->query_id; free_root(&thd->warn_root,MYF(0)); bzero((char*) thd->warn_count, sizeof(thd->warn_count)); + if (force) + thd->total_warn_count= 0; thd->warn_list.empty(); thd->row_count= 1; // by default point to row 1 } @@ -104,14 +107,43 @@ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, { MYSQL_ERROR *err= 0; DBUG_ENTER("push_warning"); + DBUG_PRINT("enter", ("code: %d, msg: %s", code, msg)); - if (level == MYSQL_ERROR::WARN_LEVEL_NOTE && !(thd->options & OPTION_SQL_NOTES)) - return(0); + if (level == MYSQL_ERROR::WARN_LEVEL_NOTE && + !(thd->options & OPTION_SQL_NOTES)) + DBUG_RETURN(0); + if (thd->query_id != thd->warn_id && !thd->spcont) + mysql_reset_errors(thd, 0); + thd->got_warning= 1; + + /* Abort if we are using strict mode and we are not using IGNORE */ + if ((int) level >= (int) MYSQL_ERROR::WARN_LEVEL_WARN && + thd->really_abort_on_warning()) + { + /* Avoid my_message() calling push_warning */ + bool no_warnings_for_error= thd->no_warnings_for_error; + sp_rcontext *spcont= thd->spcont; + + thd->no_warnings_for_error= 1; + thd->spcont= 0; + + thd->killed= THD::KILL_BAD_DATA; + my_message(code, msg, MYF(0)); + + thd->spcont= spcont; + thd->no_warnings_for_error= no_warnings_for_error; + /* Store error in error list (as my_message() didn't do it) */ + level= MYSQL_ERROR::WARN_LEVEL_ERROR; + } + + if (thd->spcont && + thd->spcont->handle_error(code, level, thd)) + { + DBUG_RETURN(NULL); + } query_cache_abort(&thd->net); - if (thd->query_id != thd->warn_id) - mysql_reset_errors(thd); if (thd->warn_list.elements < thd->variables.max_error_count) { @@ -121,8 +153,7 @@ 
MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, */ MEM_ROOT *old_root= thd->mem_root; thd->mem_root= &thd->warn_root; - err= new MYSQL_ERROR(thd, code, level, msg); - if (err) + if ((err= new MYSQL_ERROR(thd, code, level, msg))) thd->warn_list.push_back(err); thd->mem_root= old_root; } @@ -170,14 +201,14 @@ void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level, Takes into account the current LIMIT RETURN VALUES - 0 ok - 1 Error sending data to client + FALSE ok + TRUE Error sending data to client */ static const char *warning_level_names[]= {"Note", "Warning", "Error", "?"}; static int warning_level_length[]= { 4, 7, 5, 1 }; -my_bool mysqld_show_warnings(THD *thd, ulong levels_to_show) +bool mysqld_show_warnings(THD *thd, ulong levels_to_show) { List<Item> field_list; DBUG_ENTER("mysqld_show_warnings"); @@ -186,26 +217,27 @@ my_bool mysqld_show_warnings(THD *thd, ulong levels_to_show) field_list.push_back(new Item_return_int("Code",4, MYSQL_TYPE_LONG)); field_list.push_back(new Item_empty_string("Message",MYSQL_ERRMSG_SIZE)); - if (thd->protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (thd->protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); MYSQL_ERROR *err; SELECT_LEX *sel= &thd->lex->select_lex; - ha_rows offset= sel->offset_limit, limit= sel->select_limit; + SELECT_LEX_UNIT *unit= &thd->lex->unit; + ha_rows idx= 0; Protocol *protocol=thd->protocol; - + + unit->set_limit(sel); + List_iterator_fast<MYSQL_ERROR> it(thd->warn_list); while ((err= it++)) { /* Skip levels that the user is not interested in */ if (!(levels_to_show & ((ulong) 1 << err->level))) continue; - if (offset) - { - offset--; + if (++idx <= unit->offset_limit_cnt) continue; - } - if (limit-- == 0) + if (idx > unit->select_limit_cnt) break; protocol->prepare_for_resend(); protocol->store(warning_level_names[err->level], @@ -213,8 +245,8 @@ my_bool mysqld_show_warnings(THD *thd, ulong 
levels_to_show) protocol->store((uint32) err->code); protocol->store(err->msg, strlen(err->msg), system_charset_info); if (protocol->write()) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } - send_eof(thd); - DBUG_RETURN(0); + send_eof(thd); + DBUG_RETURN(FALSE); } diff --git a/sql/sql_error.h b/sql/sql_error.h new file mode 100644 index 00000000000..28d946f14f8 --- /dev/null +++ b/sql/sql_error.h @@ -0,0 +1,41 @@ +/* Copyright (C) 2000-2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +class MYSQL_ERROR: public Sql_alloc +{ +public: + enum enum_warning_level + { WARN_LEVEL_NOTE, WARN_LEVEL_WARN, WARN_LEVEL_ERROR, WARN_LEVEL_END}; + + uint code; + enum_warning_level level; + char *msg; + + MYSQL_ERROR(THD *thd, uint code_arg, enum_warning_level level_arg, + const char *msg_arg) + :code(code_arg), level(level_arg) + { + if (msg_arg) + set_msg(thd, msg_arg); + } + void set_msg(THD *thd, const char *msg_arg); +}; + +MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, + uint code, const char *msg); +void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level, + uint code, const char *format, ...); +void mysql_reset_errors(THD *thd, bool force); +bool mysqld_show_warnings(THD *thd, ulong levels_to_show); diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 1c5381a9fa0..d1a5ab7dfa8 100644 --- a/sql/sql_handler.cc +++ 
b/sql/sql_handler.cc @@ -1,8 +1,7 @@ /* Copyright (C) 2000-2004 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -64,7 +63,7 @@ #define HANDLER_TABLES_HASH_SIZE 120 static enum enum_ha_read_modes rkey_to_rnext[]= - { RNEXT_SAME, RNEXT, RPREV, RNEXT, RPREV, RNEXT, RPREV, RPREV }; +{ RNEXT_SAME, RNEXT, RPREV, RNEXT, RPREV, RNEXT, RPREV, RPREV }; #define HANDLER_TABLES_HACK(thd) { \ TABLE *tmp=thd->open_tables; \ @@ -140,19 +139,19 @@ static void mysql_ha_hash_free(TABLE_LIST *tables) error messages. RETURN - 0 ok - != 0 error + FALSE OK + TRUE Error */ -int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) +bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) { TABLE_LIST *hash_tables; char *db, *name, *alias; uint dblen, namelen, aliaslen, counter; - int err; + int error; DBUG_ENTER("mysql_ha_open"); DBUG_PRINT("enter",("'%s'.'%s' as '%s' reopen: %d", - tables->db, tables->real_name, tables->alias, + tables->db, tables->table_name, tables->alias, (int) reopen)); if (! hash_inited(&thd->handler_tables_hash)) @@ -173,8 +172,7 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) { DBUG_PRINT("info",("duplicate '%s'", tables->alias)); if (! reopen) - my_printf_error(ER_NONUNIQ_TABLE, ER(ER_NONUNIQ_TABLE), - MYF(0), tables->alias); + my_error(ER_NONUNIQ_TABLE, MYF(0), tables->alias); goto err; } } @@ -185,16 +183,20 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) */ DBUG_ASSERT(! 
tables->table); HANDLER_TABLES_HACK(thd); - err=open_tables(thd, tables, &counter); + + /* for now HANDLER can be used only for real TABLES */ + tables->required_type= FRMTYPE_TABLE; + error= open_tables(thd, &tables, &counter, 0); + HANDLER_TABLES_HACK(thd); - if (err) + if (error) goto err; /* There can be only one table in '*tables'. */ if (! (tables->table->file->table_flags() & HA_CAN_SQL_HANDLER)) { if (! reopen) - my_printf_error(ER_ILLEGAL_HA,ER(ER_ILLEGAL_HA),MYF(0), tables->alias); + my_error(ER_ILLEGAL_HA, MYF(0), tables->alias); mysql_ha_close(thd, tables); goto err; } @@ -203,7 +205,7 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) { /* copy the TABLE_LIST struct */ dblen= strlen(tables->db) + 1; - namelen= strlen(tables->real_name) + 1; + namelen= strlen(tables->table_name) + 1; aliaslen= strlen(tables->alias) + 1; if (!(my_multi_malloc(MYF(MY_WME), &hash_tables, sizeof(*hash_tables), @@ -215,15 +217,16 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) /* structure copy */ *hash_tables= *tables; hash_tables->db= db; - hash_tables->real_name= name; + hash_tables->table_name= name; hash_tables->alias= alias; memcpy(hash_tables->db, tables->db, dblen); - memcpy(hash_tables->real_name, tables->real_name, namelen); + memcpy(hash_tables->table_name, tables->table_name, namelen); memcpy(hash_tables->alias, tables->alias, aliaslen); /* add to hash */ if (my_hash_insert(&thd->handler_tables_hash, (byte*) hash_tables)) { + my_free((char*) hash_tables, MYF(0)); mysql_ha_close(thd, tables); goto err; } @@ -232,11 +235,11 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) if (! reopen) send_ok(thd); DBUG_PRINT("exit",("OK")); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: DBUG_PRINT("exit",("ERROR")); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } @@ -250,20 +253,21 @@ err: DESCRIPTION Though this function takes a list of tables, only the first list entry - will be closed. Broadcasts a COND_refresh condition. + will be closed. 
+ Broadcasts refresh if it closed the table. RETURN - 0 ok - != 0 error + FALSE ok + TRUE error */ -int mysql_ha_close(THD *thd, TABLE_LIST *tables) +bool mysql_ha_close(THD *thd, TABLE_LIST *tables) { TABLE_LIST *hash_tables; TABLE **table_ptr; DBUG_ENTER("mysql_ha_close"); DBUG_PRINT("enter",("'%s'.'%s' as '%s'", - tables->db, tables->real_name, tables->alias)); + tables->db, tables->table_name, tables->alias)); if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash, (byte*) tables->alias, @@ -277,54 +281,32 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables) */ for (table_ptr= &(thd->handler_tables); *table_ptr && (*table_ptr != hash_tables->table); - table_ptr= &(*table_ptr)->next); + table_ptr= &(*table_ptr)->next) + ; -#if MYSQL_VERSION_ID < 40100 - if (*tables->db && strcmp(hash_tables->db, tables->db)) - { - DBUG_PRINT("info",("wrong db")); - hash_tables= NULL; - } - else -#endif + if (*table_ptr) { - if (*table_ptr) + (*table_ptr)->file->ha_index_or_rnd_end(); + VOID(pthread_mutex_lock(&LOCK_open)); + if (close_thread_table(thd, table_ptr)) { - (*table_ptr)->file->ha_index_or_rnd_end(); - VOID(pthread_mutex_lock(&LOCK_open)); - if (close_thread_table(thd, table_ptr)) - { - /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); - } - VOID(pthread_mutex_unlock(&LOCK_open)); + /* Tell threads waiting for refresh that something has happened */ + broadcast_refresh(); } - - hash_delete(&thd->handler_tables_hash, (byte*) hash_tables); + VOID(pthread_mutex_unlock(&LOCK_open)); } + hash_delete(&thd->handler_tables_hash, (byte*) hash_tables); } - - if (! 
hash_tables) + else { -#if MYSQL_VERSION_ID < 40100 - char buff[MAX_DBKEY_LENGTH]; - if (*tables->db) - strxnmov(buff, sizeof(buff), tables->db, ".", tables->real_name, NullS); - else - strncpy(buff, tables->alias, sizeof(buff)); - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - buff, "HANDLER"); -#else - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - tables->alias, "HANDLER"); -#endif + my_error(ER_UNKNOWN_TABLE, MYF(0), tables->alias, "HANDLER"); DBUG_PRINT("exit",("ERROR")); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } send_ok(thd); DBUG_PRINT("exit", ("OK")); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -340,39 +322,42 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables) key_expr ha_rkey_mode cond - select_limit - offset_limit + select_limit_cnt + offset_limit_cnt RETURN - 0 ok - != 0 error + FALSE ok + TRUE error */ -int mysql_ha_read(THD *thd, TABLE_LIST *tables, - enum enum_ha_read_modes mode, char *keyname, List<Item> *key_expr, - enum ha_rkey_function ha_rkey_mode, Item *cond, - ha_rows select_limit,ha_rows offset_limit) +bool mysql_ha_read(THD *thd, TABLE_LIST *tables, + enum enum_ha_read_modes mode, char *keyname, + List<Item> *key_expr, + enum ha_rkey_function ha_rkey_mode, Item *cond, + ha_rows select_limit_cnt, ha_rows offset_limit_cnt) { TABLE_LIST *hash_tables; - TABLE **table_ptr; TABLE *table; MYSQL_LOCK *lock; List<Item> list; Protocol *protocol= thd->protocol; char buff[MAX_FIELD_WIDTH]; String buffer(buff, sizeof(buff), system_charset_info); - int err, keyno= -1; + int error, keyno= -1; uint num_rows; byte *key; uint key_len; + bool not_used; DBUG_ENTER("mysql_ha_read"); DBUG_PRINT("enter",("'%s'.'%s' as '%s'", - tables->db, tables->real_name, tables->alias)); + tables->db, tables->table_name, tables->alias)); LINT_INIT(key); LINT_INIT(key_len); - list.push_front(new Item_field(NULL,NULL,"*")); + thd->lex->select_lex.context.resolve_in_table_list_only(tables); + list.push_front(new 
Item_field(&thd->lex->select_lex.context, + NULL, NULL, "*")); List_iterator<Item> it(list); it++; @@ -382,7 +367,7 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, { table= hash_tables->table; DBUG_PRINT("info-in-hash",("'%s'.'%s' as '%s' tab %p", - hash_tables->db, hash_tables->real_name, + hash_tables->db, hash_tables->table_name, hash_tables->alias, table)); if (!table) { @@ -397,7 +382,7 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, table= hash_tables->table; DBUG_PRINT("info",("re-opened '%s'.'%s' as '%s' tab %p", - hash_tables->db, hash_tables->real_name, + hash_tables->db, hash_tables->table_name, hash_tables->alias, table)); } @@ -417,21 +402,19 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, #if MYSQL_VERSION_ID < 40100 char buff[MAX_DBKEY_LENGTH]; if (*tables->db) - strxnmov(buff, sizeof(buff), tables->db, ".", tables->real_name, NullS); + strxnmov(buff, sizeof(buff), tables->db, ".", tables->table_name, NullS); else strncpy(buff, tables->alias, sizeof(buff)); - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - buff, "HANDLER"); + my_error(ER_UNKNOWN_TABLE, MYF(0), buff, "HANDLER"); #else - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - tables->alias, "HANDLER"); + my_error(ER_UNKNOWN_TABLE, MYF(0), tables->alias, "HANDLER"); #endif goto err0; } tables->table=table; HANDLER_TABLES_HACK(thd); - lock= mysql_lock_tables(thd, &tables->table, 1, 0); + lock= mysql_lock_tables(thd, &tables->table, 1, 0, ¬_used); HANDLER_TABLES_HACK(thd); if (!lock) @@ -442,25 +425,24 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, if (table->query_id != thd->query_id) cond->cleanup(); // File was reopened if ((!cond->fixed && - cond->fix_fields(thd, tables, &cond)) || cond->check_cols(1)) + cond->fix_fields(thd, &cond)) || cond->check_cols(1)) goto err0; } if (keyname) { - if ((keyno=find_type(keyname, &table->keynames, 1+2)-1)<0) + if ((keyno=find_type(keyname, &table->s->keynames, 1+2)-1)<0) { - 
my_printf_error(ER_KEY_DOES_NOT_EXITS,ER(ER_KEY_DOES_NOT_EXITS),MYF(0), - keyname,tables->alias); + my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), keyname, tables->alias); goto err0; } } - if (insert_fields(thd,tables,tables->db,tables->alias,&it)) + if (insert_fields(thd, &thd->lex->select_lex.context, + tables->db, tables->alias, &it, 0)) goto err0; - select_limit+=offset_limit; - protocol->send_fields(&list,1); + protocol->send_fields(&list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); /* In ::external_lock InnoDB resets the fields which tell it that @@ -470,13 +452,13 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, table->file->init_table_handle_for_HANDLER(); - for (num_rows=0; num_rows < select_limit; ) + for (num_rows=0; num_rows < select_limit_cnt; ) { switch (mode) { case RNEXT: if (table->file->inited != handler::NONE) { - err=keyname ? + error=keyname ? table->file->index_next(table->record[0]) : table->file->rnd_next(table->record[0]); break; @@ -487,13 +469,13 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, { table->file->ha_index_or_rnd_end(); table->file->ha_index_init(keyno); - err=table->file->index_first(table->record[0]); + error= table->file->index_first(table->record[0]); } else { table->file->ha_index_or_rnd_end(); - if (!(err=table->file->ha_rnd_init(1))) - err=table->file->rnd_next(table->record[0]); + if (!(error= table->file->ha_rnd_init(1))) + error= table->file->rnd_next(table->record[0]); } mode=RNEXT; break; @@ -501,7 +483,7 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, DBUG_ASSERT(keyname != 0); if (table->file->inited != handler::NONE) { - err=table->file->index_prev(table->record[0]); + error=table->file->index_prev(table->record[0]); break; } /* else fall through */ @@ -509,13 +491,13 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, DBUG_ASSERT(keyname != 0); table->file->ha_index_or_rnd_end(); table->file->ha_index_init(keyno); - err=table->file->index_last(table->record[0]); + error= 
table->file->index_last(table->record[0]); mode=RPREV; break; case RNEXT_SAME: /* Continue scan on "(keypart1,keypart2,...)=(c1, c2, ...) */ DBUG_ASSERT(keyname != 0); - err= table->file->index_next_same(table->record[0], key, key_len); + error= table->file->index_next_same(table->record[0], key, key_len); break; case RKEY: { @@ -524,8 +506,7 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, KEY_PART_INFO *key_part=keyinfo->key_part; if (key_expr->elements > keyinfo->key_parts) { - my_printf_error(ER_TOO_MANY_KEY_PARTS,ER(ER_TOO_MANY_KEY_PARTS), - MYF(0),keyinfo->key_parts); + my_error(ER_TOO_MANY_KEY_PARTS, MYF(0), keyinfo->key_parts); goto err; } List_iterator<Item> it_ke(*key_expr); @@ -533,8 +514,8 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, for (key_len=0 ; (item=it_ke++) ; key_part++) { // 'item' can be changed by fix_fields() call - if ((!item->fixed && - item->fix_fields(thd, tables, it_ke.ref())) || + if ((!item->fixed && + item->fix_fields(thd, it_ke.ref())) || (item= *it_ke.ref())->check_cols(1)) goto err; if (item->used_tables() & ~RAND_TABLE_BIT) @@ -546,39 +527,36 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, key_len+=key_part->store_length; } if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len)))) - { - send_error(thd,ER_OUTOFMEMORY); goto err; - } - key_copy(key, table, keyno, key_len); table->file->ha_index_or_rnd_end(); table->file->ha_index_init(keyno); - err=table->file->index_read(table->record[0], + key_copy(key, table->record[0], table->key_info + keyno, key_len); + error= table->file->index_read(table->record[0], key,key_len,ha_rkey_mode); mode=rkey_to_rnext[(int)ha_rkey_mode]; break; } default: - send_error(thd,ER_ILLEGAL_HA); + my_message(ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), MYF(0)); goto err; } - if (err == HA_ERR_RECORD_DELETED) - continue; - if (err) + if (error) { - if (err != HA_ERR_KEY_NOT_FOUND && err != HA_ERR_END_OF_FILE) + if (error == HA_ERR_RECORD_DELETED) + continue; + if (error != HA_ERR_KEY_NOT_FOUND && error != 
HA_ERR_END_OF_FILE) { sql_print_error("mysql_ha_read: Got error %d when reading table '%s'", - err, tables->real_name); - table->file->print_error(err,MYF(0)); + error, tables->table_name); + table->file->print_error(error,MYF(0)); goto err; } goto ok; } if (cond && !cond->val_int()) continue; - if (num_rows >= offset_limit) + if (num_rows >= offset_limit_cnt) { Item *item; protocol->prepare_for_resend(); @@ -588,7 +566,7 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, if (item->send(thd->protocol, &buffer)) { protocol->free(); // Free used - my_error(ER_OUT_OF_RESOURCES,MYF(0)); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); goto err; } } @@ -600,13 +578,13 @@ ok: mysql_unlock_tables(thd,lock); send_eof(thd); DBUG_PRINT("exit",("OK")); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: mysql_unlock_tables(thd,lock); err0: DBUG_PRINT("exit",("ERROR")); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } @@ -629,7 +607,7 @@ err0: tables are closed (if MYSQL_HA_FLUSH_ALL) is set. If 'tables' is NULL and MYSQL_HA_FLUSH_ALL is not set, all HANDLER tables marked for flush are closed. - Broadcasts a COND_refresh condition, for every table closed. + Broadcasts refresh for every table closed. NOTE Since mysql_ha_flush() is called when the base table has to be closed, @@ -651,25 +629,25 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags, if (tables) { /* Close all tables in the list. */ - for (tmp_tables= tables ; tmp_tables; tmp_tables= tmp_tables->next) + for (tmp_tables= tables ; tmp_tables; tmp_tables= tmp_tables->next_local) { DBUG_PRINT("info-in-tables-list",("'%s'.'%s' as '%s'", - tmp_tables->db, tmp_tables->real_name, + tmp_tables->db, tmp_tables->table_name, tmp_tables->alias)); /* Close all currently open handler tables with the same base table. */ table_ptr= &(thd->handler_tables); while (*table_ptr) { - if ((! *tmp_tables->db || - ! 
my_strcasecmp(&my_charset_latin1, (*table_ptr)->table_cache_key, + if ((!*tmp_tables->db || + !my_strcasecmp(&my_charset_latin1, (*table_ptr)->s->db, tmp_tables->db)) && - ! my_strcasecmp(&my_charset_latin1, (*table_ptr)->real_name, - tmp_tables->real_name)) + ! my_strcasecmp(&my_charset_latin1, (*table_ptr)->s->table_name, + tmp_tables->table_name)) { DBUG_PRINT("info",("*table_ptr '%s'.'%s' as '%s'", - (*table_ptr)->table_cache_key, - (*table_ptr)->real_name, - (*table_ptr)->table_name)); + (*table_ptr)->s->db, + (*table_ptr)->s->table_name, + (*table_ptr)->alias)); /* The first time it is required, lock for close_thread_table(). */ if (! did_lock && ! is_locked) { @@ -692,7 +670,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags, while (*table_ptr) { if ((mode_flags & MYSQL_HA_FLUSH_ALL) || - ((*table_ptr)->version != refresh_version)) + ((*table_ptr)->s->version != refresh_version)) { /* The first time it is required, lock for close_thread_table(). */ if (! did_lock && ! is_locked) @@ -725,7 +703,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags, MYSQL_HA_REOPEN_ON_USAGE mark for reopen. DESCRIPTION - Broadcasts a COND_refresh condition, for every table closed. + Broadcasts refresh if it closed the table. The caller must lock LOCK_open. RETURN @@ -738,12 +716,12 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags) TABLE *table= *table_ptr; DBUG_ENTER("mysql_ha_flush_table"); DBUG_PRINT("enter",("'%s'.'%s' as '%s' flags: 0x%02x", - table->table_cache_key, table->real_name, - table->table_name, mode_flags)); + table->s->db, table->s->table_name, + table->alias, mode_flags)); if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash, - (byte*) table->table_name, - strlen(table->table_name) + 1))) + (byte*) table->alias, + strlen(table->alias) + 1))) { if (! 
(mode_flags & MYSQL_HA_REOPEN_ON_USAGE)) { @@ -759,11 +737,50 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags) safe_mutex_assert_owner(&LOCK_open); (*table_ptr)->file->ha_index_or_rnd_end(); + safe_mutex_assert_owner(&LOCK_open); if (close_thread_table(thd, table_ptr)) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } DBUG_RETURN(0); } + + +/* + Mark tables for reopen. + + SYNOPSIS + mysql_ha_mark_tables_for_reopen() + thd Thread identifier. + table Table list to mark for reopen. + + DESCRIPTION + For each table found in the handler hash mark it as closed + (ready for reopen) and end all index/table scans. + + NOTE + The caller must lock LOCK_open. +*/ + +void mysql_ha_mark_tables_for_reopen(THD *thd, TABLE *table) +{ + DBUG_ENTER("mysql_ha_mark_tables_for_reopen"); + + safe_mutex_assert_owner(&LOCK_open); + for (; table; table= table->next) + { + TABLE_LIST *hash_tables; + if ((hash_tables= (TABLE_LIST*) hash_search(&thd->handler_tables_hash, + (byte*) table->alias, + strlen(table->alias) + 1))) + { + /* Mark table as ready for reopen. */ + hash_tables->table= NULL; + /* End open index/table scans. */ + table->file->ha_index_or_rnd_end(); + } + } + DBUG_VOID_RETURN; +} diff --git a/sql/sql_help.cc b/sql/sql_help.cc index 0e0d32a922d..ba7f1a534ea 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -75,21 +74,24 @@ enum enum_used_fields RETURN VALUES 0 all ok - 1 one of the fileds didn't finded + 1 one of the fileds was not found */ static bool init_fields(THD *thd, TABLE_LIST *tables, struct st_find_field *find_fields, uint count) { + Name_resolution_context *context= &thd->lex->select_lex.context; DBUG_ENTER("init_fields"); + context->resolve_in_table_list_only(tables); for (; count-- ; find_fields++) { - TABLE_LIST *not_used; /* We have to use 'new' here as field will be re_linked on free */ - Item_field *field= new Item_field("mysql", find_fields->table_name, + Item_field *field= new Item_field(context, + "mysql", find_fields->table_name, find_fields->field_name); - if (!(find_fields->field= find_field_in_tables(thd, field, tables, - ¬_used, TRUE))) + if (!(find_fields->field= find_field_in_tables(thd, field, tables, NULL, + 0, REPORT_ALL_ERRORS, 1, + TRUE))) DBUG_RETURN(1); } DBUG_RETURN(0); @@ -273,11 +275,11 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, DBUG_ENTER("get_topics_for_keyword"); if ((iindex_topic= find_type((char*) primary_key_name, - &topics->keynames, 1+2)-1)<0 || + &topics->s->keynames, 1+2)-1)<0 || (iindex_relations= find_type((char*) primary_key_name, - &relations->keynames, 1+2)-1)<0) + &relations->s->keynames, 1+2)-1)<0) { - send_error(thd,ER_CORRUPT_HELP_DB); + my_message(ER_CORRUPT_HELP_DB, ER(ER_CORRUPT_HELP_DB), MYF(0)); DBUG_RETURN(-1); } rtopic_id= find_fields[help_relation_help_topic_id].field; @@ -286,9 +288,8 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, topics->file->ha_index_init(iindex_topic); relations->file->ha_index_init(iindex_relations); - rkey_id->store((longlong) key_id); - rkey_id->get_key_image(buff, rkey_id->pack_length(), rkey_id->charset(), - Field::itRAW); + rkey_id->store((longlong) key_id, TRUE); + rkey_id->get_key_image(buff, 
rkey_id->pack_length(), Field::itRAW); int key_res= relations->file->index_read(relations->record[0], (byte *)buff, rkey_id->pack_length(), HA_READ_KEY_EXACT); @@ -300,9 +301,8 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, char topic_id_buff[8]; longlong topic_id= rtopic_id->val_int(); Field *field= find_fields[help_topic_help_topic_id].field; - field->store((longlong) topic_id); - field->get_key_image(topic_id_buff, field->pack_length(), field->charset(), - Field::itRAW); + field->store((longlong) topic_id, TRUE); + field->get_key_image(topic_id_buff, field->pack_length(), Field::itRAW); if (!topics->file->index_read(topics->record[0], (byte *)topic_id_buff, field->pack_length(), HA_READ_KEY_EXACT)) @@ -427,7 +427,8 @@ int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3) field_list.push_back(new Item_empty_string("description",1000)); field_list.push_back(new Item_empty_string("example",1000)); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); protocol->prepare_for_resend(); @@ -469,7 +470,8 @@ int send_header_2(Protocol *protocol, bool for_category) field_list.push_back(new Item_empty_string("source_category_name",64)); field_list.push_back(new Item_empty_string("name",64)); field_list.push_back(new Item_empty_string("is_it_category",1)); - DBUG_RETURN(protocol->send_fields(&field_list,1)); + DBUG_RETURN(protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)); } /* @@ -545,7 +547,6 @@ int send_variant_2_list(MEM_ROOT *mem_root, Protocol *protocol, prepare_simple_select() thd Thread handler cond WHERE part of select - tables list of tables, used in WHERE table goal table error code of error (out) @@ -554,13 +555,18 @@ int send_variant_2_list(MEM_ROOT *mem_root, Protocol *protocol, # created SQL_SELECT */ -SQL_SELECT *prepare_simple_select(THD *thd, Item *cond, TABLE_LIST *tables, +SQL_SELECT 
*prepare_simple_select(THD *thd, Item *cond, TABLE *table, int *error) { if (!cond->fixed) - cond->fix_fields(thd, tables, &cond); // can never fail - SQL_SELECT *res= make_select(table,0,0,cond,error); - if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR))) + cond->fix_fields(thd, &cond); // can never fail + + /* Assume that no indexes cover all required fields */ + table->used_keys.clear_all(); + + SQL_SELECT *res= make_select(table, 0, 0, cond, 0, error); + if (*error || (res && res->check_quick(thd, 0, HA_POS_ERROR)) || + (res && res->quick && res->quick->reset())) { delete res; res=0; @@ -592,10 +598,11 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen, { Item *cond= new Item_func_like(new Item_field(pfname), new Item_string(mask,mlen,pfname->charset()), - new Item_string("\\",1,&my_charset_latin1)); + new Item_string("\\",1,&my_charset_latin1), + FALSE); if (thd->is_fatal_error) return 0; // OOM - return prepare_simple_select(thd,cond,tables,table,error); + return prepare_simple_select(thd, cond, table, error); } @@ -607,66 +614,64 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen, thd Thread handler RETURN VALUES - 0 Success - 1 Error and send_error already commited - -1 error && send_error should be issued (normal case) + FALSE Success + TRUE Error and send_error already commited */ -int mysqld_help(THD *thd, const char *mask) +bool mysqld_help(THD *thd, const char *mask) { Protocol *protocol= thd->protocol; SQL_SELECT *select; st_find_field used_fields[array_elements(init_used_fields)]; + TABLE_LIST *leaves= 0; + TABLE_LIST tables[4]; + List<String> topics_list, categories_list, subcategories_list; + String name, description, example; + int count_topics, count_categories, error; + uint mlen= strlen(mask); + size_t i; + MEM_ROOT *mem_root= thd->mem_root; DBUG_ENTER("mysqld_help"); - TABLE_LIST tables[4]; bzero((gptr)tables,sizeof(tables)); - tables[0].alias= tables[0].real_name= (char*) 
"help_topic"; + tables[0].alias= tables[0].table_name= (char*) "help_topic"; tables[0].lock_type= TL_READ; - tables[0].next= &tables[1]; - tables[1].alias= tables[1].real_name= (char*) "help_category"; + tables[0].next_global= tables[0].next_local= + tables[0].next_name_resolution_table= &tables[1]; + tables[1].alias= tables[1].table_name= (char*) "help_category"; tables[1].lock_type= TL_READ; - tables[1].next= &tables[2]; - tables[2].alias= tables[2].real_name= (char*) "help_relation"; + tables[1].next_global= tables[1].next_local= + tables[1].next_name_resolution_table= &tables[2]; + tables[2].alias= tables[2].table_name= (char*) "help_relation"; tables[2].lock_type= TL_READ; - tables[2].next= &tables[3]; - tables[3].alias= tables[3].real_name= (char*) "help_keyword"; + tables[2].next_global= tables[2].next_local= + tables[2].next_name_resolution_table= &tables[3]; + tables[3].alias= tables[3].table_name= (char*) "help_keyword"; tables[3].lock_type= TL_READ; - tables[3].next= 0; tables[0].db= tables[1].db= tables[2].db= tables[3].db= (char*) "mysql"; - List<String> topics_list, categories_list, subcategories_list; - String name, description, example; - int res, count_topics, count_categories, error; - uint mlen= strlen(mask); - MEM_ROOT *mem_root= thd->mem_root; - if (open_and_lock_tables(thd, tables)) - { - res= -1; - goto end; - } - /* Init tables and fields to be usable from items */ - setup_tables(tables); + goto error; + /* + Init tables and fields to be usable from items + tables do not contain VIEWs => we can pass 0 as conds + */ + thd->lex->select_lex.context.table_list= + thd->lex->select_lex.context.first_name_resolution_table= &tables[0]; + setup_tables(thd, &thd->lex->select_lex.context, + &thd->lex->select_lex.top_join_list, + tables, 0, &leaves, FALSE); memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields)); if (init_fields(thd, tables, used_fields, array_elements(used_fields))) - { - res= -1; - goto end; - } - size_t i; + goto 
error; for (i=0; i<sizeof(tables)/sizeof(TABLE_LIST); i++) tables[i].table->file->init_table_handle_for_HANDLER(); if (!(select= prepare_select_for_name(thd,mask,mlen,tables,tables[0].table, used_fields[help_topic_name].field,&error))) - { - res= -1; - goto end; - } + goto error; - res= 1; count_topics= search_topics(thd,tables[0].table,used_fields, select,&topics_list, &name, &description, &example); @@ -678,10 +683,8 @@ int mysqld_help(THD *thd, const char *mask) if (!(select= prepare_select_for_name(thd,mask,mlen,tables,tables[3].table, used_fields[help_keyword_name].field,&error))) - { - res= -1; - goto end; - } + goto error; + count_topics=search_keyword(thd,tables[3].table,used_fields,select,&key_id); delete select; count_topics= (count_topics != 1) ? 0 : @@ -697,10 +700,7 @@ int mysqld_help(THD *thd, const char *mask) if (!(select= prepare_select_for_name(thd,mask,mlen,tables,tables[1].table, used_fields[help_category_name].field,&error))) - { - res= -1; - goto end; - } + goto error; count_categories= search_categories(thd, tables[1].table, used_fields, select, @@ -709,13 +709,13 @@ int mysqld_help(THD *thd, const char *mask) if (!count_categories) { if (send_header_2(protocol,FALSE)) - goto end; + goto error; } else if (count_categories > 1) { if (send_header_2(protocol,FALSE) || send_variant_2_list(mem_root,protocol,&categories_list,"Y",0)) - goto end; + goto error; } else { @@ -726,22 +726,16 @@ int mysqld_help(THD *thd, const char *mask) Item *cond_cat_by_cat= new Item_func_equal(new Item_field(cat_cat_id), new Item_int((int32)category_id)); - if (!(select= prepare_simple_select(thd,cond_topic_by_cat, - tables,tables[0].table,&error))) - { - res= -1; - goto end; - } + if (!(select= prepare_simple_select(thd, cond_topic_by_cat, + tables[0].table, &error))) + goto error; get_all_items_for_category(thd,tables[0].table, used_fields[help_topic_name].field, select,&topics_list); delete select; - if (!(select= prepare_simple_select(thd,cond_cat_by_cat,tables, - 
tables[1].table,&error))) - { - res= -1; - goto end; - } + if (!(select= prepare_simple_select(thd, cond_cat_by_cat, + tables[1].table, &error))) + goto error; get_all_items_for_category(thd,tables[1].table, used_fields[help_category_name].field, select,&subcategories_list); @@ -750,39 +744,35 @@ int mysqld_help(THD *thd, const char *mask) if (send_header_2(protocol, TRUE) || send_variant_2_list(mem_root,protocol,&topics_list, "N",cat) || send_variant_2_list(mem_root,protocol,&subcategories_list,"Y",cat)) - goto end; + goto error; } } else if (count_topics == 1) { if (send_answer_1(protocol,&name,&description,&example)) - goto end; + goto error; } else { /* First send header and functions */ if (send_header_2(protocol, FALSE) || send_variant_2_list(mem_root,protocol, &topics_list, "N", 0)) - goto end; + goto error; if (!(select= prepare_select_for_name(thd,mask,mlen,tables,tables[1].table, used_fields[help_category_name].field,&error))) - { - res= -1; - goto end; - } + goto error; search_categories(thd, tables[1].table, used_fields, select,&categories_list, 0); delete select; /* Then send categories */ if (send_variant_2_list(mem_root,protocol, &categories_list, "Y", 0)) - goto end; + goto error; } - res= 0; - send_eof(thd); -end: - DBUG_RETURN(res); + DBUG_RETURN(FALSE); +error: + DBUG_RETURN(TRUE); } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index f5bbba742a5..0fa027f89d6 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -17,17 +16,59 @@ /* Insert of records */ +/* + INSERT DELAYED + + Insert delayed is distinguished from a normal insert by lock_type == + TL_WRITE_DELAYED instead of TL_WRITE. It first tries to open a + "delayed" table (delayed_get_table()), but falls back to + open_and_lock_tables() on error and proceeds as normal insert then. + + Opening a "delayed" table means to find a delayed insert thread that + has the table open already. If this fails, a new thread is created and + waited for to open and lock the table. + + If accessing the thread succeeded, in + delayed_insert::get_local_table() the table of the thread is copied + for local use. A copy is required because the normal insert logic + works on a target table, but the other threads table object must not + be used. The insert logic uses the record buffer to create a record. + And the delayed insert thread uses the record buffer to pass the + record to the table handler. So there must be different objects. Also + the copied table is not included in the lock, so that the statement + can proceed even if the real table cannot be accessed at this moment. + + Copying a table object is not a trivial operation. Besides the TABLE + object there are the field pointer array, the field objects and the + record buffer. After copying the field objects, their pointers into + the record must be "moved" to point to the new record buffer. + + After this setup the normal insert logic is used. Only that for + delayed inserts write_delayed() is called instead of write_record(). + It inserts the rows into a queue and signals the delayed insert thread + instead of writing directly to the table. + + The delayed insert thread awakes from the signal. It locks the table, + inserts the rows from the queue, unlocks the table, and waits for the + next signal. It does normally live until a FLUSH TABLES or SHUTDOWN. 
+ +*/ + #include "mysql_priv.h" +#include "sp_head.h" +#include "sql_trigger.h" +#include "sql_select.h" +#include "slave.h" -static int check_null_fields(THD *thd,TABLE *entry); #ifndef EMBEDDED_LIBRARY static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list); static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, bool ignore, - char *query, uint query_length, int log_on); + char *query, uint query_length, bool log_on); static void end_delayed_insert(THD *thd); -extern "C" pthread_handler_decl(handle_delayed_insert,arg); +pthread_handler_t handle_delayed_insert(void *arg); static void unlink_blobs(register TABLE *table); #endif +static bool check_view_insertability(THD *thd, TABLE_LIST *view); /* Define to force use of my_malloc() if the allocated memory block is big */ @@ -39,8 +80,64 @@ static void unlink_blobs(register TABLE *table); #define my_safe_afree(ptr, size, min_length) if (size > min_length) my_free(ptr,MYF(0)) #endif -#define DELAYED_LOG_UPDATE 1 -#define DELAYED_LOG_BIN 2 +/* + Check that insert/update fields are from the same single table of a view. + + SYNOPSIS + check_view_single_update() + fields The insert/update fields to be checked. + view The view for insert. + map [in/out] The insert table map. + + DESCRIPTION + This function is called in 2 cases: + 1. to check insert fields. In this case *map will be set to 0. + Insert fields are checked to be all from the same single underlying + table of the given view. Otherwise the error is thrown. Found table + map is returned in the map parameter. + 2. to check update fields of the ON DUPLICATE KEY UPDATE clause. + In this case *map contains table_map found on the previous call of + the function to check insert fields. Update fields are checked to be + from the same table as the insert fields. 
+ + RETURN + 0 OK + 1 Error +*/ + +bool check_view_single_update(List<Item> &fields, TABLE_LIST *view, + table_map *map) +{ + /* it is join view => we need to find the table for update */ + List_iterator_fast<Item> it(fields); + Item *item; + TABLE_LIST *tbl= 0; // reset for call to check_single_table() + table_map tables= 0; + + while ((item= it++)) + tables|= item->used_tables(); + + /* Check found map against provided map */ + if (*map) + { + if (tables != *map) + goto error; + return FALSE; + } + + if (view->check_single_table(&tbl, tables, view) || tbl == 0) + goto error; + + view->table= tbl->table; + *map= tables; + + return FALSE; + +error: + my_error(ER_VIEW_MULTIUPDATE, MYF(0), + view->view_db.str, view->view_name.str); + return TRUE; +} /* @@ -52,6 +149,7 @@ static void unlink_blobs(register TABLE *table); table The table for insert. fields The insert fields. values The insert values. + check_unique If duplicate values should be rejected. NOTE Clears TIMESTAMP_AUTO_SET_ON_INSERT from table->timestamp_field_type @@ -63,50 +161,89 @@ static void unlink_blobs(register TABLE *table); -1 Error */ -static int check_insert_fields(THD *thd, TABLE *table, List<Item> &fields, - List<Item> &values) +static int check_insert_fields(THD *thd, TABLE_LIST *table_list, + List<Item> &fields, List<Item> &values, + bool check_unique, table_map *map) { + TABLE *table= table_list->table; + + if (!table_list->updatable) + { + my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT"); + return -1; + } + if (fields.elements == 0 && values.elements != 0) { - if (values.elements != table->fields) + if (!table) { - my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, - ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0), 1L); + my_error(ER_VIEW_NO_INSERT_FIELD_LIST, MYF(0), + table_list->view_db.str, table_list->view_name.str); return -1; } -#ifndef NO_EMBEDDED_ACCESS_CHECKS - if (grant_option && - check_grant_all_columns(thd,INSERT_ACL,table)) + if (values.elements != table->s->fields) 
+ { + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1L); return -1; + } +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (grant_option) + { + Field_iterator_table field_it; + field_it.set_table(table); + if (check_grant_all_columns(thd, INSERT_ACL, &table->grant, + table->s->db, table->s->table_name, + &field_it)) + return -1; + } #endif clear_timestamp_auto_bits(table->timestamp_field_type, TIMESTAMP_AUTO_SET_ON_INSERT); } else { // Part field list + SELECT_LEX *select_lex= &thd->lex->select_lex; + Name_resolution_context *context= &select_lex->context; + Name_resolution_context_state ctx_state; + int res; + if (fields.elements != values.elements) { - my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, - ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0), 1L); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1L); return -1; } - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - table_list.db= table->table_cache_key; - table_list.real_name= table_list.alias= table->table_name; - table_list.table=table; - table_list.grant=table->grant; thd->dupp_field=0; - if (setup_tables(&table_list) || - setup_fields(thd, 0, &table_list,fields,1,0,0)) + select_lex->no_wrap_view_item= TRUE; + + /* Save the state of the current name resolution context. */ + ctx_state.save_state(context, table_list); + + /* + Perform name resolution only in the first table - 'table_list', + which is the table that is inserted into. + */ + table_list->next_local= 0; + context->resolve_in_table_list_only(table_list); + res= setup_fields(thd, 0, fields, 1, 0, 0); + + /* Restore the current context. 
*/ + ctx_state.restore_state(context, table_list); + thd->lex->select_lex.no_wrap_view_item= FALSE; + + if (res) return -1; - if (thd->dupp_field) + if (table_list->effective_algorithm == VIEW_ALGORITHM_MERGE) { - my_error(ER_FIELD_SPECIFIED_TWICE,MYF(0), thd->dupp_field->field_name); + if (check_view_single_update(fields, table_list, map)) + return -1; + table= table_list->table; + } + + if (check_unique && thd->dupp_field) + { + my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dupp_field->field_name); return -1; } if (table->timestamp_field && // Don't set timestamp if used @@ -116,8 +253,17 @@ static int check_insert_fields(THD *thd, TABLE *table, List<Item> &fields, } // For the values we need select_priv #ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege); + table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); #endif + + if (check_key_in_view(thd, table_list) || + (table_list->view && + check_view_insertability(thd, table_list))) + { + my_error(ER_NON_INSERTABLE_TABLE, MYF(0), table_list->alias, "INSERT"); + return -1; + } + return 0; } @@ -141,11 +287,11 @@ static int check_insert_fields(THD *thd, TABLE *table, List<Item> &fields, -1 Error */ -static int check_update_fields(THD *thd, TABLE *table, - TABLE_LIST *insert_table_list, - List<Item> &update_fields) +static int check_update_fields(THD *thd, TABLE_LIST *insert_table_list, + List<Item> &update_fields, table_map *map) { - ulong timestamp_query_id; + TABLE *table= insert_table_list->table; + query_id_t timestamp_query_id; LINT_INIT(timestamp_query_id); /* @@ -155,14 +301,18 @@ static int check_update_fields(THD *thd, TABLE *table, if (table->timestamp_field) { timestamp_query_id= table->timestamp_field->query_id; - table->timestamp_field->query_id= thd->query_id-1; + table->timestamp_field->query_id= thd->query_id - 1; } /* Check the fields we are going to modify. This will set the query_id of all used fields to the threads query_id. 
*/ - if (setup_fields(thd, 0, insert_table_list, update_fields, 1, 0, 0)) + if (setup_fields(thd, 0, update_fields, 1, 0, 0)) + return -1; + + if (insert_table_list->effective_algorithm == VIEW_ALGORITHM_MERGE && + check_view_single_update(update_fields, insert_table_list, map)) return -1; if (table->timestamp_field) @@ -179,13 +329,40 @@ static int check_update_fields(THD *thd, TABLE *table, } -int mysql_insert(THD *thd,TABLE_LIST *table_list, - List<Item> &fields, - List<List_item> &values_list, - List<Item> &update_fields, - List<Item> &update_values, - enum_duplicates duplic, - bool ignore) +/* + Mark fields used by triggers for INSERT-like statement. + + SYNOPSIS + mark_fields_used_by_triggers_for_insert_stmt() + thd The current thread + table Table to which insert will happen + duplic Type of duplicate handling for insert which will happen + + NOTE + For REPLACE there is no sense in marking particular fields + used by ON DELETE trigger as to execute it properly we have + to retrieve and store values for all table columns anyway. +*/ + +void mark_fields_used_by_triggers_for_insert_stmt(THD *thd, TABLE *table, + enum_duplicates duplic) +{ + if (table->triggers) + { + table->triggers->mark_fields_used(thd, TRG_EVENT_INSERT); + if (duplic == DUP_UPDATE) + table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE); + } +} + + +bool mysql_insert(THD *thd,TABLE_LIST *table_list, + List<Item> &fields, + List<List_item> &values_list, + List<Item> &update_fields, + List<Item> &update_values, + enum_duplicates duplic, + bool ignore) { int error, res; /* @@ -193,27 +370,26 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, By default, both logs are enabled (this won't cause problems if the server runs without --log-update or --log-bin). 
*/ - int log_on= DELAYED_LOG_UPDATE | DELAYED_LOG_BIN ; - bool transactional_table, log_delayed, joins_freed= FALSE; + bool transactional_table, joins_freed= FALSE; + bool changed; uint value_count; ulong counter = 1; ulonglong id; COPY_INFO info; - TABLE *table; + TABLE *table= 0; List_iterator_fast<List_item> its(values_list); List_item *values; + Name_resolution_context *context; + Name_resolution_context_state ctx_state; #ifndef EMBEDDED_LIBRARY char *query= thd->query; #endif + bool log_on= (thd->options & OPTION_BIN_LOG) || + (!(thd->security_ctx->master_access & SUPER_ACL)); thr_lock_type lock_type = table_list->lock_type; - TABLE_LIST *insert_table_list= (TABLE_LIST*) - thd->lex->select_lex.table_list.first; + Item *unused_conds= 0; DBUG_ENTER("mysql_insert"); - if (!(thd->options & OPTION_UPDATE_LOG)) - log_on&= ~(int) DELAYED_LOG_UPDATE; - if (!(thd->options & OPTION_BIN_LOG)) - log_on&= ~(int) DELAYED_LOG_BIN; /* in safe mode or with skip-new change delayed insert to be regular if we are told to replace duplicates, the insert cannot be concurrent @@ -230,30 +406,58 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, (duplic == DUP_UPDATE)) lock_type=TL_WRITE; #endif + if ((lock_type == TL_WRITE_DELAYED) && + log_on && mysql_bin_log.is_open() && + (values_list.elements > 1)) + { + /* + Statement-based binary logging does not work in this case, because: + a) two concurrent statements may have their rows intermixed in the + queue, leading to autoincrement replication problems on slave (because + the values generated used for one statement don't depend only on the + value generated for the first row of this statement, so are not + replicable) + b) if first row of the statement has an error the full statement is + not binlogged, while next rows of the statement may be inserted. + c) if first row succeeds, statement is binlogged immediately with a + zero error code (i.e. 
"no error"), if then second row fails, query + will fail on slave too and slave will stop (wrongly believing that the + master got no error). + So we fallback to non-delayed INSERT. + */ + lock_type= TL_WRITE; + } table_list->lock_type= lock_type; #ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { + res= 1; if (thd->locked_tables) { - if (find_locked_table(thd, - table_list->db ? table_list->db : thd->db, - table_list->real_name)) + DBUG_ASSERT(table_list->db); /* Must be set in the parser */ + if (find_locked_table(thd, table_list->db, table_list->table_name)) { - my_printf_error(ER_DELAYED_INSERT_TABLE_LOCKED, - ER(ER_DELAYED_INSERT_TABLE_LOCKED), - MYF(0), table_list->real_name); - DBUG_RETURN(-1); + my_error(ER_DELAYED_INSERT_TABLE_LOCKED, MYF(0), + table_list->table_name); + DBUG_RETURN(TRUE); } } if ((table= delayed_get_table(thd,table_list)) && !thd->is_fatal_error) { - res= 0; - if (table_list->next) /* if sub select */ - res= open_and_lock_tables(thd, table_list->next); + /* + Open tables used for sub-selects or in stored functions, will also + cache these functions. + */ + res= open_and_lock_tables(thd, table_list->next_global); + /* + First is not processed by open_and_lock_tables() => we need set + updateability flags "by hands". 
+ */ + if (!table_list->derived && !table_list->view) + table_list->updatable= 1; // usual table } - else + else if (thd->net.last_errno != ER_WRONG_OBJECT) { /* Too many delayed insert threads; Use a normal insert */ table_list->lock_type= lock_type= TL_WRITE; @@ -264,83 +468,152 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, #endif /* EMBEDDED_LIBRARY */ res= open_and_lock_tables(thd, table_list); if (res || thd->is_fatal_error) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); - table= table_list->table; thd->proc_info="init"; thd->used_tables=0; values= its++; + value_count= values->elements; - if (mysql_prepare_insert(thd, table_list, insert_table_list, - insert_table_list, table, - fields, values, update_fields, - update_values, duplic)) + if (mysql_prepare_insert(thd, table_list, table, fields, values, + update_fields, update_values, duplic, &unused_conds, + FALSE, + (fields.elements || !value_count), + !ignore && (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)))) goto abort; - value_count= values->elements; + /* mysql_prepare_insert set table_list->table if it was not set */ + table= table_list->table; + + context= &thd->lex->select_lex.context; + /* + These three asserts test the hypothesis that the resetting of the name + resolution context below is not necessary at all since the list of local + tables for INSERT always consists of one table. + */ + DBUG_ASSERT(!table_list->next_local); + DBUG_ASSERT(!context->table_list->next_local); + DBUG_ASSERT(!context->first_name_resolution_table->next_name_resolution_table); + + /* Save the state of the current name resolution context. */ + ctx_state.save_state(context, table_list); + + /* + Perform name resolution only in the first table - 'table_list', + which is the table that is inserted into. 
+ */ + table_list->next_local= 0; + context->resolve_in_table_list_only(table_list); + while ((values= its++)) { counter++; if (values->elements != value_count) { - my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, - ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0),counter); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); goto abort; } - if (setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0)) + if (setup_fields(thd, 0, *values, 0, 0, 0)) goto abort; } its.rewind (); + + /* Restore the current context. */ + ctx_state.restore_state(context, table_list); + /* Fill in the given fields and dump it to the table file */ - - info.records= info.deleted= info.copied= info.updated= 0; + info.records= info.deleted= info.copied= info.updated= info.touched= 0; info.ignore= ignore; info.handle_duplicates=duplic; info.update_fields= &update_fields; info.update_values= &update_values; + info.view= (table_list->view ? table_list : 0); + /* Count warnings for all inserts. For single line insert, generate an error if try to set a NOT NULL field - to NULL + to NULL. */ - thd->count_cuted_fields= ((values_list.elements == 1) ? + thd->count_cuted_fields= ((values_list.elements == 1 && + !ignore) ? CHECK_FIELD_ERROR_FOR_NULL : CHECK_FIELD_WARN); thd->cuted_fields = 0L; table->next_number_field=table->found_next_number_field; +#ifdef HAVE_REPLICATION + if (thd->slave_thread && + (info.handle_duplicates == DUP_UPDATE) && + (table->next_number_field != NULL) && + rpl_master_has_bug(&active_mi->rli, 24432)) + goto abort; +#endif + error=0; id=0; thd->proc_info="update"; if (duplic != DUP_ERROR || ignore) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + if (duplic == DUP_REPLACE) + { + if (!table->triggers || !table->triggers->has_delete_triggers()) + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); + /* + REPLACE should change values of all columns so we should mark + all columns as columns to be set. 
As nice side effect we will + retrieve columns which values are needed for ON DELETE triggers. + */ + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + } /* let's *try* to start bulk inserts. It won't necessary start them as values_list.elements should be greater than some - handler dependent - threshold. + We should not start bulk inserts if this statement uses + functions or invokes triggers since they may access + to the same table and therefore should not see its + inconsistent state created by this optimization. So we call start_bulk_insert to perform nesessary checks on values_list.elements, and - if nothing else - to initialize the code to make the call of end_bulk_insert() below safe. */ - if (lock_type != TL_WRITE_DELAYED) + if (lock_type != TL_WRITE_DELAYED && !thd->prelocked_mode) table->file->start_bulk_insert(values_list.elements); + thd->no_trans_update= 0; + thd->abort_on_warning= (!ignore && (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + + mark_fields_used_by_triggers_for_insert_stmt(thd, table, duplic); + + if (table_list->prepare_where(thd, 0, TRUE) || + table_list->prepare_check_option(thd)) + error= 1; + while ((values= its++)) { if (fields.elements || !value_count) { - restore_record(table,default_values); // Get empty record - if (fill_record(fields, *values, 0)|| thd->net.report_error || - check_null_fields(thd,table)) + restore_record(table,s->default_values); // Get empty record + if (fill_record_n_invoke_before_triggers(thd, fields, *values, 0, + table->triggers, + TRG_EVENT_INSERT)) { if (values_list.elements != 1 && !thd->net.report_error) { info.records++; continue; } + /* + TODO: set thd->abort_on_warning if values_list.elements == 1 + and check that all items return warning in case of problem with + storing field. 
+ */ error=1; break; } @@ -348,10 +621,19 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, else { if (thd->used_tables) // Column used in values() - restore_record(table,default_values); // Get empty record + restore_record(table,s->default_values); // Get empty record else - table->record[0][0]=table->default_values[0]; // Fix delete marker - if (fill_record(table->field,*values, 0) || thd->net.report_error) + { + /* + Fix delete marker. No need to restore rest of record since it will + be overwritten by fill_record() anyway (and fill_record() does not + use default values in this case). + */ + table->record[0][0]= table->s->default_values[0]; + } + if (fill_record_n_invoke_before_triggers(thd, table->field, *values, 0, + table->triggers, + TRG_EVENT_INSERT)) { if (values_list.elements != 1 && ! thd->net.report_error) { @@ -362,6 +644,18 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, break; } } + + if ((res= table_list->view_check_option(thd, + (values_list.elements == 1 ? + 0 : + ignore))) == + VIEW_CHECK_SKIP) + continue; + else if (res == VIEW_CHECK_ERROR) + { + error= 1; + break; + } #ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { @@ -370,9 +664,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, } else #endif - error=write_record(table,&info); - if (error) - break; + error=write_record(thd, table ,&info); /* If auto_increment values are used, save the first one for LAST_INSERT_ID() and for the update log. 
@@ -381,6 +673,8 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, { // Get auto increment value id= thd->last_insert_id; } + if (error) + break; thd->row_count++; } @@ -405,7 +699,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, else #endif { - if (table->file->end_bulk_insert() && !error) + if (!thd->prelocked_mode && table->file->end_bulk_insert() && !error) { table->file->print_error(my_errno,MYF(0)); error=1; @@ -415,32 +709,30 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, else if (table->next_number_field && info.copied) id=table->next_number_field->val_int(); // Return auto_increment value - /* - Invalidate the table in the query cache if something changed. - For the transactional algorithm to work the invalidation must be - before binlog writing and ha_autocommit_... - */ - if (info.copied || info.deleted || info.updated) - query_cache_invalidate3(thd, table_list, 1); - transactional_table= table->file->has_transactions(); - log_delayed= (transactional_table || table->tmp_table); - if ((info.copied || info.deleted || info.updated) && - (error <= 0 || !transactional_table)) + if ((changed= (info.copied || info.deleted || info.updated))) { - mysql_update_log.write(thd, thd->query, thd->query_length); - if (mysql_bin_log.is_open()) + /* + Invalidate the table in the query cache if something changed. 
+ For the transactional algorithm to work the invalidation must be + before binlog writing and ha_autocommit_or_rollback + */ + query_cache_invalidate3(thd, table_list, 1); + if (error <= 0 || !transactional_table) { - if (error <= 0) - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, - log_delayed, FALSE); - if (mysql_bin_log.write(&qinfo) && transactional_table) - error=1; + if (mysql_bin_log.is_open()) + { + if (error <= 0) + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, + transactional_table, FALSE); + if (mysql_bin_log.write(&qinfo) && transactional_table) + error=1; + } + if (!transactional_table) + thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } - if (!log_delayed) - thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } if (transactional_table) error=ha_autocommit_or_rollback(thd,error); @@ -448,6 +740,16 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, if (thd->lock) { mysql_unlock_tables(thd, thd->lock); + /* + Invalidate the table in the query cache if something changed + after unlocking when changes become fisible. + TODO: this is workaround. right way will be move invalidating in + the unlock procedure. 
+ */ + if (lock_type == TL_WRITE_CONCURRENT_INSERT && changed) + { + query_cache_invalidate3(thd, table_list, 1); + } thd->lock=0; } } @@ -455,11 +757,15 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, table->next_number_field=0; thd->count_cuted_fields= CHECK_FIELD_IGNORE; thd->next_insert_id=0; // Reset this if wrongly used + table->auto_increment_field_not_null= FALSE; if (duplic != DUP_ERROR || ignore) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + if (duplic == DUP_REPLACE && + (!table->triggers || !table->triggers->has_delete_triggers())) + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); - /* Reset value of LAST_INSERT_ID if no rows where inserted */ - if (!info.copied && thd->insert_id_used) + /* Reset value of LAST_INSERT_ID if no rows were inserted or touched */ + if (!info.copied && !info.touched && thd->insert_id_used) { thd->insert_id(0); id=0; @@ -468,7 +774,10 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, goto abort; if (values_list.elements == 1 && (!(thd->options & OPTION_WARNINGS) || !thd->cuted_fields)) - send_ok(thd,info.copied+info.deleted+info.updated,id); + { + thd->row_count_func= info.copied+info.deleted+info.updated; + send_ok(thd, (ulong) thd->row_count_func, id); + } else { char buff[160]; @@ -479,10 +788,11 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, else sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields); - ::send_ok(thd,info.copied+info.deleted+info.updated,(ulonglong)id,buff); + thd->row_count_func= info.copied+info.deleted+info.updated; + ::send_ok(thd, (ulong) thd->row_count_func, id, buff); } - table->insert_values=0; - DBUG_RETURN(0); + thd->abort_on_warning= 0; + DBUG_RETURN(FALSE); abort: #ifndef EMBEDDED_LIBRARY @@ -491,8 +801,151 @@ abort: #endif if (!joins_freed) free_underlaid_joins(thd, &thd->lex->select_lex); - table->insert_values=0; - DBUG_RETURN(-1); + thd->abort_on_warning= 0; + DBUG_RETURN(TRUE); +} + + +/* + Additional 
check for insertability for VIEW + + SYNOPSIS + check_view_insertability() + thd - thread handler + view - reference on VIEW + + IMPLEMENTATION + A view is insertable if the folloings are true: + - All columns in the view are columns from a table + - All not used columns in table have a default values + - All field in view are unique (not referring to the same column) + + RETURN + FALSE - OK + view->contain_auto_increment is 1 if and only if the view contains an + auto_increment field + + TRUE - can't be used for insert +*/ + +static bool check_view_insertability(THD * thd, TABLE_LIST *view) +{ + uint num= view->view->select_lex.item_list.elements; + TABLE *table= view->table; + Field_translator *trans_start= view->field_translation, + *trans_end= trans_start + num; + Field_translator *trans; + uint used_fields_buff_size= (table->s->fields + 7) / 8; + uchar *used_fields_buff= (uchar*)thd->alloc(used_fields_buff_size); + MY_BITMAP used_fields; + bool save_set_query_id= thd->set_query_id; + DBUG_ENTER("check_key_in_view"); + + if (!used_fields_buff) + DBUG_RETURN(TRUE); // EOM + + DBUG_ASSERT(view->table != 0 && view->field_translation != 0); + + VOID(bitmap_init(&used_fields, used_fields_buff, used_fields_buff_size * 8, + 0)); + bitmap_clear_all(&used_fields); + + view->contain_auto_increment= 0; + /* + we must not set query_id for fields as they're not + really used in this context + */ + thd->set_query_id= 0; + /* check simplicity and prepare unique test of view */ + for (trans= trans_start; trans != trans_end; trans++) + { + if (!trans->item->fixed && trans->item->fix_fields(thd, &trans->item)) + { + thd->set_query_id= save_set_query_id; + DBUG_RETURN(TRUE); + } + Item_field *field; + /* simple SELECT list entry (field without expression) */ + if (!(field= trans->item->filed_for_view_update())) + { + thd->set_query_id= save_set_query_id; + DBUG_RETURN(TRUE); + } + if (field->field->unireg_check == Field::NEXT_NUMBER) + view->contain_auto_increment= 1; + /* 
prepare unique test */ + /* + remove collation (or other transparent for update function) if we have + it + */ + trans->item= field; + } + thd->set_query_id= save_set_query_id; + /* unique test */ + for (trans= trans_start; trans != trans_end; trans++) + { + /* Thanks to test above, we know that all columns are of type Item_field */ + Item_field *field= (Item_field *)trans->item; + /* check fields belong to table in which we are inserting */ + if (field->field->table == table && + bitmap_fast_test_and_set(&used_fields, field->field->field_index)) + DBUG_RETURN(TRUE); + } + + DBUG_RETURN(FALSE); +} + + +/* + Check if table can be updated + + SYNOPSIS + mysql_prepare_insert_check_table() + thd Thread handle + table_list Table list + fields List of fields to be updated + where Pointer to where clause + select_insert Check is making for SELECT ... INSERT + + RETURN + FALSE ok + TRUE ERROR +*/ + +static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list, + List<Item> &fields, COND **where, + bool select_insert) +{ + bool insert_into_view= (table_list->view != 0); + DBUG_ENTER("mysql_prepare_insert_check_table"); + + /* + first table in list is the one we'll INSERT into, requires INSERT_ACL. + all others require SELECT_ACL only. the ACL requirement below is for + new leaves only anyway (view-constituents), so check for SELECT rather + than INSERT. 
+ */ + + if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context, + &thd->lex->select_lex.top_join_list, + table_list, where, + &thd->lex->select_lex.leaf_tables, + select_insert, INSERT_ACL, SELECT_ACL)) + DBUG_RETURN(TRUE); + + if (insert_into_view && !fields.elements) + { + thd->lex->empty_field_list_on_rset= 1; + if (!table_list->table) + { + my_error(ER_VIEW_NO_INSERT_FIELD_LIST, MYF(0), + table_list->view_db.str, table_list->view_name.str); + DBUG_RETURN(TRUE); + } + DBUG_RETURN(insert_view_fields(thd, &fields, table_list)); + } + + DBUG_RETURN(FALSE); } @@ -501,15 +954,16 @@ abort: SYNOPSIS mysql_prepare_insert() - thd thread handler - table_list global table list (not including first table for - INSERT ... SELECT) - insert_table_list Table we are inserting into (for INSERT ... SELECT) - dup_table_list Tables to be used in ON DUPLICATE KEY - It's either all global tables or only the table we - insert into, depending on if we are using GROUP BY - in the SELECT clause). - values Values to insert. NULL for INSERT ... SELECT + thd Thread handler + table_list Global/local table list + table Table to insert into (can be NULL if table should + be taken from table_list->table) + where Where clause (for insert ... select) + select_insert TRUE if INSERT ... SELECT statement + check_fields TRUE if need to check that all INSERT fields are + given values. + abort_on_warning whether to report if some INSERT field is not + assigned as an error (TRUE) or as a warning (FALSE). TODO (in far future) In cases of: @@ -520,51 +974,130 @@ abort: WARNING You MUST set table->insert_values to 0 after calling this function before releasing the table object. 
- + RETURN VALUE - 0 OK - -1 error (message is not sent to user) + FALSE OK + TRUE error */ -int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *insert_table_list, - TABLE_LIST *dup_table_list, - TABLE *table, - List<Item> &fields, List_item *values, - List<Item> &update_fields, List<Item> &update_values, - enum_duplicates duplic) +bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, + TABLE *table, List<Item> &fields, List_item *values, + List<Item> &update_fields, List<Item> &update_values, + enum_duplicates duplic, + COND **where, bool select_insert, + bool check_fields, bool abort_on_warning) { + SELECT_LEX *select_lex= &thd->lex->select_lex; + Name_resolution_context *context= &select_lex->context; + Name_resolution_context_state ctx_state; + bool insert_into_view= (table_list->view != 0); + bool res= 0; + table_map map= 0; DBUG_ENTER("mysql_prepare_insert"); + DBUG_PRINT("enter", ("table_list 0x%lx, table 0x%lx, view %d", + (ulong)table_list, (ulong)table, + (int)insert_into_view)); + /* INSERT should have a SELECT or VALUES clause */ + DBUG_ASSERT (!select_insert || !values); + + /* + For subqueries in VALUES() we should not see the table in which we are + inserting (for INSERT ... SELECT this is done by changing table_list, + because INSERT ... SELECT share SELECT_LEX it with SELECT. 
+ */ + if (!select_insert) + { + for (SELECT_LEX_UNIT *un= select_lex->first_inner_unit(); + un; + un= un->next_unit()) + { + for (SELECT_LEX *sl= un->first_select(); + sl; + sl= sl->next_select()) + { + sl->context.outer_context= 0; + } + } + } - if (duplic == DUP_UPDATE && !table->insert_values) + if (duplic == DUP_UPDATE) { /* it should be allocated before Item::fix_fields() */ - table->insert_values= - (byte *)alloc_root(thd->mem_root, table->rec_buff_length); - if (!table->insert_values) - DBUG_RETURN(-1); + if (table_list->set_insert_values(thd->mem_root)) + DBUG_RETURN(TRUE); } - if (setup_tables(insert_table_list)) - DBUG_RETURN(-1); + + if (mysql_prepare_insert_check_table(thd, table_list, fields, where, + select_insert)) + DBUG_RETURN(TRUE); + + + /* Prepare the fields in the statement. */ if (values) { - if (check_insert_fields(thd, table, fields, *values) || - setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0) || - (duplic == DUP_UPDATE && - (check_update_fields(thd, table, insert_table_list, update_fields) || - setup_fields(thd, 0, dup_table_list, update_values, 1, 0, 0)))) - DBUG_RETURN(-1); - if (find_real_table_in_list(table_list->next, table_list->db, - table_list->real_name)) + /* if we have INSERT ... VALUES () we cannot have a GROUP BY clause */ + DBUG_ASSERT (!select_lex->group_list.elements); + + /* Save the state of the current name resolution context. */ + ctx_state.save_state(context, table_list); + + /* + Perform name resolution only in the first table - 'table_list', + which is the table that is inserted into. + */ + table_list->next_local= 0; + context->resolve_in_table_list_only(table_list); + + res= check_insert_fields(thd, context->table_list, fields, *values, + !insert_into_view, &map) || + setup_fields(thd, 0, *values, 0, 0, 0); + + if (!res && check_fields) + { + bool saved_abort_on_warning= thd->abort_on_warning; + thd->abort_on_warning= abort_on_warning; + res= check_that_all_fields_are_given_values(thd, + table ? 
table : + context->table_list->table, + context->table_list); + thd->abort_on_warning= saved_abort_on_warning; + } + + if (!res && duplic == DUP_UPDATE) + { + select_lex->no_wrap_view_item= TRUE; + res= check_update_fields(thd, context->table_list, update_fields, &map); + select_lex->no_wrap_view_item= FALSE; + } + + /* Restore the current context. */ + ctx_state.restore_state(context, table_list); + + if (!res) + res= setup_fields(thd, 0, update_values, 1, 0, 0); + } + + if (res) + DBUG_RETURN(res); + + if (!table) + table= table_list->table; + + if (!select_insert) + { + Item *fake_conds= 0; + TABLE_LIST *duplicate; + if ((duplicate= unique_table(thd, table_list, table_list->next_global, 1))) { - my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); - DBUG_RETURN(-1); + update_non_unique_table_error(table_list, "INSERT", duplicate); + DBUG_RETURN(TRUE); } + select_lex->fix_prepare_information(thd, &fake_conds, &fake_conds); + select_lex->first_execution= 0; } if (duplic == DUP_UPDATE || duplic == DUP_REPLACE) table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY); - - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -572,7 +1105,7 @@ int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, static int last_uniq_key(TABLE *table,uint keynr) { - while (++keynr < table->keys) + while (++keynr < table->s->keys) if (table->key_info[keynr].flags & HA_NOSAME) return 0; return 1; @@ -580,13 +1113,35 @@ static int last_uniq_key(TABLE *table,uint keynr) /* - Write a record to table with optional deleting of conflicting records + Write a record to table with optional deleting of conflicting records, + invoke proper triggers if needed. + + SYNOPSIS + write_record() + thd - thread context + table - table to which record should be written + info - COPY_INFO structure describing handling of duplicates + and which is used for counting number of records inserted + and deleted. + + NOTE + Once this record will be written to table after insert trigger will + be invoked. 
If instead of inserting new record we will update old one + then both on update triggers will work instead. Similarly both on + delete triggers will be invoked if we will delete conflicting records. + + Sets thd->no_trans_update if table which is updated didn't have + transactions. + + RETURN VALUE + 0 - success + non-0 - error */ -int write_record(TABLE *table,COPY_INFO *info) +int write_record(THD *thd, TABLE *table,COPY_INFO *info) { - int error; + int error, trg_error= 0; char *key=0; DBUG_ENTER("write_record"); @@ -596,9 +1151,9 @@ int write_record(TABLE *table,COPY_INFO *info) { while ((error=table->file->write_row(table->record[0]))) { + uint key_nr; if (error != HA_WRITE_SKIP) goto err; - uint key_nr; if ((int) (key_nr = table->file->get_dup_key(error)) < 0) { error=HA_WRITE_SKIP; /* Database can't find key */ @@ -611,7 +1166,7 @@ int write_record(TABLE *table,COPY_INFO *info) */ if (info->handle_duplicates == DUP_REPLACE && table->next_number_field && - key_nr == table->next_number_index && + key_nr == table->s->next_number_index && table->file->auto_increment_column_changed) goto err; if (table->file->table_flags() & HA_DUPP_POS) @@ -629,14 +1184,14 @@ int write_record(TABLE *table,COPY_INFO *info) if (!key) { - if (!(key=(char*) my_safe_alloca(table->max_unique_length, + if (!(key=(char*) my_safe_alloca(table->s->max_unique_length, MAX_KEY_LENGTH))) { error=ENOMEM; goto err; } } - key_copy((byte*) key,table,key_nr,0); + key_copy((byte*) key,table->record[0],table->key_info+key_nr,0); if ((error=(table->file->index_read_idx(table->record[1],key_nr, (byte*) key, table->key_info[key_nr]. @@ -646,24 +1201,58 @@ int write_record(TABLE *table,COPY_INFO *info) } if (info->handle_duplicates == DUP_UPDATE) { - /* we don't check for other UNIQUE keys - the first row - that matches, is updated. If update causes a conflict again, - an error is returned + int res= 0; + /* + We don't check for other UNIQUE keys - the first row + that matches, is updated. 
If update causes a conflict again, + an error is returned */ DBUG_ASSERT(table->insert_values != NULL); store_record(table,insert_values); restore_record(table,record[1]); - DBUG_ASSERT(info->update_fields->elements==info->update_values->elements); - if (fill_record(*info->update_fields, *info->update_values, 0)) - goto err; + DBUG_ASSERT(info->update_fields->elements == + info->update_values->elements); + if (fill_record_n_invoke_before_triggers(thd, *info->update_fields, + *info->update_values, 0, + table->triggers, + TRG_EVENT_UPDATE)) + goto before_trg_err; + + /* CHECK OPTION for VIEW ... ON DUPLICATE KEY UPDATE ... */ + if (info->view && + (res= info->view->view_check_option(current_thd, info->ignore)) == + VIEW_CHECK_SKIP) + goto ok_or_after_trg_err; + if (res == VIEW_CHECK_ERROR) + goto before_trg_err; + + table->file->restore_auto_increment(); if ((error=table->file->update_row(table->record[1],table->record[0]))) - { - if ((error == HA_ERR_FOUND_DUPP_KEY) && info->ignore) - break; + { + if ((error == HA_ERR_FOUND_DUPP_KEY) && info->ignore) + { + goto ok_or_after_trg_err; + } goto err; - } - info->updated++; - break; + } + + if (table->next_number_field) + table->file->adjust_next_insert_id_after_explicit_value( + table->next_number_field->val_int()); + info->touched++; + + if ((table->file->table_flags() & HA_PARTIAL_COLUMN_READ) || + compare_record(table, thd->query_id)) + { + info->updated++; + trg_error= (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_AFTER, + TRUE)); + info->copied++; + } + + goto ok_or_after_trg_err; } else /* DUP_REPLACE */ { @@ -674,69 +1263,128 @@ int write_record(TABLE *table,COPY_INFO *info) to convert the latter operation internally to an UPDATE. We also should not perform this conversion if we have timestamp field with ON UPDATE which is different from DEFAULT. 
+ Another case when conversion should not be performed is when + we have ON DELETE trigger on table so user may notice that + we cheat here. Note that it is ok to do such conversion for + tables which have ON UPDATE but have no ON DELETE triggers, + we just should not expose this fact to users by invoking + ON UPDATE triggers. */ if (last_uniq_key(table,key_nr) && !table->file->referenced_by_foreign_key() && (table->timestamp_field_type == TIMESTAMP_NO_AUTO_SET || - table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH)) + table->timestamp_field_type == TIMESTAMP_AUTO_SET_ON_BOTH) && + (!table->triggers || !table->triggers->has_delete_triggers())) { if ((error=table->file->update_row(table->record[1], table->record[0]))) goto err; info->deleted++; - break; /* Update logfile and count */ + /* + Since we pretend that we have done insert we should call + its after triggers. + */ + goto after_trg_n_copied_inc; + } + else + { + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_BEFORE, TRUE)) + goto before_trg_err; + if ((error=table->file->delete_row(table->record[1]))) + goto err; + info->deleted++; + if (!table->file->has_transactions()) + thd->no_trans_update= 1; + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_AFTER, TRUE)) + { + trg_error= 1; + goto ok_or_after_trg_err; + } + /* Let us attempt do write_row() once more */ } - else if ((error=table->file->delete_row(table->record[1]))) - goto err; - info->deleted++; } } - info->copied++; } else if ((error=table->file->write_row(table->record[0]))) { if (!info->ignore || (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)) goto err; + table->file->restore_auto_increment(); + goto ok_or_after_trg_err; } - else - info->copied++; + +after_trg_n_copied_inc: + info->copied++; + trg_error= (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_INSERT, + TRG_ACTION_AFTER, TRUE)); + 
+ok_or_after_trg_err: if (key) - my_safe_afree(key,table->max_unique_length,MAX_KEY_LENGTH); - DBUG_RETURN(0); + my_safe_afree(key,table->s->max_unique_length,MAX_KEY_LENGTH); + if (!table->file->has_transactions()) + thd->no_trans_update= 1; + DBUG_RETURN(trg_error); err: - if (key) - my_safe_afree(key,table->max_unique_length,MAX_KEY_LENGTH); info->last_errno= error; + /* current_select is NULL if this is a delayed insert */ + if (thd->lex->current_select) + thd->lex->current_select->no_error= 0; // Give error table->file->print_error(error,MYF(0)); + +before_trg_err: + table->file->restore_auto_increment(); + if (key) + my_safe_afree(key, table->s->max_unique_length, MAX_KEY_LENGTH); DBUG_RETURN(1); } /****************************************************************************** Check that all fields with arn't null_fields are used - If DONT_USE_DEFAULT_FIELDS isn't defined use default value for not set - fields. ******************************************************************************/ -static int check_null_fields(THD *thd __attribute__((unused)), - TABLE *entry __attribute__((unused))) +int check_that_all_fields_are_given_values(THD *thd, TABLE *entry, + TABLE_LIST *table_list) { -#ifdef DONT_USE_DEFAULT_FIELDS + int err= 0; for (Field **field=entry->field ; *field ; field++) { - if ((*field)->query_id != thd->query_id && !(*field)->maybe_null() && - *field != entry->timestamp_field && - *field != entry->next_number_field) + if ((*field)->query_id != thd->query_id && + ((*field)->flags & NO_DEFAULT_VALUE_FLAG) && + ((*field)->real_type() != FIELD_TYPE_ENUM)) { - my_printf_error(ER_BAD_NULL_ERROR, ER(ER_BAD_NULL_ERROR),MYF(0), - (*field)->field_name); - return 1; + bool view= FALSE; + if (table_list) + { + table_list= table_list->top_table(); + view= test(table_list->view); + } + if (view) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_NO_DEFAULT_FOR_VIEW_FIELD, + ER(ER_NO_DEFAULT_FOR_VIEW_FIELD), + table_list->view_db.str, + 
table_list->view_name.str); + } + else + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_NO_DEFAULT_FOR_FIELD, + ER(ER_NO_DEFAULT_FOR_FIELD), + (*field)->field_name); + } + err= 1; } } -#endif - return 0; + return thd->abort_on_warning ? err : 0; } /***************************************************************************** @@ -751,14 +1399,16 @@ public: char *record,*query; enum_duplicates dup; time_t start_time; - bool query_start_used,last_insert_id_used,insert_id_used, ignore; - int log_query; + bool query_start_used,last_insert_id_used,insert_id_used, ignore, log_query; ulonglong last_insert_id; + ulonglong next_insert_id; + ulong auto_increment_increment; + ulong auto_increment_offset; timestamp_auto_set_type timestamp_field_type; uint query_length; - delayed_row(enum_duplicates dup_arg, bool ignore_arg, int log_query_arg) - :record(0),query(0),dup(dup_arg),ignore(ignore_arg),log_query(log_query_arg) {} + delayed_row(enum_duplicates dup_arg, bool ignore_arg, bool log_query_arg) + :record(0), query(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg) {} ~delayed_row() { x_free(record); @@ -785,8 +1435,8 @@ public: table(0),tables_in_use(0),stacked_inserts(0), status(0), dead(0), group_count(0) { - thd.user=thd.priv_user=(char*) delayed_user; - thd.host=(char*) my_localhost; + thd.security_ctx->user=thd.security_ctx->priv_user=(char*) delayed_user; + thd.security_ctx->host=(char*) my_localhost; thd.current_tablenr=0; thd.version=refresh_version; thd.command=COM_DELAYED_INSERT; @@ -796,7 +1446,7 @@ public: bzero((char*) &thd.net, sizeof(thd.net)); // Safety bzero((char*) &table_list, sizeof(table_list)); // Safety thd.system_thread= SYSTEM_THREAD_DELAYED_INSERT; - thd.host_or_ip= ""; + thd.security_ctx->host_or_ip= ""; bzero((char*) &info,sizeof(info)); pthread_mutex_init(&mutex,MY_MUTEX_INIT_FAST); pthread_cond_init(&cond,NULL); @@ -819,7 +1469,7 @@ public: pthread_cond_destroy(&cond_client); thd.unlink(); // Must be unlinked under 
lock x_free(thd.query); - thd.user=thd.host=0; + thd.security_ctx->user= thd.security_ctx->host=0; thread_count--; delayed_insert_threads--; VOID(pthread_mutex_unlock(&LOCK_thread_count)); @@ -865,7 +1515,7 @@ delayed_insert *find_handler(THD *thd, TABLE_LIST *table_list) while ((tmp=it++)) { if (!strcmp(tmp->thd.db,table_list->db) && - !strcmp(table_list->real_name,tmp->table->real_name)) + !strcmp(table_list->table_name,tmp->table->s->table_name)) { tmp->lock(); break; @@ -883,8 +1533,8 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) TABLE *table; DBUG_ENTER("delayed_get_table"); - if (!table_list->db) - table_list->db=thd->db; + /* Must be set in the parser */ + DBUG_ASSERT(table_list->db); /* Find the thread which handles this table. */ if (!(tmp=find_handler(thd,table_list))) @@ -903,18 +1553,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) */ if (! (tmp= find_handler(thd, table_list))) { - /* - Avoid that a global read lock steps in while we are creating the - new thread. It would block trying to open the table. Hence, the - DI thread and this thread would wait until after the global - readlock is gone. Since the insert thread needs to wait for a - global read lock anyway, we do it right now. Note that - wait_if_global_read_lock() sets a protection against a new - global read lock when it succeeds. This needs to be released by - start_waiting_global_read_lock(). 
- */ - if (wait_if_global_read_lock(thd, 0, 1)) - goto err; if (!(tmp=new delayed_insert())) { my_error(ER_OUTOFMEMORY,MYF(0),sizeof(delayed_insert)); @@ -923,16 +1561,16 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) pthread_mutex_lock(&LOCK_thread_count); thread_count++; pthread_mutex_unlock(&LOCK_thread_count); - if (!(tmp->thd.db=my_strdup(table_list->db,MYF(MY_WME))) || - !(tmp->thd.query=my_strdup(table_list->real_name,MYF(MY_WME)))) + tmp->thd.set_db(table_list->db, strlen(table_list->db)); + tmp->thd.query= my_strdup(table_list->table_name,MYF(MY_WME)); + if (tmp->thd.db == NULL || tmp->thd.query == NULL) { delete tmp; - my_error(ER_OUT_OF_RESOURCES,MYF(0)); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); goto err1; } tmp->table_list= *table_list; // Needed to open table - tmp->table_list.db= tmp->thd.db; - tmp->table_list.alias= tmp->table_list.real_name=tmp->thd.query; + tmp->table_list.alias= tmp->table_list.table_name= tmp->thd.query; tmp->lock(); pthread_mutex_lock(&tmp->mutex); if ((error=pthread_create(&tmp->thd.real_id,&connection_attrib, @@ -944,7 +1582,7 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) pthread_mutex_unlock(&tmp->mutex); tmp->unlock(); delete tmp; - net_printf(thd,ER_CANT_CREATE_THREAD,error); + my_error(ER_CANT_CREATE_THREAD, MYF(0), error); goto err1; } @@ -955,11 +1593,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) pthread_cond_wait(&tmp->cond_client,&tmp->mutex); } pthread_mutex_unlock(&tmp->mutex); - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. 
- */ - start_waiting_global_read_lock(thd); thd->proc_info="got old table"; if (tmp->thd.killed) { @@ -983,7 +1616,7 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) } pthread_mutex_lock(&tmp->mutex); - table=tmp->get_local_table(thd); + table= tmp->get_local_table(thd); pthread_mutex_unlock(&tmp->mutex); if (table) thd->di=tmp; @@ -995,11 +1628,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) err1: thd->fatal_error(); - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. - */ - start_waiting_global_read_lock(thd); err: pthread_mutex_unlock(&LOCK_delayed_create); DBUG_RETURN(0); // Continue with normal insert @@ -1018,6 +1646,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) my_ptrdiff_t adjust_ptrs; Field **field,**org_field, *found_next_number_field; TABLE *copy; + DBUG_ENTER("delayed_insert::get_local_table"); /* First request insert thread to get a lock */ status=1; @@ -1041,29 +1670,47 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) } } + /* + Allocate memory for the TABLE object, the field pointers array, and + one record buffer of reclength size. Normally a table has three + record buffers of rec_buff_length size, which includes alignment + bytes. Since the table copy is used for creating one record only, + the other record buffers and alignment are unnecessary. + */ client_thd->proc_info="allocating local table"; copy= (TABLE*) client_thd->alloc(sizeof(*copy)+ - (table->fields+1)*sizeof(Field**)+ - table->reclength); + (table->s->fields+1)*sizeof(Field**)+ + table->s->reclength); if (!copy) goto error; + + /* Copy the TABLE object. 
*/ *copy= *table; - bzero((char*) ©->name_hash,sizeof(copy->name_hash)); // No name hashing + copy->s= ©->share_not_to_be_used; + // No name hashing + bzero((char*) ©->s->name_hash,sizeof(copy->s->name_hash)); /* We don't need to change the file handler here */ - field=copy->field=(Field**) (copy+1); - copy->record[0]=(byte*) (field+table->fields+1); - memcpy((char*) copy->record[0],(char*) table->record[0],table->reclength); + /* Assign the pointers for the field pointers array and the record. */ + field= copy->field= (Field**) (copy + 1); + copy->record[0]= (byte*) (field + table->s->fields + 1); + memcpy((char*) copy->record[0], (char*) table->record[0], + table->s->reclength); - /* Make a copy of all fields */ - - adjust_ptrs=PTR_BYTE_DIFF(copy->record[0],table->record[0]); + /* + Make a copy of all fields. + The copied fields need to point into the copied record. This is done + by copying the field objects with their old pointer values and then + "move" the pointers by the distance between the original and copied + records. That way we preserve the relative positions in the records. 
+ */ + adjust_ptrs= PTR_BYTE_DIFF(copy->record[0], table->record[0]); - found_next_number_field=table->found_next_number_field; - for (org_field=table->field ; *org_field ; org_field++,field++) + found_next_number_field= table->found_next_number_field; + for (org_field= table->field; *org_field; org_field++, field++) { - if (!(*field= (*org_field)->new_field(client_thd->mem_root,copy))) - return 0; + if (!(*field= (*org_field)->new_field(client_thd->mem_root, copy, 1))) + DBUG_RETURN(0); (*field)->orig_table= copy; // Remove connection (*field)->move_field(adjust_ptrs); // Point at copy->record[0] if (*org_field == found_next_number_field) @@ -1076,7 +1723,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) { /* Restore offset as this may have been reset in handle_inserts */ copy->timestamp_field= - (Field_timestamp*) copy->field[table->timestamp_field_offset]; + (Field_timestamp*) copy->field[table->s->timestamp_field_offset]; copy->timestamp_field->unireg_check= table->timestamp_field->unireg_check; copy->timestamp_field_type= copy->timestamp_field->get_auto_set_type(); } @@ -1086,22 +1733,25 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) /* Adjust in_use for pointing to client thread */ copy->in_use= client_thd; - - return copy; + + /* Adjust lock_count. This table object is not part of a lock. 
*/ + copy->lock_count= 0; + + DBUG_RETURN(copy); /* Got fatal error */ error: tables_in_use--; status=1; pthread_cond_signal(&cond); // Inform thread about abort - return 0; + DBUG_RETURN(0); } /* Put a question in queue */ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool ignore, - char *query, uint query_length, int log_on) + char *query, uint query_length, bool log_on) { delayed_row *row=0; delayed_insert *di=thd->di; @@ -1118,13 +1768,13 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool igno if (!query) query_length=0; - if (!(row->record= (char*) my_malloc(table->reclength+query_length+1, + if (!(row->record= (char*) my_malloc(table->s->reclength+query_length+1, MYF(MY_WME)))) goto err; - memcpy(row->record,table->record[0],table->reclength); + memcpy(row->record, table->record[0], table->s->reclength); if (query_length) { - row->query=row->record+table->reclength; + row->query= row->record+table->s->reclength; memcpy(row->query,query,query_length+1); } row->query_length= query_length; @@ -1135,10 +1785,26 @@ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool igno row->last_insert_id= thd->last_insert_id; row->timestamp_field_type= table->timestamp_field_type; + /* The session variable settings can always be copied. */ + row->auto_increment_increment= thd->variables.auto_increment_increment; + row->auto_increment_offset= thd->variables.auto_increment_offset; + /* + Next insert id must be set for the first value in a multi-row insert + only. So clear it after the first use. Assume a multi-row insert. + Since the user thread doesn't really execute the insert, + thd->next_insert_id is left untouched between the rows. If we copy + the same insert id to every row of the multi-row insert, the delayed + insert thread would copy this before inserting every row. Thus it + tries to insert all rows with the same insert id. This fails on the + unique constraint. 
So just the first row would be really inserted. + */ + row->next_insert_id= thd->next_insert_id; + thd->next_insert_id= 0; + di->rows.push_back(row); di->stacked_inserts++; di->status=1; - if (table->blob_fields) + if (table->s->blob_fields) unlink_blobs(table); pthread_cond_signal(&di->cond); @@ -1179,7 +1845,7 @@ void kill_delayed_threads(void) delayed_insert *tmp; while ((tmp=it++)) { - tmp->thd.killed=1; + tmp->thd.killed= THD::KILL_CONNECTION; if (tmp->thd.mysys_var) { pthread_mutex_lock(&tmp->thd.mysys_var->mutex); @@ -1206,7 +1872,7 @@ void kill_delayed_threads(void) * Create a new delayed insert thread */ -extern "C" pthread_handler_decl(handle_delayed_insert,arg) +pthread_handler_t handle_delayed_insert(void *arg) { delayed_insert *di=(delayed_insert*) arg; THD *thd= &di->thd; @@ -1217,7 +1883,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) thd->thread_id=thread_id++; thd->end_time(); threads.append(thd); - thd->killed=abort_loop; + thd->killed=abort_loop ? THD::KILL_CONNECTION : THD::NOT_KILLED; pthread_mutex_unlock(&LOCK_thread_count); /* @@ -1238,6 +1904,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) #endif DBUG_ENTER("handle_delayed_insert"); + thd->thread_stack= (char*) &thd; if (init_thr_lock() || thd->store_globals()) { thd->fatal_error(); @@ -1260,7 +1927,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) if (!(di->table->file->table_flags() & HA_CAN_INSERT_DELAYED)) { thd->fatal_error(); - my_error(ER_ILLEGAL_HA, MYF(0), di->table_list.real_name); + my_error(ER_ILLEGAL_HA, MYF(0), di->table_list.table_name); goto end; } di->table->copy_blobs=1; @@ -1278,7 +1945,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) for (;;) { - if (thd->killed) + if (thd->killed == THD::KILL_CONNECTION) { uint lock_count; /* @@ -1324,9 +1991,9 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) #endif if (thd->killed || di->status) break; - if (error == ETIME || error == ETIMEDOUT) + if (error == 
ETIMEDOUT || error == ETIME) { - thd->killed=1; + thd->killed= THD::KILL_CONNECTION; break; } } @@ -1342,6 +2009,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) if (di->tables_in_use && ! thd->lock) { + bool not_used; /* Request for new delayed insert. Lock the table, but avoid to be blocked by a global read lock. @@ -1353,10 +2021,12 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) inserts are done. */ if (! (thd->lock= mysql_lock_tables(thd, &di->table, 1, - MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK))) + MYSQL_LOCK_IGNORE_GLOBAL_READ_LOCK, + ¬_used))) { - di->dead= 1; // Some fatal error - thd->killed= 1; + /* Fatal error */ + di->dead= 1; + thd->killed= THD::KILL_CONNECTION; } pthread_cond_broadcast(&di->cond_client); } @@ -1364,8 +2034,9 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) { if (di->handle_inserts()) { - di->dead= 1; // Some fatal error - thd->killed= 1; + /* Some fatal error */ + di->dead= 1; + thd->killed= THD::KILL_CONNECTION; } } di->status=0; @@ -1395,7 +2066,7 @@ end: close_thread_tables(thd); // Free the table di->table=0; di->dead= 1; // If error - thd->killed= 1; + thd->killed= THD::KILL_CONNECTION; // If error pthread_cond_broadcast(&di->cond_client); // Safety pthread_mutex_unlock(&di->mutex); @@ -1443,7 +2114,8 @@ bool delayed_insert::handle_inserts(void) { int error; ulong max_rows; - bool using_ignore=0, using_bin_log=mysql_bin_log.is_open(); + bool using_ignore= 0, using_opt_replace= 0; + bool using_bin_log= mysql_bin_log.is_open(); delayed_row *row; DBUG_ENTER("handle_inserts"); @@ -1456,15 +2128,15 @@ bool delayed_insert::handle_inserts(void) if (thr_upgrade_write_delay_lock(*thd.lock->locks)) { /* This can only happen if thread is killed by shutdown */ - sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->real_name); + sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name); goto err; } thd.proc_info="insert"; max_rows= delayed_insert_limit; - if (thd.killed || table->version != 
refresh_version) + if (thd.killed || table->s->version != refresh_version) { - thd.killed=1; + thd.killed= THD::KILL_CONNECTION; max_rows= ~(ulong)0; // Do as much as possible } @@ -1476,11 +2148,19 @@ bool delayed_insert::handle_inserts(void) if (!using_bin_log) table->file->extra(HA_EXTRA_WRITE_CACHE); pthread_mutex_lock(&mutex); + + /* Reset auto-increment cacheing */ + if (thd.clear_next_insert_id) + { + thd.next_insert_id= 0; + thd.clear_next_insert_id= 0; + } + while ((row=rows.get())) { stacked_inserts--; pthread_mutex_unlock(&mutex); - memcpy(table->record[0],row->record,table->reclength); + memcpy(table->record[0],row->record,table->s->reclength); thd.start_time=row->start_time; thd.query_start_used=row->query_start_used; @@ -1489,6 +2169,14 @@ bool delayed_insert::handle_inserts(void) thd.insert_id_used=row->insert_id_used; table->timestamp_field_type= row->timestamp_field_type; + /* The session variable settings can always be copied. */ + thd.variables.auto_increment_increment= row->auto_increment_increment; + thd.variables.auto_increment_offset= row->auto_increment_offset; + /* Next insert id must be used only if non-zero. */ + if (row->next_insert_id) + thd.next_insert_id= row->next_insert_id; + DBUG_PRINT("loop", ("next_insert_id: %lu", (ulong) thd.next_insert_id)); + info.ignore= row->ignore; info.handle_duplicates= row->dup; if (info.ignore || @@ -1497,29 +2185,50 @@ bool delayed_insert::handle_inserts(void) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); using_ignore=1; } + if (info.handle_duplicates == DUP_REPLACE && + (!table->triggers || + !table->triggers->has_delete_triggers())) + { + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); + using_opt_replace= 1; + } thd.clear_error(); // reset error for binlog - if (write_record(table,&info)) + if (write_record(&thd, table, &info)) { info.error_count++; // Ignore errors thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status); row->log_query = 0; + /* + We must reset next_insert_id. 
Otherwise all following rows may + become duplicates. If write_record() failed on a duplicate and + next_insert_id would be left unchanged, the next rows would also + be tried with the same insert id and would fail. Since the end + of a multi-row statement is unknown here, all following rows in + the queue would be dropped, regardless which thread added them. + After the queue is used up, next_insert_id is cleared and the + next run will succeed. This could even happen if these come from + the same multi-row statement as the current queue contents. That + way it would look somewhat random which rows are rejected after + a duplicate. + */ + thd.next_insert_id= 0; } if (using_ignore) { using_ignore=0; table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); } - if (row->query) + if (using_opt_replace) { - if (row->log_query & DELAYED_LOG_UPDATE) - mysql_update_log.write(&thd,row->query, row->query_length); - if (row->log_query & DELAYED_LOG_BIN && using_bin_log) - { - Query_log_event qinfo(&thd, row->query, row->query_length,0, FALSE); - mysql_bin_log.write(&qinfo); - } + using_opt_replace= 0; + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); + } + if (row->query && row->log_query && using_bin_log) + { + Query_log_event qinfo(&thd, row->query, row->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); } - if (table->blob_fields) + if (table->s->blob_fields) free_delayed_insert_blobs(table); thread_safe_sub(delayed_rows_in_use,1,&LOCK_delayed_status); thread_safe_increment(delayed_insert_writes,&LOCK_delayed_status); @@ -1533,7 +2242,7 @@ bool delayed_insert::handle_inserts(void) on this table until all entries has been processed */ if (group_count++ >= max_rows && (row= rows.head()) && - (!(row->log_query & DELAYED_LOG_BIN && using_bin_log) || + (!(row->log_query & using_bin_log) || row->query)) { group_count=0; @@ -1548,13 +2257,14 @@ bool delayed_insert::handle_inserts(void) /* This should never happen */ table->file->print_error(error,MYF(0)); 
sql_print_error("%s",thd.net.last_error); + DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed in loop")); goto err; } query_cache_invalidate3(&thd, table, 1); if (thr_reschedule_write_lock(*thd.lock->locks)) { /* This should never happen */ - sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->real_name); + sql_print_error(ER(ER_DELAYED_CANT_CHANGE_LOCK),table->s->table_name); } if (!using_bin_log) table->file->extra(HA_EXTRA_WRITE_CACHE); @@ -1573,6 +2283,7 @@ bool delayed_insert::handle_inserts(void) { // This shouldn't happen table->file->print_error(error,MYF(0)); sql_print_error("%s",thd.net.last_error); + DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed after loop")); goto err; } query_cache_invalidate3(&thd, table, 1); @@ -1580,13 +2291,16 @@ bool delayed_insert::handle_inserts(void) DBUG_RETURN(0); err: + DBUG_EXECUTE("error", max_rows= 0;); /* Remove all not used rows */ while ((row=rows.get())) { delete row; thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status); stacked_inserts--; + DBUG_EXECUTE("error", max_rows++;); } + DBUG_PRINT("error", ("dropped %lu rows after an error", max_rows)); thread_safe_increment(delayed_insert_errors, &LOCK_delayed_status); pthread_mutex_lock(&mutex); DBUG_RETURN(1); @@ -1597,13 +2311,84 @@ bool delayed_insert::handle_inserts(void) Store records in INSERT ... 
SELECT * ***************************************************************************/ + +/* + make insert specific preparation and checks after opening tables + + SYNOPSIS + mysql_insert_select_prepare() + thd thread handler + + RETURN + FALSE OK + TRUE Error +*/ + +bool mysql_insert_select_prepare(THD *thd) +{ + LEX *lex= thd->lex; + SELECT_LEX *select_lex= &lex->select_lex; + TABLE_LIST *first_select_leaf_table; + DBUG_ENTER("mysql_insert_select_prepare"); + + /* + SELECT_LEX do not belong to INSERT statement, so we can't add WHERE + clause if table is VIEW + */ + + if (mysql_prepare_insert(thd, lex->query_tables, + lex->query_tables->table, lex->field_list, 0, + lex->update_list, lex->value_list, + lex->duplicates, + &select_lex->where, TRUE, FALSE, FALSE)) + DBUG_RETURN(TRUE); + + /* + exclude first table from leaf tables list, because it belong to + INSERT + */ + DBUG_ASSERT(select_lex->leaf_tables != 0); + lex->leaf_tables_insert= select_lex->leaf_tables; + /* skip all leaf tables belonged to view where we are insert */ + for (first_select_leaf_table= select_lex->leaf_tables->next_leaf; + first_select_leaf_table && + first_select_leaf_table->belong_to_view && + first_select_leaf_table->belong_to_view == + lex->leaf_tables_insert->belong_to_view; + first_select_leaf_table= first_select_leaf_table->next_leaf) + {} + select_lex->leaf_tables= first_select_leaf_table; + DBUG_RETURN(FALSE); +} + + +select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par, + List<Item> *fields_par, + List<Item> *update_fields, + List<Item> *update_values, + enum_duplicates duplic, + bool ignore_check_option_errors) + :table_list(table_list_par), table(table_par), fields(fields_par), + last_insert_id(0), + insert_into_view(table_list_par && table_list_par->view != 0) +{ + bzero((char*) &info,sizeof(info)); + info.handle_duplicates= duplic; + info.ignore= ignore_check_option_errors; + info.update_fields= update_fields; + info.update_values= update_values; + if 
(table_list_par) + info.view= (table_list_par->view ? table_list_par : 0); +} + + int select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) { - int res; LEX *lex= thd->lex; + int res; + table_map map= 0; SELECT_LEX *lex_current_select_save= lex->current_select; - bool lex_select_no_error= lex->select_lex.no_error; DBUG_ENTER("select_insert::prepare"); unit= u; @@ -1611,29 +2396,166 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) Since table in which we are going to insert is added to the first select, LEX::current_select should point to the first select while we are fixing fields from insert list. - Since these checks may cause the query to fail, we don't want the - error messages to be converted into warnings, must force no_error=0 */ lex->current_select= &lex->select_lex; - lex->select_lex.no_error= 0; - res= - check_insert_fields(thd, table, *fields, values) || - setup_fields(thd, 0, insert_table_list, values, 0, 0, 0) || - (info.handle_duplicates == DUP_UPDATE && - (check_update_fields(thd, table, insert_table_list, *info.update_fields) || - setup_fields(thd, 0, dup_table_list, *info.update_values, 1, 0, 0))); + res= check_insert_fields(thd, table_list, *fields, values, + !insert_into_view, &map) || + setup_fields(thd, 0, values, 0, 0, 0); + + if (!res && fields->elements) + { + bool saved_abort_on_warning= thd->abort_on_warning; + thd->abort_on_warning= !info.ignore && (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)); + res= check_that_all_fields_are_given_values(thd, table_list->table, + table_list); + thd->abort_on_warning= saved_abort_on_warning; + } + + if (info.handle_duplicates == DUP_UPDATE && !res) + { + Name_resolution_context *context= &lex->select_lex.context; + Name_resolution_context_state ctx_state; + + /* Save the state of the current name resolution context. */ + ctx_state.save_state(context, table_list); + + /* Perform name resolution only in the first table - 'table_list'. 
*/ + table_list->next_local= 0; + context->resolve_in_table_list_only(table_list); + + lex->select_lex.no_wrap_view_item= TRUE; + res= res || check_update_fields(thd, context->table_list, + *info.update_fields, &map); + lex->select_lex.no_wrap_view_item= FALSE; + /* + When we are not using GROUP BY and there are no ungrouped aggregate functions + we can refer to other tables in the ON DUPLICATE KEY part. + We use next_name_resolution_table descructively, so check it first (views?) + */ + DBUG_ASSERT (!table_list->next_name_resolution_table); + if (lex->select_lex.group_list.elements == 0 && + !lex->select_lex.with_sum_func) + /* + We must make a single context out of the two separate name resolution contexts : + the INSERT table and the tables in the SELECT part of INSERT ... SELECT. + To do that we must concatenate the two lists + */ + table_list->next_name_resolution_table= ctx_state.get_first_name_resolution_table(); + + res= res || setup_fields(thd, 0, *info.update_values, 1, 0, 0); + if (!res) + { + /* + Traverse the update values list and substitute fields from the + select for references (Item_ref objects) to them. This is done in + order to get correct values from those fields when the select + employs a temporary table. + */ + List_iterator<Item> li(*info.update_values); + Item *item; + + while ((item= li++)) + { + item->transform(&Item::update_value_transformer, + (byte*)lex->current_select); + } + } + /* Restore the current context. 
*/ + ctx_state.restore_state(context, table_list); + } + lex->current_select= lex_current_select_save; - lex->select_lex.no_error= lex_select_no_error; if (res) DBUG_RETURN(1); + /* + if it is INSERT into join view then check_insert_fields already found + real table for insert + */ + table= table_list->table; - restore_record(table,default_values); // Get empty record + /* + Is table which we are changing used somewhere in other parts of + query + */ + if (!(lex->current_select->options & OPTION_BUFFER_RESULT) && + unique_table(thd, table_list, table_list->next_global, 0)) + { + /* Using same table for INSERT and SELECT */ + lex->current_select->options|= OPTION_BUFFER_RESULT; + lex->current_select->join->select_options|= OPTION_BUFFER_RESULT; + } + else if (!thd->prelocked_mode) + { + /* + We must not yet prepare the result table if it is the same as one of the + source tables (INSERT SELECT). The preparation may disable + indexes on the result table, which may be used during the select, if it + is the same table (Bug #6034). Do the preparation after the select phase + in select_insert::prepare2(). + We won't start bulk inserts at all if this statement uses functions or + should invoke triggers since they may access to the same table too. 
+ */ + table->file->start_bulk_insert((ha_rows) 0); + } + restore_record(table,s->default_values); // Get empty record table->next_number_field=table->found_next_number_field; + +#ifdef HAVE_REPLICATION + if (thd->slave_thread && + (info.handle_duplicates == DUP_UPDATE) && + (table->next_number_field != NULL) && + rpl_master_has_bug(&active_mi->rli, 24432)) + DBUG_RETURN(1); +#endif + thd->cuted_fields=0; - if (info.ignore || - info.handle_duplicates != DUP_ERROR) + if (info.ignore || info.handle_duplicates != DUP_ERROR) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - table->file->start_bulk_insert((ha_rows) 0); + if (info.handle_duplicates == DUP_REPLACE) + { + if (!table->triggers || !table->triggers->has_delete_triggers()) + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + } + thd->no_trans_update= 0; + thd->abort_on_warning= (!info.ignore && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + res= (table_list->prepare_where(thd, 0, TRUE) || + table_list->prepare_check_option(thd)); + + if (!res) + mark_fields_used_by_triggers_for_insert_stmt(thd, table, + info.handle_duplicates); + DBUG_RETURN(res); +} + + +/* + Finish the preparation of the result table. + + SYNOPSIS + select_insert::prepare2() + void + + DESCRIPTION + If the result table is the same as one of the source tables (INSERT SELECT), + the result table is not finally prepared at the join prepair phase. + Do the final preparation now. 
+ + RETURN + 0 OK +*/ + +int select_insert::prepare2(void) +{ + DBUG_ENTER("select_insert::prepare2"); + if (thd->lex->current_select->options & OPTION_BUFFER_RESULT && + !thd->prelocked_mode) + table->file->start_bulk_insert((ha_rows) 0); DBUG_RETURN(0); } @@ -1646,12 +2568,16 @@ void select_insert::cleanup() select_insert::~select_insert() { + DBUG_ENTER("~select_insert"); if (table) { table->next_number_field=0; + table->auto_increment_field_not_null= FALSE; table->file->reset(); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; + thd->abort_on_warning= 0; + DBUG_VOID_RETURN; } @@ -1664,24 +2590,43 @@ bool select_insert::send_data(List<Item> &values) unit->offset_limit_cnt--; DBUG_RETURN(0); } - thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields + + thd->count_cuted_fields= CHECK_FIELD_WARN; // Calculate cuted fields store_values(values); - error=thd->net.report_error || write_record(table,&info); thd->count_cuted_fields= CHECK_FIELD_IGNORE; - - if (!error) + if (thd->net.report_error) + DBUG_RETURN(1); + if (table_list) // Not CREATE ... SELECT { - /* - Restore fields of the record since it is possible that they were - changed by ON DUPLICATE KEY UPDATE clause. - */ - if (info.handle_duplicates == DUP_UPDATE) - restore_record(table, default_values); - - if (table->next_number_field) // Clear for next record + switch (table_list->view_check_option(thd, info.ignore)) { + case VIEW_CHECK_SKIP: + DBUG_RETURN(0); + case VIEW_CHECK_ERROR: + DBUG_RETURN(1); + } + } + if (!(error= write_record(thd, table, &info))) + { + if (table->triggers || info.handle_duplicates == DUP_UPDATE) { + /* + Restore fields of the record since it is possible that they were + changed by ON DUPLICATE KEY UPDATE clause. + + If triggers exist then whey can modify some fields which were not + originally touched by INSERT ... SELECT, so we have to restore + their original values for the next row. 
+ */ + restore_record(table, s->default_values); + } + if (table->next_number_field) + { + /* + Clear auto-increment field for the next record, if triggers are used + we will clear it twice, but this should be cheap. + */ table->next_number_field->reset(); - if (! last_insert_id && thd->insert_id_used) + if (!last_insert_id && thd->insert_id_used) last_insert_id= thd->last_insert_id; } } @@ -1692,17 +2637,18 @@ bool select_insert::send_data(List<Item> &values) void select_insert::store_values(List<Item> &values) { if (fields->elements) - fill_record(*fields, values, 1); + fill_record_n_invoke_before_triggers(thd, *fields, values, 1, + table->triggers, TRG_EVENT_INSERT); else - fill_record(table->field, values, 1); + fill_record_n_invoke_before_triggers(thd, table->field, values, 1, + table->triggers, TRG_EVENT_INSERT); } void select_insert::send_error(uint errcode,const char *err) { DBUG_ENTER("select_insert::send_error"); - /* TODO error should be sent at the query processing end */ - ::send_error(thd,errcode,err); + my_message(errcode, err, MYF(0)); if (!table) { @@ -1712,7 +2658,8 @@ void select_insert::send_error(uint errcode,const char *err) */ DBUG_VOID_RETURN; } - table->file->end_bulk_insert(); + if (!thd->prelocked_mode) + table->file->end_bulk_insert(); /* If at least one row has been inserted/modified and will stay in the table (the table doesn't have transactions) (example: we got a duplicate key @@ -1724,18 +2671,19 @@ void select_insert::send_error(uint errcode,const char *err) { if (last_insert_id) thd->insert_id(last_insert_id); // For binary log - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, table->file->has_transactions(), FALSE); mysql_bin_log.write(&qinfo); } - if (!table->tmp_table) + if (!table->s->tmp_table) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } if (info.copied || info.deleted || info.updated) + { query_cache_invalidate3(thd, table, 
1); + } ha_rollback_stmt(thd); DBUG_VOID_RETURN; } @@ -1746,25 +2694,25 @@ bool select_insert::send_eof() int error,error2; DBUG_ENTER("select_insert::send_eof"); - error=table->file->end_bulk_insert(); + error= (!thd->prelocked_mode) ? table->file->end_bulk_insert():0; table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); /* We must invalidate the table in the query cache before binlog writing - and ha_autocommit_... + and ha_autocommit_or_rollback */ if (info.copied || info.deleted || info.updated) { query_cache_invalidate3(thd, table, 1); - if (!(table->file->has_transactions() || table->tmp_table)) + if (!(table->file->has_transactions() || table->s->tmp_table)) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } if (last_insert_id) - thd->insert_id(last_insert_id); // For binary log + thd->insert_id(info.copied ? last_insert_id : 0); // For binary log /* Write to binlog before commiting transaction */ - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { if (!error) @@ -1778,8 +2726,6 @@ bool select_insert::send_eof() if (error) { table->file->print_error(error,MYF(0)); - //TODO error should be sent at the query processing end - ::send_error(thd); DBUG_RETURN(1); } char buff[160]; @@ -1789,7 +2735,8 @@ bool select_insert::send_eof() else sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields); - ::send_ok(thd,info.copied+info.deleted+info.updated,last_insert_id,buff); + thd->row_count_func= info.copied+info.deleted+info.updated; + ::send_ok(thd, (ulong) thd->row_count_func, last_insert_id, buff); DBUG_RETURN(0); } @@ -1798,46 +2745,208 @@ bool select_insert::send_eof() CREATE TABLE (SELECT) ... ***************************************************************************/ +/* + Create table from lists of fields and items (or open existing table + with same name). 
+ + SYNOPSIS + create_table_from_items() + thd in Thread object + create_info in Create information (like MAX_ROWS, ENGINE or + temporary table flag) + create_table in Pointer to TABLE_LIST object providing database + and name for table to be created or to be open + alter_info in/out Initial list of columns and indexes for the table + to be created + items in List of items which should be used to produce rest + of fields for the table (corresponding fields will + be added to the end of alter_info->create_list) + lock out Pointer to the MYSQL_LOCK object for table created + (open) will be returned in this parameter. Since + this table is not included in THD::lock caller is + responsible for explicitly unlocking this table. + + NOTES + If 'create_info->options' bitmask has HA_LEX_CREATE_IF_NOT_EXISTS + flag and table with name provided already exists then this function will + simply open existing table. + Also note that create, open and lock sequence in this function is not + atomic and thus contains gap for deadlock and can cause other troubles. + Since this function contains some logic specific to CREATE TABLE ... SELECT + it should be changed before it can be used in other contexts. 
+ + RETURN VALUES + non-zero Pointer to TABLE object for table created or opened + 0 Error +*/ + +static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, + TABLE_LIST *create_table, + Alter_info *alter_info, + List<Item> *items, + MYSQL_LOCK **lock) +{ + TABLE tmp_table; // Used during 'create_field()' + TABLE *table= 0; + uint select_field_count= items->elements; + /* Add selected items to field list */ + List_iterator_fast<Item> it(*items); + Item *item; + Field *tmp_field; + bool not_used; + DBUG_ENTER("create_table_from_items"); + + tmp_table.alias= 0; + tmp_table.timestamp_field= 0; + tmp_table.s= &tmp_table.share_not_to_be_used; + tmp_table.s->db_create_options=0; + tmp_table.s->blob_ptr_size= portable_sizeof_char_ptr; + tmp_table.s->db_low_byte_first= test(create_info->db_type == DB_TYPE_MYISAM || + create_info->db_type == DB_TYPE_HEAP); + tmp_table.null_row=tmp_table.maybe_null=0; + + while ((item=it++)) + { + create_field *cr_field; + Field *field, *def_field; + if (item->type() == Item::FUNC_ITEM) + field= item->tmp_table_field(&tmp_table); + else + field= create_tmp_field(thd, &tmp_table, item, item->type(), + (Item ***) 0, &tmp_field, &def_field, 0, 0, 0, 0, + 0); + if (!field || + !(cr_field=new create_field(field,(item->type() == Item::FIELD_ITEM ? + ((Item_field *)item)->field : + (Field*) 0)))) + DBUG_RETURN(0); + if (item->maybe_null) + cr_field->flags &= ~NOT_NULL_FLAG; + alter_info->create_list.push_back(cr_field); + } + /* + create and lock table + + We don't log the statement, it will be logged later. + + If this is a HEAP table, the automatic DELETE FROM which is written to the + binlog when a HEAP table is opened for the first time since startup, must + not be written: 1) it would be wrong (imagine we're in CREATE SELECT: we + don't want to delete from it) 2) it would be written before the CREATE + TABLE, which is a wrong order. So we keep binary logging disabled when we + open_table(). 
+ NOTE: By locking table which we just have created (or for which we just have + have found that it already exists) separately from other tables used by the + statement we create potential window for deadlock. + TODO: create and open should be done atomic ! + */ + { + tmp_disable_binlog(thd); + if (!mysql_create_table(thd, create_table->db, create_table->table_name, + create_info, alter_info, 0, select_field_count)) + { + /* + If we are here in prelocked mode we either create temporary table + or prelocked mode is caused by the SELECT part of this statement. + */ + DBUG_ASSERT(!thd->prelocked_mode || + create_info->options & HA_LEX_CREATE_TMP_TABLE || + thd->lex->requires_prelocking()); + + /* + NOTE: We don't want to ignore set of locked tables here if we are + under explicit LOCK TABLES since it will open gap for deadlock + too wide (and also is not backward compatible). + */ + if (! (table= open_table(thd, create_table, thd->mem_root, (bool*) 0, + (MYSQL_LOCK_IGNORE_FLUSH | + ((thd->prelocked_mode == PRELOCKED) ? + MYSQL_OPEN_IGNORE_LOCKED_TABLES:0))))) + quick_rm_table(create_info->db_type, create_table->db, + table_case_name(create_info, create_table->table_name)); + } + reenable_binlog(thd); + if (!table) // open failed + DBUG_RETURN(0); + } + + /* + FIXME: What happens if trigger manages to be created while we are + obtaining this lock ? May be it is sensible just to disable + trigger execution in this case ? Or will MYSQL_LOCK_IGNORE_FLUSH + save us from that ? + */ + table->reginfo.lock_type=TL_WRITE; + if (! 
((*lock)= mysql_lock_tables(thd, &table, 1, + MYSQL_LOCK_IGNORE_FLUSH, ¬_used))) + { + VOID(pthread_mutex_lock(&LOCK_open)); + hash_delete(&open_cache,(byte*) table); + VOID(pthread_mutex_unlock(&LOCK_open)); + quick_rm_table(create_info->db_type, create_table->db, + table_case_name(create_info, create_table->table_name)); + DBUG_RETURN(0); + } + table->file->extra(HA_EXTRA_WRITE_CACHE); + DBUG_RETURN(table); +} + + int select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) { DBUG_ENTER("select_create::prepare"); unit= u; - table= create_table_from_items(thd, create_info, db, name, + table= create_table_from_items(thd, create_info, create_table, alter_info, &values, &lock); if (!table) DBUG_RETURN(-1); // abort() deletes table - if (table->fields < values.elements) + if (table->s->fields < values.elements) { - my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, - ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0),1); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1); DBUG_RETURN(-1); } /* First field to copy */ - field=table->field+table->fields - values.elements; + field= table->field+table->s->fields - values.elements; + + /* Mark all fields that are given values */ + for (Field **f= field ; *f ; f++) + (*f)->query_id= thd->query_id; /* Don't set timestamp if used */ table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; table->next_number_field=table->found_next_number_field; - restore_record(table,default_values); // Get empty record + restore_record(table,s->default_values); // Get empty record thd->cuted_fields=0; - if (info.ignore || - info.handle_duplicates != DUP_ERROR) + if (info.ignore || info.handle_duplicates != DUP_ERROR) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - table->file->start_bulk_insert((ha_rows) 0); - DBUG_RETURN(0); + if (info.handle_duplicates == DUP_REPLACE) + { + if (!table->triggers || !table->triggers->has_delete_triggers()) + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + } + if 
(!thd->prelocked_mode) + table->file->start_bulk_insert((ha_rows) 0); + thd->no_trans_update= 0; + thd->abort_on_warning= (!info.ignore && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + DBUG_RETURN(check_that_all_fields_are_given_values(thd, table, + table_list)); } void select_create::store_values(List<Item> &values) { - fill_record(field, values, 1); + fill_record_n_invoke_before_triggers(thd, field, values, 1, + table->triggers, TRG_EVENT_INSERT); } @@ -1861,6 +2970,7 @@ bool select_create::send_eof() else { table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); VOID(pthread_mutex_lock(&LOCK_open)); mysql_unlock_tables(thd, lock); /* @@ -1868,13 +2978,13 @@ bool select_create::send_eof() Check if we can remove the following two rows. We should be able to just keep the table in the table cache. */ - if (!table->tmp_table) + if (!table->s->tmp_table) { - ulong version= table->version; + ulong version= table->s->version; hash_delete(&open_cache,(byte*) table); /* Tell threads waiting for refresh that something has happened */ if (version != refresh_version) - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } lock=0; table=0; @@ -1894,19 +3004,20 @@ void select_create::abort() if (table) { table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); - enum db_type table_type=table->db_type; - if (!table->tmp_table) + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); + enum db_type table_type=table->s->db_type; + if (!table->s->tmp_table) { - ulong version= table->version; + ulong version= table->s->version; hash_delete(&open_cache,(byte*) table); if (!create_info->table_existed) - quick_rm_table(table_type, db, name); + quick_rm_table(table_type, create_table->db, create_table->table_name); /* Tell threads waiting for refresh that something has happened */ if (version != refresh_version) - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } else if 
(!create_info->table_existed) - close_temporary_table(thd, db, name); + close_temporary_table(thd, create_table->db, create_table->table_name); table=0; } VOID(pthread_mutex_unlock(&LOCK_open)); @@ -1917,11 +3028,11 @@ void select_create::abort() Instansiate templates *****************************************************************************/ -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class List_iterator_fast<List_item>; #ifndef EMBEDDED_LIBRARY template class I_List<delayed_insert>; template class I_List_iterator<delayed_insert>; template class I_List<delayed_row>; #endif /* EMBEDDED_LIBRARY */ -#endif /* __GNUC__ */ +#endif /* HAVE_EXPLICIT_TEMPLATE_INSTANTIATION */ diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 270fdb3f20a..3be844b6761 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -17,20 +16,19 @@ /* A lexical scanner on a temporary buffer with a yacc interface */ +#define MYSQL_LEX 1 #include "mysql_priv.h" #include "item_create.h" #include <m_ctype.h> #include <hash.h> - +#include "sp.h" +#include "sp_head.h" /* - Fake table list object, pointer to which is used as special value for - st_lex::time_zone_tables_used indicating that we implicitly use time - zone tables in this statement but real table list was not yet created. 
- Pointer to it is also returned by my_tz_get_tables_list() as indication - of transient error; + We are using pointer to this variable for distinguishing between assignment + to NEW row field (when parsing trigger definition) and structured variable. */ -TABLE_LIST fake_time_zone_tables_list; +sys_var_long_ptr trg_new_row_fake_var(0, 0); /* Macros to look like lex */ @@ -42,12 +40,6 @@ TABLE_LIST fake_time_zone_tables_list; #define yySkip() lex->ptr++ #define yyLength() ((uint) (lex->ptr - lex->tok_start)-1) -#if MYSQL_VERSION_ID < 32300 -#define FLOAT_NUM REAL_NUM -#endif - -pthread_key(LEX*,THR_LEX); - /* Longest standard keyword name */ #define TOCK_NAME_LENGTH 24 @@ -56,7 +48,8 @@ pthread_key(LEX*,THR_LEX); used when comparing keywords */ -uchar to_upper_lex[] = { +static uchar to_upper_lex[]= +{ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, @@ -95,8 +88,6 @@ void lex_init(void) for (i=0 ; i < array_elements(sql_functions) ; i++) sql_functions[i].length=(uchar) strlen(sql_functions[i].name); - VOID(pthread_key_create(&THR_LEX,NULL)); - DBUG_VOID_RETURN; } @@ -108,6 +99,16 @@ void lex_free(void) } +void +st_parsing_options::reset() +{ + allows_variable= TRUE; + allows_select_into= TRUE; + allows_select_procedure= TRUE; + allows_derived= TRUE; +} + + /* This is called before every query that is to be parsed. Because of this, it's critical to not do too much things here. @@ -117,15 +118,24 @@ void lex_free(void) void lex_start(THD *thd, uchar *buf,uint length) { LEX *lex= thd->lex; + DBUG_ENTER("lex_start"); + + lex->thd= lex->unit.thd= thd; + lex->buf= lex->ptr= buf; + lex->end_of_query= buf+length; + + lex->context_stack.empty(); lex->unit.init_query(); lex->unit.init_select(); - lex->thd= thd; - lex->unit.thd= thd; + /* 'parent_lex' is used in init_query() so it must be before it. 
*/ + lex->select_lex.parent_lex= lex; lex->select_lex.init_query(); lex->value_list.empty(); lex->update_list.empty(); lex->param_list.empty(); - lex->auxilliary_table_list.empty(); + lex->view_list.empty(); + lex->prepared_stmt_params.empty(); + lex->auxiliary_table_list.empty(); lex->unit.next= lex->unit.master= lex->unit.link_next= lex->unit.return_to= 0; lex->unit.prev= lex->unit.link_prev= 0; @@ -136,15 +146,23 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->select_lex.link_next= lex->select_lex.slave= lex->select_lex.next= 0; lex->select_lex.link_prev= (st_select_lex_node**)&(lex->all_selects_list); lex->select_lex.options= 0; + lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED; + lex->select_lex.init_order(); + lex->select_lex.group_list.empty(); lex->describe= 0; - lex->subqueries= lex->derived_tables= FALSE; + lex->subqueries= FALSE; + lex->view_prepare_mode= FALSE; + lex->stmt_prepare_mode= FALSE; + lex->derived_tables= 0; lex->lock_option= TL_READ; - lex->found_colon= 0; + lex->found_semicolon= 0; lex->safe_to_cache_query= 1; lex->time_zone_tables_used= 0; + lex->leaf_tables_insert= 0; + lex->parsing_options.reset(); + lex->empty_field_list_on_rset= 0; lex->select_lex.select_number= 1; lex->next_state=MY_LEX_START; - lex->end_of_query=(lex->ptr=buf)+length; lex->yylineno = 1; lex->in_comment=0; lex->length=0; @@ -154,23 +172,32 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->select_lex.ftfunc_list= &lex->select_lex.ftfunc_list_alloc; lex->select_lex.group_list.empty(); lex->select_lex.order_list.empty(); + lex->select_lex.udf_list.empty(); lex->current_select= &lex->select_lex; lex->yacc_yyss=lex->yacc_yyvs=0; lex->ignore_space=test(thd->variables.sql_mode & MODE_IGNORE_SPACE); - lex->sql_command=SQLCOM_END; + lex->sql_command= lex->orig_sql_command= SQLCOM_END; lex->duplicates= DUP_ERROR; lex->ignore= 0; + lex->sphead= NULL; + lex->spcont= NULL; lex->proc_list.first= 0; + lex->escape_used= FALSE; + 
lex->reset_query_tables_list(FALSE); + + lex->nest_level=0 ; + lex->allow_sum_func= 0; + lex->in_sum_func= NULL; + DBUG_VOID_RETURN; } void lex_end(LEX *lex) { - for (SELECT_LEX *sl= lex->all_selects_list; - sl; - sl= sl->next_select_in_list()) - sl->expr_list.delete_elements(); // If error when parsing sql-varargs + DBUG_ENTER("lex_end"); + DBUG_PRINT("enter", ("lex: 0x%lx", (long) lex)); x_free(lex->yacc_yyss); x_free(lex->yacc_yyvs); + DBUG_VOID_RETURN; } @@ -184,29 +211,16 @@ static int find_keyword(LEX *lex, uint len, bool function) lex->yylval->symbol.symbol=symbol; lex->yylval->symbol.str= (char*) tok; lex->yylval->symbol.length=len; + + if ((symbol->tok == NOT_SYM) && + (lex->thd->variables.sql_mode & MODE_HIGH_NOT_PRECEDENCE)) + return NOT2_SYM; + if ((symbol->tok == OR_OR_SYM) && + !(lex->thd->variables.sql_mode & MODE_PIPES_AS_CONCAT)) + return OR2_SYM; + return symbol->tok; } -#ifdef HAVE_DLOPEN - udf_func *udf; - if (function && using_udf_functions && (udf=find_udf((char*) tok, len))) - { - lex->safe_to_cache_query=0; - lex->yylval->udf=udf; - switch (udf->returns) { - case STRING_RESULT: - return (udf->type == UDFTYPE_FUNCTION) ? UDF_CHAR_FUNC : UDA_CHAR_SUM; - case REAL_RESULT: - return (udf->type == UDFTYPE_FUNCTION) ? UDF_FLOAT_FUNC : UDA_FLOAT_SUM; - case INT_RESULT: - return (udf->type == UDFTYPE_FUNCTION) ? 
UDF_INT_FUNC : UDA_INT_SUM; - case ROW_RESULT: - default: - // This case should never be choosen - DBUG_ASSERT(0); - return 0; - } - } -#endif return 0; } @@ -215,7 +229,7 @@ static int find_keyword(LEX *lex, uint len, bool function) SYNOPSIS is_keyword() - name checked name + name checked name (must not be empty) len length of checked name RETURN VALUES @@ -225,6 +239,7 @@ static int find_keyword(LEX *lex, uint len, bool function) bool is_keyword(const char *name, uint len) { + DBUG_ASSERT(len != 0); return get_hash_symbol(name,len,0)!=0; } @@ -282,16 +297,19 @@ static char *get_text(LEX *lex) { c = yyGet(); #ifdef USE_MB - int l; - if (use_mb(cs) && - (l = my_ismbchar(cs, - (const char *)lex->ptr-1, - (const char *)lex->end_of_query))) { + { + int l; + if (use_mb(cs) && + (l = my_ismbchar(cs, + (const char *)lex->ptr-1, + (const char *)lex->end_of_query))) { lex->ptr += l-1; continue; + } } #endif - if (c == '\\') + if (c == '\\' && + !(lex->thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)) { // Escaped character found_escape=1; if (lex->ptr == lex->end_of_query) @@ -338,7 +356,8 @@ static char *get_text(LEX *lex) continue; } #endif - if (*str == '\\' && str+1 != end) + if (!(lex->thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) && + *str == '\\' && str+1 != end) { switch(*++str) { case 'n': @@ -436,12 +455,12 @@ static inline uint int_token(const char *str,uint length) else if (length < signed_longlong_len) return LONG_NUM; else if (length > signed_longlong_len) - return REAL_NUM; + return DECIMAL_NUM; else { cmp=signed_longlong_str+1; smaller=LONG_NUM; // If <= signed_longlong_str - bigger=REAL_NUM; + bigger=DECIMAL_NUM; } } else @@ -457,10 +476,10 @@ static inline uint int_token(const char *str,uint length) else if (length > longlong_len) { if (length > unsigned_longlong_len) - return REAL_NUM; + return DECIMAL_NUM; cmp=unsigned_longlong_str; smaller=ULONGLONG_NUM; - bigger=REAL_NUM; + bigger=DECIMAL_NUM; } else { @@ -474,14 +493,14 @@ static inline 
uint int_token(const char *str,uint length) } /* - yylex remember the following states from the following yylex() + MYSQLlex remember the following states from the following MYSQLlex() - MY_LEX_EOQ Found end of query - MY_LEX_OPERATOR_OR_IDENT Last state was an ident, text or number (which can't be followed by a signed number) */ -int yylex(void *arg, void *yythd) +int MYSQLlex(void *arg, void *yythd) { reg1 uchar c; int tokval, result_state; @@ -494,6 +513,10 @@ int yylex(void *arg, void *yythd) uchar *ident_map= cs->ident_map; lex->yylval=yylval; // The global state + + lex->tok_end_prev= lex->tok_end; + lex->tok_start_prev= lex->tok_start; + lex->tok_start=lex->tok_end=lex->ptr; state=lex->next_state; lex->next_state=MY_LEX_OPERATOR_OR_IDENT; @@ -541,30 +564,26 @@ int yylex(void *arg, void *yythd) its value in a query for the binlog, the query must stay grammatically correct. */ - else if (c == '?' && ((THD*) yythd)->command == COM_PREPARE && - !ident_map[yyPeek()]) + else if (c == '?' 
&& lex->stmt_prepare_mode && !ident_map[yyPeek()]) return(PARAM_MARKER); return((int) c); case MY_LEX_IDENT_OR_NCHAR: if (yyPeek() != '\'') - { // Found x'hex-number' + { state= MY_LEX_IDENT; break; } - yyGet(); // Skip ' - while ((c = yyGet()) && (c !='\'')) ; - length=(lex->ptr - lex->tok_start); // Length of hexnum+3 - if (c != '\'') + /* Found N'string' */ + lex->tok_start++; // Skip N + yySkip(); // Skip ' + if (!(yylval->lex_str.str = get_text(lex))) { - return(ABORT_SYM); // Illegal hex constant + state= MY_LEX_CHAR; // Read char by char + break; } - yyGet(); // get_token makes an unget - yylval->lex_str=get_token(lex,length); - yylval->lex_str.str+=2; // Skip x' - yylval->lex_str.length-=3; // Don't count x' and last ' - lex->yytoklen-=3; - return (NCHAR_STRING); + yylval->lex_str.length= lex->yytoklen; + return(NCHAR_STRING); case MY_LEX_IDENT_OR_HEX: if (yyPeek() == '\'') @@ -572,8 +591,12 @@ int yylex(void *arg, void *yythd) state= MY_LEX_HEX_NUMBER; break; } - /* Fall through */ - case MY_LEX_IDENT_OR_BIN: // TODO: Add binary string handling + case MY_LEX_IDENT_OR_BIN: + if (yyPeek() == '\'') + { // Found b'bin-number' + state= MY_LEX_BIN_NUMBER; + break; + } case MY_LEX_IDENT: uchar *start; #if defined(USE_MB) && defined(USE_MB_IDENT) @@ -695,6 +718,20 @@ int yylex(void *arg, void *yythd) } yyUnget(); } + else if (c == 'b' && (lex->ptr - lex->tok_start) == 2 && + lex->tok_start[0] == '0' ) + { // b'bin-number' + while (my_isxdigit(cs,(c = yyGet()))) ; + if ((lex->ptr - lex->tok_start) >= 4 && !ident_map[c]) + { + yylval->lex_str= get_token(lex, yyLength()); + yylval->lex_str.str+= 2; // Skip 0x + yylval->lex_str.length-= 2; + lex->yytoklen-= 2; + return (BIN_NUM); + } + yyUnget(); + } // fall through case MY_LEX_IDENT_START: // We come here after '.' 
result_state= IDENT; @@ -735,8 +772,8 @@ int yylex(void *arg, void *yythd) lex->tok_start=lex->ptr; // Skip first ` while ((c=yyGet())) { - int length; - if ((length= my_mbcharlen(cs, c)) == 1) + int var_length; + if ((var_length= my_mbcharlen(cs, c)) == 1) { if (c == (uchar) NAMES_SEP_CHAR) break; /* Old .frm format can't handle this char */ @@ -750,9 +787,9 @@ int yylex(void *arg, void *yythd) } } #ifdef USE_MB - else if (length < 1) + else if (var_length < 1) break; // Error - lex->ptr+= length-1; + lex->ptr+= var_length-1; #endif } if (double_quotes) @@ -790,7 +827,7 @@ int yylex(void *arg, void *yythd) return(FLOAT_NUM); } yylval->lex_str=get_token(lex,yyLength()); - return(REAL_NUM); + return(DECIMAL_NUM); case MY_LEX_HEX_NUMBER: // Found x'hexstring' yyGet(); // Skip ' @@ -807,6 +844,19 @@ int yylex(void *arg, void *yythd) lex->yytoklen-=3; return (HEX_NUM); + case MY_LEX_BIN_NUMBER: // Found b'bin-string' + yyGet(); // Skip ' + while ((c= yyGet()) == '0' || c == '1'); + length= (lex->ptr - lex->tok_start); // Length of bin-num + 3 + if (c != '\'') + return(ABORT_SYM); // Illegal hex constant + yyGet(); // get_token makes an unget + yylval->lex_str= get_token(lex, length); + yylval->lex_str.str+= 2; // Skip b' + yylval->lex_str.length-= 3; // Don't count b' and last ' + lex->yytoklen-= 3; + return (BIN_NUM); + case MY_LEX_CMP_OP: // Incomplete comparison operator if (state_map[yyPeek()] == MY_LEX_CMP_OP || state_map[yyPeek()] == MY_LEX_LONG_CMP_OP) @@ -924,16 +974,15 @@ int yylex(void *arg, void *yythd) { THD* thd= (THD*)yythd; if ((thd->client_capabilities & CLIENT_MULTI_STATEMENTS) && - (thd->command != COM_PREPARE)) + !lex->stmt_prepare_mode) { - lex->safe_to_cache_query=0; - lex->found_colon=(char*)lex->ptr; - thd->server_status |= SERVER_MORE_RESULTS_EXISTS; - lex->next_state=MY_LEX_END; - return(END_OF_INPUT); + lex->safe_to_cache_query= 0; + lex->found_semicolon=(char*) lex->ptr; + thd->server_status|= SERVER_MORE_RESULTS_EXISTS; + lex->next_state= 
MY_LEX_END; + return (END_OF_INPUT); } - else - state=MY_LEX_CHAR; // Return ';' + state= MY_LEX_CHAR; // Return ';' break; } /* fall true */ @@ -1004,6 +1053,8 @@ int yylex(void *arg, void *yythd) if (c == '.') lex->next_state=MY_LEX_IDENT_SEP; length= (uint) (lex->ptr - lex->tok_start)-1; + if (length == 0) + return(ABORT_SYM); // Names must be nonempty. if ((tokval= find_keyword(lex,length,0))) { yyUnget(); // Put back 'c' @@ -1023,10 +1074,33 @@ Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root) create_list(rhs.create_list, mem_root), flags(rhs.flags), keys_onoff(rhs.keys_onoff), - tablespace_op(rhs.tablespace_op), - is_simple(rhs.is_simple) + tablespace_op(rhs.tablespace_op) {} + +/* + Skip comment in the end of statement. + + SYNOPSIS + skip_rear_comments() + begin pointer to the beginning of statement + end pointer to the end of statement + + DESCRIPTION + The function is intended to trim comments at the end of the statement. + + RETURN + Pointer to the last non-comment symbol of the statement. 
+*/ + +uchar *skip_rear_comments(uchar *begin, uchar *end) +{ + while (begin < end && (end[-1] <= ' ' || end[-1] == '*' || + end[-1] == '/' || end[-1] == ';')) + end-= 1; + return end; +} + /* st_select_lex structures initialisations */ @@ -1034,6 +1108,7 @@ Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root) void st_select_lex_node::init_query() { options= 0; + sql_cache= SQL_CACHE_UNSPECIFIED; linkage= UNSPECIFIED_TYPE; no_error= no_table_names_allowed= 0; uncacheable= 0; @@ -1059,54 +1134,80 @@ void st_select_lex_unit::init_query() cleaned= 0; item_list.empty(); describe= 0; + found_rows_for_union= 0; } void st_select_lex::init_query() { st_select_lex_node::init_query(); table_list.empty(); + top_join_list.empty(); + join_list= &top_join_list; + embedding= leaf_tables= 0; item_list.empty(); join= 0; - where= 0; - having= 0; + having= prep_having= where= prep_where= 0; olap= UNSPECIFIED_OLAP_TYPE; having_fix_field= 0; - resolve_mode= NOMATTER_MODE; - cond_count= with_wild= 0; + context.select_lex= this; + context.init(); + /* + Add the name resolution context of the current (sub)query to the + stack of contexts for the whole query. + TODO: + push_context may return an error if there is no memory for a new + element in the stack, however this method has no return value, + thus push_context should be moved to a place where query + initialization is checked for failure. 
+ */ + parent_lex->push_context(&context); + cond_count= between_count= with_wild= 0; + conds_processed_with_permanent_arena= 0; ref_pointer_array= 0; + select_n_where_fields= 0; select_n_having_items= 0; - prep_where= 0; - prep_having= 0; subquery_in_having= explicit_limit= 0; - parsing_place= NO_MATTER; is_item_list_lookup= 0; + first_execution= 1; + first_cond_optimization= 1; + parsing_place= NO_MATTER; + exclude_from_table_unique_test= no_wrap_view_item= FALSE; + nest_level= 0; + link_next= 0; } void st_select_lex::init_select() { st_select_lex_node::init_select(); group_list.empty(); - type= db= db1= table1= db2= table2= 0; + type= db= 0; having= 0; use_index_ptr= ignore_index_ptr= 0; table_join_options= 0; in_sum_expr= with_wild= 0; options= 0; + sql_cache= SQL_CACHE_UNSPECIFIED; braces= 0; - when_list.empty(); expr_list.empty(); + udf_list.empty(); interval_list.empty(); use_index.empty(); ftfunc_list_alloc.empty(); + inner_sum_func_list= 0; ftfunc_list= &ftfunc_list_alloc; linkage= UNSPECIFIED_TYPE; order_list.elements= 0; order_list.first= 0; order_list.next= (byte**) &order_list.first; - select_limit= HA_POS_ERROR; - offset_limit= 0; + /* Set limit and offset to default values */ + select_limit= 0; /* denotes the default limit = HA_POS_ERROR */ + offset_limit= 0; /* denotes the default offset = 0 */ with_sum_func= 0; - + is_correlated= 0; + cur_pos_in_select_list= UNDEF_POS; + non_agg_fields.empty(); + cond_value= having_value= Item::COND_UNDEF; + inner_refs_list.empty(); } /* @@ -1296,10 +1397,20 @@ void st_select_lex::mark_as_dependent(SELECT_LEX *last) if (!(s->uncacheable & UNCACHEABLE_DEPENDENT)) { // Select is dependent of outer select - s->uncacheable|= UNCACHEABLE_DEPENDENT; + s->uncacheable= (s->uncacheable & ~UNCACHEABLE_UNITED) | + UNCACHEABLE_DEPENDENT; SELECT_LEX_UNIT *munit= s->master_unit(); - munit->uncacheable|= UNCACHEABLE_DEPENDENT; + munit->uncacheable= (munit->uncacheable & ~UNCACHEABLE_UNITED) | + UNCACHEABLE_DEPENDENT; + for 
(SELECT_LEX *sl= munit->first_select(); sl ; sl= sl->next_select()) + { + if (sl != s && + !(sl->uncacheable & (UNCACHEABLE_DEPENDENT | UNCACHEABLE_UNITED))) + sl->uncacheable|= UNCACHEABLE_UNITED; + } } + is_correlated= TRUE; + this->master_unit()->item->is_correlated= TRUE; } bool st_select_lex_node::set_braces(bool value) { return 1; } @@ -1329,160 +1440,17 @@ ulong st_select_lex_node::get_table_join_options() */ bool st_select_lex::test_limit() { - if (select_limit != HA_POS_ERROR) + if (select_limit != 0) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), - "LIMIT & IN/ALL/ANY/SOME subquery"); + "LIMIT & IN/ALL/ANY/SOME subquery"); return(1); } - // We need only 1 row to determinate existence - select_limit= 1; // no sense in ORDER BY without LIMIT order_list.empty(); return(0); } -/* - Interface method of table list creation for query - - SYNOPSIS - st_select_lex_unit::create_total_list() - thd THD pointer - result pointer on result list of tables pointer - check_derived force derived table chacking (used for creating - table list for derived query) - DESCRIPTION - This is used for UNION & subselect to create a new table list of all used - tables. - The table_list->table entry in all used tables are set to point - to the entries in this list. - - RETURN - 0 - OK - !0 - error -*/ -bool st_select_lex_unit::create_total_list(THD *thd_arg, st_lex *lex, - TABLE_LIST **result_arg) -{ - *result_arg= 0; - if (!(res= create_total_list_n_last_return(thd_arg, lex, &result_arg))) - { - /* - If time zone tables were used implicitly in statement we should add - them to global table list. - */ - if (lex->time_zone_tables_used) - { - /* - Altough we are modifying lex data, it won't raise any problem in - case when this lex belongs to some prepared statement or stored - procedure: such modification does not change any invariants imposed - by requirement to reuse the same lex for multiple executions. 
- */ - if ((lex->time_zone_tables_used= my_tz_get_table_list(thd)) != - &fake_time_zone_tables_list) - { - *result_arg= lex->time_zone_tables_used; - } - else - { - send_error(thd, 0); - res= 1; - } - } - } - return res; -} - -/* - Table list creation for query - - SYNOPSIS - st_select_lex_unit::create_total_list() - thd THD pointer - lex pointer on LEX stricture - result pointer on pointer on result list of tables pointer - - DESCRIPTION - This is used for UNION & subselect to create a new table list of all used - tables. - The table_list->table_list in all tables of global list are set to point - to the local SELECT_LEX entries. - - RETURN - 0 - OK - !0 - error -*/ -bool st_select_lex_unit:: -create_total_list_n_last_return(THD *thd_arg, - st_lex *lex, - TABLE_LIST ***result_arg) -{ - TABLE_LIST *slave_list_first=0, **slave_list_last= &slave_list_first; - TABLE_LIST **new_table_list= *result_arg, *aux; - SELECT_LEX *sl= (SELECT_LEX*)slave; - - /* - iterate all inner selects + fake_select (if exists), - fake_select->next_select() always is 0 - */ - for (; - sl; - sl= (sl->next_select() ? - sl->next_select() : - (sl == fake_select_lex ? 
- 0 : - fake_select_lex))) - { - // check usage of ORDER BY in union - if (sl->order_list.first && sl->next_select() && !sl->braces && - sl->linkage != GLOBAL_OPTIONS_TYPE) - { - net_printf(thd_arg,ER_WRONG_USAGE,"UNION","ORDER BY"); - return 1; - } - - for (SELECT_LEX_UNIT *inner= sl->first_inner_unit(); - inner; - inner= inner->next_unit()) - { - if (inner->create_total_list_n_last_return(thd, lex, - &slave_list_last)) - return 1; - } - - if ((aux= (TABLE_LIST*) sl->table_list.first)) - { - TABLE_LIST *next_table; - for (; aux; aux= next_table) - { - TABLE_LIST *cursor; - next_table= aux->next; - /* Add to the total table list */ - if (!(cursor= (TABLE_LIST *) thd->memdup((char*) aux, - sizeof(*aux)))) - { - send_error(thd,0); - return 1; - } - *new_table_list= cursor; - cursor->table_list= aux; - new_table_list= &cursor->next; - *new_table_list= 0; // end result list - aux->table_list= cursor; - } - } - } - - if (slave_list_first) - { - *new_table_list= slave_list_first; - new_table_list= slave_list_last; - } - *result_arg= new_table_list; - return 0; -} - st_select_lex_unit* st_select_lex_unit::master_unit() { @@ -1504,7 +1472,9 @@ bool st_select_lex::add_order_to_list(THD *thd, Item *item, bool asc) bool st_select_lex::add_item_to_list(THD *thd, Item *item) { - return item_list.push_back(item); + DBUG_ENTER("st_select_lex::add_item_to_list"); + DBUG_PRINT("info", ("Item: %p", item)); + DBUG_RETURN(item_list.push_back(item)); } @@ -1590,221 +1560,729 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) We have to create array in prepared statement memory if it is prepared statement */ - Item_arena *arena= thd->current_arena; - return (ref_pointer_array= - (Item **)arena->alloc(sizeof(Item*) * - (item_list.elements + - select_n_having_items + - order_group_num)* 5)) == 0; + Query_arena *arena= thd->stmt_arena; + return (ref_pointer_array= + (Item **)arena->alloc(sizeof(Item*) * (n_child_sum_items + + item_list.elements + + select_n_having_items 
+ + select_n_where_fields + + order_group_num)*5)) == 0; +} + + +void st_select_lex_unit::print(String *str) +{ + bool union_all= !union_distinct; + for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) + { + if (sl != first_select()) + { + str->append(STRING_WITH_LEN(" union ")); + if (union_all) + str->append(STRING_WITH_LEN("all ")); + else if (union_distinct == sl) + union_all= TRUE; + } + if (sl->braces) + str->append('('); + sl->print(thd, str); + if (sl->braces) + str->append(')'); + } + if (fake_select_lex == global_parameters) + { + if (fake_select_lex->order_list.elements) + { + str->append(STRING_WITH_LEN(" order by ")); + fake_select_lex->print_order(str, + (ORDER *) fake_select_lex-> + order_list.first); + } + fake_select_lex->print_limit(thd, str); + } +} + + +void st_select_lex::print_order(String *str, ORDER *order) +{ + for (; order; order= order->next) + { + if (order->counter_used) + { + char buffer[20]; + uint length= my_snprintf(buffer, 20, "%d", order->counter); + str->append(buffer, length); + } + else + (*order->item)->print(str); + if (!order->asc) + str->append(STRING_WITH_LEN(" desc")); + if (order->next) + str->append(','); + } +} + + +void st_select_lex::print_limit(THD *thd, String *str) +{ + SELECT_LEX_UNIT *unit= master_unit(); + Item_subselect *item= unit->item; + if (item && unit->global_parameters == this && + (item->substype() == Item_subselect::EXISTS_SUBS || + item->substype() == Item_subselect::IN_SUBS || + item->substype() == Item_subselect::ALL_SUBS)) + { + DBUG_ASSERT(!item->fixed || + select_limit->val_int() == LL(1) && offset_limit == 0); + return; + } + + if (explicit_limit) + { + str->append(STRING_WITH_LEN(" limit ")); + if (offset_limit) + { + offset_limit->print(str); + str->append(','); + } + select_limit->print(str); + } +} + +/** + @brief Restore the LEX and THD in case of a parse error. + + This is a clean up call that is invoked by the Bison generated + parser before returning an error from MYSQLparse. 
If your + semantic actions manipulate with the global thread state (which + is a very bad practice and should not normally be employed) and + need a clean-up in case of error, and you can not use %destructor + rule in the grammar file itself, this function should be used + to implement the clean up. +*/ + +void st_lex::cleanup_lex_after_parse_error(THD *thd) +{ + /* + Delete sphead for the side effect of restoring of the original + LEX state, thd->lex, thd->mem_root and thd->free_list if they + were replaced when parsing stored procedure statements. We + will never use sphead object after a parse error, so it's okay + to delete it only for the sake of the side effect. + TODO: make this functionality explicit in sp_head class. + Sic: we must nullify the member of the main lex, not the + current one that will be thrown away + */ + if (thd->lex->sphead) + { + delete thd->lex->sphead; + thd->lex->sphead= NULL; + } +} + +/* + Initialize (or reset) Query_tables_list object. + + SYNOPSIS + reset_query_tables_list() + init TRUE - we should perform full initialization of object with + allocating needed memory + FALSE - object is already initialized so we should only reset + its state so it can be used for parsing/processing + of new statement + + DESCRIPTION + This method initializes Query_tables_list so it can be used as part + of LEX object for parsing/processing of statement. One can also use + this method to reset state of already initialized Query_tables_list + so it can be used for processing of new statement. +*/ + +void Query_tables_list::reset_query_tables_list(bool init) +{ + query_tables= 0; + query_tables_last= &query_tables; + query_tables_own_last= 0; + if (init) + { + /* + We delay real initialization of hash (and therefore related + memory allocation) until first insertion into this hash. + */ + hash_clear(&sroutines); + } + else if (sroutines.records) + { + /* Non-zero sroutines.records means that hash was initialized. 
*/ + my_hash_reset(&sroutines); + } + sroutines_list.empty(); + sroutines_list_own_last= sroutines_list.next; + sroutines_list_own_elements= 0; +} + + +/* + Destroy Query_tables_list object with freeing all resources used by it. + + SYNOPSIS + destroy_query_tables_list() +*/ + +void Query_tables_list::destroy_query_tables_list() +{ + hash_free(&sroutines); } /* - Find db.table which will be updated in this unit + Initialize LEX object. SYNOPSIS - st_select_lex_unit::check_updateable() - db - data base name - table - real table name + st_lex::st_lex() + + NOTE + LEX object initialized with this constructor can be used as part of + THD object for which one can safely call open_tables(), lock_tables() + and close_thread_tables() functions. But it is not yet ready for + statement parsing. On should use lex_start() function to prepare LEX + for this. +*/ + +st_lex::st_lex() + :result(0), yacc_yyss(0), yacc_yyvs(0), + sql_command(SQLCOM_END) +{ + reset_query_tables_list(TRUE); +} + + +/* + Check whether the merging algorithm can be used on this VIEW + + SYNOPSIS + st_lex::can_be_merged() + + DESCRIPTION + We can apply merge algorithm if it is single SELECT view with + subqueries only in WHERE clause (we do not count SELECTs of underlying + views, and second level subqueries) and we have not grpouping, ordering, + HAVING clause, aggregate functions, DISTINCT clause, LIMIT clause and + several underlying tables. 
RETURN - 1 - found - 0 - OK (table did not found) + FALSE - only temporary table algorithm can be used + TRUE - merge algorithm can be used */ -bool st_select_lex_unit::check_updateable(char *db, char *table) +bool st_lex::can_be_merged() { - for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) - if (sl->check_updateable(db, table)) - return 1; - return 0; + // TODO: do not forget implement case when select_lex.table_list.elements==0 + + /* find non VIEW subqueries/unions */ + bool selects_allow_merge= select_lex.next_select() == 0; + if (selects_allow_merge) + { + for (SELECT_LEX_UNIT *tmp_unit= select_lex.first_inner_unit(); + tmp_unit; + tmp_unit= tmp_unit->next_unit()) + { + if (tmp_unit->first_select()->parent_lex == this && + (tmp_unit->item == 0 || + (tmp_unit->item->place() != IN_WHERE && + tmp_unit->item->place() != IN_ON))) + { + selects_allow_merge= 0; + break; + } + } + } + + return (selects_allow_merge && + select_lex.group_list.elements == 0 && + select_lex.having == 0 && + select_lex.with_sum_func == 0 && + select_lex.table_list.elements >= 1 && + !(select_lex.options & SELECT_DISTINCT) && + select_lex.select_limit == 0); } /* - Find db.table which will be updated in this select and - underlying ones (except derived tables) + check if command can use VIEW with MERGE algorithm (for top VIEWs) SYNOPSIS - st_select_lex::check_updateable() - db - data base name - table - real table name + st_lex::can_use_merged() + + DESCRIPTION + Only listed here commands can use merge algorithm in top level + SELECT_LEX (for subqueries will be used merge algorithm if + st_lex::can_not_use_merged() is not TRUE). 
RETURN - 1 - found - 0 - OK (table did not found) + FALSE - command can't use merged VIEWs + TRUE - VIEWs with MERGE algorithms can be used */ -bool st_select_lex::check_updateable(char *db, char *table) +bool st_lex::can_use_merged() { - if (find_real_table_in_list(get_table_list(), db, table)) - return 1; + switch (sql_command) + { + case SQLCOM_SELECT: + case SQLCOM_CREATE_TABLE: + case SQLCOM_UPDATE: + case SQLCOM_UPDATE_MULTI: + case SQLCOM_DELETE: + case SQLCOM_DELETE_MULTI: + case SQLCOM_INSERT: + case SQLCOM_INSERT_SELECT: + case SQLCOM_REPLACE: + case SQLCOM_REPLACE_SELECT: + case SQLCOM_LOAD: + return TRUE; + default: + return FALSE; + } +} + +/* + Check if command can't use merged views in any part of command + + SYNOPSIS + st_lex::can_not_use_merged() - return check_updateable_in_subqueries(db, table); + DESCRIPTION + Temporary table algorithm will be used on all SELECT levels for queries + listed here (see also st_lex::can_use_merged()). + + RETURN + FALSE - command can't use merged VIEWs + TRUE - VIEWs with MERGE algorithms can be used +*/ + +bool st_lex::can_not_use_merged() +{ + switch (sql_command) + { + case SQLCOM_CREATE_VIEW: + case SQLCOM_SHOW_CREATE: + /* + SQLCOM_SHOW_FIELDS is necessary to make + information schema tables working correctly with views. 
+ see get_schema_tables_result function + */ + case SQLCOM_SHOW_FIELDS: + return TRUE; + default: + return FALSE; + } } /* - Find db.table which will be updated in underlying subqueries + Detect that we need only table structure of derived table/view - SYNOPSIS - st_select_lex::check_updateable_in_subqueries() - db - data base name - table - real table name + SYNOPSIS + only_view_structure() RETURN - 1 - found - 0 - OK (table did not found) + TRUE yes, we need only structure + FALSE no, we need data */ -bool st_select_lex::check_updateable_in_subqueries(char *db, char *table) +bool st_lex::only_view_structure() { - for (SELECT_LEX_UNIT *un= first_inner_unit(); - un; - un= un->next_unit()) + switch (sql_command) { + case SQLCOM_SHOW_CREATE: + case SQLCOM_SHOW_TABLES: + case SQLCOM_SHOW_FIELDS: + case SQLCOM_REVOKE_ALL: + case SQLCOM_REVOKE: + case SQLCOM_GRANT: + case SQLCOM_CREATE_VIEW: + return TRUE; + default: + return FALSE; + } +} + + +/* + Should Items_ident be printed correctly + + SYNOPSIS + need_correct_ident() + + RETURN + TRUE yes, we need only structure + FALSE no, we need data +*/ + + +bool st_lex::need_correct_ident() +{ + switch(sql_command) { - if (un->first_select()->linkage != DERIVED_TABLE_TYPE && - un->check_updateable(db, table)) - return 1; + case SQLCOM_SHOW_CREATE: + case SQLCOM_SHOW_TABLES: + case SQLCOM_CREATE_VIEW: + return TRUE; + default: + return FALSE; } - return 0; } +/* + Get effective type of CHECK OPTION for given view -void st_select_lex_unit::print(String *str) + SYNOPSIS + get_effective_with_check() + view given view + + NOTE + It have not sense to set CHECK OPTION for SELECT satement or subqueries, + so we do not. 
+ + RETURN + VIEW_CHECK_NONE no need CHECK OPTION + VIEW_CHECK_LOCAL CHECK OPTION LOCAL + VIEW_CHECK_CASCADED CHECK OPTION CASCADED +*/ + +uint8 st_lex::get_effective_with_check(st_table_list *view) { - for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) + if (view->select_lex->master_unit() == &unit && + which_check_option_applicable()) + return (uint8)view->with_check; + return VIEW_CHECK_NONE; +} + + +/* + initialize limit counters + + SYNOPSIS + st_select_lex_unit::set_limit() + values - SELECT_LEX with initial values for counters +*/ + +void st_select_lex_unit::set_limit(SELECT_LEX *sl) +{ + ha_rows select_limit_val; + + DBUG_ASSERT(! thd->stmt_arena->is_stmt_prepare()); + select_limit_val= (ha_rows)(sl->select_limit ? sl->select_limit->val_uint() : + HA_POS_ERROR); + offset_limit_cnt= (ha_rows)(sl->offset_limit ? sl->offset_limit->val_uint() : + ULL(0)); + select_limit_cnt= select_limit_val + offset_limit_cnt; + if (select_limit_cnt < select_limit_val) + select_limit_cnt= HA_POS_ERROR; // no limit +} + + +/* + Unlink the first table from the global table list and the first table from + outer select (lex->select_lex) local list + + SYNOPSIS + unlink_first_table() + link_to_local Set to 1 if caller should link this table to local list + + NOTES + We assume that first tables in both lists is the same table or the local + list is empty. + + RETURN + 0 If 'query_tables' == 0 + unlinked table + In this case link_to_local is set. 
+ +*/ +TABLE_LIST *st_lex::unlink_first_table(bool *link_to_local) +{ + TABLE_LIST *first; + if ((first= query_tables)) { - if (sl != first_select()) + /* + Exclude from global table list + */ + if ((query_tables= query_tables->next_global)) + query_tables->prev_global= &query_tables; + else + query_tables_last= &query_tables; + first->next_global= 0; + + /* + and from local list if it is not empty + */ + if ((*link_to_local= test(select_lex.table_list.first))) { - str->append(" union ", 7); - if (!union_distinct) - str->append("all ", 4); + select_lex.context.table_list= + select_lex.context.first_name_resolution_table= first->next_local; + select_lex.table_list.first= (byte*) (first->next_local); + select_lex.table_list.elements--; //safety + first->next_local= 0; + /* + Ensure that the global list has the same first table as the local + list. + */ + first_lists_tables_same(); } - if (sl->braces) - str->append('('); - sl->print(thd, str); - if (sl->braces) - str->append(')'); } - if (fake_select_lex == global_parameters) + return first; +} + + +/* + Bring first local table of first most outer select to first place in global + table list + + SYNOPSYS + st_lex::first_lists_tables_same() + + NOTES + In many cases (for example, usual INSERT/DELETE/...) the first table of + main SELECT_LEX have special meaning => check that it is the first table + in global list and re-link to be first in the global list if it is + necessary. We need such re-linking only for queries with sub-queries in + the select list, as only in this case tables of sub-queries will go to + the global list first. 
+*/ + +void st_lex::first_lists_tables_same() +{ + TABLE_LIST *first_table= (TABLE_LIST*) select_lex.table_list.first; + if (query_tables != first_table && first_table != 0) { - if (fake_select_lex->order_list.elements) - { - str->append(" order by ", 10); - fake_select_lex->print_order(str, - (ORDER *) fake_select_lex-> - order_list.first); - } - fake_select_lex->print_limit(thd, str); + TABLE_LIST *next; + if (query_tables_last == &first_table->next_global) + query_tables_last= first_table->prev_global; + + if ((next= *first_table->prev_global= first_table->next_global)) + next->prev_global= first_table->prev_global; + /* include in new place */ + first_table->next_global= query_tables; + /* + We are sure that query_tables is not 0, because first_table was not + first table in the global list => we can use + query_tables->prev_global without check of query_tables + */ + query_tables->prev_global= &first_table->next_global; + first_table->prev_global= &query_tables; + query_tables= first_table; } } -void st_select_lex::print_order(String *str, ORDER *order) +/* + Add implicitly used time zone description tables to global table list + (if needed). 
+ + SYNOPSYS + st_lex::add_time_zone_tables_to_query_tables() + thd - pointer to current thread context + + RETURN VALUE + TRUE - error + FALSE - success +*/ + +bool st_lex::add_time_zone_tables_to_query_tables(THD *thd_arg) { - for (; order; order= order->next) + /* We should not add these tables twice */ + if (!time_zone_tables_used) { - (*order->item)->print(str); - if (!order->asc) - str->append(" desc", 5); - if (order->next) - str->append(','); + time_zone_tables_used= my_tz_get_table_list(thd_arg, &query_tables_last); + if (time_zone_tables_used == &fake_time_zone_tables_list) + return TRUE; } + return FALSE; } - -void st_select_lex::print_limit(THD *thd, String *str) +/* + Link table back that was unlinked with unlink_first_table() + + SYNOPSIS + link_first_table_back() + link_to_local do we need link this table to local + + RETURN + global list +*/ + +void st_lex::link_first_table_back(TABLE_LIST *first, + bool link_to_local) { - if (explicit_limit) + if (first) { - str->append(" limit ", 7); - char buff[20]; - // latin1 is good enough for numbers - String st(buff, sizeof(buff), &my_charset_latin1); - st.set((ulonglong)select_limit, &my_charset_latin1); - str->append(st); - if (offset_limit) + if ((first->next_global= query_tables)) + query_tables->prev_global= &first->next_global; + else + query_tables_last= &first->next_global; + query_tables= first; + + if (link_to_local) { - str->append(','); - st.set((ulonglong)select_limit, &my_charset_latin1); - str->append(st); + first->next_local= (TABLE_LIST*) select_lex.table_list.first; + select_lex.context.table_list= first; + select_lex.table_list.first= (byte*) first; + select_lex.table_list.elements++; //safety } } } -st_lex::st_lex() - :result(0) -{} - /* - Unlink first table from global table list and first table from outer select - list (lex->select_lex) + cleanup lex for case when we open table by table for processing SYNOPSIS - unlink_first_table() - tables Global table list - global_first Save first 
global table here - local_first Save first local table here + st_lex::cleanup_after_one_table_open() - NOTES - This function assumes that outer select list is non-empty. + NOTE + This method is mostly responsible for cleaning up of selects lists and + derived tables state. To rollback changes in Query_tables_list one has + to call Query_tables_list::reset_query_tables_list(FALSE). +*/ - RETURN - global list without first table +void st_lex::cleanup_after_one_table_open() +{ + /* + thd->lex->derived_tables & additional units may be set if we open + a view. It is necessary to clear thd->lex->derived_tables flag + to prevent processing of derived tables during next open_and_lock_tables + if next table is a real table and cleanup & remove underlying units + NOTE: all units will be connected to thd->lex->select_lex, because we + have not UNION on most upper level. + */ + if (all_selects_list != &select_lex) + { + derived_tables= 0; + /* cleunup underlying units (units of VIEW) */ + for (SELECT_LEX_UNIT *un= select_lex.first_inner_unit(); + un; + un= un->next_unit()) + un->cleanup(); + /* reduce all selects list to default state */ + all_selects_list= &select_lex; + /* remove underlying units (units of VIEW) subtree */ + select_lex.cut_subtree(); + } + time_zone_tables_used= 0; +} + + +/* + Save current state of Query_tables_list for this LEX, and prepare it + for processing of new statemnt. + SYNOPSIS + reset_n_backup_query_tables_list() + backup Pointer to Query_tables_list instance to be used for backup */ -TABLE_LIST *st_lex::unlink_first_table(TABLE_LIST *tables, - TABLE_LIST **global_first, - TABLE_LIST **local_first) + +void st_lex::reset_n_backup_query_tables_list(Query_tables_list *backup) { - DBUG_ASSERT(select_lex.table_list.first != 0); + backup->set_query_tables_list(this); /* - Save pointers to first elements of global table list and list - of tables used in outer select. It does not harm if these lists - are the same. 
+ We have to perform full initialization here since otherwise we + will damage backed up state. */ - *global_first= tables; - *local_first= (TABLE_LIST*)select_lex.table_list.first; + this->reset_query_tables_list(TRUE); +} + + +/* + Restore state of Query_tables_list for this LEX from backup. - /* Exclude first elements from these lists */ - select_lex.table_list.first= (byte*) (*local_first)->next; - tables= tables->next; - (*global_first)->next= 0; - return tables; + SYNOPSIS + restore_backup_query_tables_list() + backup Pointer to Query_tables_list instance used for backup +*/ + +void st_lex::restore_backup_query_tables_list(Query_tables_list *backup) +{ + this->destroy_query_tables_list(); + this->set_query_tables_list(backup); } /* - Link table which was unlinked with unlink_first_table() back. + Do end-of-prepare fixup for list of tables and their merge-VIEWed tables SYNOPSIS - link_first_table_back() - tables Global table list - global_first Saved first global table - local_first Saved first local table + fix_prepare_info_in_table_list() + thd Thread handle + tbl List of tables to process - RETURN - global list + DESCRIPTION + Perform end-end-of prepare fixup for list of tables, if any of the tables + is a merge-algorithm VIEW, recursively fix up its underlying tables as + well. 
+ +*/ + +static void fix_prepare_info_in_table_list(THD *thd, TABLE_LIST *tbl) +{ + for (; tbl; tbl= tbl->next_local) + { + if (tbl->on_expr) + { + tbl->prep_on_expr= tbl->on_expr; + tbl->on_expr= tbl->on_expr->copy_andor_structure(thd); + } + fix_prepare_info_in_table_list(thd, tbl->merge_underlying_list); + } +} + + +/* + Save WHERE/HAVING/ON clauses and replace them with disposable copies + + SYNOPSIS + st_select_lex::fix_prepare_information + thd thread handler + conds in/out pointer to WHERE condition to be met at execution + having_conds in/out pointer to HAVING condition to be met at execution + + DESCRIPTION + The passed WHERE and HAVING are to be saved for the future executions. + This function saves it, and returns a copy which can be thrashed during + this execution of the statement. By saving/thrashing here we mean only + AND/OR trees. + The function also calls fix_prepare_info_in_table_list that saves all + ON expressions. */ -TABLE_LIST *st_lex::link_first_table_back(TABLE_LIST *tables, - TABLE_LIST *global_first, - TABLE_LIST *local_first) + +void st_select_lex::fix_prepare_information(THD *thd, Item **conds, + Item **having_conds) { - global_first->next= tables; - select_lex.table_list.first= (byte*) local_first; - return global_first; + if (!thd->stmt_arena->is_conventional() && first_execution) + { + first_execution= 0; + if (*conds) + { + prep_where= *conds; + *conds= where= prep_where->copy_andor_structure(thd); + } + if (*having_conds) + { + prep_having= *having_conds; + *having_conds= having= prep_having->copy_andor_structure(thd); + } + fix_prepare_info_in_table_list(thd, (TABLE_LIST *)table_list.first); + } } /* - There are st_select_lex::add_table_to_list & + There are st_select_lex::add_table_to_list & st_select_lex::set_lock_for_tables are in sql_parse.cc - st_select_lex::print is in sql_select.h + st_select_lex::print is in sql_select.cc st_select_lex_unit::prepare, st_select_lex_unit::exec, st_select_lex_unit::cleanup, 
st_select_lex_unit::reinit_exec_mechanism, st_select_lex_unit::change_result are in sql_union.cc */ + diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 7faeb8046c7..de7de0d46e9 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -21,6 +20,10 @@ class Table_ident; class sql_exchange; class LEX_COLUMN; +class sp_head; +class sp_name; +class sp_instr; +class sp_pcontext; /* The following hack is needed because mysql_yacc.cc does not define @@ -33,8 +36,12 @@ class LEX_COLUMN; #define LEX_YYSTYPE void * #else #include "lex_symbol.h" +#if MYSQL_LEX #include "sql_yacc.h" #define LEX_YYSTYPE YYSTYPE * +#else +#define LEX_YYSTYPE void * +#endif #endif /* @@ -49,21 +56,22 @@ enum enum_sql_command { SQLCOM_SHOW_DATABASES, SQLCOM_SHOW_TABLES, SQLCOM_SHOW_FIELDS, SQLCOM_SHOW_KEYS, SQLCOM_SHOW_VARIABLES, SQLCOM_SHOW_LOGS, SQLCOM_SHOW_STATUS, - SQLCOM_SHOW_INNODB_STATUS,SQLCOM_SHOW_NDBCLUSTER_STATUS, + SQLCOM_SHOW_INNODB_STATUS, SQLCOM_SHOW_NDBCLUSTER_STATUS, SQLCOM_SHOW_MUTEX_STATUS, SQLCOM_SHOW_PROCESSLIST, SQLCOM_SHOW_MASTER_STAT, SQLCOM_SHOW_SLAVE_STAT, SQLCOM_SHOW_GRANTS, SQLCOM_SHOW_CREATE, SQLCOM_SHOW_CHARSETS, - SQLCOM_SHOW_COLLATIONS, SQLCOM_SHOW_CREATE_DB, + SQLCOM_SHOW_COLLATIONS, SQLCOM_SHOW_CREATE_DB, SQLCOM_SHOW_TABLE_STATUS, + SQLCOM_SHOW_TRIGGERS, SQLCOM_LOAD,SQLCOM_SET_OPTION,SQLCOM_LOCK_TABLES,SQLCOM_UNLOCK_TABLES, SQLCOM_GRANT, SQLCOM_CHANGE_DB, SQLCOM_CREATE_DB, SQLCOM_DROP_DB, SQLCOM_ALTER_DB, SQLCOM_REPAIR, 
SQLCOM_REPLACE, SQLCOM_REPLACE_SELECT, SQLCOM_CREATE_FUNCTION, SQLCOM_DROP_FUNCTION, - SQLCOM_REVOKE,SQLCOM_OPTIMIZE, SQLCOM_CHECK, + SQLCOM_REVOKE,SQLCOM_OPTIMIZE, SQLCOM_CHECK, SQLCOM_ASSIGN_TO_KEYCACHE, SQLCOM_PRELOAD_KEYS, SQLCOM_FLUSH, SQLCOM_KILL, SQLCOM_ANALYZE, SQLCOM_ROLLBACK, SQLCOM_ROLLBACK_TO_SAVEPOINT, - SQLCOM_COMMIT, SQLCOM_SAVEPOINT, + SQLCOM_COMMIT, SQLCOM_SAVEPOINT, SQLCOM_RELEASE_SAVEPOINT, SQLCOM_SLAVE_START, SQLCOM_SLAVE_STOP, SQLCOM_BEGIN, SQLCOM_LOAD_MASTER_TABLE, SQLCOM_CHANGE_MASTER, SQLCOM_RENAME_TABLE, SQLCOM_BACKUP_TABLE, SQLCOM_RESTORE_TABLE, @@ -74,10 +82,20 @@ enum enum_sql_command { SQLCOM_SHOW_BINLOG_EVENTS, SQLCOM_SHOW_NEW_MASTER, SQLCOM_DO, SQLCOM_SHOW_WARNS, SQLCOM_EMPTY_QUERY, SQLCOM_SHOW_ERRORS, SQLCOM_SHOW_COLUMN_TYPES, SQLCOM_SHOW_STORAGE_ENGINES, SQLCOM_SHOW_PRIVILEGES, - SQLCOM_HELP, SQLCOM_DROP_USER, SQLCOM_REVOKE_ALL, SQLCOM_CHECKSUM, - + SQLCOM_HELP, SQLCOM_CREATE_USER, SQLCOM_DROP_USER, SQLCOM_RENAME_USER, + SQLCOM_REVOKE_ALL, SQLCOM_CHECKSUM, + SQLCOM_CREATE_PROCEDURE, SQLCOM_CREATE_SPFUNCTION, SQLCOM_CALL, + SQLCOM_DROP_PROCEDURE, SQLCOM_ALTER_PROCEDURE,SQLCOM_ALTER_FUNCTION, + SQLCOM_SHOW_CREATE_PROC, SQLCOM_SHOW_CREATE_FUNC, + SQLCOM_SHOW_STATUS_PROC, SQLCOM_SHOW_STATUS_FUNC, SQLCOM_PREPARE, SQLCOM_EXECUTE, SQLCOM_DEALLOCATE_PREPARE, + SQLCOM_CREATE_VIEW, SQLCOM_DROP_VIEW, + SQLCOM_CREATE_TRIGGER, SQLCOM_DROP_TRIGGER, + SQLCOM_XA_START, SQLCOM_XA_END, SQLCOM_XA_PREPARE, + SQLCOM_XA_COMMIT, SQLCOM_XA_ROLLBACK, SQLCOM_XA_RECOVER, + SQLCOM_SHOW_PROC_CODE, SQLCOM_SHOW_FUNC_CODE, /* This should be the last !!! 
*/ + SQLCOM_END }; @@ -85,6 +103,47 @@ enum enum_sql_command { #define DESCRIBE_NORMAL 1 #define DESCRIBE_EXTENDED 2 +enum enum_sp_suid_behaviour +{ + SP_IS_DEFAULT_SUID= 0, + SP_IS_NOT_SUID, + SP_IS_SUID +}; + +enum enum_sp_data_access +{ + SP_DEFAULT_ACCESS= 0, + SP_CONTAINS_SQL, + SP_NO_SQL, + SP_READS_SQL_DATA, + SP_MODIFIES_SQL_DATA +}; + +const LEX_STRING sp_data_access_name[]= +{ + { (char*) STRING_WITH_LEN("") }, + { (char*) STRING_WITH_LEN("CONTAINS SQL") }, + { (char*) STRING_WITH_LEN("NO SQL") }, + { (char*) STRING_WITH_LEN("READS SQL DATA") }, + { (char*) STRING_WITH_LEN("MODIFIES SQL DATA") } +}; + +#define DERIVED_SUBQUERY 1 +#define DERIVED_VIEW 2 + +enum enum_view_create_mode +{ + VIEW_CREATE_NEW, // check that there are not such VIEW/table + VIEW_ALTER, // check that VIEW .frm with such name exists + VIEW_CREATE_OR_REPLACE // check only that there are not such table +}; + +enum enum_drop_mode +{ + DROP_DEFAULT, // mode is not specified + DROP_CASCADE, // CASCADE option + DROP_RESTRICT // RESTRICT option +}; typedef List<Item> List_item; @@ -250,7 +309,15 @@ protected: *link_next, **link_prev; /* list of whole SELECT_LEX */ public: - ulong options; + ulonglong options; + + /* + In sql_cache we store SQL_CACHE flag as specified by user to be + able to restore SELECT statement from internal structures. 
+ */ + enum e_sql_cache { SQL_CACHE_UNSPECIFIED, SQL_NO_CACHE, SQL_CACHE }; + e_sql_cache sql_cache; + /* result of this query can't be cached, bit field, can be : UNCACHEABLE_DEPENDENT @@ -270,7 +337,7 @@ public: } static void *operator new(size_t size, MEM_ROOT *mem_root) { return (void*) alloc_root(mem_root, (uint) size); } - static void operator delete(void *ptr,size_t size) {} + static void operator delete(void *ptr,size_t size) { TRASH(ptr, size); } static void operator delete(void *ptr, MEM_ROOT *mem_root) {} st_select_lex_node(): linkage(UNSPECIFIED_TYPE) {} virtual ~st_select_lex_node() {} @@ -306,6 +373,8 @@ public: friend class st_select_lex_unit; friend bool mysql_new_select(struct st_lex *lex, bool move_down); + friend bool mysql_make_view(THD *thd, File_parser *parser, + TABLE_LIST *table, uint flags); private: void fast_exclude(); }; @@ -315,7 +384,6 @@ typedef class st_select_lex_node SELECT_LEX_NODE; SELECT_LEX_UNIT - unit of selects (UNION, INTERSECT, ...) group SELECT_LEXs */ -struct st_lex; class THD; class select_result; class JOIN; @@ -328,14 +396,15 @@ protected: TABLE *table; /* temporary table using for appending UNION results */ select_result *result; - int res; - ulong found_rows_for_union; + ulonglong found_rows_for_union; + bool saved_error; + +public: bool prepared, // prepare phase already performed for UNION (unit) optimized, // optimize phase already performed for UNION (unit) executed, // already executed cleaned; -public: // list of fields which points to temporary table for union List<Item> item_list; /* @@ -370,17 +439,12 @@ public: Procedure *last_procedure; /* Pointer to procedure, if such exists */ void init_query(); - bool create_total_list(THD *thd, st_lex *lex, TABLE_LIST **result); st_select_lex_unit* master_unit(); st_select_lex* outer_select(); st_select_lex* first_select() { return my_reinterpret_cast(st_select_lex*)(slave); } - st_select_lex* first_select_in_union() - { - return 
my_reinterpret_cast(st_select_lex*)(slave); - } st_select_lex_unit* next_unit() { return my_reinterpret_cast(st_select_lex_unit*)(next); @@ -390,27 +454,27 @@ public: void exclude_tree(); /* UNION methods */ - int prepare(THD *thd, select_result *result, ulong additional_options, - const char *tmp_table_alias); - int exec(); - int cleanup(); + bool prepare(THD *thd, select_result *result, ulong additional_options); + bool exec(); + bool cleanup(); inline void unclean() { cleaned= 0; } void reinit_exec_mechanism(); - bool check_updateable(char *db, char *table); void print(String *str); bool add_fake_select_lex(THD *thd); - ulong init_prepare_fake_select_lex(THD *thd); - int change_result(select_subselect *result, select_subselect *old_result); + void init_prepare_fake_select_lex(THD *thd); inline bool is_prepared() { return prepared; } + bool change_result(select_subselect *result, select_subselect *old_result); + void set_limit(st_select_lex *values); + void set_thd(THD *thd_arg) { thd= thd_arg; } friend void lex_start(THD *thd, uchar *buf, uint length); friend int subselect_union_engine::exec(); -private: - bool create_total_list_n_last_return(THD *thd, st_lex *lex, - TABLE_LIST ***result); + + List<Item> *get_unit_column_types(); }; + typedef class st_select_lex_unit SELECT_LEX_UNIT; /* @@ -419,13 +483,20 @@ typedef class st_select_lex_unit SELECT_LEX_UNIT; class st_select_lex: public st_select_lex_node { public: - char *db, *db1, *table1, *db2, *table2; /* For outer join using .. 
*/ + Name_resolution_context context; + char *db; Item *where, *having; /* WHERE & HAVING clauses */ Item *prep_where; /* saved WHERE clause for prepared statement processing */ Item *prep_having;/* saved HAVING clause for prepared statement processing */ + /* Saved values of the WHERE and HAVING clauses*/ + Item::cond_result cond_value, having_value; + /* point on lex in which it was created, used in view subquery detection */ + st_lex *parent_lex; enum olap_type olap; - SQL_LIST table_list, group_list; /* FROM & GROUP BY clauses */ - List<Item> item_list; /* list of fields & expressions */ + /* FROM clause - points to the beginning of the TABLE_LIST::next_local list. */ + SQL_LIST table_list; + SQL_LIST group_list; /* GROUP BY clause. */ + List<Item> item_list; /* list of fields & expressions */ List<String> interval_list, use_index, *use_index_ptr, ignore_index, *ignore_index_ptr; bool is_item_list_lookup; @@ -436,13 +507,21 @@ public: List<Item_func_match> *ftfunc_list; List<Item_func_match> ftfunc_list_alloc; JOIN *join; /* after JOIN::prepare it is pointer to corresponding JOIN */ - const char *type; /* type of select for EXPLAIN */ + List<TABLE_LIST> top_join_list; /* join list of the top level */ + List<TABLE_LIST> *join_list; /* list for the currently parsed join */ + TABLE_LIST *embedding; /* table embedding to the above list */ + /* + Beginning of the list of leaves in a FROM clause, where the leaves + inlcude all base tables including view tables. The tables are connected + by TABLE_LIST::next_leaf, so leaf_tables points to the left-most leaf. 
+ */ + TABLE_LIST *leaf_tables; + const char *type; /* type of select for EXPLAIN */ SQL_LIST order_list; /* ORDER clause */ List<List_item> expr_list; - List<List_item> when_list; /* WHEN clause (expression) */ SQL_LIST *gorder_list; - ha_rows select_limit, offset_limit; /* LIMIT clause parameters */ + Item *select_limit, *offset_limit; /* LIMIT clause parameters */ // Arrays of pointers to top elements of all_fields list Item **ref_pointer_array; @@ -452,17 +531,37 @@ public: list during split_sum_func */ uint select_n_having_items; - uint cond_count; /* number of arguments of and/or/xor in where/having */ + uint cond_count; /* number of arguments of and/or/xor in where/having/on */ + uint between_count; /* number of between predicates in where/having/on */ + /* + Number of fields used in select list or where clause of current select + and all inner subselects. + */ + uint select_n_where_fields; enum_parsing_place parsing_place; /* where we are parsing expression */ bool with_sum_func; /* sum function indicator */ + /* + PS or SP cond natural joins was alredy processed with permanent + arena and all additional items which we need alredy stored in it + */ + bool conds_processed_with_permanent_arena; ulong table_join_options; uint in_sum_expr; uint select_number; /* number of select (used for EXPLAIN) */ + int nest_level; /* nesting level of select */ + Item_sum *inner_sum_func_list; /* list of sum func in nested selects */ uint with_wild; /* item list contain '*' */ bool braces; /* SELECT ... UNION (SELECT ... 
) <- this braces */ /* TRUE when having fix field called in processing of this SELECT */ bool having_fix_field; + /* List of references to fields referenced from inner selects */ + List<Item_outer_ref> inner_refs_list; + /* Number of Item_sum-derived objects in this SELECT */ + uint n_sum_items; + /* Number of Item_sum-derived objects in children and descendant SELECTs */ + uint n_child_sum_items; + /* explicit LIMIT clause was used */ bool explicit_limit; /* @@ -470,27 +569,46 @@ public: query processing end even if we use temporary table */ bool subquery_in_having; - + /* TRUE <=> this SELECT is correlated w.r.t. some ancestor select */ + bool is_correlated; + /* + This variable is required to ensure proper work of subqueries and + stored procedures. Generally, one should use the states of + Query_arena to determine if it's a statement prepare or first + execution of a stored procedure. However, in case when there was an + error during the first execution of a stored procedure, the SP body + is not expelled from the SP cache. Therefore, a deeply nested + subquery might be left unoptimized. So we need this per-subquery + variable to inidicate the optimization/execution state of every + subquery. Prepared statements work OK in that regard, as in + case of an error during prepare the PS is not created. + */ + bool first_execution; + bool first_cond_optimization; + /* do not wrap view fields with Item_ref */ + bool no_wrap_view_item; + /* exclude this select from check of unique_table() */ + bool exclude_from_table_unique_test; + /* List of fields that aren't under an aggregate function */ + List<Item_field> non_agg_fields; + /* index in the select list of the expression currently being fixed */ + int cur_pos_in_select_list; + + List<udf_func> udf_list; /* udf function calls stack */ /* - SELECT for SELECT command st_select_lex. 
Used to privent scaning - item_list of non-SELECT st_select_lex (no sense find to finding - reference in it (all should be in tables, it is dangerouse due - to order of fix_fields calling for non-SELECTs commands (item list - can be not fix_fieldsd)). This value will be assigned for - primary select (sql_yac.yy) and for any subquery and - UNION SELECT (sql_parse.cc mysql_new_select()) - - - INSERT for primary st_select_lex structure of simple INSERT/REPLACE - (used for name resolution, see Item_fiels & Item_ref fix_fields, - FALSE for INSERT/REPLACE ... SELECT, because it's - st_select_lex->table_list will be preprocessed (first table removed) - before passing to handle_select) - - NOMATTER for other + This is a copy of the original JOIN USING list that comes from + the parser. The parser : + 1. Sets the natural_join of the second TABLE_LIST in the join + and the st_select_lex::prev_join_using. + 2. Makes a parent TABLE_LIST and sets its is_natural_join/ + join_using_fields members. + 3. Uses the wrapper TABLE_LIST as a table in the upper level. + We cannot assign directly to join_using_fields in the parser because + at stage (1.) the parent TABLE_LIST is not constructed yet and + the assignment will override the JOIN USING fields of the lower level + joins on the right. 
*/ - enum {NOMATTER_MODE, SELECT_MODE, INSERT_MODE} resolve_mode; - + List<String> *prev_join_using; void init_query(); void init_select(); @@ -532,6 +650,11 @@ public: List<String> *ignore_index= 0, LEX_STRING *option= 0); TABLE_LIST* get_table_list(); + bool init_nested_join(THD *thd); + TABLE_LIST *end_nested_join(THD *thd); + TABLE_LIST *nest_last_join(THD *thd); + void add_joined_table(TABLE_LIST *table); + TABLE_LIST *convert_right_join(); List<Item>* get_item_list(); List<String>* get_use_index(); List<String>* get_ignore_index(); @@ -543,22 +666,37 @@ public: order_list.first= 0; order_list.next= (byte**) &order_list.first; } - + /* + This method created for reiniting LEX in mysql_admin_table() and can be + used only if you are going remove all SELECT_LEX & units except belonger + to LEX (LEX::unit & LEX::select, for other purposes there are + SELECT_LEX_UNIT::exclude_level & SELECT_LEX_UNIT::exclude_tree + */ + void cut_subtree() { slave= 0; } bool test_limit(); friend void lex_start(THD *thd, uchar *buf, uint length); - st_select_lex() {} + st_select_lex() : n_sum_items(0), n_child_sum_items(0) {} void make_empty_select() { init_query(); init_select(); } bool setup_ref_array(THD *thd, uint order_group_num); - bool check_updateable(char *db, char *table); - bool check_updateable_in_subqueries(char *db, char *table); void print(THD *thd, String *str); static void print_order(String *str, ORDER *order); void print_limit(THD *thd, String *str); + void fix_prepare_information(THD *thd, Item **conds, Item **having_conds); + /* + Destroy the used execution plan (JOIN) of this subtree (this + SELECT_LEX and all nested SELECT_LEXes and SELECT_LEX_UNITs). + */ + bool cleanup(); + /* + Recursively cleanup the join of this select lex and of all nested + select lexes. 
+ */ + void cleanup_all_joins(bool full); }; typedef class st_select_lex SELECT_LEX; @@ -570,6 +708,10 @@ typedef class st_select_lex SELECT_LEX; #define ALTER_RENAME 32 #define ALTER_ORDER 64 #define ALTER_OPTIONS 128 +#define ALTER_CHANGE_COLUMN_DEFAULT 256 +#define ALTER_KEYS_ONOFF 512 +#define ALTER_CONVERT 1024 +#define ALTER_FORCE 2048 /** @brief Parsing data for CREATE or ALTER TABLE. @@ -588,14 +730,13 @@ public: uint flags; enum enum_enable_or_disable keys_onoff; enum tablespace_op_type tablespace_op; - bool is_simple; Alter_info() : flags(0), keys_onoff(LEAVE_AS_IS), - tablespace_op(NO_TABLESPACE_OP), - is_simple(1) + tablespace_op(NO_TABLESPACE_OP) {} + void reset() { drop_list.empty(); @@ -605,7 +746,6 @@ public: flags= 0; keys_onoff= LEAVE_AS_IS; tablespace_op= NO_TABLESPACE_OP; - is_simple= 1; } /** Construct a copy of this object to be used for mysql_alter_table @@ -626,10 +766,141 @@ private: Alter_info(const Alter_info &rhs); // not implemented }; +struct st_sp_chistics +{ + LEX_STRING comment; + enum enum_sp_suid_behaviour suid; + bool detistic; + enum enum_sp_data_access daccess; +}; + + +struct st_trg_chistics +{ + enum trg_action_time_type action_time; + enum trg_event_type event; +}; + +extern sys_var_long_ptr trg_new_row_fake_var; + +enum xa_option_words {XA_NONE, XA_JOIN, XA_RESUME, XA_ONE_PHASE, + XA_SUSPEND, XA_FOR_MIGRATE}; + + +/* + Class representing list of all tables used by statement. + It also contains information about stored functions used by statement + since during its execution we may have to add all tables used by its + stored functions/triggers to this list in order to pre-open and lock + them. + + Also used by st_lex::reset_n_backup/restore_backup_query_tables_list() + methods to save and restore this information. +*/ + +class Query_tables_list +{ +public: + /* Global list of all tables used by this statement */ + TABLE_LIST *query_tables; + /* Pointer to next_global member of last element in the previous list. 
*/ + TABLE_LIST **query_tables_last; + /* + If non-0 then indicates that query requires prelocking and points to + next_global member of last own element in query table list (i.e. last + table which was not added to it as part of preparation to prelocking). + 0 - indicates that this query does not need prelocking. + */ + TABLE_LIST **query_tables_own_last; + /* + Set of stored routines called by statement. + (Note that we use lazy-initialization for this hash). + */ + enum { START_SROUTINES_HASH_SIZE= 16 }; + HASH sroutines; + /* + List linking elements of 'sroutines' set. Allows you to add new elements + to this set as you iterate through the list of existing elements. + 'sroutines_list_own_last' is pointer to ::next member of last element of + this list which represents routine which is explicitly used by query. + 'sroutines_list_own_elements' number of explicitly used routines. + We use these two members for restoring of 'sroutines_list' to the state + in which it was right after query parsing. + */ + SQL_LIST sroutines_list; + byte **sroutines_list_own_last; + uint sroutines_list_own_elements; + + /* + These constructor and destructor serve for creation/destruction + of Query_tables_list instances which are used as backup storage. + */ + Query_tables_list() {} + ~Query_tables_list() {} + + /* Initializes (or resets) Query_tables_list object for "real" use. */ + void reset_query_tables_list(bool init); + void destroy_query_tables_list(); + void set_query_tables_list(Query_tables_list *state) + { + *this= *state; + } + + /* + Direct addition to the list of query tables. + If you are using this function, you must ensure that the table + object, in particular table->db member, is initialized. 
+ */ + void add_to_query_tables(TABLE_LIST *table) + { + *(table->prev_global= query_tables_last)= table; + query_tables_last= &table->next_global; + } + bool requires_prelocking() + { + return test(query_tables_own_last); + } + void mark_as_requiring_prelocking(TABLE_LIST **tables_own_last) + { + query_tables_own_last= tables_own_last; + } + /* Return pointer to first not-own table in query-tables or 0 */ + TABLE_LIST* first_not_own_table() + { + return ( query_tables_own_last ? *query_tables_own_last : 0); + } + void chop_off_not_own_tables() + { + if (query_tables_own_last) + { + *query_tables_own_last= 0; + query_tables_last= query_tables_own_last; + query_tables_own_last= 0; + } + } +}; + + +/* + st_parsing_options contains the flags for constructions that are + allowed in the current statement. +*/ + +struct st_parsing_options +{ + bool allows_variable; + bool allows_select_into; + bool allows_select_procedure; + bool allows_derived; + + st_parsing_options() { reset(); } + void reset(); +}; + /* The state of the lex parsing. 
This is saved in the THD struct */ -typedef struct st_lex +typedef struct st_lex : public Query_tables_list { uint yylineno,yytoklen; /* Simulate lex */ LEX_YYSTYPE yylval; @@ -639,23 +910,38 @@ typedef struct st_lex SELECT_LEX *current_select; /* list of all SELECT_LEX */ SELECT_LEX *all_selects_list; + uchar *buf; /* The beginning of string, used by SPs */ uchar *ptr,*tok_start,*tok_end,*end_of_query; + + /* The values of tok_start/tok_end as they were one call of MYSQLlex before */ + uchar *tok_start_prev, *tok_end_prev; + char *length,*dec,*change,*name; char *help_arg; char *backup_dir; /* For RESTORE/BACKUP */ char* to_log; /* For PURGE MASTER LOGS TO */ - time_t purge_time; /* For PURGE MASTER LOGS BEFORE */ char* x509_subject,*x509_issuer,*ssl_cipher; - char* found_colon; /* For multi queries - next query */ + char* found_semicolon; /* For multi queries - next query */ String *wild; sql_exchange *exchange; select_result *result; Item *default_value, *on_update_value; - LEX_STRING *comment, name_and_length; + LEX_STRING comment, ident; LEX_USER *grant_user; + XID *xid; gptr yacc_yyss,yacc_yyvs; THD *thd; CHARSET_INFO *charset, *underscore_charset; + /* store original leaf_tables for INSERT SELECT and PS/SP */ + TABLE_LIST *leaf_tables_insert; + /* Position (first character index) of SELECT of CREATE VIEW statement */ + uint create_view_select_start; + + /* + The definer of the object being created (view, trigger, stored routine). + I.e. the value of DEFINER clause. + */ + LEX_USER *definer; List<key_part_spec> col_list; List<key_part_spec> ref_list; @@ -666,53 +952,166 @@ typedef struct st_lex List<List_item> many_values; List<set_var_base> var_list; List<Item_param> param_list; - SQL_LIST proc_list, auxilliary_table_list, save_list; + List<LEX_STRING> view_list; // view list (list of field names in view) + /* + A stack of name resolution contexts for the query. 
This stack is used + at parse time to set local name resolution contexts for various parts + of a query. For example, in a JOIN ... ON (some_condition) clause the + Items in 'some_condition' must be resolved only against the operands + of the the join, and not against the whole clause. Similarly, Items in + subqueries should be resolved against the subqueries (and outer queries). + The stack is used in the following way: when the parser detects that + all Items in some clause need a local context, it creates a new context + and pushes it on the stack. All newly created Items always store the + top-most context in the stack. Once the parser leaves the clause that + required a local context, the parser pops the top-most context. + */ + List<Name_resolution_context> context_stack; + + SQL_LIST proc_list, auxiliary_table_list, save_list; create_field *last_field; - char *savepoint_name; // Transaction savepoint id + Item_sum *in_sum_func; udf_func udf; HA_CHECK_OPT check_opt; // check/repair options HA_CREATE_INFO create_info; LEX_MASTER_INFO mi; // used by CHANGE MASTER USER_RESOURCES mqh; - ulong thread_id,type; - enum_sql_command sql_command; - thr_lock_type lock_option, multi_lock_option; + ulong type; + /* + This variable is used in post-parse stage to declare that sum-functions, + or functions which have sense only if GROUP BY is present, are allowed. + For example in a query + SELECT ... FROM ...WHERE MIN(i) == 1 GROUP BY ... HAVING MIN(i) > 2 + MIN(i) in the WHERE clause is not allowed in the opposite to MIN(i) + in the HAVING clause. Due to possible nesting of select construct + the variable can contain 0 or 1 for each nest level. 
+ */ + nesting_map allow_sum_func; + enum_sql_command sql_command, orig_sql_command; + thr_lock_type lock_option; enum SSL_type ssl_type; /* defined in violite.h */ enum my_lex_states next_state; enum enum_duplicates duplicates; enum enum_tx_isolation tx_isolation; enum enum_ha_read_modes ha_read_mode; - enum ha_rkey_function ha_rkey_mode; + union { + enum ha_rkey_function ha_rkey_mode; + enum xa_option_words xa_opt; + }; enum enum_var_type option_type; + enum enum_view_create_mode create_view_mode; + enum enum_drop_mode drop_mode; uint uint_geom_type; uint grant, grant_tot_col, which_columns; uint fk_delete_opt, fk_update_opt, fk_match_option; uint slave_thd_opt, start_transaction_opt; + int nest_level; + /* + In LEX representing update which were transformed to multi-update + stores total number of tables. For LEX representing multi-delete + holds number of tables from which we will delete records. + */ + uint table_count; uint8 describe; + /* + A flag that indicates what kinds of derived tables are present in the + query (0 if no derived tables, otherwise a combination of flags + DERIVED_SUBQUERY and DERIVED_VIEW). + */ + uint8 derived_tables; + uint8 create_view_algorithm; + uint8 create_view_check; bool drop_if_exists, drop_temporary, local_file, one_shot_set; bool in_comment, ignore_space, verbose, no_write_to_binlog; - bool derived_tables; + bool tx_chain, tx_release; + /* + Special JOIN::prepare mode: changing of query is prohibited. + When creating a view, we need to just check its syntax omitting + any optimizations: afterwards definition of the view will be + reconstructed by means of ::print() methods and written to + to an .frm file. We need this definition to stay untouched. + */ + bool view_prepare_mode; + /* + TRUE if we're parsing a prepared statement: in this mode + we should allow placeholders and disallow multistatements. 
+ */ + bool stmt_prepare_mode; bool safe_to_cache_query; bool subqueries, ignore; + st_parsing_options parsing_options; Alter_info alter_info; /* Prepared statements SQL syntax:*/ LEX_STRING prepared_stmt_name; /* Statement name (in all queries) */ - /* + /* Prepared statement query text or name of variable that holds the prepared statement (in PREPARE ... queries) */ - LEX_STRING prepared_stmt_code; + LEX_STRING prepared_stmt_code; /* If true, prepared_stmt_code is a name of variable that holds the query */ bool prepared_stmt_code_is_varref; /* Names of user variables holding parameters (in EXECUTE) */ - List<LEX_STRING> prepared_stmt_params; + List<LEX_STRING> prepared_stmt_params; /* - If points to fake_time_zone_tables_list indicates that time zone - tables are implicitly used by statement, also is used for holding - list of those tables after they are opened. + Points to part of global table list which contains time zone tables + implicitly used by the statement. */ TABLE_LIST *time_zone_tables_used; + sp_head *sphead; + sp_name *spname; + bool sp_lex_in_use; /* Keep track on lex usage in SPs for error handling */ + bool all_privileges; + sp_pcontext *spcont; + + st_sp_chistics sp_chistics; + bool only_view; /* used for SHOW CREATE TABLE/VIEW */ + /* + field_list was created for view and should be removed before PS/SP + rexecuton + */ + bool empty_field_list_on_rset; + /* + view created to be run from definer (standard behaviour) + */ + uint8 create_view_suid; + /* Characterstics of trigger being created */ + st_trg_chistics trg_chistics; + /* + List of all items (Item_trigger_field objects) representing fields in + old/new version of row in trigger. We use this list for checking whenever + all such fields are valid at trigger creation time and for binding these + fields to TABLE object at table open (altough for latter pointer to table + being opened is probably enough). 
+ */ + SQL_LIST trg_table_fields; + + /* + stmt_definition_begin is intended to point to the next word after + DEFINER-clause in the following statements: + - CREATE TRIGGER (points to "TRIGGER"); + - CREATE PROCEDURE (points to "PROCEDURE"); + - CREATE FUNCTION (points to "FUNCTION" or "AGGREGATE"); + + This pointer is required to add possibly omitted DEFINER-clause to the + DDL-statement before dumping it to the binlog. + */ + const char *stmt_definition_begin; + + /* + Pointers to part of LOAD DATA statement that should be rewritten + during replication ("LOCAL 'filename' REPLACE INTO" part). + */ + uchar *fname_start, *fname_end; + + bool escape_used; + st_lex(); + + virtual ~st_lex() + { + destroy_query_tables_list(); + } + inline void uncacheable(uint8 cause) { safe_to_cache_query= 0; @@ -732,21 +1131,87 @@ typedef struct st_lex un->uncacheable|= cause; } } - TABLE_LIST *unlink_first_table(TABLE_LIST *tables, - TABLE_LIST **global_first, - TABLE_LIST **local_first); - TABLE_LIST *link_first_table_back(TABLE_LIST *tables, - TABLE_LIST *global_first, - TABLE_LIST *local_first); -} LEX; + TABLE_LIST *unlink_first_table(bool *link_to_local); + void link_first_table_back(TABLE_LIST *first, bool link_to_local); + void first_lists_tables_same(); + bool add_time_zone_tables_to_query_tables(THD *thd); + + bool can_be_merged(); + bool can_use_merged(); + bool can_not_use_merged(); + bool only_view_structure(); + bool need_correct_ident(); + uint8 get_effective_with_check(st_table_list *view); + /* + Is this update command where 'WHITH CHECK OPTION' clause is important -extern TABLE_LIST fake_time_zone_tables_list; + SYNOPSIS + st_lex::which_check_option_applicable() -void lex_init(void); -void lex_free(void); -void lex_start(THD *thd, uchar *buf,uint length); -void lex_end(LEX *lex); + RETURN + TRUE have to take 'WHITH CHECK OPTION' clause into account + FALSE 'WHITH CHECK OPTION' clause do not need + */ + inline bool which_check_option_applicable() + { + switch 
(sql_command) { + case SQLCOM_UPDATE: + case SQLCOM_UPDATE_MULTI: + case SQLCOM_INSERT: + case SQLCOM_INSERT_SELECT: + case SQLCOM_REPLACE: + case SQLCOM_REPLACE_SELECT: + case SQLCOM_LOAD: + return TRUE; + default: + return FALSE; + } + } -extern pthread_key(LEX*,THR_LEX); + void cleanup_after_one_table_open(); + + bool push_context(Name_resolution_context *context) + { + return context_stack.push_front(context); + } + + void pop_context() + { + context_stack.pop(); + } + + Name_resolution_context *current_context() + { + return context_stack.head(); + } + /* + Restore the LEX and THD in case of a parse error. + */ + static void cleanup_lex_after_parse_error(THD *thd); + + void reset_n_backup_query_tables_list(Query_tables_list *backup); + void restore_backup_query_tables_list(Query_tables_list *backup); +} LEX; + +struct st_lex_local: public st_lex +{ + static void *operator new(size_t size) + { + return (void*) sql_alloc((uint) size); + } + static void *operator new(size_t size, MEM_ROOT *mem_root) + { + return (void*) alloc_root(mem_root, (uint) size); + } + static void operator delete(void *ptr,size_t size) + { TRASH(ptr, size); } + static void operator delete(void *ptr, MEM_ROOT *mem_root) + { /* Never called */ } +}; -#define current_lex (current_thd->lex) +extern void lex_init(void); +extern void lex_free(void); +extern void lex_start(THD *thd, uchar *buf,uint length); +extern void lex_end(LEX *lex); +extern int MYSQLlex(void *arg, void *yythd); +extern uchar *skip_rear_comments(uchar *begin, uchar *end); diff --git a/sql/sql_list.cc b/sql/sql_list.cc index d57a7dfe4e3..01ab9b91424 100644 --- a/sql/sql_list.cc +++ b/sql/sql_list.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001, 2003, 2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the 
License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/sql_list.h b/sql/sql_list.h index 2075361a398..7913acfd086 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -21,12 +20,6 @@ /* mysql standard class memory allocator */ -#ifdef SAFEMALLOC -#define TRASH(XX,YY) bfill((XX), (YY), 0x8F) -#else -#define TRASH(XX,YY) /* no-op */ -#endif - class Sql_alloc { public: @@ -38,6 +31,8 @@ public: { return (void*) sql_alloc((uint) size); } + static void *operator new[](size_t size, MEM_ROOT *mem_root) throw () + { return (void*) alloc_root(mem_root, (uint) size); } static void *operator new(size_t size, MEM_ROOT *mem_root) throw () { return (void*) alloc_root(mem_root, (uint) size); } static void operator delete(void *ptr, size_t size) { TRASH(ptr, size); } @@ -161,6 +156,16 @@ public: } return 1; } + inline bool push_back(void *info, MEM_ROOT *mem_root) + { + if (((*last)=new (mem_root) list_node(info, &end_of_list))) + { + last= &(*last)->next; + elements++; + return 0; + } + return 1; + } inline bool push_front(void *info) { list_node *node=new list_node(info,first); @@ -177,32 +182,12 @@ public: void remove(list_node **prev) { list_node *node=(*prev)->next; - if (&(*prev)->next == last) - { - /* - We're removing the last element from the list. Adjust "last" to point - to the previous element. 
- The other way to fix this would be to change this function to - remove_next() and have base_list_iterator save ptr to previous node - (one extra assignment in iterator++) but as the remove() of the last - element isn't a common operation it's faster to just walk through the - list from the beginning here. - */ - list_node *cur= first; - if (cur == *prev) - { - last= &first; - } - else - { - while (cur->next != *prev) - cur= cur->next; - last= &(cur->next); - } - } + if (!--elements) + last= &first; + else if (last == &(*prev)->next) + last= prev; delete *prev; *prev=node; - elements--; } inline void concat(base_list *list) { @@ -222,6 +207,30 @@ public: last= &first; return tmp->info; } + inline void disjoin(base_list *list) + { + list_node **prev= &first; + list_node *node= first; + list_node *list_first= list->first; + elements=0; + while (node && node != list_first) + { + prev= &node->next; + node= node->next; + elements++; + } + *prev= *last; + last= prev; + } + inline void prepand(base_list *list) + { + if (!list->is_empty()) + { + *list->last= first; + first= list->first; + elements+= list->elements; + } + } inline list_node* last_node() { return *last; } inline list_node* first_node() { return first;} inline void *head() { return first->info; } @@ -304,10 +313,21 @@ protected: ls.elements= elm; } public: - base_list_iterator(base_list &list_par) - :list(&list_par), el(&list_par.first), prev(0), current(0) + base_list_iterator() + :list(0), el(0), prev(0), current(0) {} + base_list_iterator(base_list &list_par) + { init(list_par); } + + inline void init(base_list &list_par) + { + list= &list_par; + el= &list_par.first; + prev= 0; + current= 0; + } + inline void *next(void) { prev=el; @@ -378,10 +398,15 @@ public: inline List(const List<T> &tmp, MEM_ROOT *mem_root) : base_list(tmp, mem_root) {} inline bool push_back(T *a) { return base_list::push_back(a); } + inline bool push_back(T *a, MEM_ROOT *mem_root) + { return base_list::push_back(a, mem_root); } 
inline bool push_front(T *a) { return base_list::push_front(a); } inline T* head() {return (T*) base_list::head(); } inline T** head_ref() {return (T**) base_list::head_ref(); } inline T* pop() {return (T*) base_list::pop(); } + inline void concat(List<T> *list) { base_list::concat(list); } + inline void disjoin(List<T> *list) { base_list::disjoin(list); } + inline void prepand(List<T> *list) { base_list::prepand(list); } void delete_elements(void) { list_node *element,*next; @@ -399,9 +424,13 @@ template <class T> class List_iterator :public base_list_iterator { public: List_iterator(List<T> &a) : base_list_iterator(a) {} + List_iterator() : base_list_iterator() {} + inline void init(List<T> &a) { base_list_iterator::init(a); } inline T* operator++(int) { return (T*) base_list_iterator::next(); } inline T *replace(T *a) { return (T*) base_list_iterator::replace(a); } inline T *replace(List<T> &a) { return (T*) base_list_iterator::replace(a); } + inline void rewind(void) { base_list_iterator::rewind(); } + inline void remove() { base_list_iterator::remove(); } inline void after(T *a) { base_list_iterator::after(a); } inline T** ref(void) { return (T**) base_list_iterator::ref(); } }; @@ -418,6 +447,8 @@ protected: public: inline List_iterator_fast(List<T> &a) : base_list_iterator(a) {} + inline List_iterator_fast() : base_list_iterator() {} + inline void init(List<T> &a) { base_list_iterator::init(a); } inline T* operator++(int) { return (T*) base_list_iterator::next_fast(); } inline void rewind(void) { base_list_iterator::rewind(); } void sublist(List<T> &list_arg, uint el_arg) @@ -461,9 +492,14 @@ struct ilink template <class T> class I_List_iterator; +/* + WARNING: copy constructor of this class does not create a usable + copy, as its members may point at each other. 
+*/ + class base_ilist { - public: +public: struct ilink *first,last; inline void empty() { first= &last; last.prev= &first; } base_ilist() { empty(); } diff --git a/sql/sql_load.cc b/sql/sql_load.cc index 10124e5f5ff..7a535381c01 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -21,6 +20,8 @@ #include <my_dir.h> #include <m_ctype.h> #include "sql_repl.h" +#include "sp_head.h" +#include "sql_trigger.h" class READ_INFO { File file; @@ -71,17 +72,48 @@ public: void set_io_cache_arg(void* arg) { cache.arg = arg; } }; -static int read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table, - List<Item> &fields, READ_INFO &read_info, - ulong skip_lines); -static int read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, - List<Item> &fields, READ_INFO &read_info, - String &enclosed, ulong skip_lines); - -int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, - List<Item> &fields, enum enum_duplicates handle_duplicates, - bool ignore, - bool read_file_from_client,thr_lock_type lock_type) +static int read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, + List<Item> &fields_vars, List<Item> &set_fields, + List<Item> &set_values, READ_INFO &read_info, + ulong skip_lines, + bool ignore_check_option_errors); +static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, + List<Item> &fields_vars, List<Item> &set_fields, + List<Item> &set_values, READ_INFO &read_info, + String &enclosed, ulong 
skip_lines, + bool ignore_check_option_errors); +#ifndef EMBEDDED_LIBRARY +static bool write_execute_load_query_log_event(THD *thd, + bool duplicates, bool ignore, + bool transactional_table); +#endif /* EMBEDDED_LIBRARY */ + +/* + Execute LOAD DATA query + + SYNOPSYS + mysql_load() + thd - current thread + ex - sql_exchange object representing source file and its parsing rules + table_list - list of tables to which we are loading data + fields_vars - list of fields and variables to which we read + data from file + set_fields - list of fields mentioned in set clause + set_values - expressions to assign to fields in previous list + handle_duplicates - indicates whenever we should emit error or + replace row if we will meet duplicates. + ignore - - indicates whenever we should ignore duplicates + read_file_from_client - is this LOAD DATA LOCAL ? + + RETURN VALUES + TRUE - error / FALSE - success +*/ + +bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, + List<Item> &fields_vars, List<Item> &set_fields, + List<Item> &set_values, + enum enum_duplicates handle_duplicates, bool ignore, + bool read_file_from_client) { char name[FN_REFLEN]; File file; @@ -89,6 +121,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, int error; String *field_term=ex->field_term,*escaped=ex->escaped; String *enclosed=ex->enclosed; + Item *unused_conds= 0; bool is_fifo=0; #ifndef EMBEDDED_LIBRARY LOAD_FILE_INFO lf_info; @@ -100,8 +133,8 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, loaded is located */ char *tdb= thd->db ? 
thd->db : db; // Result is never null - bool transactional_table, log_delayed; ulong skip_lines= ex->skip_lines; + bool transactional_table; DBUG_ENTER("mysql_load"); #ifdef EMBEDDED_LIBRARY @@ -112,60 +145,116 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, { my_message(ER_WRONG_FIELD_TERMINATORS,ER(ER_WRONG_FIELD_TERMINATORS), MYF(0)); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); + } + if (open_and_lock_tables(thd, table_list)) + DBUG_RETURN(TRUE); + if (setup_tables_and_check_access(thd, &thd->lex->select_lex.context, + &thd->lex->select_lex.top_join_list, + table_list, &unused_conds, + &thd->lex->select_lex.leaf_tables, FALSE, + INSERT_ACL | UPDATE_ACL, + INSERT_ACL | UPDATE_ACL)) + DBUG_RETURN(-1); + if (!table_list->table || // do not suport join view + !table_list->updatable || // and derived tables + check_key_in_view(thd, table_list)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "LOAD"); + DBUG_RETURN(TRUE); + } + if (table_list->prepare_where(thd, 0, TRUE) || + table_list->prepare_check_option(thd)) + { + DBUG_RETURN(TRUE); } /* - This needs to be done before external_lock + Let us emit an error if we are loading data to table which is used + in subselect in SET clause like we do it for INSERT. + + The main thing to fix to remove this restriction is to ensure that the + table is marked to be 'used for insert' in which case we should never + mark this table as as 'const table' (ie, one that has only one row). 
*/ - ha_enable_transaction(thd, FALSE); - if (!(table = open_ltable(thd,table_list,lock_type))) - DBUG_RETURN(-1); + if (unique_table(thd, table_list, table_list->next_global, 0)) + { + my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name); + DBUG_RETURN(TRUE); + } + + table= table_list->table; transactional_table= table->file->has_transactions(); - log_delayed= (transactional_table || table->tmp_table); - if (!fields.elements) + if (!fields_vars.elements) { Field **field; for (field=table->field; *field ; field++) - fields.push_back(new Item_field(*field)); + fields_vars.push_back(new Item_field(*field)); + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + /* + Let us also prepare SET clause, altough it is probably empty + in this case. + */ + if (setup_fields(thd, 0, set_fields, 1, 0, 0) || + setup_fields(thd, 0, set_values, 1, 0, 0)) + DBUG_RETURN(TRUE); } else { // Part field list - thd->dupp_field=0; - if (setup_tables(table_list) || - setup_fields(thd, 0, table_list, fields, 1, 0, 0)) - DBUG_RETURN(-1); - if (thd->dupp_field) - { - my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dupp_field->field_name); - DBUG_RETURN(-1); - } + /* TODO: use this conds for 'WITH CHECK OPTIONS' */ + if (setup_fields(thd, 0, fields_vars, 1, 0, 0) || + setup_fields(thd, 0, set_fields, 1, 0, 0) || + check_that_all_fields_are_given_values(thd, table, table_list)) + DBUG_RETURN(TRUE); + /* + Check whenever TIMESTAMP field with auto-set feature specified + explicitly. + */ + if (table->timestamp_field && + table->timestamp_field->query_id == thd->query_id) + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + /* + Fix the expressions in SET clause. This should be done after + check_that_all_fields_are_given_values() and setting use_timestamp + since it may update query_id for some fields. 
+ */ + if (setup_fields(thd, 0, set_values, 1, 0, 0)) + DBUG_RETURN(TRUE); } + mark_fields_used_by_triggers_for_insert_stmt(thd, table, handle_duplicates); + uint tot_length=0; - bool use_blobs=0,use_timestamp=0; - List_iterator_fast<Item> it(fields); + bool use_blobs= 0, use_vars= 0; + List_iterator_fast<Item> it(fields_vars); + Item *item; - Item_field *field; - while ((field=(Item_field*) it++)) + while ((item= it++)) { - if (field->field->flags & BLOB_FLAG) + if (item->type() == Item::FIELD_ITEM) { - use_blobs=1; - tot_length+=256; // Will be extended if needed + Field *field= ((Item_field*)item)->field; + if (field->flags & BLOB_FLAG) + { + use_blobs= 1; + tot_length+= 256; // Will be extended if needed + } + else + tot_length+= field->field_length; } else - tot_length+=field->field->field_length; - if (!field_term->length() && !(field->field->flags & NOT_NULL_FLAG)) - field->field->set_notnull(); - if (field->field == table->timestamp_field) - use_timestamp=1; + use_vars= 1; } if (use_blobs && !ex->line_term->length() && !field_term->length()) { my_message(ER_BLOBS_AND_NO_TERMINATED,ER(ER_BLOBS_AND_NO_TERMINATED), MYF(0)); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); + } + if (use_vars && !field_term->length() && !enclosed->length()) + { + my_error(ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR, MYF(0)); + DBUG_RETURN(TRUE); } /* We can't give an error in the middle when using LOCAL files */ @@ -197,7 +286,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, #if !defined(__WIN__) && !defined(OS2) && ! 
defined(__NETWARE__) MY_STAT stat_info; if (!my_stat(name,&stat_info,MYF(MY_WME))) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); // if we are not in slave thread, the file must be: if (!thd->slave_thread && @@ -208,15 +297,24 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, ((stat_info.st_mode & S_IFREG) == S_IFREG || (stat_info.st_mode & S_IFIFO) == S_IFIFO))) { - my_error(ER_TEXTFILE_NOT_READABLE,MYF(0),name); - DBUG_RETURN(-1); + my_error(ER_TEXTFILE_NOT_READABLE, MYF(0), name); + DBUG_RETURN(TRUE); } if ((stat_info.st_mode & S_IFIFO) == S_IFIFO) is_fifo = 1; #endif + + if (opt_secure_file_priv && + strncmp(opt_secure_file_priv, name, strlen(opt_secure_file_priv))) + { + /* Read only allowed from within dir specified by secure_file_priv */ + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv"); + DBUG_RETURN(TRUE); + } + } if ((file=my_open(name,O_RDONLY,MYF(MY_WME))) < 0) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } COPY_INFO info; @@ -225,35 +323,28 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, info.handle_duplicates=handle_duplicates; info.escape_char=escaped->length() ? (*escaped)[0] : INT_MAX; - READ_INFO read_info(file,tot_length,thd->variables.collation_database, + READ_INFO read_info(file,tot_length, + ex->cs ? 
ex->cs : thd->variables.collation_database, *field_term,*ex->line_start, *ex->line_term, *enclosed, info.escape_char, read_file_from_client, is_fifo); if (read_info.error) { if (file >= 0) my_close(file,MYF(0)); // no files in net reading - DBUG_RETURN(-1); // Can't allocate buffers + DBUG_RETURN(TRUE); // Can't allocate buffers } #ifndef EMBEDDED_LIBRARY if (mysql_bin_log.is_open()) { lf_info.thd = thd; - lf_info.ex = ex; - lf_info.db = db; - lf_info.table_name = table_list->real_name; - lf_info.fields = &fields; - lf_info.ignore= ignore; - lf_info.handle_dup = handle_duplicates; lf_info.wrote_create_file = 0; lf_info.last_pos_in_file = HA_POS_ERROR; - lf_info.log_delayed= log_delayed; + lf_info.log_delayed= transactional_table; read_info.set_io_cache_arg((void*) &lf_info); } #endif /*!EMBEDDED_LIBRARY*/ - restore_record(table,default_values); - thd->count_cuted_fields= CHECK_FIELD_WARN; /* calc cuted fields */ thd->cuted_fields=0L; /* Skip lines if there is a line terminator */ @@ -270,30 +361,45 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, if (!(error=test(read_info.error))) { - if (use_timestamp) - table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; table->next_number_field=table->found_next_number_field; if (ignore || handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); - table->file->start_bulk_insert((ha_rows) 0); + if (handle_duplicates == DUP_REPLACE) + { + if (!table->triggers || + !table->triggers->has_delete_triggers()) + table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + } + if (!thd->prelocked_mode) + table->file->start_bulk_insert((ha_rows) 0); table->copy_blobs=1; + + thd->no_trans_update= 0; + thd->abort_on_warning= (!ignore && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + if (!field_term->length() && !enclosed->length()) - error=read_fixed_length(thd,info,table,fields,read_info, - skip_lines); + error= 
read_fixed_length(thd, info, table_list, fields_vars, + set_fields, set_values, read_info, + skip_lines, ignore); else - error=read_sep_field(thd,info,table,fields,read_info,*enclosed, - skip_lines); - if (table->file->end_bulk_insert() && !error) + error= read_sep_field(thd, info, table_list, fields_vars, + set_fields, set_values, read_info, + *enclosed, skip_lines, ignore); + if (!thd->prelocked_mode && table->file->end_bulk_insert() && !error) { table->file->print_error(my_errno, MYF(0)); error= 1; } table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); + table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE); table->next_number_field=0; } - ha_enable_transaction(thd, TRUE); if (file >= 0) my_close(file,MYF(0)); free_blobs(table); /* if pack_blob was used */ @@ -343,8 +449,14 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, /* If the file was not empty, wrote_create_file is true */ if (lf_info.wrote_create_file) { - Delete_file_log_event d(thd, db, log_delayed); - mysql_bin_log.write(&d); + if ((info.copied || info.deleted) && !transactional_table) + write_execute_load_query_log_event(thd, handle_duplicates, + ignore, transactional_table); + else + { + Delete_file_log_event d(thd, db, transactional_table); + mysql_bin_log.write(&d); + } } } #endif /*!EMBEDDED_LIBRARY*/ @@ -353,30 +465,25 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, } sprintf(name, ER(ER_LOAD_INFO), (ulong) info.records, (ulong) info.deleted, (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); - // on the slave thd->query is never initialized - if (!thd->slave_thread) - mysql_update_log.write(thd,thd->query,thd->query_length); - if (!log_delayed) + if (!transactional_table) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; #ifndef EMBEDDED_LIBRARY if (mysql_bin_log.is_open()) { /* As already explained above, we need to call end_io_cache() or the last - block will be logged only after Execute_load_log_event (which is wrong), - when read_info is destroyed. 
+ block will be logged only after Execute_load_query_log_event (which is + wrong), when read_info is destroyed. */ - read_info.end_io_cache(); + read_info.end_io_cache(); if (lf_info.wrote_create_file) - { - Execute_load_log_event e(thd, db, log_delayed); - mysql_bin_log.write(&e); - } + write_execute_load_query_log_event(thd, handle_duplicates, + ignore, transactional_table); } #endif /*!EMBEDDED_LIBRARY*/ if (transactional_table) - error=ha_autocommit_or_rollback(thd,error); + error=ha_autocommit_or_rollback(thd,error); /* ok to client sent only after binlog write and engine commit */ send_ok(thd, info.copied + info.deleted, 0L, name); @@ -386,33 +493,55 @@ err: mysql_unlock_tables(thd, thd->lock); thd->lock=0; } + table->auto_increment_field_not_null= FALSE; + thd->abort_on_warning= 0; DBUG_RETURN(error); } + +#ifndef EMBEDDED_LIBRARY + +/* Not a very useful function; just to avoid duplication of code */ +static bool write_execute_load_query_log_event(THD *thd, + bool duplicates, bool ignore, + bool transactional_table) +{ + Execute_load_query_log_event + e(thd, thd->query, thd->query_length, + (char*)thd->lex->fname_start - (char*)thd->query, + (char*)thd->lex->fname_end - (char*)thd->query, + (duplicates == DUP_REPLACE) ? LOAD_DUP_REPLACE : + (ignore ? 
LOAD_DUP_IGNORE : LOAD_DUP_ERROR), + transactional_table, FALSE); + return mysql_bin_log.write(&e); +} + +#endif + /**************************************************************************** ** Read of rows of fixed size + optional garage + optonal newline ****************************************************************************/ static int -read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, - READ_INFO &read_info, ulong skip_lines) +read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, + List<Item> &fields_vars, List<Item> &set_fields, + List<Item> &set_values, READ_INFO &read_info, + ulong skip_lines, bool ignore_check_option_errors) { - List_iterator_fast<Item> it(fields); + List_iterator_fast<Item> it(fields_vars); Item_field *sql_field; + TABLE *table= table_list->table; ulonglong id; + bool no_trans_update; DBUG_ENTER("read_fixed_length"); id= 0; - - /* No fields can be null in this format. mark all fields as not null */ - while ((sql_field= (Item_field*) it++)) - sql_field->field->set_notnull(); - + while (!read_info.read_fixed_length()) { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); DBUG_RETURN(1); } if (skip_lines) @@ -431,23 +560,36 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, #ifdef HAVE_purify read_info.row_end[0]=0; #endif + no_trans_update= !table->file->has_transactions(); + + restore_record(table, s->default_values); + /* + There is no variables in fields_vars list in this format so + this conversion is safe. + */ while ((sql_field= (Item_field*) it++)) { Field *field= sql_field->field; + if (field == table->next_number_field) + table->auto_increment_field_not_null= TRUE; + /* + No fields specified in fields_vars list can be null in this format. + Mark field as not null, we should do this for each row because of + restore_record... 
+ */ + field->set_notnull(); + if (pos == read_info.row_end) { thd->cuted_fields++; /* Not enough fields */ push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_TOO_FEW_RECORDS, ER(ER_WARN_TOO_FEW_RECORDS), thd->row_count); - field->reset(); } else { uint length; byte save_chr; - if (field == table->next_number_field) - table->auto_increment_field_not_null= TRUE; if ((length=(uint) (read_info.row_end-pos)) > field->field_length) length=field->field_length; @@ -465,16 +607,37 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); } - if (write_record(table,&info)) + + if (thd->killed || + fill_record_n_invoke_before_triggers(thd, set_fields, set_values, + ignore_check_option_errors, + table->triggers, + TRG_EVENT_INSERT)) + DBUG_RETURN(1); + + switch (table_list->view_check_option(thd, + ignore_check_option_errors)) { + case VIEW_CHECK_SKIP: + read_info.next_line(); + goto continue_loop; + case VIEW_CHECK_ERROR: + DBUG_RETURN(-1); + } + + if (write_record(thd, table, &info)) DBUG_RETURN(1); + thd->no_trans_update= no_trans_update; + /* If auto_increment values are used, save the first one for LAST_INSERT_ID() and for the binary/update log. */ if (!id && thd->insert_id_used) id= thd->last_insert_id; - if (table->next_number_field) - table->next_number_field->reset(); // Clear for next record + /* + We don't need to reset auto-increment field since we are restoring + its default value at the beginning of each loop iteration. 
+ */ if (read_info.next_line()) // Skip to next line break; if (read_info.line_cuted) @@ -485,6 +648,7 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); } thd->row_count++; +continue_loop:; } if (id && !read_info.error) thd->insert_id(id); // For binary/update log @@ -494,92 +658,153 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, static int -read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, - List<Item> &fields, READ_INFO &read_info, - String &enclosed, ulong skip_lines) +read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, + List<Item> &fields_vars, List<Item> &set_fields, + List<Item> &set_values, READ_INFO &read_info, + String &enclosed, ulong skip_lines, + bool ignore_check_option_errors) { - List_iterator_fast<Item> it(fields); - Item_field *sql_field; + List_iterator_fast<Item> it(fields_vars); + Item *item; + TABLE *table= table_list->table; uint enclosed_length; ulonglong id; + bool no_trans_update; DBUG_ENTER("read_sep_field"); enclosed_length=enclosed.length(); id= 0; + no_trans_update= !table->file->has_transactions(); for (;;it.rewind()) { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); DBUG_RETURN(1); } - while ((sql_field=(Item_field*) it++)) + + restore_record(table, s->default_values); + + while ((item= it++)) { uint length; byte *pos; if (read_info.read_field()) break; + + /* If this line is to be skipped we don't want to fill field or var */ + if (skip_lines) + continue; + pos=read_info.row_start; length=(uint) (read_info.row_end-pos); - Field *field=sql_field->field; if (!read_info.enclosed && - (enclosed_length && length == 4 && !memcmp(pos,"NULL",4)) || + (enclosed_length && length == 4 && + !memcmp(pos, STRING_WITH_LEN("NULL"))) || (length == 1 && read_info.found_null)) { - if (field->reset()) + if (item->type() == Item::FIELD_ITEM) { - my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0), 
field->field_name, - thd->row_count); - DBUG_RETURN(1); - } - field->set_null(); - if (!field->maybe_null()) - { - if (field->type() == FIELD_TYPE_TIMESTAMP) - ((Field_timestamp*) field)->set_time(); - else if (field != table->next_number_field) - field->set_warning((uint) MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_NULL_TO_NOTNULL, 1); + Field *field= ((Item_field *)item)->field; + if (field->reset()) + { + my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0), field->field_name, + thd->row_count); + DBUG_RETURN(1); + } + field->set_null(); + if (field == table->next_number_field) + table->auto_increment_field_not_null= TRUE; + if (!field->maybe_null()) + { + if (field->type() == FIELD_TYPE_TIMESTAMP) + ((Field_timestamp*) field)->set_time(); + else if (field != table->next_number_field) + field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_NULL_TO_NOTNULL, 1); + } } + else + ((Item_user_var_as_out_param *)item)->set_null_value( + read_info.read_charset); continue; } - if (field == table->next_number_field) - table->auto_increment_field_not_null= TRUE; - field->set_notnull(); - read_info.row_end[0]=0; // Safe to change end marker - field->store((char*) read_info.row_start,length,read_info.read_charset); + + if (item->type() == Item::FIELD_ITEM) + { + + Field *field= ((Item_field *)item)->field; + field->set_notnull(); + read_info.row_end[0]=0; // Safe to change end marker + if (field == table->next_number_field) + table->auto_increment_field_not_null= TRUE; + field->store((char*) pos, length, read_info.read_charset); + } + else + ((Item_user_var_as_out_param *)item)->set_value((char*) pos, length, + read_info.read_charset); } if (read_info.error) break; if (skip_lines) { - if (!--skip_lines) - thd->cuted_fields= 0L; // Reset warnings + skip_lines--; continue; } - if (sql_field) - { // Last record - if (sql_field == (Item_field*) fields.head()) + if (item) + { + /* Have not read any field, thus input file is simply ended */ + if (item == fields_vars.head()) break; - for (; 
sql_field ; sql_field=(Item_field*) it++) + for (; item ; item= it++) { - sql_field->field->set_null(); - if (sql_field->field->reset()) + if (item->type() == Item::FIELD_ITEM) { - my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0),sql_field->field->field_name, - thd->row_count); - DBUG_RETURN(1); + Field *field= ((Item_field *)item)->field; + if (field->reset()) + { + my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0),field->field_name, + thd->row_count); + DBUG_RETURN(1); + } + /* + QQ: We probably should not throw warning for each field. + But how about intention to always have the same number + of warnings in THD::cuted_fields (and get rid of cuted_fields + in the end ?) + */ + thd->cuted_fields++; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_TOO_FEW_RECORDS, + ER(ER_WARN_TOO_FEW_RECORDS), thd->row_count); } - thd->cuted_fields++; - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_TOO_FEW_RECORDS, - ER(ER_WARN_TOO_FEW_RECORDS), thd->row_count); + else + ((Item_user_var_as_out_param *)item)->set_null_value( + read_info.read_charset); } } - if (write_record(table,&info)) + + if (thd->killed || + fill_record_n_invoke_before_triggers(thd, set_fields, set_values, + ignore_check_option_errors, + table->triggers, + TRG_EVENT_INSERT)) + DBUG_RETURN(1); + + switch (table_list->view_check_option(thd, + ignore_check_option_errors)) { + case VIEW_CHECK_SKIP: + read_info.next_line(); + goto continue_loop; + case VIEW_CHECK_ERROR: + DBUG_RETURN(-1); + } + + + if (write_record(thd, table, &info)) DBUG_RETURN(1); /* If auto_increment values are used, save the first one for @@ -587,8 +812,11 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, */ if (!id && thd->insert_id_used) id= thd->last_insert_id; - if (table->next_number_field) - table->next_number_field->reset(); // Clear for next record + /* + We don't need to reset auto-increment field since we are restoring + its default value at the beginning of each loop iteration. 
+ */ + thd->no_trans_update= no_trans_update; if (read_info.next_line()) // Skip to next line break; if (read_info.line_cuted) @@ -597,8 +825,11 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); + if (thd->killed) + DBUG_RETURN(1); } thd->row_count++; +continue_loop:; } if (id && !read_info.error) thd->insert_id(id); // For binary/update log @@ -687,12 +918,11 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs, my_free((gptr) buffer,MYF(0)); /* purecov: inspected */ error=1; } - else + else { /* init_io_cache() will not initialize read_function member - if the cache is READ_NET. The reason is explained in - mysys/mf_iocache.c. So we work around the problem with a + if the cache is READ_NET. So we work around the problem with a manual assignment */ need_end_io_cache = 1; diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc index 1f60c61ed46..4e61c664106 100644 --- a/sql/sql_locale.cc +++ b/sql/sql_locale.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -41,8 +40,8 @@ static TYPELIB my_locale_typelib_day_names_ar_AE = { array_elements(my_locale_day_names_ar_AE)-1, "", my_locale_day_names_ar_AE, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ar_AE = { array_elements(my_locale_ab_day_names_ar_AE)-1, "", my_locale_ab_day_names_ar_AE, NULL }; -MY_LOCALE my_locale_ar_AE= -{ +MY_LOCALE my_locale_ar_AE +( 6, "ar_AE", "Arabic - United Arab Emirates", @@ -51,7 +50,7 @@ MY_LOCALE my_locale_ar_AE= &my_locale_typelib_ab_month_names_ar_AE, &my_locale_typelib_day_names_ar_AE, &my_locale_typelib_ab_day_names_ar_AE -}; +); /***** LOCALE END ar_AE *****/ /***** LOCALE BEGIN ar_BH: Arabic - Bahrain *****/ @@ -71,8 +70,8 @@ static TYPELIB my_locale_typelib_day_names_ar_BH = { array_elements(my_locale_day_names_ar_BH)-1, "", my_locale_day_names_ar_BH, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ar_BH = { array_elements(my_locale_ab_day_names_ar_BH)-1, "", my_locale_ab_day_names_ar_BH, NULL }; -MY_LOCALE my_locale_ar_BH= -{ +MY_LOCALE my_locale_ar_BH +( 7, "ar_BH", "Arabic - Bahrain", @@ -81,7 +80,7 @@ MY_LOCALE my_locale_ar_BH= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_BH *****/ /***** LOCALE BEGIN ar_JO: Arabic - Jordan *****/ @@ -101,8 +100,8 @@ static TYPELIB my_locale_typelib_day_names_ar_JO = { array_elements(my_locale_day_names_ar_JO)-1, "", my_locale_day_names_ar_JO, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ar_JO = { array_elements(my_locale_ab_day_names_ar_JO)-1, "", my_locale_ab_day_names_ar_JO, NULL }; -MY_LOCALE my_locale_ar_JO= -{ +MY_LOCALE my_locale_ar_JO +( 8, "ar_JO", "Arabic - Jordan", @@ -111,7 +110,7 @@ MY_LOCALE my_locale_ar_JO= &my_locale_typelib_ab_month_names_ar_JO, &my_locale_typelib_day_names_ar_JO, &my_locale_typelib_ab_day_names_ar_JO -}; +); /***** 
LOCALE END ar_JO *****/ /***** LOCALE BEGIN ar_SA: Arabic - Saudi Arabia *****/ @@ -131,8 +130,8 @@ static TYPELIB my_locale_typelib_day_names_ar_SA = { array_elements(my_locale_day_names_ar_SA)-1, "", my_locale_day_names_ar_SA, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ar_SA = { array_elements(my_locale_ab_day_names_ar_SA)-1, "", my_locale_ab_day_names_ar_SA, NULL }; -MY_LOCALE my_locale_ar_SA= -{ +MY_LOCALE my_locale_ar_SA +( 9, "ar_SA", "Arabic - Saudi Arabia", @@ -141,7 +140,7 @@ MY_LOCALE my_locale_ar_SA= &my_locale_typelib_ab_month_names_ar_SA, &my_locale_typelib_day_names_ar_SA, &my_locale_typelib_ab_day_names_ar_SA -}; +); /***** LOCALE END ar_SA *****/ /***** LOCALE BEGIN ar_SY: Arabic - Syria *****/ @@ -161,8 +160,8 @@ static TYPELIB my_locale_typelib_day_names_ar_SY = { array_elements(my_locale_day_names_ar_SY)-1, "", my_locale_day_names_ar_SY, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ar_SY = { array_elements(my_locale_ab_day_names_ar_SY)-1, "", my_locale_ab_day_names_ar_SY, NULL }; -MY_LOCALE my_locale_ar_SY= -{ +MY_LOCALE my_locale_ar_SY +( 10, "ar_SY", "Arabic - Syria", @@ -171,7 +170,7 @@ MY_LOCALE my_locale_ar_SY= &my_locale_typelib_ab_month_names_ar_SY, &my_locale_typelib_day_names_ar_SY, &my_locale_typelib_ab_day_names_ar_SY -}; +); /***** LOCALE END ar_SY *****/ /***** LOCALE BEGIN be_BY: Belarusian - Belarus *****/ @@ -191,8 +190,8 @@ static TYPELIB my_locale_typelib_day_names_be_BY = { array_elements(my_locale_day_names_be_BY)-1, "", my_locale_day_names_be_BY, NULL }; static TYPELIB my_locale_typelib_ab_day_names_be_BY = { array_elements(my_locale_ab_day_names_be_BY)-1, "", my_locale_ab_day_names_be_BY, NULL }; -MY_LOCALE my_locale_be_BY= -{ +MY_LOCALE my_locale_be_BY +( 11, "be_BY", "Belarusian - Belarus", @@ -201,7 +200,7 @@ MY_LOCALE my_locale_be_BY= &my_locale_typelib_ab_month_names_be_BY, &my_locale_typelib_day_names_be_BY, &my_locale_typelib_ab_day_names_be_BY -}; +); /***** LOCALE END be_BY *****/ /***** LOCALE 
BEGIN bg_BG: Bulgarian - Bulgaria *****/ @@ -221,8 +220,8 @@ static TYPELIB my_locale_typelib_day_names_bg_BG = { array_elements(my_locale_day_names_bg_BG)-1, "", my_locale_day_names_bg_BG, NULL }; static TYPELIB my_locale_typelib_ab_day_names_bg_BG = { array_elements(my_locale_ab_day_names_bg_BG)-1, "", my_locale_ab_day_names_bg_BG, NULL }; -MY_LOCALE my_locale_bg_BG= -{ +MY_LOCALE my_locale_bg_BG +( 12, "bg_BG", "Bulgarian - Bulgaria", @@ -231,7 +230,7 @@ MY_LOCALE my_locale_bg_BG= &my_locale_typelib_ab_month_names_bg_BG, &my_locale_typelib_day_names_bg_BG, &my_locale_typelib_ab_day_names_bg_BG -}; +); /***** LOCALE END bg_BG *****/ /***** LOCALE BEGIN ca_ES: Catalan - Catalan *****/ @@ -251,8 +250,8 @@ static TYPELIB my_locale_typelib_day_names_ca_ES = { array_elements(my_locale_day_names_ca_ES)-1, "", my_locale_day_names_ca_ES, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ca_ES = { array_elements(my_locale_ab_day_names_ca_ES)-1, "", my_locale_ab_day_names_ca_ES, NULL }; -MY_LOCALE my_locale_ca_ES= -{ +MY_LOCALE my_locale_ca_ES +( 13, "ca_ES", "Catalan - Catalan", @@ -261,7 +260,7 @@ MY_LOCALE my_locale_ca_ES= &my_locale_typelib_ab_month_names_ca_ES, &my_locale_typelib_day_names_ca_ES, &my_locale_typelib_ab_day_names_ca_ES -}; +); /***** LOCALE END ca_ES *****/ /***** LOCALE BEGIN cs_CZ: Czech - Czech Republic *****/ @@ -281,8 +280,8 @@ static TYPELIB my_locale_typelib_day_names_cs_CZ = { array_elements(my_locale_day_names_cs_CZ)-1, "", my_locale_day_names_cs_CZ, NULL }; static TYPELIB my_locale_typelib_ab_day_names_cs_CZ = { array_elements(my_locale_ab_day_names_cs_CZ)-1, "", my_locale_ab_day_names_cs_CZ, NULL }; -MY_LOCALE my_locale_cs_CZ= -{ +MY_LOCALE my_locale_cs_CZ +( 14, "cs_CZ", "Czech - Czech Republic", @@ -291,7 +290,7 @@ MY_LOCALE my_locale_cs_CZ= &my_locale_typelib_ab_month_names_cs_CZ, &my_locale_typelib_day_names_cs_CZ, &my_locale_typelib_ab_day_names_cs_CZ -}; +); /***** LOCALE END cs_CZ *****/ /***** LOCALE BEGIN da_DK: Danish - Denmark 
*****/ @@ -311,8 +310,8 @@ static TYPELIB my_locale_typelib_day_names_da_DK = { array_elements(my_locale_day_names_da_DK)-1, "", my_locale_day_names_da_DK, NULL }; static TYPELIB my_locale_typelib_ab_day_names_da_DK = { array_elements(my_locale_ab_day_names_da_DK)-1, "", my_locale_ab_day_names_da_DK, NULL }; -MY_LOCALE my_locale_da_DK= -{ +MY_LOCALE my_locale_da_DK +( 15, "da_DK", "Danish - Denmark", @@ -321,7 +320,7 @@ MY_LOCALE my_locale_da_DK= &my_locale_typelib_ab_month_names_da_DK, &my_locale_typelib_day_names_da_DK, &my_locale_typelib_ab_day_names_da_DK -}; +); /***** LOCALE END da_DK *****/ /***** LOCALE BEGIN de_AT: German - Austria *****/ @@ -341,8 +340,8 @@ static TYPELIB my_locale_typelib_day_names_de_AT = { array_elements(my_locale_day_names_de_AT)-1, "", my_locale_day_names_de_AT, NULL }; static TYPELIB my_locale_typelib_ab_day_names_de_AT = { array_elements(my_locale_ab_day_names_de_AT)-1, "", my_locale_ab_day_names_de_AT, NULL }; -MY_LOCALE my_locale_de_AT= -{ +MY_LOCALE my_locale_de_AT +( 16, "de_AT", "German - Austria", @@ -351,7 +350,7 @@ MY_LOCALE my_locale_de_AT= &my_locale_typelib_ab_month_names_de_AT, &my_locale_typelib_day_names_de_AT, &my_locale_typelib_ab_day_names_de_AT -}; +); /***** LOCALE END de_AT *****/ /***** LOCALE BEGIN de_DE: German - Germany *****/ @@ -371,8 +370,8 @@ static TYPELIB my_locale_typelib_day_names_de_DE = { array_elements(my_locale_day_names_de_DE)-1, "", my_locale_day_names_de_DE, NULL }; static TYPELIB my_locale_typelib_ab_day_names_de_DE = { array_elements(my_locale_ab_day_names_de_DE)-1, "", my_locale_ab_day_names_de_DE, NULL }; -MY_LOCALE my_locale_de_DE= -{ +MY_LOCALE my_locale_de_DE +( 4, "de_DE", "German - Germany", @@ -381,7 +380,7 @@ MY_LOCALE my_locale_de_DE= &my_locale_typelib_ab_month_names_de_DE, &my_locale_typelib_day_names_de_DE, &my_locale_typelib_ab_day_names_de_DE -}; +); /***** LOCALE END de_DE *****/ /***** LOCALE BEGIN en_US: English - United States *****/ @@ -401,8 +400,8 @@ static TYPELIB 
my_locale_typelib_day_names_en_US = { array_elements(my_locale_day_names_en_US)-1, "", my_locale_day_names_en_US, NULL }; static TYPELIB my_locale_typelib_ab_day_names_en_US = { array_elements(my_locale_ab_day_names_en_US)-1, "", my_locale_ab_day_names_en_US, NULL }; -MY_LOCALE my_locale_en_US= -{ +MY_LOCALE my_locale_en_US +( 0, "en_US", "English - United States", @@ -411,7 +410,7 @@ MY_LOCALE my_locale_en_US= &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US -}; +); /***** LOCALE END en_US *****/ /***** LOCALE BEGIN es_ES: Spanish - Spain *****/ @@ -431,8 +430,8 @@ static TYPELIB my_locale_typelib_day_names_es_ES = { array_elements(my_locale_day_names_es_ES)-1, "", my_locale_day_names_es_ES, NULL }; static TYPELIB my_locale_typelib_ab_day_names_es_ES = { array_elements(my_locale_ab_day_names_es_ES)-1, "", my_locale_ab_day_names_es_ES, NULL }; -MY_LOCALE my_locale_es_ES= -{ +MY_LOCALE my_locale_es_ES +( 17, "es_ES", "Spanish - Spain", @@ -441,7 +440,7 @@ MY_LOCALE my_locale_es_ES= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_ES *****/ /***** LOCALE BEGIN et_EE: Estonian - Estonia *****/ @@ -461,8 +460,8 @@ static TYPELIB my_locale_typelib_day_names_et_EE = { array_elements(my_locale_day_names_et_EE)-1, "", my_locale_day_names_et_EE, NULL }; static TYPELIB my_locale_typelib_ab_day_names_et_EE = { array_elements(my_locale_ab_day_names_et_EE)-1, "", my_locale_ab_day_names_et_EE, NULL }; -MY_LOCALE my_locale_et_EE= -{ +MY_LOCALE my_locale_et_EE +( 18, "et_EE", "Estonian - Estonia", @@ -471,7 +470,7 @@ MY_LOCALE my_locale_et_EE= &my_locale_typelib_ab_month_names_et_EE, &my_locale_typelib_day_names_et_EE, &my_locale_typelib_ab_day_names_et_EE -}; +); /***** LOCALE END et_EE *****/ /***** LOCALE BEGIN eu_ES: Basque - Basque *****/ @@ -491,8 +490,8 @@ static TYPELIB my_locale_typelib_day_names_eu_ES = { 
array_elements(my_locale_day_names_eu_ES)-1, "", my_locale_day_names_eu_ES, NULL }; static TYPELIB my_locale_typelib_ab_day_names_eu_ES = { array_elements(my_locale_ab_day_names_eu_ES)-1, "", my_locale_ab_day_names_eu_ES, NULL }; -MY_LOCALE my_locale_eu_ES= -{ +MY_LOCALE my_locale_eu_ES +( 19, "eu_ES", "Basque - Basque", @@ -501,7 +500,7 @@ MY_LOCALE my_locale_eu_ES= &my_locale_typelib_ab_month_names_eu_ES, &my_locale_typelib_day_names_eu_ES, &my_locale_typelib_ab_day_names_eu_ES -}; +); /***** LOCALE END eu_ES *****/ /***** LOCALE BEGIN fi_FI: Finnish - Finland *****/ @@ -521,8 +520,8 @@ static TYPELIB my_locale_typelib_day_names_fi_FI = { array_elements(my_locale_day_names_fi_FI)-1, "", my_locale_day_names_fi_FI, NULL }; static TYPELIB my_locale_typelib_ab_day_names_fi_FI = { array_elements(my_locale_ab_day_names_fi_FI)-1, "", my_locale_ab_day_names_fi_FI, NULL }; -MY_LOCALE my_locale_fi_FI= -{ +MY_LOCALE my_locale_fi_FI +( 20, "fi_FI", "Finnish - Finland", @@ -531,7 +530,7 @@ MY_LOCALE my_locale_fi_FI= &my_locale_typelib_ab_month_names_fi_FI, &my_locale_typelib_day_names_fi_FI, &my_locale_typelib_ab_day_names_fi_FI -}; +); /***** LOCALE END fi_FI *****/ /***** LOCALE BEGIN fo_FO: Faroese - Faroe Islands *****/ @@ -551,8 +550,8 @@ static TYPELIB my_locale_typelib_day_names_fo_FO = { array_elements(my_locale_day_names_fo_FO)-1, "", my_locale_day_names_fo_FO, NULL }; static TYPELIB my_locale_typelib_ab_day_names_fo_FO = { array_elements(my_locale_ab_day_names_fo_FO)-1, "", my_locale_ab_day_names_fo_FO, NULL }; -MY_LOCALE my_locale_fo_FO= -{ +MY_LOCALE my_locale_fo_FO +( 21, "fo_FO", "Faroese - Faroe Islands", @@ -561,7 +560,7 @@ MY_LOCALE my_locale_fo_FO= &my_locale_typelib_ab_month_names_fo_FO, &my_locale_typelib_day_names_fo_FO, &my_locale_typelib_ab_day_names_fo_FO -}; +); /***** LOCALE END fo_FO *****/ /***** LOCALE BEGIN fr_FR: French - France *****/ @@ -581,8 +580,8 @@ static TYPELIB my_locale_typelib_day_names_fr_FR = { 
array_elements(my_locale_day_names_fr_FR)-1, "", my_locale_day_names_fr_FR, NULL }; static TYPELIB my_locale_typelib_ab_day_names_fr_FR = { array_elements(my_locale_ab_day_names_fr_FR)-1, "", my_locale_ab_day_names_fr_FR, NULL }; -MY_LOCALE my_locale_fr_FR= -{ +MY_LOCALE my_locale_fr_FR +( 5, "fr_FR", "French - France", @@ -591,7 +590,7 @@ MY_LOCALE my_locale_fr_FR= &my_locale_typelib_ab_month_names_fr_FR, &my_locale_typelib_day_names_fr_FR, &my_locale_typelib_ab_day_names_fr_FR -}; +); /***** LOCALE END fr_FR *****/ /***** LOCALE BEGIN gl_ES: Galician - Galician *****/ @@ -611,8 +610,8 @@ static TYPELIB my_locale_typelib_day_names_gl_ES = { array_elements(my_locale_day_names_gl_ES)-1, "", my_locale_day_names_gl_ES, NULL }; static TYPELIB my_locale_typelib_ab_day_names_gl_ES = { array_elements(my_locale_ab_day_names_gl_ES)-1, "", my_locale_ab_day_names_gl_ES, NULL }; -MY_LOCALE my_locale_gl_ES= -{ +MY_LOCALE my_locale_gl_ES +( 22, "gl_ES", "Galician - Galician", @@ -621,7 +620,7 @@ MY_LOCALE my_locale_gl_ES= &my_locale_typelib_ab_month_names_gl_ES, &my_locale_typelib_day_names_gl_ES, &my_locale_typelib_ab_day_names_gl_ES -}; +); /***** LOCALE END gl_ES *****/ /***** LOCALE BEGIN gu_IN: Gujarati - India *****/ @@ -641,8 +640,8 @@ static TYPELIB my_locale_typelib_day_names_gu_IN = { array_elements(my_locale_day_names_gu_IN)-1, "", my_locale_day_names_gu_IN, NULL }; static TYPELIB my_locale_typelib_ab_day_names_gu_IN = { array_elements(my_locale_ab_day_names_gu_IN)-1, "", my_locale_ab_day_names_gu_IN, NULL }; -MY_LOCALE my_locale_gu_IN= -{ +MY_LOCALE my_locale_gu_IN +( 23, "gu_IN", "Gujarati - India", @@ -651,7 +650,7 @@ MY_LOCALE my_locale_gu_IN= &my_locale_typelib_ab_month_names_gu_IN, &my_locale_typelib_day_names_gu_IN, &my_locale_typelib_ab_day_names_gu_IN -}; +); /***** LOCALE END gu_IN *****/ /***** LOCALE BEGIN he_IL: Hebrew - Israel *****/ @@ -671,8 +670,8 @@ static TYPELIB my_locale_typelib_day_names_he_IL = { array_elements(my_locale_day_names_he_IL)-1, "", 
my_locale_day_names_he_IL, NULL }; static TYPELIB my_locale_typelib_ab_day_names_he_IL = { array_elements(my_locale_ab_day_names_he_IL)-1, "", my_locale_ab_day_names_he_IL, NULL }; -MY_LOCALE my_locale_he_IL= -{ +MY_LOCALE my_locale_he_IL +( 24, "he_IL", "Hebrew - Israel", @@ -681,7 +680,7 @@ MY_LOCALE my_locale_he_IL= &my_locale_typelib_ab_month_names_he_IL, &my_locale_typelib_day_names_he_IL, &my_locale_typelib_ab_day_names_he_IL -}; +); /***** LOCALE END he_IL *****/ /***** LOCALE BEGIN hi_IN: Hindi - India *****/ @@ -701,8 +700,8 @@ static TYPELIB my_locale_typelib_day_names_hi_IN = { array_elements(my_locale_day_names_hi_IN)-1, "", my_locale_day_names_hi_IN, NULL }; static TYPELIB my_locale_typelib_ab_day_names_hi_IN = { array_elements(my_locale_ab_day_names_hi_IN)-1, "", my_locale_ab_day_names_hi_IN, NULL }; -MY_LOCALE my_locale_hi_IN= -{ +MY_LOCALE my_locale_hi_IN +( 25, "hi_IN", "Hindi - India", @@ -711,7 +710,7 @@ MY_LOCALE my_locale_hi_IN= &my_locale_typelib_ab_month_names_hi_IN, &my_locale_typelib_day_names_hi_IN, &my_locale_typelib_ab_day_names_hi_IN -}; +); /***** LOCALE END hi_IN *****/ /***** LOCALE BEGIN hr_HR: Croatian - Croatia *****/ @@ -731,8 +730,8 @@ static TYPELIB my_locale_typelib_day_names_hr_HR = { array_elements(my_locale_day_names_hr_HR)-1, "", my_locale_day_names_hr_HR, NULL }; static TYPELIB my_locale_typelib_ab_day_names_hr_HR = { array_elements(my_locale_ab_day_names_hr_HR)-1, "", my_locale_ab_day_names_hr_HR, NULL }; -MY_LOCALE my_locale_hr_HR= -{ +MY_LOCALE my_locale_hr_HR +( 26, "hr_HR", "Croatian - Croatia", @@ -741,7 +740,7 @@ MY_LOCALE my_locale_hr_HR= &my_locale_typelib_ab_month_names_hr_HR, &my_locale_typelib_day_names_hr_HR, &my_locale_typelib_ab_day_names_hr_HR -}; +); /***** LOCALE END hr_HR *****/ /***** LOCALE BEGIN hu_HU: Hungarian - Hungary *****/ @@ -761,8 +760,8 @@ static TYPELIB my_locale_typelib_day_names_hu_HU = { array_elements(my_locale_day_names_hu_HU)-1, "", my_locale_day_names_hu_HU, NULL }; static TYPELIB 
my_locale_typelib_ab_day_names_hu_HU = { array_elements(my_locale_ab_day_names_hu_HU)-1, "", my_locale_ab_day_names_hu_HU, NULL }; -MY_LOCALE my_locale_hu_HU= -{ +MY_LOCALE my_locale_hu_HU +( 27, "hu_HU", "Hungarian - Hungary", @@ -771,7 +770,7 @@ MY_LOCALE my_locale_hu_HU= &my_locale_typelib_ab_month_names_hu_HU, &my_locale_typelib_day_names_hu_HU, &my_locale_typelib_ab_day_names_hu_HU -}; +); /***** LOCALE END hu_HU *****/ /***** LOCALE BEGIN id_ID: Indonesian - Indonesia *****/ @@ -791,8 +790,8 @@ static TYPELIB my_locale_typelib_day_names_id_ID = { array_elements(my_locale_day_names_id_ID)-1, "", my_locale_day_names_id_ID, NULL }; static TYPELIB my_locale_typelib_ab_day_names_id_ID = { array_elements(my_locale_ab_day_names_id_ID)-1, "", my_locale_ab_day_names_id_ID, NULL }; -MY_LOCALE my_locale_id_ID= -{ +MY_LOCALE my_locale_id_ID +( 28, "id_ID", "Indonesian - Indonesia", @@ -801,7 +800,7 @@ MY_LOCALE my_locale_id_ID= &my_locale_typelib_ab_month_names_id_ID, &my_locale_typelib_day_names_id_ID, &my_locale_typelib_ab_day_names_id_ID -}; +); /***** LOCALE END id_ID *****/ /***** LOCALE BEGIN is_IS: Icelandic - Iceland *****/ @@ -821,8 +820,8 @@ static TYPELIB my_locale_typelib_day_names_is_IS = { array_elements(my_locale_day_names_is_IS)-1, "", my_locale_day_names_is_IS, NULL }; static TYPELIB my_locale_typelib_ab_day_names_is_IS = { array_elements(my_locale_ab_day_names_is_IS)-1, "", my_locale_ab_day_names_is_IS, NULL }; -MY_LOCALE my_locale_is_IS= -{ +MY_LOCALE my_locale_is_IS +( 29, "is_IS", "Icelandic - Iceland", @@ -831,7 +830,7 @@ MY_LOCALE my_locale_is_IS= &my_locale_typelib_ab_month_names_is_IS, &my_locale_typelib_day_names_is_IS, &my_locale_typelib_ab_day_names_is_IS -}; +); /***** LOCALE END is_IS *****/ /***** LOCALE BEGIN it_CH: Italian - Switzerland *****/ @@ -851,8 +850,8 @@ static TYPELIB my_locale_typelib_day_names_it_CH = { array_elements(my_locale_day_names_it_CH)-1, "", my_locale_day_names_it_CH, NULL }; static TYPELIB 
my_locale_typelib_ab_day_names_it_CH = { array_elements(my_locale_ab_day_names_it_CH)-1, "", my_locale_ab_day_names_it_CH, NULL }; -MY_LOCALE my_locale_it_CH= -{ +MY_LOCALE my_locale_it_CH +( 30, "it_CH", "Italian - Switzerland", @@ -861,7 +860,7 @@ MY_LOCALE my_locale_it_CH= &my_locale_typelib_ab_month_names_it_CH, &my_locale_typelib_day_names_it_CH, &my_locale_typelib_ab_day_names_it_CH -}; +); /***** LOCALE END it_CH *****/ /***** LOCALE BEGIN ja_JP: Japanese - Japan *****/ @@ -881,8 +880,8 @@ static TYPELIB my_locale_typelib_day_names_ja_JP = { array_elements(my_locale_day_names_ja_JP)-1, "", my_locale_day_names_ja_JP, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ja_JP = { array_elements(my_locale_ab_day_names_ja_JP)-1, "", my_locale_ab_day_names_ja_JP, NULL }; -MY_LOCALE my_locale_ja_JP= -{ +MY_LOCALE my_locale_ja_JP +( 2, "ja_JP", "Japanese - Japan", @@ -891,7 +890,7 @@ MY_LOCALE my_locale_ja_JP= &my_locale_typelib_ab_month_names_ja_JP, &my_locale_typelib_day_names_ja_JP, &my_locale_typelib_ab_day_names_ja_JP -}; +); /***** LOCALE END ja_JP *****/ /***** LOCALE BEGIN ko_KR: Korean - Korea *****/ @@ -911,8 +910,8 @@ static TYPELIB my_locale_typelib_day_names_ko_KR = { array_elements(my_locale_day_names_ko_KR)-1, "", my_locale_day_names_ko_KR, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ko_KR = { array_elements(my_locale_ab_day_names_ko_KR)-1, "", my_locale_ab_day_names_ko_KR, NULL }; -MY_LOCALE my_locale_ko_KR= -{ +MY_LOCALE my_locale_ko_KR +( 31, "ko_KR", "Korean - Korea", @@ -921,7 +920,7 @@ MY_LOCALE my_locale_ko_KR= &my_locale_typelib_ab_month_names_ko_KR, &my_locale_typelib_day_names_ko_KR, &my_locale_typelib_ab_day_names_ko_KR -}; +); /***** LOCALE END ko_KR *****/ /***** LOCALE BEGIN lt_LT: Lithuanian - Lithuania *****/ @@ -941,8 +940,8 @@ static TYPELIB my_locale_typelib_day_names_lt_LT = { array_elements(my_locale_day_names_lt_LT)-1, "", my_locale_day_names_lt_LT, NULL }; static TYPELIB my_locale_typelib_ab_day_names_lt_LT = { 
array_elements(my_locale_ab_day_names_lt_LT)-1, "", my_locale_ab_day_names_lt_LT, NULL }; -MY_LOCALE my_locale_lt_LT= -{ +MY_LOCALE my_locale_lt_LT +( 32, "lt_LT", "Lithuanian - Lithuania", @@ -951,7 +950,7 @@ MY_LOCALE my_locale_lt_LT= &my_locale_typelib_ab_month_names_lt_LT, &my_locale_typelib_day_names_lt_LT, &my_locale_typelib_ab_day_names_lt_LT -}; +); /***** LOCALE END lt_LT *****/ /***** LOCALE BEGIN lv_LV: Latvian - Latvia *****/ @@ -971,8 +970,8 @@ static TYPELIB my_locale_typelib_day_names_lv_LV = { array_elements(my_locale_day_names_lv_LV)-1, "", my_locale_day_names_lv_LV, NULL }; static TYPELIB my_locale_typelib_ab_day_names_lv_LV = { array_elements(my_locale_ab_day_names_lv_LV)-1, "", my_locale_ab_day_names_lv_LV, NULL }; -MY_LOCALE my_locale_lv_LV= -{ +MY_LOCALE my_locale_lv_LV +( 33, "lv_LV", "Latvian - Latvia", @@ -981,7 +980,7 @@ MY_LOCALE my_locale_lv_LV= &my_locale_typelib_ab_month_names_lv_LV, &my_locale_typelib_day_names_lv_LV, &my_locale_typelib_ab_day_names_lv_LV -}; +); /***** LOCALE END lv_LV *****/ /***** LOCALE BEGIN mk_MK: Macedonian - FYROM *****/ @@ -1001,8 +1000,8 @@ static TYPELIB my_locale_typelib_day_names_mk_MK = { array_elements(my_locale_day_names_mk_MK)-1, "", my_locale_day_names_mk_MK, NULL }; static TYPELIB my_locale_typelib_ab_day_names_mk_MK = { array_elements(my_locale_ab_day_names_mk_MK)-1, "", my_locale_ab_day_names_mk_MK, NULL }; -MY_LOCALE my_locale_mk_MK= -{ +MY_LOCALE my_locale_mk_MK +( 34, "mk_MK", "Macedonian - FYROM", @@ -1011,7 +1010,7 @@ MY_LOCALE my_locale_mk_MK= &my_locale_typelib_ab_month_names_mk_MK, &my_locale_typelib_day_names_mk_MK, &my_locale_typelib_ab_day_names_mk_MK -}; +); /***** LOCALE END mk_MK *****/ /***** LOCALE BEGIN mn_MN: Mongolia - Mongolian *****/ @@ -1031,8 +1030,8 @@ static TYPELIB my_locale_typelib_day_names_mn_MN = { array_elements(my_locale_day_names_mn_MN)-1, "", my_locale_day_names_mn_MN, NULL }; static TYPELIB my_locale_typelib_ab_day_names_mn_MN = { 
array_elements(my_locale_ab_day_names_mn_MN)-1, "", my_locale_ab_day_names_mn_MN, NULL }; -MY_LOCALE my_locale_mn_MN= -{ +MY_LOCALE my_locale_mn_MN +( 35, "mn_MN", "Mongolia - Mongolian", @@ -1041,7 +1040,7 @@ MY_LOCALE my_locale_mn_MN= &my_locale_typelib_ab_month_names_mn_MN, &my_locale_typelib_day_names_mn_MN, &my_locale_typelib_ab_day_names_mn_MN -}; +); /***** LOCALE END mn_MN *****/ /***** LOCALE BEGIN ms_MY: Malay - Malaysia *****/ @@ -1061,8 +1060,8 @@ static TYPELIB my_locale_typelib_day_names_ms_MY = { array_elements(my_locale_day_names_ms_MY)-1, "", my_locale_day_names_ms_MY, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ms_MY = { array_elements(my_locale_ab_day_names_ms_MY)-1, "", my_locale_ab_day_names_ms_MY, NULL }; -MY_LOCALE my_locale_ms_MY= -{ +MY_LOCALE my_locale_ms_MY +( 36, "ms_MY", "Malay - Malaysia", @@ -1071,7 +1070,7 @@ MY_LOCALE my_locale_ms_MY= &my_locale_typelib_ab_month_names_ms_MY, &my_locale_typelib_day_names_ms_MY, &my_locale_typelib_ab_day_names_ms_MY -}; +); /***** LOCALE END ms_MY *****/ /***** LOCALE BEGIN nb_NO: Norwegian(Bokml) - Norway *****/ @@ -1091,8 +1090,8 @@ static TYPELIB my_locale_typelib_day_names_nb_NO = { array_elements(my_locale_day_names_nb_NO)-1, "", my_locale_day_names_nb_NO, NULL }; static TYPELIB my_locale_typelib_ab_day_names_nb_NO = { array_elements(my_locale_ab_day_names_nb_NO)-1, "", my_locale_ab_day_names_nb_NO, NULL }; -MY_LOCALE my_locale_nb_NO= -{ +MY_LOCALE my_locale_nb_NO +( 37, "nb_NO", "Norwegian(Bokml) - Norway", @@ -1101,7 +1100,7 @@ MY_LOCALE my_locale_nb_NO= &my_locale_typelib_ab_month_names_nb_NO, &my_locale_typelib_day_names_nb_NO, &my_locale_typelib_ab_day_names_nb_NO -}; +); /***** LOCALE END nb_NO *****/ /***** LOCALE BEGIN nl_NL: Dutch - The Netherlands *****/ @@ -1121,8 +1120,8 @@ static TYPELIB my_locale_typelib_day_names_nl_NL = { array_elements(my_locale_day_names_nl_NL)-1, "", my_locale_day_names_nl_NL, NULL }; static TYPELIB my_locale_typelib_ab_day_names_nl_NL = { 
array_elements(my_locale_ab_day_names_nl_NL)-1, "", my_locale_ab_day_names_nl_NL, NULL }; -MY_LOCALE my_locale_nl_NL= -{ +MY_LOCALE my_locale_nl_NL +( 38, "nl_NL", "Dutch - The Netherlands", @@ -1131,7 +1130,7 @@ MY_LOCALE my_locale_nl_NL= &my_locale_typelib_ab_month_names_nl_NL, &my_locale_typelib_day_names_nl_NL, &my_locale_typelib_ab_day_names_nl_NL -}; +); /***** LOCALE END nl_NL *****/ /***** LOCALE BEGIN pl_PL: Polish - Poland *****/ @@ -1151,8 +1150,8 @@ static TYPELIB my_locale_typelib_day_names_pl_PL = { array_elements(my_locale_day_names_pl_PL)-1, "", my_locale_day_names_pl_PL, NULL }; static TYPELIB my_locale_typelib_ab_day_names_pl_PL = { array_elements(my_locale_ab_day_names_pl_PL)-1, "", my_locale_ab_day_names_pl_PL, NULL }; -MY_LOCALE my_locale_pl_PL= -{ +MY_LOCALE my_locale_pl_PL +( 39, "pl_PL", "Polish - Poland", @@ -1161,7 +1160,7 @@ MY_LOCALE my_locale_pl_PL= &my_locale_typelib_ab_month_names_pl_PL, &my_locale_typelib_day_names_pl_PL, &my_locale_typelib_ab_day_names_pl_PL -}; +); /***** LOCALE END pl_PL *****/ /***** LOCALE BEGIN pt_BR: Portugese - Brazil *****/ @@ -1181,8 +1180,8 @@ static TYPELIB my_locale_typelib_day_names_pt_BR = { array_elements(my_locale_day_names_pt_BR)-1, "", my_locale_day_names_pt_BR, NULL }; static TYPELIB my_locale_typelib_ab_day_names_pt_BR = { array_elements(my_locale_ab_day_names_pt_BR)-1, "", my_locale_ab_day_names_pt_BR, NULL }; -MY_LOCALE my_locale_pt_BR= -{ +MY_LOCALE my_locale_pt_BR +( 40, "pt_BR", "Portugese - Brazil", @@ -1191,7 +1190,7 @@ MY_LOCALE my_locale_pt_BR= &my_locale_typelib_ab_month_names_pt_BR, &my_locale_typelib_day_names_pt_BR, &my_locale_typelib_ab_day_names_pt_BR -}; +); /***** LOCALE END pt_BR *****/ /***** LOCALE BEGIN pt_PT: Portugese - Portugal *****/ @@ -1211,8 +1210,8 @@ static TYPELIB my_locale_typelib_day_names_pt_PT = { array_elements(my_locale_day_names_pt_PT)-1, "", my_locale_day_names_pt_PT, NULL }; static TYPELIB my_locale_typelib_ab_day_names_pt_PT = { 
array_elements(my_locale_ab_day_names_pt_PT)-1, "", my_locale_ab_day_names_pt_PT, NULL }; -MY_LOCALE my_locale_pt_PT= -{ +MY_LOCALE my_locale_pt_PT +( 41, "pt_PT", "Portugese - Portugal", @@ -1221,7 +1220,7 @@ MY_LOCALE my_locale_pt_PT= &my_locale_typelib_ab_month_names_pt_PT, &my_locale_typelib_day_names_pt_PT, &my_locale_typelib_ab_day_names_pt_PT -}; +); /***** LOCALE END pt_PT *****/ /***** LOCALE BEGIN ro_RO: Romanian - Romania *****/ @@ -1241,8 +1240,8 @@ static TYPELIB my_locale_typelib_day_names_ro_RO = { array_elements(my_locale_day_names_ro_RO)-1, "", my_locale_day_names_ro_RO, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ro_RO = { array_elements(my_locale_ab_day_names_ro_RO)-1, "", my_locale_ab_day_names_ro_RO, NULL }; -MY_LOCALE my_locale_ro_RO= -{ +MY_LOCALE my_locale_ro_RO +( 42, "ro_RO", "Romanian - Romania", @@ -1251,7 +1250,7 @@ MY_LOCALE my_locale_ro_RO= &my_locale_typelib_ab_month_names_ro_RO, &my_locale_typelib_day_names_ro_RO, &my_locale_typelib_ab_day_names_ro_RO -}; +); /***** LOCALE END ro_RO *****/ /***** LOCALE BEGIN ru_RU: Russian - Russia *****/ @@ -1271,8 +1270,8 @@ static TYPELIB my_locale_typelib_day_names_ru_RU = { array_elements(my_locale_day_names_ru_RU)-1, "", my_locale_day_names_ru_RU, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ru_RU = { array_elements(my_locale_ab_day_names_ru_RU)-1, "", my_locale_ab_day_names_ru_RU, NULL }; -MY_LOCALE my_locale_ru_RU= -{ +MY_LOCALE my_locale_ru_RU +( 43, "ru_RU", "Russian - Russia", @@ -1281,7 +1280,7 @@ MY_LOCALE my_locale_ru_RU= &my_locale_typelib_ab_month_names_ru_RU, &my_locale_typelib_day_names_ru_RU, &my_locale_typelib_ab_day_names_ru_RU -}; +); /***** LOCALE END ru_RU *****/ /***** LOCALE BEGIN ru_UA: Russian - Ukraine *****/ @@ -1301,8 +1300,8 @@ static TYPELIB my_locale_typelib_day_names_ru_UA = { array_elements(my_locale_day_names_ru_UA)-1, "", my_locale_day_names_ru_UA, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ru_UA = { 
array_elements(my_locale_ab_day_names_ru_UA)-1, "", my_locale_ab_day_names_ru_UA, NULL }; -MY_LOCALE my_locale_ru_UA= -{ +MY_LOCALE my_locale_ru_UA +( 44, "ru_UA", "Russian - Ukraine", @@ -1311,7 +1310,7 @@ MY_LOCALE my_locale_ru_UA= &my_locale_typelib_ab_month_names_ru_UA, &my_locale_typelib_day_names_ru_UA, &my_locale_typelib_ab_day_names_ru_UA -}; +); /***** LOCALE END ru_UA *****/ /***** LOCALE BEGIN sk_SK: Slovak - Slovakia *****/ @@ -1331,8 +1330,8 @@ static TYPELIB my_locale_typelib_day_names_sk_SK = { array_elements(my_locale_day_names_sk_SK)-1, "", my_locale_day_names_sk_SK, NULL }; static TYPELIB my_locale_typelib_ab_day_names_sk_SK = { array_elements(my_locale_ab_day_names_sk_SK)-1, "", my_locale_ab_day_names_sk_SK, NULL }; -MY_LOCALE my_locale_sk_SK= -{ +MY_LOCALE my_locale_sk_SK +( 45, "sk_SK", "Slovak - Slovakia", @@ -1341,7 +1340,7 @@ MY_LOCALE my_locale_sk_SK= &my_locale_typelib_ab_month_names_sk_SK, &my_locale_typelib_day_names_sk_SK, &my_locale_typelib_ab_day_names_sk_SK -}; +); /***** LOCALE END sk_SK *****/ /***** LOCALE BEGIN sl_SI: Slovenian - Slovenia *****/ @@ -1361,8 +1360,8 @@ static TYPELIB my_locale_typelib_day_names_sl_SI = { array_elements(my_locale_day_names_sl_SI)-1, "", my_locale_day_names_sl_SI, NULL }; static TYPELIB my_locale_typelib_ab_day_names_sl_SI = { array_elements(my_locale_ab_day_names_sl_SI)-1, "", my_locale_ab_day_names_sl_SI, NULL }; -MY_LOCALE my_locale_sl_SI= -{ +MY_LOCALE my_locale_sl_SI +( 46, "sl_SI", "Slovenian - Slovenia", @@ -1371,7 +1370,7 @@ MY_LOCALE my_locale_sl_SI= &my_locale_typelib_ab_month_names_sl_SI, &my_locale_typelib_day_names_sl_SI, &my_locale_typelib_ab_day_names_sl_SI -}; +); /***** LOCALE END sl_SI *****/ /***** LOCALE BEGIN sq_AL: Albanian - Albania *****/ @@ -1391,8 +1390,8 @@ static TYPELIB my_locale_typelib_day_names_sq_AL = { array_elements(my_locale_day_names_sq_AL)-1, "", my_locale_day_names_sq_AL, NULL }; static TYPELIB my_locale_typelib_ab_day_names_sq_AL = { 
array_elements(my_locale_ab_day_names_sq_AL)-1, "", my_locale_ab_day_names_sq_AL, NULL }; -MY_LOCALE my_locale_sq_AL= -{ +MY_LOCALE my_locale_sq_AL +( 47, "sq_AL", "Albanian - Albania", @@ -1401,7 +1400,7 @@ MY_LOCALE my_locale_sq_AL= &my_locale_typelib_ab_month_names_sq_AL, &my_locale_typelib_day_names_sq_AL, &my_locale_typelib_ab_day_names_sq_AL -}; +); /***** LOCALE END sq_AL *****/ /***** LOCALE BEGIN sr_YU: Servian - Yugoslavia *****/ @@ -1421,8 +1420,8 @@ static TYPELIB my_locale_typelib_day_names_sr_YU = { array_elements(my_locale_day_names_sr_YU)-1, "", my_locale_day_names_sr_YU, NULL }; static TYPELIB my_locale_typelib_ab_day_names_sr_YU = { array_elements(my_locale_ab_day_names_sr_YU)-1, "", my_locale_ab_day_names_sr_YU, NULL }; -MY_LOCALE my_locale_sr_YU= -{ +MY_LOCALE my_locale_sr_YU +( 48, "sr_YU", "Servian - Yugoslavia", @@ -1431,7 +1430,7 @@ MY_LOCALE my_locale_sr_YU= &my_locale_typelib_ab_month_names_sr_YU, &my_locale_typelib_day_names_sr_YU, &my_locale_typelib_ab_day_names_sr_YU -}; +); /***** LOCALE END sr_YU *****/ /***** LOCALE BEGIN sv_SE: Swedish - Sweden *****/ @@ -1451,8 +1450,8 @@ static TYPELIB my_locale_typelib_day_names_sv_SE = { array_elements(my_locale_day_names_sv_SE)-1, "", my_locale_day_names_sv_SE, NULL }; static TYPELIB my_locale_typelib_ab_day_names_sv_SE = { array_elements(my_locale_ab_day_names_sv_SE)-1, "", my_locale_ab_day_names_sv_SE, NULL }; -MY_LOCALE my_locale_sv_SE= -{ +MY_LOCALE my_locale_sv_SE +( 3, "sv_SE", "Swedish - Sweden", @@ -1461,7 +1460,7 @@ MY_LOCALE my_locale_sv_SE= &my_locale_typelib_ab_month_names_sv_SE, &my_locale_typelib_day_names_sv_SE, &my_locale_typelib_ab_day_names_sv_SE -}; +); /***** LOCALE END sv_SE *****/ /***** LOCALE BEGIN ta_IN: Tamil - India *****/ @@ -1481,8 +1480,8 @@ static TYPELIB my_locale_typelib_day_names_ta_IN = { array_elements(my_locale_day_names_ta_IN)-1, "", my_locale_day_names_ta_IN, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ta_IN = { 
array_elements(my_locale_ab_day_names_ta_IN)-1, "", my_locale_ab_day_names_ta_IN, NULL }; -MY_LOCALE my_locale_ta_IN= -{ +MY_LOCALE my_locale_ta_IN +( 49, "ta_IN", "Tamil - India", @@ -1491,7 +1490,7 @@ MY_LOCALE my_locale_ta_IN= &my_locale_typelib_ab_month_names_ta_IN, &my_locale_typelib_day_names_ta_IN, &my_locale_typelib_ab_day_names_ta_IN -}; +); /***** LOCALE END ta_IN *****/ /***** LOCALE BEGIN te_IN: Telugu - India *****/ @@ -1511,8 +1510,8 @@ static TYPELIB my_locale_typelib_day_names_te_IN = { array_elements(my_locale_day_names_te_IN)-1, "", my_locale_day_names_te_IN, NULL }; static TYPELIB my_locale_typelib_ab_day_names_te_IN = { array_elements(my_locale_ab_day_names_te_IN)-1, "", my_locale_ab_day_names_te_IN, NULL }; -MY_LOCALE my_locale_te_IN= -{ +MY_LOCALE my_locale_te_IN +( 50, "te_IN", "Telugu - India", @@ -1521,7 +1520,7 @@ MY_LOCALE my_locale_te_IN= &my_locale_typelib_ab_month_names_te_IN, &my_locale_typelib_day_names_te_IN, &my_locale_typelib_ab_day_names_te_IN -}; +); /***** LOCALE END te_IN *****/ /***** LOCALE BEGIN th_TH: Thai - Thailand *****/ @@ -1541,8 +1540,8 @@ static TYPELIB my_locale_typelib_day_names_th_TH = { array_elements(my_locale_day_names_th_TH)-1, "", my_locale_day_names_th_TH, NULL }; static TYPELIB my_locale_typelib_ab_day_names_th_TH = { array_elements(my_locale_ab_day_names_th_TH)-1, "", my_locale_ab_day_names_th_TH, NULL }; -MY_LOCALE my_locale_th_TH= -{ +MY_LOCALE my_locale_th_TH +( 51, "th_TH", "Thai - Thailand", @@ -1551,7 +1550,7 @@ MY_LOCALE my_locale_th_TH= &my_locale_typelib_ab_month_names_th_TH, &my_locale_typelib_day_names_th_TH, &my_locale_typelib_ab_day_names_th_TH -}; +); /***** LOCALE END th_TH *****/ /***** LOCALE BEGIN tr_TR: Turkish - Turkey *****/ @@ -1571,8 +1570,8 @@ static TYPELIB my_locale_typelib_day_names_tr_TR = { array_elements(my_locale_day_names_tr_TR)-1, "", my_locale_day_names_tr_TR, NULL }; static TYPELIB my_locale_typelib_ab_day_names_tr_TR = { array_elements(my_locale_ab_day_names_tr_TR)-1, 
"", my_locale_ab_day_names_tr_TR, NULL }; -MY_LOCALE my_locale_tr_TR= -{ +MY_LOCALE my_locale_tr_TR +( 52, "tr_TR", "Turkish - Turkey", @@ -1581,7 +1580,7 @@ MY_LOCALE my_locale_tr_TR= &my_locale_typelib_ab_month_names_tr_TR, &my_locale_typelib_day_names_tr_TR, &my_locale_typelib_ab_day_names_tr_TR -}; +); /***** LOCALE END tr_TR *****/ /***** LOCALE BEGIN uk_UA: Ukrainian - Ukraine *****/ @@ -1601,8 +1600,8 @@ static TYPELIB my_locale_typelib_day_names_uk_UA = { array_elements(my_locale_day_names_uk_UA)-1, "", my_locale_day_names_uk_UA, NULL }; static TYPELIB my_locale_typelib_ab_day_names_uk_UA = { array_elements(my_locale_ab_day_names_uk_UA)-1, "", my_locale_ab_day_names_uk_UA, NULL }; -MY_LOCALE my_locale_uk_UA= -{ +MY_LOCALE my_locale_uk_UA +( 53, "uk_UA", "Ukrainian - Ukraine", @@ -1611,7 +1610,7 @@ MY_LOCALE my_locale_uk_UA= &my_locale_typelib_ab_month_names_uk_UA, &my_locale_typelib_day_names_uk_UA, &my_locale_typelib_ab_day_names_uk_UA -}; +); /***** LOCALE END uk_UA *****/ /***** LOCALE BEGIN ur_PK: Urdu - Pakistan *****/ @@ -1631,8 +1630,8 @@ static TYPELIB my_locale_typelib_day_names_ur_PK = { array_elements(my_locale_day_names_ur_PK)-1, "", my_locale_day_names_ur_PK, NULL }; static TYPELIB my_locale_typelib_ab_day_names_ur_PK = { array_elements(my_locale_ab_day_names_ur_PK)-1, "", my_locale_ab_day_names_ur_PK, NULL }; -MY_LOCALE my_locale_ur_PK= -{ +MY_LOCALE my_locale_ur_PK +( 54, "ur_PK", "Urdu - Pakistan", @@ -1641,7 +1640,7 @@ MY_LOCALE my_locale_ur_PK= &my_locale_typelib_ab_month_names_ur_PK, &my_locale_typelib_day_names_ur_PK, &my_locale_typelib_ab_day_names_ur_PK -}; +); /***** LOCALE END ur_PK *****/ /***** LOCALE BEGIN vi_VN: Vietnamese - Vietnam *****/ @@ -1661,8 +1660,8 @@ static TYPELIB my_locale_typelib_day_names_vi_VN = { array_elements(my_locale_day_names_vi_VN)-1, "", my_locale_day_names_vi_VN, NULL }; static TYPELIB my_locale_typelib_ab_day_names_vi_VN = { array_elements(my_locale_ab_day_names_vi_VN)-1, "", 
my_locale_ab_day_names_vi_VN, NULL }; -MY_LOCALE my_locale_vi_VN= -{ +MY_LOCALE my_locale_vi_VN +( 55, "vi_VN", "Vietnamese - Vietnam", @@ -1671,7 +1670,7 @@ MY_LOCALE my_locale_vi_VN= &my_locale_typelib_ab_month_names_vi_VN, &my_locale_typelib_day_names_vi_VN, &my_locale_typelib_ab_day_names_vi_VN -}; +); /***** LOCALE END vi_VN *****/ /***** LOCALE BEGIN zh_CN: Chinese - Peoples Republic of China *****/ @@ -1691,8 +1690,8 @@ static TYPELIB my_locale_typelib_day_names_zh_CN = { array_elements(my_locale_day_names_zh_CN)-1, "", my_locale_day_names_zh_CN, NULL }; static TYPELIB my_locale_typelib_ab_day_names_zh_CN = { array_elements(my_locale_ab_day_names_zh_CN)-1, "", my_locale_ab_day_names_zh_CN, NULL }; -MY_LOCALE my_locale_zh_CN= -{ +MY_LOCALE my_locale_zh_CN +( 56, "zh_CN", "Chinese - Peoples Republic of China", @@ -1701,7 +1700,7 @@ MY_LOCALE my_locale_zh_CN= &my_locale_typelib_ab_month_names_zh_CN, &my_locale_typelib_day_names_zh_CN, &my_locale_typelib_ab_day_names_zh_CN -}; +); /***** LOCALE END zh_CN *****/ /***** LOCALE BEGIN zh_TW: Chinese - Taiwan *****/ @@ -1721,8 +1720,8 @@ static TYPELIB my_locale_typelib_day_names_zh_TW = { array_elements(my_locale_day_names_zh_TW)-1, "", my_locale_day_names_zh_TW, NULL }; static TYPELIB my_locale_typelib_ab_day_names_zh_TW = { array_elements(my_locale_ab_day_names_zh_TW)-1, "", my_locale_ab_day_names_zh_TW, NULL }; -MY_LOCALE my_locale_zh_TW= -{ +MY_LOCALE my_locale_zh_TW +( 57, "zh_TW", "Chinese - Taiwan", @@ -1731,12 +1730,12 @@ MY_LOCALE my_locale_zh_TW= &my_locale_typelib_ab_month_names_zh_TW, &my_locale_typelib_day_names_zh_TW, &my_locale_typelib_ab_day_names_zh_TW -}; +); /***** LOCALE END zh_TW *****/ /***** LOCALE BEGIN ar_DZ: Arabic - Algeria *****/ -MY_LOCALE my_locale_ar_DZ= -{ +MY_LOCALE my_locale_ar_DZ +( 58, "ar_DZ", "Arabic - Algeria", @@ -1745,12 +1744,12 @@ MY_LOCALE my_locale_ar_DZ= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; 
+); /***** LOCALE END ar_DZ *****/ /***** LOCALE BEGIN ar_EG: Arabic - Egypt *****/ -MY_LOCALE my_locale_ar_EG= -{ +MY_LOCALE my_locale_ar_EG +( 59, "ar_EG", "Arabic - Egypt", @@ -1759,12 +1758,12 @@ MY_LOCALE my_locale_ar_EG= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_EG *****/ /***** LOCALE BEGIN ar_IN: Arabic - Iran *****/ -MY_LOCALE my_locale_ar_IN= -{ +MY_LOCALE my_locale_ar_IN +( 60, "ar_IN", "Arabic - Iran", @@ -1773,12 +1772,12 @@ MY_LOCALE my_locale_ar_IN= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_IN *****/ /***** LOCALE BEGIN ar_IQ: Arabic - Iraq *****/ -MY_LOCALE my_locale_ar_IQ= -{ +MY_LOCALE my_locale_ar_IQ +( 61, "ar_IQ", "Arabic - Iraq", @@ -1787,12 +1786,12 @@ MY_LOCALE my_locale_ar_IQ= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_IQ *****/ /***** LOCALE BEGIN ar_KW: Arabic - Kuwait *****/ -MY_LOCALE my_locale_ar_KW= -{ +MY_LOCALE my_locale_ar_KW +( 62, "ar_KW", "Arabic - Kuwait", @@ -1801,12 +1800,12 @@ MY_LOCALE my_locale_ar_KW= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_KW *****/ /***** LOCALE BEGIN ar_LB: Arabic - Lebanon *****/ -MY_LOCALE my_locale_ar_LB= -{ +MY_LOCALE my_locale_ar_LB +( 63, "ar_LB", "Arabic - Lebanon", @@ -1815,12 +1814,12 @@ MY_LOCALE my_locale_ar_LB= &my_locale_typelib_ab_month_names_ar_JO, &my_locale_typelib_day_names_ar_JO, &my_locale_typelib_ab_day_names_ar_JO -}; +); /***** LOCALE END ar_LB *****/ /***** LOCALE BEGIN ar_LY: Arabic - Libya *****/ -MY_LOCALE my_locale_ar_LY= -{ +MY_LOCALE my_locale_ar_LY +( 64, "ar_LY", "Arabic - Libya", @@ -1829,12 +1828,12 @@ MY_LOCALE my_locale_ar_LY= &my_locale_typelib_ab_month_names_ar_BH, 
&my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_LY *****/ /***** LOCALE BEGIN ar_MA: Arabic - Morocco *****/ -MY_LOCALE my_locale_ar_MA= -{ +MY_LOCALE my_locale_ar_MA +( 65, "ar_MA", "Arabic - Morocco", @@ -1843,12 +1842,12 @@ MY_LOCALE my_locale_ar_MA= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_MA *****/ /***** LOCALE BEGIN ar_OM: Arabic - Oman *****/ -MY_LOCALE my_locale_ar_OM= -{ +MY_LOCALE my_locale_ar_OM +( 66, "ar_OM", "Arabic - Oman", @@ -1857,12 +1856,12 @@ MY_LOCALE my_locale_ar_OM= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_OM *****/ /***** LOCALE BEGIN ar_QA: Arabic - Qatar *****/ -MY_LOCALE my_locale_ar_QA= -{ +MY_LOCALE my_locale_ar_QA +( 67, "ar_QA", "Arabic - Qatar", @@ -1871,12 +1870,12 @@ MY_LOCALE my_locale_ar_QA= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_QA *****/ /***** LOCALE BEGIN ar_SD: Arabic - Sudan *****/ -MY_LOCALE my_locale_ar_SD= -{ +MY_LOCALE my_locale_ar_SD +( 68, "ar_SD", "Arabic - Sudan", @@ -1885,12 +1884,12 @@ MY_LOCALE my_locale_ar_SD= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_SD *****/ /***** LOCALE BEGIN ar_TN: Arabic - Tunisia *****/ -MY_LOCALE my_locale_ar_TN= -{ +MY_LOCALE my_locale_ar_TN +( 69, "ar_TN", "Arabic - Tunisia", @@ -1899,12 +1898,12 @@ MY_LOCALE my_locale_ar_TN= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_TN *****/ /***** LOCALE BEGIN ar_YE: Arabic - Yemen *****/ -MY_LOCALE my_locale_ar_YE= -{ +MY_LOCALE my_locale_ar_YE +( 70, "ar_YE", "Arabic - Yemen", @@ 
-1913,12 +1912,12 @@ MY_LOCALE my_locale_ar_YE= &my_locale_typelib_ab_month_names_ar_BH, &my_locale_typelib_day_names_ar_BH, &my_locale_typelib_ab_day_names_ar_BH -}; +); /***** LOCALE END ar_YE *****/ /***** LOCALE BEGIN de_BE: German - Belgium *****/ -MY_LOCALE my_locale_de_BE= -{ +MY_LOCALE my_locale_de_BE +( 71, "de_BE", "German - Belgium", @@ -1927,12 +1926,12 @@ MY_LOCALE my_locale_de_BE= &my_locale_typelib_ab_month_names_de_DE, &my_locale_typelib_day_names_de_DE, &my_locale_typelib_ab_day_names_de_DE -}; +); /***** LOCALE END de_BE *****/ /***** LOCALE BEGIN de_CH: German - Switzerland *****/ -MY_LOCALE my_locale_de_CH= -{ +MY_LOCALE my_locale_de_CH +( 72, "de_CH", "German - Switzerland", @@ -1941,12 +1940,12 @@ MY_LOCALE my_locale_de_CH= &my_locale_typelib_ab_month_names_de_DE, &my_locale_typelib_day_names_de_DE, &my_locale_typelib_ab_day_names_de_DE -}; +); /***** LOCALE END de_CH *****/ /***** LOCALE BEGIN de_LU: German - Luxembourg *****/ -MY_LOCALE my_locale_de_LU= -{ +MY_LOCALE my_locale_de_LU +( 73, "de_LU", "German - Luxembourg", @@ -1955,12 +1954,12 @@ MY_LOCALE my_locale_de_LU= &my_locale_typelib_ab_month_names_de_DE, &my_locale_typelib_day_names_de_DE, &my_locale_typelib_ab_day_names_de_DE -}; +); /***** LOCALE END de_LU *****/ /***** LOCALE BEGIN en_AU: English - Australia *****/ -MY_LOCALE my_locale_en_AU= -{ +MY_LOCALE my_locale_en_AU +( 74, "en_AU", "English - Australia", @@ -1969,12 +1968,12 @@ MY_LOCALE my_locale_en_AU= &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US -}; +); /***** LOCALE END en_AU *****/ /***** LOCALE BEGIN en_CA: English - Canada *****/ -MY_LOCALE my_locale_en_CA= -{ +MY_LOCALE my_locale_en_CA +( 75, "en_CA", "English - Canada", @@ -1983,12 +1982,12 @@ MY_LOCALE my_locale_en_CA= &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US -}; +); /***** LOCALE END en_CA *****/ /***** LOCALE BEGIN en_GB: 
English - United Kingdom *****/ -MY_LOCALE my_locale_en_GB= -{ +MY_LOCALE my_locale_en_GB +( 1, "en_GB", "English - United Kingdom", @@ -1997,12 +1996,12 @@ MY_LOCALE my_locale_en_GB= &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US -}; +); /***** LOCALE END en_GB *****/ /***** LOCALE BEGIN en_IN: English - India *****/ -MY_LOCALE my_locale_en_IN= -{ +MY_LOCALE my_locale_en_IN +( 76, "en_IN", "English - India", @@ -2011,12 +2010,12 @@ MY_LOCALE my_locale_en_IN= &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US -}; +); /***** LOCALE END en_IN *****/ /***** LOCALE BEGIN en_NZ: English - New Zealand *****/ -MY_LOCALE my_locale_en_NZ= -{ +MY_LOCALE my_locale_en_NZ +( 77, "en_NZ", "English - New Zealand", @@ -2025,12 +2024,12 @@ MY_LOCALE my_locale_en_NZ= &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US -}; +); /***** LOCALE END en_NZ *****/ /***** LOCALE BEGIN en_PH: English - Philippines *****/ -MY_LOCALE my_locale_en_PH= -{ +MY_LOCALE my_locale_en_PH +( 78, "en_PH", "English - Philippines", @@ -2039,12 +2038,12 @@ MY_LOCALE my_locale_en_PH= &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US -}; +); /***** LOCALE END en_PH *****/ /***** LOCALE BEGIN en_ZA: English - South Africa *****/ -MY_LOCALE my_locale_en_ZA= -{ +MY_LOCALE my_locale_en_ZA +( 79, "en_ZA", "English - South Africa", @@ -2053,12 +2052,12 @@ MY_LOCALE my_locale_en_ZA= &my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US -}; +); /***** LOCALE END en_ZA *****/ /***** LOCALE BEGIN en_ZW: English - Zimbabwe *****/ -MY_LOCALE my_locale_en_ZW= -{ +MY_LOCALE my_locale_en_ZW +( 80, "en_ZW", "English - Zimbabwe", @@ -2067,12 +2066,12 @@ MY_LOCALE my_locale_en_ZW= 
&my_locale_typelib_ab_month_names_en_US, &my_locale_typelib_day_names_en_US, &my_locale_typelib_ab_day_names_en_US -}; +); /***** LOCALE END en_ZW *****/ /***** LOCALE BEGIN es_AR: Spanish - Argentina *****/ -MY_LOCALE my_locale_es_AR= -{ +MY_LOCALE my_locale_es_AR +( 81, "es_AR", "Spanish - Argentina", @@ -2081,12 +2080,12 @@ MY_LOCALE my_locale_es_AR= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_AR *****/ /***** LOCALE BEGIN es_BO: Spanish - Bolivia *****/ -MY_LOCALE my_locale_es_BO= -{ +MY_LOCALE my_locale_es_BO +( 82, "es_BO", "Spanish - Bolivia", @@ -2095,12 +2094,12 @@ MY_LOCALE my_locale_es_BO= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_BO *****/ /***** LOCALE BEGIN es_CL: Spanish - Chile *****/ -MY_LOCALE my_locale_es_CL= -{ +MY_LOCALE my_locale_es_CL +( 83, "es_CL", "Spanish - Chile", @@ -2109,12 +2108,12 @@ MY_LOCALE my_locale_es_CL= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_CL *****/ /***** LOCALE BEGIN es_CO: Spanish - Columbia *****/ -MY_LOCALE my_locale_es_CO= -{ +MY_LOCALE my_locale_es_CO +( 84, "es_CO", "Spanish - Columbia", @@ -2123,12 +2122,12 @@ MY_LOCALE my_locale_es_CO= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_CO *****/ /***** LOCALE BEGIN es_CR: Spanish - Costa Rica *****/ -MY_LOCALE my_locale_es_CR= -{ +MY_LOCALE my_locale_es_CR +( 85, "es_CR", "Spanish - Costa Rica", @@ -2137,12 +2136,12 @@ MY_LOCALE my_locale_es_CR= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_CR *****/ /***** LOCALE BEGIN es_DO: Spanish - Dominican Republic *****/ -MY_LOCALE 
my_locale_es_DO= -{ +MY_LOCALE my_locale_es_DO +( 86, "es_DO", "Spanish - Dominican Republic", @@ -2151,12 +2150,12 @@ MY_LOCALE my_locale_es_DO= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_DO *****/ /***** LOCALE BEGIN es_EC: Spanish - Ecuador *****/ -MY_LOCALE my_locale_es_EC= -{ +MY_LOCALE my_locale_es_EC +( 87, "es_EC", "Spanish - Ecuador", @@ -2165,12 +2164,12 @@ MY_LOCALE my_locale_es_EC= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_EC *****/ /***** LOCALE BEGIN es_GT: Spanish - Guatemala *****/ -MY_LOCALE my_locale_es_GT= -{ +MY_LOCALE my_locale_es_GT +( 88, "es_GT", "Spanish - Guatemala", @@ -2179,12 +2178,12 @@ MY_LOCALE my_locale_es_GT= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_GT *****/ /***** LOCALE BEGIN es_HN: Spanish - Honduras *****/ -MY_LOCALE my_locale_es_HN= -{ +MY_LOCALE my_locale_es_HN +( 89, "es_HN", "Spanish - Honduras", @@ -2193,12 +2192,12 @@ MY_LOCALE my_locale_es_HN= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_HN *****/ /***** LOCALE BEGIN es_MX: Spanish - Mexico *****/ -MY_LOCALE my_locale_es_MX= -{ +MY_LOCALE my_locale_es_MX +( 90, "es_MX", "Spanish - Mexico", @@ -2207,12 +2206,12 @@ MY_LOCALE my_locale_es_MX= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_MX *****/ /***** LOCALE BEGIN es_NI: Spanish - Nicaragua *****/ -MY_LOCALE my_locale_es_NI= -{ +MY_LOCALE my_locale_es_NI +( 91, "es_NI", "Spanish - Nicaragua", @@ -2221,12 +2220,12 @@ MY_LOCALE my_locale_es_NI= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, 
&my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_NI *****/ /***** LOCALE BEGIN es_PA: Spanish - Panama *****/ -MY_LOCALE my_locale_es_PA= -{ +MY_LOCALE my_locale_es_PA +( 92, "es_PA", "Spanish - Panama", @@ -2235,12 +2234,12 @@ MY_LOCALE my_locale_es_PA= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_PA *****/ /***** LOCALE BEGIN es_PE: Spanish - Peru *****/ -MY_LOCALE my_locale_es_PE= -{ +MY_LOCALE my_locale_es_PE +( 93, "es_PE", "Spanish - Peru", @@ -2249,12 +2248,12 @@ MY_LOCALE my_locale_es_PE= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_PE *****/ /***** LOCALE BEGIN es_PR: Spanish - Puerto Rico *****/ -MY_LOCALE my_locale_es_PR= -{ +MY_LOCALE my_locale_es_PR +( 94, "es_PR", "Spanish - Puerto Rico", @@ -2263,12 +2262,12 @@ MY_LOCALE my_locale_es_PR= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_PR *****/ /***** LOCALE BEGIN es_PY: Spanish - Paraguay *****/ -MY_LOCALE my_locale_es_PY= -{ +MY_LOCALE my_locale_es_PY +( 95, "es_PY", "Spanish - Paraguay", @@ -2277,12 +2276,12 @@ MY_LOCALE my_locale_es_PY= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_PY *****/ /***** LOCALE BEGIN es_SV: Spanish - El Salvador *****/ -MY_LOCALE my_locale_es_SV= -{ +MY_LOCALE my_locale_es_SV +( 96, "es_SV", "Spanish - El Salvador", @@ -2291,12 +2290,12 @@ MY_LOCALE my_locale_es_SV= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_SV *****/ /***** LOCALE BEGIN es_US: Spanish - United States *****/ -MY_LOCALE my_locale_es_US= -{ +MY_LOCALE my_locale_es_US +( 97, "es_US", "Spanish - United 
States", @@ -2305,12 +2304,12 @@ MY_LOCALE my_locale_es_US= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_US *****/ /***** LOCALE BEGIN es_UY: Spanish - Uruguay *****/ -MY_LOCALE my_locale_es_UY= -{ +MY_LOCALE my_locale_es_UY +( 98, "es_UY", "Spanish - Uruguay", @@ -2319,12 +2318,12 @@ MY_LOCALE my_locale_es_UY= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_UY *****/ /***** LOCALE BEGIN es_VE: Spanish - Venezuela *****/ -MY_LOCALE my_locale_es_VE= -{ +MY_LOCALE my_locale_es_VE +( 99, "es_VE", "Spanish - Venezuela", @@ -2333,12 +2332,12 @@ MY_LOCALE my_locale_es_VE= &my_locale_typelib_ab_month_names_es_ES, &my_locale_typelib_day_names_es_ES, &my_locale_typelib_ab_day_names_es_ES -}; +); /***** LOCALE END es_VE *****/ /***** LOCALE BEGIN fr_BE: French - Belgium *****/ -MY_LOCALE my_locale_fr_BE= -{ +MY_LOCALE my_locale_fr_BE +( 100, "fr_BE", "French - Belgium", @@ -2347,12 +2346,12 @@ MY_LOCALE my_locale_fr_BE= &my_locale_typelib_ab_month_names_fr_FR, &my_locale_typelib_day_names_fr_FR, &my_locale_typelib_ab_day_names_fr_FR -}; +); /***** LOCALE END fr_BE *****/ /***** LOCALE BEGIN fr_CA: French - Canada *****/ -MY_LOCALE my_locale_fr_CA= -{ +MY_LOCALE my_locale_fr_CA +( 101, "fr_CA", "French - Canada", @@ -2361,12 +2360,12 @@ MY_LOCALE my_locale_fr_CA= &my_locale_typelib_ab_month_names_fr_FR, &my_locale_typelib_day_names_fr_FR, &my_locale_typelib_ab_day_names_fr_FR -}; +); /***** LOCALE END fr_CA *****/ /***** LOCALE BEGIN fr_CH: French - Switzerland *****/ -MY_LOCALE my_locale_fr_CH= -{ +MY_LOCALE my_locale_fr_CH +( 102, "fr_CH", "French - Switzerland", @@ -2375,12 +2374,12 @@ MY_LOCALE my_locale_fr_CH= &my_locale_typelib_ab_month_names_fr_FR, &my_locale_typelib_day_names_fr_FR, &my_locale_typelib_ab_day_names_fr_FR -}; +); /***** LOCALE END fr_CH *****/ /***** LOCALE 
BEGIN fr_LU: French - Luxembourg *****/ -MY_LOCALE my_locale_fr_LU= -{ +MY_LOCALE my_locale_fr_LU +( 103, "fr_LU", "French - Luxembourg", @@ -2389,12 +2388,12 @@ MY_LOCALE my_locale_fr_LU= &my_locale_typelib_ab_month_names_fr_FR, &my_locale_typelib_day_names_fr_FR, &my_locale_typelib_ab_day_names_fr_FR -}; +); /***** LOCALE END fr_LU *****/ /***** LOCALE BEGIN it_IT: Italian - Italy *****/ -MY_LOCALE my_locale_it_IT= -{ +MY_LOCALE my_locale_it_IT +( 104, "it_IT", "Italian - Italy", @@ -2403,12 +2402,12 @@ MY_LOCALE my_locale_it_IT= &my_locale_typelib_ab_month_names_it_CH, &my_locale_typelib_day_names_it_CH, &my_locale_typelib_ab_day_names_it_CH -}; +); /***** LOCALE END it_IT *****/ /***** LOCALE BEGIN nl_BE: Dutch - Belgium *****/ -MY_LOCALE my_locale_nl_BE= -{ +MY_LOCALE my_locale_nl_BE +( 105, "nl_BE", "Dutch - Belgium", @@ -2417,12 +2416,12 @@ MY_LOCALE my_locale_nl_BE= &my_locale_typelib_ab_month_names_nl_NL, &my_locale_typelib_day_names_nl_NL, &my_locale_typelib_ab_day_names_nl_NL -}; +); /***** LOCALE END nl_BE *****/ /***** LOCALE BEGIN no_NO: Norwegian - Norway *****/ -MY_LOCALE my_locale_no_NO= -{ +MY_LOCALE my_locale_no_NO +( 106, "no_NO", "Norwegian - Norway", @@ -2431,12 +2430,12 @@ MY_LOCALE my_locale_no_NO= &my_locale_typelib_ab_month_names_nb_NO, &my_locale_typelib_day_names_nb_NO, &my_locale_typelib_ab_day_names_nb_NO -}; +); /***** LOCALE END no_NO *****/ /***** LOCALE BEGIN sv_FI: Swedish - Finland *****/ -MY_LOCALE my_locale_sv_FI= -{ +MY_LOCALE my_locale_sv_FI +( 107, "sv_FI", "Swedish - Finland", @@ -2445,12 +2444,12 @@ MY_LOCALE my_locale_sv_FI= &my_locale_typelib_ab_month_names_sv_SE, &my_locale_typelib_day_names_sv_SE, &my_locale_typelib_ab_day_names_sv_SE -}; +); /***** LOCALE END sv_FI *****/ /***** LOCALE BEGIN zh_HK: Chinese - Hong Kong SAR *****/ -MY_LOCALE my_locale_zh_HK= -{ +MY_LOCALE my_locale_zh_HK +( 108, "zh_HK", "Chinese - Hong Kong SAR", @@ -2459,7 +2458,7 @@ MY_LOCALE my_locale_zh_HK= &my_locale_typelib_ab_month_names_zh_CN, 
&my_locale_typelib_day_names_zh_CN, &my_locale_typelib_ab_day_names_zh_CN -}; +); /***** LOCALE END zh_HK *****/ diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc index 0af6a80d4c2..33905bdb913 100644 --- a/sql/sql_manager.cc +++ b/sql/sql_manager.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000, 2002, 2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -32,7 +31,7 @@ pthread_t manager_thread; pthread_mutex_t LOCK_manager; pthread_cond_t COND_manager; -extern "C" pthread_handler_decl(handle_manager,arg __attribute__((unused))) +pthread_handler_t handle_manager(void *arg __attribute__((unused))) { int error = 0; ulong status; @@ -58,12 +57,14 @@ extern "C" pthread_handler_decl(handle_manager,arg __attribute__((unused))) set_timespec(abstime, flush_time); reset_flush_time = FALSE; } - while (!manager_status && !error && !abort_loop) - error = pthread_cond_timedwait(&COND_manager, &LOCK_manager, &abstime); + while (!manager_status && (!error || error == EINTR) && !abort_loop) + error= pthread_cond_timedwait(&COND_manager, &LOCK_manager, &abstime); } else - while (!manager_status && !error && !abort_loop) - error = pthread_cond_wait(&COND_manager, &LOCK_manager); + { + while (!manager_status && (!error || error == EINTR) && !abort_loop) + error= pthread_cond_wait(&COND_manager, &LOCK_manager); + } status = manager_status; manager_status = 0; pthread_mutex_unlock(&LOCK_manager); @@ -71,7 +72,7 @@ extern "C" pthread_handler_decl(handle_manager,arg __attribute__((unused))) if (abort_loop) break; - if (error) /* == 
ETIMEDOUT */ + if (error == ETIMEDOUT || error == ETIME) { flush_tables(); error = 0; diff --git a/sql/sql_manager.h b/sql/sql_manager.h index 35704705820..7ba1e9c0de2 100644 --- a/sql/sql_manager.h +++ b/sql/sql_manager.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/sql_map.cc b/sql/sql_map.cc index aac44949d89..03dc091b9b7 100644 --- a/sql/sql_map.cc +++ b/sql/sql_map.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001, 2004-2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -20,9 +19,9 @@ #endif #include "mysql_priv.h" -#ifdef HAVE_MMAP -#include <sys/mman.h> #include <sys/stat.h> +#ifdef HAVE_SYS_MMAN_H +#include <sys/mman.h> #endif #ifndef MAP_NORESERVE @@ -42,19 +41,18 @@ mapped_files::mapped_files(const my_string filename,byte *magic,uint magic_lengt struct stat stat_buf; if (!fstat(file,&stat_buf)) { - if (!(map=(byte*) mmap(0,(size=(ulong) stat_buf.st_size),PROT_READ, + if (!(map=(byte*) my_mmap(0,(size=(ulong) stat_buf.st_size),PROT_READ, MAP_SHARED | MAP_NORESERVE,file, 0L))) { error=errno; - my_printf_error(0,"Can't map file: %s, errno: %d",MYF(0), - (my_string) name,error); + my_error(ER_NO_FILE_MAPPING, MYF(0), (my_string) name, error); } } if (map && memcmp(map,magic,magic_length)) { - my_printf_error(0,"Wrong magic in %s",MYF(0),name); - VOID(munmap(map,size)); + my_error(ER_WRONG_MAGIC, MYF(0), name); + VOID(my_munmap(map,size)); map=0; } if (!map) @@ -72,7 +70,7 @@ mapped_files::~mapped_files() #ifdef HAVE_MMAP if (file >= 0) { - VOID(munmap((caddr_t) map,size)); + VOID(my_munmap(map,size)); VOID(my_close(file,MYF(0))); file= -1; map=0; } @@ -112,8 +110,7 @@ mapped_files *map_file(const my_string name,byte *magic,uint magic_length) { map->use_count++; if (!map->map) - my_printf_error(0,"Can't map file: %s, error: %d",MYF(0),path, - map->error); + my_error(ER_NO_FILE_MAPPING, MYF(0), path, map->error); } VOID(pthread_mutex_unlock(&LOCK_mapped_file)); return map; @@ -140,7 +137,7 @@ void unmap_file(mapped_files *map) ** Instansiate templates *****************************************************************************/ -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION /* Used templates */ template class I_List<mapped_files>; template class I_List_iterator<mapped_files>; diff --git a/sql/sql_map.h b/sql/sql_map.h index bfa6011ac54..d8eb64995aa 100644 --- a/sql/sql_map.h +++ 
b/sql/sql_map.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001, 2005 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/sql_olap.cc b/sql/sql_olap.cc index 024abb6c74b..2749b0d1ec6 100644 --- a/sql/sql_olap.cc +++ b/sql/sql_olap.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -77,7 +76,8 @@ static int make_new_olap_select(LEX *lex, SELECT_LEX *select_lex, List<Item> new { not_found= 0; ((Item_field*)new_item)->db_name=iif->db_name; - Item_field *new_one=new Item_field(iif->db_name, iif->table_name, iif->field_name); + Item_field *new_one=new Item_field(&select_lex->context, + iif->db_name, iif->table_name, iif->field_name); privlist.push_back(new_one); if (add_to_list(new_select->group_list,new_one,1)) return 1; @@ -152,11 +152,11 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex) List<Item> all_fields(select_lex->item_list); - if (setup_tables((TABLE_LIST *)select_lex->table_list.first) || - setup_fields(lex->thd, 0, (TABLE_LIST *)select_lex->table_list.first, - select_lex->item_list, 1, &all_fields,1) || - setup_fields(lex->thd, 0, (TABLE_LIST *)select_lex->table_list.first, - item_list_copy, 1, &all_fields, 1)) + if (setup_tables(lex->thd, &select_lex->context, &select_lex->top_join_list, + (TABLE_LIST *)select_lex->table_list.first + &select_lex->where, &select_lex->leaf_tables, FALSE) || + setup_fields(lex->thd, 0, select_lex->item_list, 1, &all_fields,1) || + setup_fields(lex->thd, 0, item_list_copy, 1, &all_fields, 1)) return -1; if (select_lex->olap == CUBE_TYPE) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 66b68cfc2f1..1b8bfd38fc4 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -14,6 +13,7 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#define MYSQL_LEX 1 #include "mysql_priv.h" #include "sql_repl.h" #include "repl_failsafe.h" @@ -29,6 +29,11 @@ #include "ha_ndbcluster.h" #endif +#include "sp_head.h" +#include "sp.h" +#include "sp_cache.h" +#include "sql_trigger.h" + #ifdef HAVE_OPENSSL /* Without SSL the handshake consists of one packet. This packet @@ -47,25 +52,29 @@ #define MIN_HANDSHAKE_SIZE 6 #endif /* HAVE_OPENSSL */ +/* Used in error handling only */ +#define SP_TYPE_STRING(LP) \ + ((LP)->sphead->m_type == TYPE_ENUM_FUNCTION ? "FUNCTION" : "PROCEDURE") +#define SP_COM_STRING(LP) \ + ((LP)->sql_command == SQLCOM_CREATE_SPFUNCTION || \ + (LP)->sql_command == SQLCOM_ALTER_FUNCTION || \ + (LP)->sql_command == SQLCOM_SHOW_CREATE_FUNC || \ + (LP)->sql_command == SQLCOM_DROP_FUNCTION ? 
\ + "FUNCTION" : "PROCEDURE") + #ifdef SOLARIS extern "C" int gethostname(char *name, int namelen); #endif -static void time_out_user_resource_limits(THD *thd, USER_CONN *uc); #ifndef NO_EMBEDDED_ACCESS_CHECKS +static void time_out_user_resource_limits(THD *thd, USER_CONN *uc); static int check_for_max_user_connections(THD *thd, USER_CONN *uc); static void decrease_user_connections(USER_CONN *uc); #endif /* NO_EMBEDDED_ACCESS_CHECKS */ static bool check_db_used(THD *thd,TABLE_LIST *tables); -static bool check_multi_update_lock(THD *thd, TABLE_LIST *tables, - List<Item> *fields, SELECT_LEX *select_lex); static void remove_escape(char *name); -static void refresh_status(void); static bool append_file_to_dir(THD *thd, const char **filename_ptr, const char *table_name); - -static TABLE_LIST* get_table_by_alias(TABLE_LIST* tl, const char* db, - const char* alias); const char *any_db="*any*"; // Special symbol for check_access @@ -75,11 +84,13 @@ const char *command_name[]={ "Connect","Kill","Debug","Ping","Time","Delayed insert","Change user", "Binlog Dump","Table Dump", "Connect Out", "Register Slave", "Prepare", "Execute", "Long Data", "Close stmt", - "Reset stmt", "Set option", + "Reset stmt", "Set option", "Fetch", "Error" // Last command number }; -static char empty_c_string[1]= {0}; // Used for not defined 'db' +const char *xa_state_names[]={ + "NON-EXISTING", "ACTIVE", "IDLE", "PREPARED" +}; #ifdef __WIN__ static void test_signal(int sig_ptr) @@ -105,7 +116,7 @@ static void unlock_locked_tables(THD *thd) if (thd->locked_tables) { thd->lock=thd->locked_tables; - thd->locked_tables=0; // Will be automaticly closed + thd->locked_tables=0; // Will be automatically closed close_thread_tables(thd); // Free tables } } @@ -114,17 +125,60 @@ static void unlock_locked_tables(THD *thd) static bool end_active_trans(THD *thd) { int error=0; + DBUG_ENTER("end_active_trans"); + if (unlikely(thd->in_sub_stmt)) + { + my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0)); + 
DBUG_RETURN(1); + } + if (thd->transaction.xid_state.xa_state != XA_NOTR) + { + my_error(ER_XAER_RMFAIL, MYF(0), + xa_state_names[thd->transaction.xid_state.xa_state]); + DBUG_RETURN(1); + } if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN | OPTION_TABLE_LOCK)) { - thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE); + DBUG_PRINT("info",("options: 0x%lx", (ulong) thd->options)); + /* Safety if one did "drop table" on locked tables */ + if (!thd->locked_tables) + thd->options&= ~OPTION_TABLE_LOCK; thd->server_status&= ~SERVER_STATUS_IN_TRANS; if (ha_commit(thd)) error=1; + thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE); } - return error; + DBUG_RETURN(error); } +static bool begin_trans(THD *thd) +{ + int error=0; + if (unlikely(thd->in_sub_stmt)) + { + my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0)); + return 1; + } + if (thd->locked_tables) + { + thd->lock=thd->locked_tables; + thd->locked_tables=0; // Will be automatically closed + close_thread_tables(thd); // Free tables + } + if (end_active_trans(thd)) + error= -1; + else + { + LEX *lex= thd->lex; + thd->options= ((thd->options & (ulong) ~(OPTION_STATUS_NO_TRANS_UPDATE)) | + OPTION_BEGIN); + thd->server_status|= SERVER_STATUS_IN_TRANS; + if (lex->start_transaction_opt & MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT) + error= ha_start_consistent_snapshot(thd); + } + return error; +} #ifdef HAVE_REPLICATION /* @@ -132,11 +186,23 @@ static bool end_active_trans(THD *thd) */ inline bool all_tables_not_ok(THD *thd, TABLE_LIST *tables) { - return (table_rules_on && tables && !tables_ok(thd,tables)); + return table_rules_on && tables && !tables_ok(thd,tables); } #endif +static bool some_non_temp_table_to_be_updated(THD *thd, TABLE_LIST *tables) +{ + for (TABLE_LIST *table= tables; table; table= table->next_global) + { + DBUG_ASSERT(table->db && table->table_name); + if (table->updating && + !find_temporary_table(thd, table->db, table->table_name)) + return 1; + } + 
return 0; +} + #ifndef NO_EMBEDDED_ACCESS_CHECKS static HASH hash_user_connections; @@ -146,7 +212,7 @@ static int get_or_create_user_conn(THD *thd, const char *user, { int return_val= 0; uint temp_len, user_len; - char temp_user[USERNAME_LENGTH+HOSTNAME_LENGTH+2]; + char temp_user[USER_HOST_BUFF_SIZE]; struct user_conn *uc; DBUG_ASSERT(user != 0); @@ -163,23 +229,21 @@ static int get_or_create_user_conn(THD *thd, const char *user, my_malloc(sizeof(struct user_conn) + temp_len+1, MYF(MY_WME))))) { - send_error(thd, 0, NullS); // Out of memory + net_send_error(thd, 0, NullS); // Out of memory return_val= 1; goto end; } uc->user=(char*) (uc+1); memcpy(uc->user,temp_user,temp_len+1); - uc->user_len= user_len; - uc->host= uc->user + uc->user_len + 1; + uc->host= uc->user + user_len + 1; uc->len= temp_len; - uc->connections= 0; - uc->questions= uc->updates= uc->conn_per_hour=0; + uc->connections= uc->questions= uc->updates= uc->conn_per_hour= 0; uc->user_resources= *mqh; uc->intime= thd->thr_create_time; if (my_hash_insert(&hash_user_connections, (byte*) uc)) { my_free((char*) uc,0); - send_error(thd, 0, NullS); // Out of memory + net_send_error(thd, 0, NullS); // Out of memory return_val= 1; goto end; } @@ -195,26 +259,27 @@ end: /* - Check if user exist and password supplied is correct. + Check if user exist and password supplied is correct. + SYNOPSIS check_user() - thd thread handle, thd->{host,user,ip} are used + thd thread handle, thd->security_ctx->{host,user,ip} are used command originator of the check: now check_user is called during connect and change user procedures; used for logging. - passwd scrambled password recieved from client + passwd scrambled password received from client passwd_len length of scrambled password db database name to connect to, may be NULL check_count dont know exactly Note, that host, user and passwd may point to communication buffer. 
- Current implementation does not depened on that, but future changes + Current implementation does not depend on that, but future changes should be done with this in mind; 'thd' is INOUT, all other params are 'IN'. RETURN VALUE - 0 OK; thd->user, thd->master_access, thd->priv_user, thd->db and - thd->db_access are updated; OK is sent to client; + 0 OK; thd->security_ctx->user/master_access/priv_user/db_access and + thd->db are updated; OK is sent to client; -1 access denied or handshake error; error is sent to client; >0 error, not sent to client */ @@ -226,17 +291,23 @@ int check_user(THD *thd, enum enum_server_command command, DBUG_ENTER("check_user"); #ifdef NO_EMBEDDED_ACCESS_CHECKS - thd->master_access= GLOBAL_ACLS; // Full rights - /* Change database if necessary: OK or FAIL is sent in mysql_change_db */ + thd->main_security_ctx.master_access= GLOBAL_ACLS; // Full rights + /* Change database if necessary */ if (db && db[0]) { - thd->db= 0; - thd->db_length= 0; - if (mysql_change_db(thd, db)) + /* + thd->db is saved in caller and needs to be freed by caller if this + function returns 0 + */ + thd->reset_db(NULL, 0); + if (mysql_change_db(thd, db, FALSE)) + { + /* Send the error to the client */ + net_send_error(thd); DBUG_RETURN(-1); + } } - else - send_ok(thd); + send_ok(thd); DBUG_RETURN(0); #else @@ -251,7 +322,7 @@ int check_user(THD *thd, enum enum_server_command command, */ if (opt_secure_auth_local && passwd_len == SCRAMBLE_LENGTH_323) { - net_printf(thd, ER_NOT_SUPPORTED_AUTH_MODE); + net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE); mysql_log.write(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE)); DBUG_RETURN(-1); } @@ -262,13 +333,12 @@ int check_user(THD *thd, enum enum_server_command command, /* Clear thd->db as it points to something, that will be freed when - connection is closed. We don't want to accidently free a wrong pointer + connection is closed. We don't want to accidentally free a wrong pointer if connect failed. 
Also in case of 'CHANGE USER' failure, current database will be switched to 'no database selected'. */ - thd->db= 0; - thd->db_length= 0; - + thd->reset_db(NULL, 0); + USER_RESOURCES ur; int res= acl_getroot(thd, &ur, passwd, passwd_len); #ifndef EMBEDDED_LIBRARY @@ -283,15 +353,18 @@ int check_user(THD *thd, enum enum_server_command command, NET *net= &thd->net; if (opt_secure_auth_local) { - net_printf(thd, ER_SERVER_IS_IN_SECURE_AUTH_MODE, - thd->user, thd->host_or_ip); + net_printf_error(thd, ER_SERVER_IS_IN_SECURE_AUTH_MODE, + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip); mysql_log.write(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE), - thd->user, thd->host_or_ip); + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip); DBUG_RETURN(-1); } + /* We have to read very specific packet size */ if (send_old_password_request(thd) || - my_net_read(net) != SCRAMBLE_LENGTH_323 + 1) // We have to read very - { // specific packet size + my_net_read(net) != SCRAMBLE_LENGTH_323 + 1) + { inc_host_errors(&thd->remote.sin_addr); DBUG_RETURN(ER_HANDSHAKE_ERROR); } @@ -303,36 +376,43 @@ int check_user(THD *thd, enum enum_server_command command, /* here res is always >= 0 */ if (res == 0) { - if (!(thd->master_access & NO_ACCESS)) // authentification is OK + if (!(thd->main_security_ctx.master_access & + NO_ACCESS)) // authentication is OK { DBUG_PRINT("info", - ("Capabilities: %d packet_length: %ld Host: '%s' " + ("Capabilities: %lu packet_length: %ld Host: '%s' " "Login user: '%s' Priv_user: '%s' Using password: %s " - "Access: %u db: '%s'", - thd->client_capabilities, thd->max_client_packet_length, - thd->host_or_ip, thd->user, thd->priv_user, + "Access: %lu db: '%s'", + thd->client_capabilities, + thd->max_client_packet_length, + thd->main_security_ctx.host_or_ip, + thd->main_security_ctx.user, + thd->main_security_ctx.priv_user, passwd_len ? "yes": "no", - thd->master_access, thd->db ? 
thd->db : "*none*")); + thd->main_security_ctx.master_access, + (thd->db ? thd->db : "*none*"))); if (check_count) { VOID(pthread_mutex_lock(&LOCK_thread_count)); - bool count_ok= thread_count < max_connections + delayed_insert_threads - || (thd->master_access & SUPER_ACL); + bool count_ok= thread_count <= max_connections + delayed_insert_threads + || (thd->main_security_ctx.master_access & SUPER_ACL); VOID(pthread_mutex_unlock(&LOCK_thread_count)); if (!count_ok) - { // too many connections - send_error(thd, ER_CON_COUNT_ERROR); + { // too many connections + net_send_error(thd, ER_CON_COUNT_ERROR); DBUG_RETURN(-1); } } /* Why logging is performed before all checks've passed? */ - mysql_log.write(thd,command, - (thd->priv_user == thd->user ? + mysql_log.write(thd, command, + (thd->main_security_ctx.priv_user == + thd->main_security_ctx.user ? (char*) "%s@%s on %s" : (char*) "%s@%s as anonymous on %s"), - thd->user, thd->host_or_ip, + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip, db ? db : (char*) ""); /* @@ -340,31 +420,38 @@ int check_user(THD *thd, enum enum_server_command command, set to 0 here because we don't have an active database yet (and we may not have an active database to set. */ - thd->db_access=0; + thd->main_security_ctx.db_access=0; /* Don't allow user to connect if he has done too many queries */ - if ((ur.questions || ur.updates || ur.connections || + if ((ur.questions || ur.updates || ur.conn_per_hour || ur.user_conn || max_user_connections) && - get_or_create_user_conn(thd,thd->user,thd->host_or_ip,&ur)) + get_or_create_user_conn(thd, + (opt_old_style_user_limits ? thd->main_security_ctx.user : + thd->main_security_ctx.priv_user), + (opt_old_style_user_limits ? 
thd->main_security_ctx.host_or_ip : + thd->main_security_ctx.priv_host), + &ur)) DBUG_RETURN(-1); if (thd->user_connect && - (thd->user_connect->user_resources.connections || + (thd->user_connect->user_resources.conn_per_hour || + thd->user_connect->user_resources.user_conn || max_user_connections) && check_for_max_user_connections(thd, thd->user_connect)) DBUG_RETURN(-1); - /* Change database if necessary: OK or FAIL is sent in mysql_change_db */ + /* Change database if necessary */ if (db && db[0]) { - if (mysql_change_db(thd, db)) + if (mysql_change_db(thd, db, FALSE)) { + /* Send error to the client */ + net_send_error(thd); if (thd->user_connect) decrease_user_connections(thd->user_connect); DBUG_RETURN(-1); } } - else - send_ok(thd); + send_ok(thd); thd->password= test(passwd_len); // remember for error messages /* Ready to handle queries */ DBUG_RETURN(0); @@ -372,17 +459,17 @@ int check_user(THD *thd, enum enum_server_command command, } else if (res == 2) // client gave short hash, server has long hash { - net_printf(thd, ER_NOT_SUPPORTED_AUTH_MODE); + net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE); mysql_log.write(thd,COM_CONNECT,ER(ER_NOT_SUPPORTED_AUTH_MODE)); DBUG_RETURN(-1); } - net_printf(thd, ER_ACCESS_DENIED_ERROR, - thd->user, - thd->host_or_ip, - passwd_len ? ER(ER_YES) : ER(ER_NO)); + net_printf_error(thd, ER_ACCESS_DENIED_ERROR, + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip, + passwd_len ? ER(ER_YES) : ER(ER_NO)); mysql_log.write(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR), - thd->user, - thd->host_or_ip, + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip, passwd_len ? 
ER(ER_YES) : ER(ER_NO)); DBUG_RETURN(-1); #endif /* NO_EMBEDDED_ACCESS_CHECKS */ @@ -441,20 +528,29 @@ static int check_for_max_user_connections(THD *thd, USER_CONN *uc) DBUG_ENTER("check_for_max_user_connections"); (void) pthread_mutex_lock(&LOCK_user_conn); - if (max_user_connections && + if (max_user_connections && !uc->user_resources.user_conn && max_user_connections < (uint) uc->connections) { - net_printf(thd,ER_TOO_MANY_USER_CONNECTIONS, uc->user); + net_printf_error(thd, ER_TOO_MANY_USER_CONNECTIONS, uc->user); error=1; goto end; } time_out_user_resource_limits(thd, uc); - if (uc->user_resources.connections && - uc->user_resources.connections <= uc->conn_per_hour) + if (uc->user_resources.user_conn && + uc->user_resources.user_conn < uc->connections) { - net_printf(thd, ER_USER_LIMIT_REACHED, uc->user, - "max_connections_per_hour", - (long) uc->user_resources.connections); + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, + "max_user_connections", + (long) uc->user_resources.user_conn); + error= 1; + goto end; + } + if (uc->user_resources.conn_per_hour && + uc->user_resources.conn_per_hour <= uc->conn_per_hour) + { + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, + "max_connections_per_hour", + (long) uc->user_resources.conn_per_hour); error=1; goto end; } @@ -517,6 +613,12 @@ void free_max_user_conn(void) sql_command is actually set to SQLCOM_END sometimes so we need the +1 to include it in the array. 
+ + numbers are: + 0 - read-only query + != 0 - query that may change a table + 2 - query that returns meaningful ROW_COUNT() - + a number of modified rows */ char uc_update_queries[SQLCOM_END+1]; @@ -528,29 +630,31 @@ void init_update_queries(void) uc_update_queries[SQLCOM_CREATE_TABLE]=1; uc_update_queries[SQLCOM_CREATE_INDEX]=1; uc_update_queries[SQLCOM_ALTER_TABLE]=1; - uc_update_queries[SQLCOM_UPDATE]=1; - uc_update_queries[SQLCOM_INSERT]=1; - uc_update_queries[SQLCOM_INSERT_SELECT]=1; - uc_update_queries[SQLCOM_DELETE]=1; + uc_update_queries[SQLCOM_UPDATE]=2; + uc_update_queries[SQLCOM_UPDATE_MULTI]=2; + uc_update_queries[SQLCOM_INSERT]=2; + uc_update_queries[SQLCOM_INSERT_SELECT]=2; + uc_update_queries[SQLCOM_DELETE]=2; + uc_update_queries[SQLCOM_DELETE_MULTI]=2; uc_update_queries[SQLCOM_TRUNCATE]=1; uc_update_queries[SQLCOM_DROP_TABLE]=1; uc_update_queries[SQLCOM_LOAD]=1; uc_update_queries[SQLCOM_CREATE_DB]=1; uc_update_queries[SQLCOM_DROP_DB]=1; - uc_update_queries[SQLCOM_REPLACE]=1; - uc_update_queries[SQLCOM_REPLACE_SELECT]=1; + uc_update_queries[SQLCOM_REPLACE]=2; + uc_update_queries[SQLCOM_REPLACE_SELECT]=2; uc_update_queries[SQLCOM_RENAME_TABLE]=1; uc_update_queries[SQLCOM_BACKUP_TABLE]=1; uc_update_queries[SQLCOM_RESTORE_TABLE]=1; - uc_update_queries[SQLCOM_DELETE_MULTI]=1; uc_update_queries[SQLCOM_DROP_INDEX]=1; - uc_update_queries[SQLCOM_UPDATE_MULTI]=1; + uc_update_queries[SQLCOM_CREATE_VIEW]=1; + uc_update_queries[SQLCOM_DROP_VIEW]=1; } bool is_update_query(enum enum_sql_command command) { DBUG_ASSERT(command >= 0 && command <= SQLCOM_END); - return uc_update_queries[command]; + return uc_update_queries[command] != 0; } /* @@ -567,6 +671,8 @@ bool is_update_query(enum enum_sql_command command) safe to test and modify members of the USER_CONN structure. */ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + static void time_out_user_resource_limits(THD *thd, USER_CONN *uc) { time_t check_time = thd->start_time ? 
thd->start_time : time(NULL); @@ -584,7 +690,6 @@ static void time_out_user_resource_limits(THD *thd, USER_CONN *uc) DBUG_VOID_RETURN; } - /* Check if maximum queries per hour limit has been reached returns 0 if OK. @@ -592,7 +697,6 @@ static void time_out_user_resource_limits(THD *thd, USER_CONN *uc) static bool check_mqh(THD *thd, uint check_command) { -#ifndef NO_EMBEDDED_ACCESS_CHECKS bool error= 0; USER_CONN *uc=thd->user_connect; DBUG_ENTER("check_mqh"); @@ -606,8 +710,8 @@ static bool check_mqh(THD *thd, uint check_command) if (uc->user_resources.questions && uc->questions++ >= uc->user_resources.questions) { - net_printf(thd, ER_USER_LIMIT_REACHED, uc->user, "max_questions", - (long) uc->user_resources.questions); + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, "max_questions", + (long) uc->user_resources.questions); error=1; goto end; } @@ -617,8 +721,8 @@ static bool check_mqh(THD *thd, uint check_command) if (uc->user_resources.updates && uc_update_queries[check_command] && uc->updates++ >= uc->user_resources.updates) { - net_printf(thd, ER_USER_LIMIT_REACHED, uc->user, "max_updates", - (long) uc->user_resources.updates); + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, "max_updates", + (long) uc->user_resources.updates); error=1; goto end; } @@ -626,13 +730,12 @@ static bool check_mqh(THD *thd, uint check_command) end: (void) pthread_mutex_unlock(&LOCK_user_conn); DBUG_RETURN(error); -#else - return (0); -#endif /* NO_EMBEDDED_ACCESS_CHECKS */ } +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + -static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0) +static void reset_mqh(LEX_USER *lu, bool get_them= 0) { #ifndef NO_EMBEDDED_ACCESS_CHECKS (void) pthread_mutex_lock(&LOCK_user_conn); @@ -640,7 +743,7 @@ static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0) { USER_CONN *uc; uint temp_len=lu->user.length+lu->host.length+2; - char temp_user[USERNAME_LENGTH+HOSTNAME_LENGTH+2]; + char temp_user[USER_HOST_BUFF_SIZE]; 
memcpy(temp_user,lu->user.str,lu->user.length); memcpy(temp_user+lu->user.length+1,lu->host.str,lu->host.length); @@ -654,8 +757,9 @@ static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0) uc->conn_per_hour=0; } } - else // for FLUSH PRIVILEGES and FLUSH USER_RESOURCES + else { + /* for FLUSH PRIVILEGES and FLUSH USER_RESOURCES */ for (uint idx=0;idx < hash_user_connections.records; idx++) { USER_CONN *uc=(struct user_conn *) hash_element(&hash_user_connections, @@ -728,41 +832,45 @@ static int check_connection(THD *thd) thd->set_active_vio(net->vio); #endif - if (!thd->host) // If TCP/IP connection + if (!thd->main_security_ctx.host) // If TCP/IP connection { char ip[30]; if (vio_peer_addr(net->vio, ip, &thd->peer_port)) return (ER_BAD_HOST_ERROR); - if (!(thd->ip= my_strdup(ip,MYF(0)))) + if (!(thd->main_security_ctx.ip= my_strdup(ip,MYF(0)))) return (ER_OUT_OF_RESOURCES); - thd->host_or_ip= thd->ip; + thd->main_security_ctx.host_or_ip= thd->main_security_ctx.ip; vio_in_addr(net->vio,&thd->remote.sin_addr); if (!(specialflag & SPECIAL_NO_RESOLVE)) { vio_in_addr(net->vio,&thd->remote.sin_addr); - thd->host=ip_to_hostname(&thd->remote.sin_addr,&connect_errors); + thd->main_security_ctx.host= + ip_to_hostname(&thd->remote.sin_addr, &connect_errors); /* Cut very long hostnames to avoid possible overflows */ - if (thd->host) + if (thd->main_security_ctx.host) { - if (thd->host != my_localhost) - thd->host[min(strlen(thd->host), HOSTNAME_LENGTH)]= 0; - thd->host_or_ip= thd->host; + if (thd->main_security_ctx.host != my_localhost) + thd->main_security_ctx.host[min(strlen(thd->main_security_ctx.host), + HOSTNAME_LENGTH)]= 0; + thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host; } if (connect_errors > max_connect_errors) return(ER_HOST_IS_BLOCKED); } DBUG_PRINT("info",("Host: %s ip: %s", - thd->host ? thd->host : "unknown host", - thd->ip ? thd->ip : "unknown ip")); - if (acl_check_host(thd->host,thd->ip)) + (thd->main_security_ctx.host ? 
+ thd->main_security_ctx.host : "unknown host"), + (thd->main_security_ctx.ip ? + thd->main_security_ctx.ip : "unknown ip"))); + if (acl_check_host(thd->main_security_ctx.host, thd->main_security_ctx.ip)) return(ER_HOST_NOT_PRIVILEGED); } else /* Hostname given means that the connection was on a socket */ { - DBUG_PRINT("info",("Host: %s",thd->host)); - thd->host_or_ip= thd->host; - thd->ip= 0; + DBUG_PRINT("info",("Host: %s", thd->main_security_ctx.host)); + thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host; + thd->main_security_ctx.ip= 0; /* Reset sin_addr */ bzero((char*) &thd->remote, sizeof(thd->remote)); } @@ -780,7 +888,7 @@ static int check_connection(THD *thd) #endif /* HAVE_COMPRESS */ #ifdef HAVE_OPENSSL if (ssl_acceptor_fd) - client_flags |= CLIENT_SSL; /* Wow, SSL is avalaible! */ + client_flags |= CLIENT_SSL; /* Wow, SSL is available! */ #endif /* HAVE_OPENSSL */ end= strnmov(buff, server_version, SERVER_VERSION_LENGTH) + 1; @@ -828,17 +936,6 @@ static int check_connection(THD *thd) return(ER_OUT_OF_RESOURCES); thd->client_capabilities=uint2korr(net->read_pos); -#ifdef TO_BE_REMOVED_IN_4_1_RELEASE - /* - This is just a safety check against any client that would use the old - CLIENT_CHANGE_USER flag - */ - if ((thd->client_capabilities & CLIENT_PROTOCOL_41) && - !(thd->client_capabilities & (CLIENT_RESERVED | - CLIENT_SECURE_CONNECTION | - CLIENT_MULTI_RESULTS))) - thd->client_capabilities&= ~CLIENT_PROTOCOL_41; -#endif if (thd->client_capabilities & CLIENT_PROTOCOL_41) { thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; @@ -857,7 +954,7 @@ static int check_connection(THD *thd) if (thd->client_capabilities & CLIENT_IGNORE_SPACE) thd->variables.sql_mode|= MODE_IGNORE_SPACE; #ifdef HAVE_OPENSSL - DBUG_PRINT("info", ("client capabilities: %d", thd->client_capabilities)); + DBUG_PRINT("info", ("client capabilities: %lu", thd->client_capabilities)); if (thd->client_capabilities & CLIENT_SSL) { /* Do the SSL layering. 
*/ @@ -869,8 +966,7 @@ static int check_connection(THD *thd) DBUG_PRINT("info", ("IO layer change in progress...")); if (sslaccept(ssl_acceptor_fd, net->vio, net->read_timeout)) { - DBUG_PRINT("error", ("Failed to read user information (pkt_len= %lu)", - pkt_len)); + DBUG_PRINT("error", ("Failed to accept new SSL connection")); inc_host_errors(&thd->remote.sin_addr); return(ER_HANDSHAKE_ERROR); } @@ -900,6 +996,7 @@ static int check_connection(THD *thd) char *user= end; char *passwd= strend(user)+1; + uint user_len= passwd - user - 1; char *db= passwd; char db_buff[NAME_LEN + 1]; // buffer to store db in utf8 char user_buff[USERNAME_LENGTH + 1]; // buffer to store user in utf8 @@ -932,14 +1029,22 @@ static int check_connection(THD *thd) db= db_buff; } - user_buff[copy_and_convert(user_buff, sizeof(user_buff)-1, - system_charset_info, user, strlen(user), - thd->charset(), &dummy_errors)]= '\0'; + user_buff[user_len= copy_and_convert(user_buff, sizeof(user_buff)-1, + system_charset_info, user, user_len, + thd->charset(), &dummy_errors)]= '\0'; user= user_buff; - if (thd->user) - x_free(thd->user); - if (!(thd->user= my_strdup(user, MYF(0)))) + /* If username starts and ends in "'", chop them off */ + if (user_len > 1 && user[0] == '\'' && user[user_len - 1] == '\'') + { + user[user_len-1]= 0; + user++; + user_len-= 2; + } + + if (thd->main_security_ctx.user) + x_free(thd->main_security_ctx.user); + if (!(thd->main_security_ctx.user= my_strdup(user, MYF(0)))) return (ER_OUT_OF_RESOURCES); return check_user(thd, COM_CONNECT, passwd, passwd_len, db, TRUE); } @@ -968,6 +1073,7 @@ void execute_init_command(THD *thd, sys_var_str *init_command_var, */ save_vio= thd->net.vio; thd->net.vio= 0; + thd->net.no_send_error= 0; dispatch_command(COM_QUERY, thd, thd->query, thd->query_length+1); rw_unlock(var_mutex); thd->client_capabilities= save_client_capabilities; @@ -975,7 +1081,7 @@ void execute_init_command(THD *thd, sys_var_str *init_command_var, } 
-pthread_handler_decl(handle_one_connection,arg) +pthread_handler_t handle_one_connection(void *arg) { THD *thd=(THD*) arg; uint launch_time = @@ -986,7 +1092,7 @@ pthread_handler_decl(handle_one_connection,arg) pthread_detach_this_thread(); #if !defined( __WIN__) && !defined(OS2) // Win32 calls this in pthread_create - // The following calls needs to be done before we call DBUG_ macros + /* The following calls needs to be done before we call DBUG_ macros */ if (!(test_flags & TEST_NO_THREADS) & my_thread_init()) { close_connection(thd, ER_OUT_OF_RESOURCES, 1); @@ -1003,17 +1109,18 @@ pthread_handler_decl(handle_one_connection,arg) of handle_one_connection, which is thd. We need to know the start of the stack so that we could check for stack overruns. */ - DBUG_PRINT("info", ("handle_one_connection called by thread %d\n", + DBUG_PRINT("info", ("handle_one_connection called by thread %lu\n", thd->thread_id)); - // now that we've called my_thread_init(), it is safe to call DBUG_* + /* now that we've called my_thread_init(), it is safe to call DBUG_* */ #if defined(__WIN__) - init_signals(); // IRENA; testing ? 
+ init_signals(); #elif !defined(OS2) && !defined(__NETWARE__) sigset_t set; VOID(sigemptyset(&set)); // Get mask in use VOID(pthread_sigmask(SIG_UNBLOCK,&set,&thd->block_signals)); #endif + thd->thread_stack= (char*) &thd; if (thd->store_globals()) { close_connection(thd, ER_OUT_OF_RESOURCES, 1); @@ -1026,7 +1133,8 @@ pthread_handler_decl(handle_one_connection,arg) { int error; NET *net= &thd->net; - thd->thread_stack= (char*) &thd; + Security_context *sctx= thd->security_ctx; + net->no_send_error= 0; /* Use "connect_timeout" value during connection phase */ net_set_read_timeout(net, connect_timeout); @@ -1035,7 +1143,7 @@ pthread_handler_decl(handle_one_connection,arg) if ((error=check_connection(thd))) { // Wrong permissions if (error > 0) - net_printf(thd,error,thd->host_or_ip); + net_printf_error(thd, error, sctx->host_or_ip); #ifdef __NT__ if (vio_type(net->vio) == VIO_TYPE_NAMEDPIPE) my_sleep(1000); /* must wait after eof() */ @@ -1044,7 +1152,7 @@ pthread_handler_decl(handle_one_connection,arg) goto end_thread; } #ifdef __NETWARE__ - netware_reg_user(thd->ip, thd->user, "MySQL"); + netware_reg_user(sctx->ip, sctx->user, "MySQL"); #endif if (thd->variables.max_join_size == HA_POS_ERROR) thd->options |= OPTION_BIG_SELECTS; @@ -1053,37 +1161,48 @@ pthread_handler_decl(handle_one_connection,arg) thd->version= refresh_version; thd->proc_info= 0; - thd->set_time(); + thd->command= COM_SLEEP; thd->init_for_queries(); - if (sys_init_connect.value_length && !(thd->master_access & SUPER_ACL)) + + if (sys_init_connect.value_length && !(sctx->master_access & SUPER_ACL)) { execute_init_command(thd, &sys_init_connect, &LOCK_sys_init_connect); if (thd->query_error) - thd->killed= 1; + { + thd->killed= THD::KILL_CONNECTION; + sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), + thd->thread_id,(thd->db ? thd->db : "unconnected"), + sctx->user ? 
sctx->user : "unauthenticated", + sctx->host_or_ip, "init_connect command failed"); + sql_print_warning("%s", net->last_error); + } + thd->proc_info=0; + thd->init_for_queries(); } /* Connect completed, set read/write timeouts back to tdefault */ net_set_read_timeout(net, thd->variables.net_read_timeout); net_set_write_timeout(net, thd->variables.net_write_timeout); - while (!net->error && net->vio != 0 && !thd->killed) + while (!net->error && net->vio != 0 && + !(thd->killed == THD::KILL_CONNECTION)) { + net->no_send_error= 0; if (do_command(thd)) break; } if (thd->user_connect) decrease_user_connections(thd->user_connect); - free_root(thd->mem_root,MYF(0)); if (net->error && net->vio != 0 && net->report_error) { if (!thd->killed && thd->variables.log_warnings > 1) - sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), + sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), thd->thread_id,(thd->db ? thd->db : "unconnected"), - thd->user ? thd->user : "unauthenticated", - thd->host_or_ip, + sctx->user ? sctx->user : "unauthenticated", + sctx->host_or_ip, (net->last_errno ? 
ER(net->last_errno) : ER(ER_UNKNOWN_ERROR))); - send_error(thd,net->last_errno,NullS); + net_send_error(thd, net->last_errno, NullS); statistic_increment(aborted_threads,&LOCK_status); } else if (thd->killed) @@ -1099,6 +1218,7 @@ end_thread: or this thread has been schedule to handle the next query */ thd= current_thd; + thd->thread_stack= (char*) &thd; } while (!(test_flags & TEST_NO_THREADS)); /* The following is only executed if we are not using --one-thread */ return(0); /* purecov: deadcode */ @@ -1111,13 +1231,14 @@ end_thread: Used when creating the initial grant tables */ -extern "C" pthread_handler_decl(handle_bootstrap,arg) +pthread_handler_t handle_bootstrap(void *arg) { THD *thd=(THD*) arg; FILE *file=bootstrap_file; char *buff; /* The following must be called before DBUG_ENTER */ + thd->thread_stack= (char*) &thd; if (my_thread_init() || thd->store_globals()) { #ifndef EMBEDDED_LIBRARY @@ -1143,57 +1264,82 @@ extern "C" pthread_handler_decl(handle_bootstrap,arg) thd->proc_info=0; thd->version=refresh_version; - thd->priv_user=thd->user=(char*) my_strdup("boot", MYF(MY_WME)); + thd->security_ctx->priv_user= + thd->security_ctx->user= (char*) my_strdup("boot", MYF(MY_WME)); + /* + Make the "client" handle multiple results. This is necessary + to enable stored procedures with SELECTs and Dynamic SQL + in init-file. + */ + thd->client_capabilities|= CLIENT_MULTI_RESULTS; buff= (char*) thd->net.buff; thd->init_for_queries(); while (fgets(buff, thd->net.max_packet, file)) { - ulong length= (ulong) strlen(buff); - while (buff[length-1] != '\n' && !feof(file)) - { - /* - We got only a part of the current string. Will try to increase - net buffer then read the rest of the current string. 
- */ - if (net_realloc(&(thd->net), 2 * thd->net.max_packet)) - { - send_error(thd, thd->net.last_errno, NullS); - thd->is_fatal_error= 1; - break; - } - buff= (char*) thd->net.buff; - fgets(buff + length, thd->net.max_packet - length, file); - length+= (ulong) strlen(buff + length); - } - if (thd->is_fatal_error) - break; + ulong length= (ulong) strlen(buff); + while (buff[length-1] != '\n' && !feof(file)) + { + /* + We got only a part of the current string. Will try to increase + net buffer then read the rest of the current string. + */ + if (net_realloc(&(thd->net), 2 * thd->net.max_packet)) + { + net_send_error(thd, ER_NET_PACKET_TOO_LARGE, NullS); + thd->fatal_error(); + break; + } + buff= (char*) thd->net.buff; + fgets(buff + length, thd->net.max_packet - length, file); + length+= (ulong) strlen(buff + length); + } + if (thd->is_fatal_error) + break; + while (length && (my_isspace(thd->charset(), buff[length-1]) || buff[length-1] == ';')) length--; buff[length]=0; thd->query_length=length; - thd->query= thd->memdup_w_gap(buff, length+1, thd->db_length+1); + thd->query= thd->memdup_w_gap(buff, length+1, + thd->db_length+1+QUERY_CACHE_FLAGS_SIZE); thd->query[length] = '\0'; - thd->query_id=query_id++; - if (mqh_used && thd->user_connect && check_mqh(thd, SQLCOM_END)) - { - thd->net.error = 0; - close_thread_tables(thd); // Free tables - free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); - break; - } + DBUG_PRINT("query",("%-.4096s",thd->query)); + /* + We don't need to obtain LOCK_thread_count here because in bootstrap + mode we have only one thread. 
+ */ + thd->query_id=next_query_id(); thd->set_time(); mysql_parse(thd,thd->query,length); close_thread_tables(thd); // Free tables + if (thd->is_fatal_error) break; + + if (thd->net.report_error) + { + /* The query failed, send error to log and abort bootstrap */ + net_send_error(thd); + thd->fatal_error(); + break; + } + free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); +#ifdef USING_TRANSACTIONS free_root(&thd->transaction.mem_root,MYF(MY_KEEP_PREALLOC)); +#endif } - /* thd->fatal_error should be set in case something went wrong */ end: + /* Remember the exit code of bootstrap */ + bootstrap_error= thd->is_fatal_error; + + net_end(&thd->net); + thd->cleanup(); + delete thd; + #ifndef EMBEDDED_LIBRARY (void) pthread_mutex_lock(&LOCK_thread_count); thread_count--; @@ -1202,26 +1348,40 @@ end: my_thread_end(); pthread_exit(0); #endif - DBUG_RETURN(0); // Never reached + DBUG_RETURN(0); } - /* This works because items are allocated with sql_alloc() */ - -void free_items(Item *item) -{ - for (; item ; item=item->next) - item->delete_self(); -} /* This works because items are allocated with sql_alloc() */ void cleanup_items(Item *item) { + DBUG_ENTER("cleanup_items"); for (; item ; item=item->next) item->cleanup(); + DBUG_VOID_RETURN; } -int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) +/* + Handle COM_TABLE_DUMP command + + SYNOPSIS + mysql_table_dump + thd thread handle + db database name or an empty string. If empty, + the current database of the connection is used + tbl_name name of the table to dump + + NOTES + This function is written to handle one specific command only. + + RETURN VALUE + 0 success + 1 error, the error message is set in THD +*/ + +static +int mysql_table_dump(THD* thd, char* db, char* tbl_name) { TABLE* table; TABLE_LIST* table_list; @@ -1230,19 +1390,19 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) db = (db && db[0]) ? 
db : thd->db; if (!(table_list = (TABLE_LIST*) thd->calloc(sizeof(TABLE_LIST)))) DBUG_RETURN(1); // out of memory - table_list->db = db; - table_list->real_name = table_list->alias = tbl_name; - table_list->lock_type = TL_READ_NO_INSERT; - table_list->next = 0; + table_list->db= db; + table_list->table_name= table_list->alias= tbl_name; + table_list->lock_type= TL_READ_NO_INSERT; + table_list->prev_global= &table_list; // can be removed after merge with 4.1 if (!db || check_db_name(db)) { - net_printf(thd,ER_WRONG_DB_NAME, db ? db : "NULL"); + my_error(ER_WRONG_DB_NAME ,MYF(0), db ? db : "NULL"); goto err; } if (lower_case_table_names) my_casedn_str(files_charset_info, tbl_name); - remove_escape(table_list->real_name); + remove_escape(table_list->table_name); if (!(table=open_ltable(thd, table_list, TL_READ_NO_INSERT))) DBUG_RETURN(1); @@ -1252,20 +1412,92 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) thd->free_list = 0; thd->query_length=(uint) strlen(tbl_name); thd->query = tbl_name; - if ((error = mysqld_dump_create_info(thd, table, -1))) + if ((error = mysqld_dump_create_info(thd, table_list, -1))) { my_error(ER_GET_ERRNO, MYF(0), my_errno); goto err; } net_flush(&thd->net); - if ((error= table->file->dump(thd,fd))) + if ((error= table->file->dump(thd,-1))) my_error(ER_GET_ERRNO, MYF(0), error); err: - close_thread_tables(thd); DBUG_RETURN(error); } +/* + Ends the current transaction and (maybe) begin the next + + SYNOPSIS + end_trans() + thd Current thread + completion Completion type + + RETURN + 0 - OK +*/ + +int end_trans(THD *thd, enum enum_mysql_completiontype completion) +{ + bool do_release= 0; + int res= 0; + DBUG_ENTER("end_trans"); + + if (unlikely(thd->in_sub_stmt)) + { + my_error(ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG, MYF(0)); + DBUG_RETURN(1); + } + if (thd->transaction.xid_state.xa_state != XA_NOTR) + { + my_error(ER_XAER_RMFAIL, MYF(0), + xa_state_names[thd->transaction.xid_state.xa_state]); + DBUG_RETURN(1); + } + switch 
(completion) { + case COMMIT: + /* + We don't use end_active_trans() here to ensure that this works + even if there is a problem with the OPTION_AUTO_COMMIT flag + (Which of course should never happen...) + */ + thd->server_status&= ~SERVER_STATUS_IN_TRANS; + res= ha_commit(thd); + thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE); + break; + case COMMIT_RELEASE: + do_release= 1; /* fall through */ + case COMMIT_AND_CHAIN: + res= end_active_trans(thd); + if (!res && completion == COMMIT_AND_CHAIN) + res= begin_trans(thd); + break; + case ROLLBACK_RELEASE: + do_release= 1; /* fall through */ + case ROLLBACK: + case ROLLBACK_AND_CHAIN: + { + thd->server_status&= ~SERVER_STATUS_IN_TRANS; + if (ha_rollback(thd)) + res= -1; + thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE); + if (!res && (completion == ROLLBACK_AND_CHAIN)) + res= begin_trans(thd); + break; + } + default: + res= -1; + my_error(ER_UNKNOWN_COM_ERROR, MYF(0)); + DBUG_RETURN(-1); + } + + if (res < 0) + my_error(thd->killed_errno(), MYF(0)); + else if ((res == 0) && do_release) + thd->killed= THD::KILL_CONNECTION; + + DBUG_RETURN(res); +} #ifndef EMBEDDED_LIBRARY @@ -1315,7 +1547,7 @@ bool do_command(THD *thd) statistic_increment(aborted_threads,&LOCK_status); DBUG_RETURN(TRUE); // We have to close it. } - send_error(thd,net->last_errno,NullS); + net_send_error(thd, net->last_errno, NullS); net->error= 0; DBUG_RETURN(FALSE); } @@ -1346,8 +1578,10 @@ bool do_command(THD *thd) } #endif /* EMBEDDED_LIBRARY */ + /* Perform one connection-level (COM_XXXX) command. 
+ SYNOPSIS dispatch_command() thd connection handle @@ -1369,17 +1603,24 @@ bool dispatch_command(enum enum_server_command command, THD *thd, bool error= 0; DBUG_ENTER("dispatch_command"); + if (thd->killed == THD::KILL_QUERY || thd->killed == THD::KILL_BAD_DATA) + { + thd->killed= THD::NOT_KILLED; + thd->mysys_var->abort= 0; + } + thd->command=command; /* Commands which always take a long time are logged into the slow log only if opt_log_slow_admin_statements is set. */ thd->enable_slow_log= TRUE; + thd->lex->sql_command= SQLCOM_END; /* to avoid confusing VIEW detectors */ thd->set_time(); VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query_id=query_id; + thd->query_id= global_query_id; if (command != COM_STATISTICS && command != COM_PING) - query_id++; + next_query_id(); thread_running++; /* TODO: set thd->lex->sql_command to SQLCOM_END here */ VOID(pthread_mutex_unlock(&LOCK_thread_count)); @@ -1390,11 +1631,15 @@ bool dispatch_command(enum enum_server_command command, THD *thd, case COM_INIT_DB: { LEX_STRING tmp; - statistic_increment(com_stat[SQLCOM_CHANGE_DB],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_CHANGE_DB], + &LOCK_status); thd->convert_string(&tmp, system_charset_info, packet, strlen(packet), thd->charset()); - if (!mysql_change_db(thd, tmp.str)) + if (!mysql_change_db(thd, tmp.str, FALSE)) + { mysql_log.write(thd,command,"%s",thd->db); + send_ok(thd); + } break; } #ifdef HAVE_REPLICATION @@ -1411,23 +1656,27 @@ bool dispatch_command(enum enum_server_command command, THD *thd, uint db_len= *(uchar*) packet; if (db_len >= packet_length || db_len > NAME_LEN) { - send_error(thd, ER_UNKNOWN_COM_ERROR); + my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } uint tbl_len= *(uchar*) (packet + db_len + 1); if (db_len+tbl_len+2 > packet_length || tbl_len > NAME_LEN) { - send_error(thd, ER_UNKNOWN_COM_ERROR); + my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } - 
statistic_increment(com_other, &LOCK_status); + statistic_increment(thd->status_var.com_other, &LOCK_status); thd->enable_slow_log= opt_log_slow_admin_statements; db= thd->alloc(db_len + tbl_len + 2); + if (!db) + { + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + break; + } tbl_name= strmake(db, packet + 1, db_len)+1; strmake(tbl_name, packet + db_len + 2, tbl_len); - if (mysql_table_dump(thd, db, tbl_name, -1)) - send_error(thd); // dump to NET + mysql_table_dump(thd, db, tbl_name); break; } case COM_CHANGE_USER: @@ -1435,24 +1684,24 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->change_user(); thd->clear_error(); // if errors from rollback - statistic_increment(com_other, &LOCK_status); + statistic_increment(thd->status_var.com_other, &LOCK_status); char *user= (char*) packet; char *passwd= strend(user)+1; - /* + /* Old clients send null-terminated string ('\0' for empty string) for password. New clients send the size (1 byte) + string (not null terminated, so also '\0' for empty string). */ - char db_buff[NAME_LEN+1]; // buffer to store db in utf8 + char db_buff[NAME_LEN+1]; // buffer to store db in utf8 char *db= passwd; - uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? + uint passwd_len= thd->client_capabilities & CLIENT_SECURE_CONNECTION ? 
*passwd++ : strlen(passwd); db+= passwd_len + 1; #ifndef EMBEDDED_LIBRARY - /* Small check for incomming packet */ + /* Small check for incoming packet */ if ((uint) ((uchar*) db - net->read_pos) > packet_length) { - send_error(thd, ER_UNKNOWN_COM_ERROR); + my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } #endif @@ -1464,18 +1713,15 @@ bool dispatch_command(enum enum_server_command command, THD *thd, db= db_buff; /* Save user and privileges */ - uint save_master_access= thd->master_access; - uint save_db_access= thd->db_access; uint save_db_length= thd->db_length; - char *save_user= thd->user; - char *save_priv_user= thd->priv_user; char *save_db= thd->db; + Security_context save_security_ctx= *thd->security_ctx; USER_CONN *save_user_connect= thd->user_connect; - - if (!(thd->user= my_strdup(user, MYF(0)))) + + if (!(thd->security_ctx->user= my_strdup(user, MYF(0)))) { - thd->user= save_user; - send_error(thd, ER_OUT_OF_RESOURCES); + thd->security_ctx->user= save_security_ctx.user; + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); break; } @@ -1485,15 +1731,12 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (res) { - /* authentification failure, we shall restore old user */ + /* authentication failure, we shall restore old user */ if (res > 0) - send_error(thd, ER_UNKNOWN_COM_ERROR); - x_free(thd->user); - thd->user= save_user; - thd->priv_user= save_priv_user; + my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); + x_free(thd->security_ctx->user); + *thd->security_ctx= save_security_ctx; thd->user_connect= save_user_connect; - thd->master_access= save_master_access; - thd->db_access= save_db_access; thd->db= save_db; thd->db_length= save_db_length; } @@ -1505,31 +1748,36 @@ bool dispatch_command(enum enum_server_command command, THD *thd, decrease_user_connections(save_user_connect); #endif /* NO_EMBEDDED_ACCESS_CHECKS */ x_free((gptr) save_db); - x_free((gptr) save_user); + 
x_free((gptr) save_security_ctx.user); } break; } - case COM_EXECUTE: + case COM_STMT_EXECUTE: { mysql_stmt_execute(thd, packet, packet_length); break; } - case COM_LONG_DATA: + case COM_STMT_FETCH: + { + mysql_stmt_fetch(thd, packet, packet_length); + break; + } + case COM_STMT_SEND_LONG_DATA: { mysql_stmt_get_longdata(thd, packet, packet_length); break; } - case COM_PREPARE: + case COM_STMT_PREPARE: { mysql_stmt_prepare(thd, packet, packet_length); break; } - case COM_CLOSE_STMT: + case COM_STMT_CLOSE: { - mysql_stmt_free(thd, packet); + mysql_stmt_close(thd, packet); break; } - case COM_RESET_STMT: + case COM_STMT_RESET: { mysql_stmt_reset(thd, packet); break; @@ -1539,57 +1787,44 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (alloc_query(thd, packet, packet_length)) break; // fatal error is set char *packet_end= thd->query + thd->query_length; - mysql_log.write(thd,command,"%s",thd->query); + /* 'b' stands for 'buffer' parameter', special for 'my_snprintf' */ + const char *format= "%.*b"; + mysql_log.write(thd,command, format, thd->query_length, thd->query); DBUG_PRINT("query",("%-.4096s",thd->query)); + + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(),QUERY_PRIOR); + mysql_parse(thd,thd->query, thd->query_length); - while (!thd->killed && !thd->is_fatal_error && thd->lex->found_colon) + while (!thd->killed && thd->lex->found_semicolon && !thd->net.report_error) { - char *packet= thd->lex->found_colon; + char *next_packet= thd->lex->found_semicolon; + net->no_send_error= 0; /* Multiple queries exits, execute them individually - in embedded server - just store them to be executed later */ -#ifndef EMBEDDED_LIBRARY - if (thd->lock || thd->open_tables || thd->derived_tables) + if (thd->lock || thd->open_tables || thd->derived_tables || + thd->prelocked_mode) close_thread_tables(thd); -#endif - ulong length= (ulong)(packet_end-packet); + ulong length= (ulong)(packet_end - next_packet); log_slow_statement(thd); 
/* Remove garbage at start of query */ - while (my_isspace(thd->charset(), *packet) && length > 0) + while (my_isspace(thd->charset(), *next_packet) && length > 0) { - packet++; + next_packet++; length--; } VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query_length= length; - thd->query= packet; - thd->query_id= query_id++; + thd->query= next_packet; + thd->query_id= next_query_id(); thd->set_time(); /* Reset the query start time. */ /* TODO: set thd->lex->sql_command to SQLCOM_END here */ VOID(pthread_mutex_unlock(&LOCK_thread_count)); -#ifndef EMBEDDED_LIBRARY - mysql_parse(thd, packet, length); -#else - /* - 'packet' can point inside the query_rest's buffer - so we have to do memmove here - */ - if (thd->query_rest.length() > length) - { - memmove(thd->query_rest.c_ptr(), packet, length); - thd->query_rest.length(length); - } - else - thd->query_rest.copy(packet, length, thd->query_rest.charset()); - - thd->server_status&= ~ (SERVER_QUERY_NO_INDEX_USED | - SERVER_QUERY_NO_GOOD_INDEX_USED); - break; -#endif /*EMBEDDED_LIBRARY*/ + mysql_parse(thd, next_packet, length); } if (!(specialflag & SPECIAL_NO_PRIOR)) @@ -1599,44 +1834,65 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } case COM_FIELD_LIST: // This isn't actually needed #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), + MYF(0)); /* purecov: inspected */ break; #else { char *fields, *pend; + /* Locked closure of all tables */ TABLE_LIST table_list; LEX_STRING conv_name; - statistic_increment(com_stat[SQLCOM_SHOW_FIELDS],&LOCK_status); + /* used as fields initializator */ + lex_start(thd, 0, 0); + + statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS], + &LOCK_status); bzero((char*) &table_list,sizeof(table_list)); - if (!(table_list.db=thd->db)) - { - send_error(thd,ER_NO_DB_ERROR); + if (thd->copy_db_to(&table_list.db, 0)) break; - } - thd->free_list=0; 
pend= strend(packet); thd->convert_string(&conv_name, system_charset_info, packet, (uint) (pend-packet), thd->charset()); - table_list.alias= table_list.real_name= conv_name.str; + table_list.alias= table_list.table_name= conv_name.str; packet= pend+1; + + if (!my_strcasecmp(system_charset_info, table_list.db, + information_schema_name.str)) + { + ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, table_list.alias); + if (schema_table) + table_list.schema_table= schema_table; + } + thd->query_length= strlen(packet); // for simplicity: don't optimize if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1))) break; - mysql_log.write(thd,command,"%s %s",table_list.real_name,fields); + mysql_log.write(thd,command,"%s %s",table_list.table_name, fields); if (lower_case_table_names) - my_casedn_str(files_charset_info, table_list.real_name); - remove_escape(table_list.real_name); // This can't have wildcards + my_casedn_str(files_charset_info, table_list.table_name); + remove_escape(table_list.table_name); // This can't have wildcards if (check_access(thd,SELECT_ACL,table_list.db,&table_list.grant.privilege, - 0, 0)) + 0, 0, test(table_list.schema_table))) break; if (grant_option && check_grant(thd, SELECT_ACL, &table_list, 2, UINT_MAX, 0)) break; + /* init structures for VIEW processing */ + table_list.select_lex= &(thd->lex->select_lex); + mysql_init_query(thd, (uchar*)"", 0); + thd->lex-> + select_lex.table_list.link_in_list((byte*) &table_list, + (byte**) &table_list.next_local); + thd->lex->add_to_query_tables(&table_list); + + /* switch on VIEW optimisation: do not fill temporary tables */ + thd->lex->sql_command= SQLCOM_SHOW_FIELDS; mysqld_list_fields(thd,&table_list,fields); - free_items(thd->free_list); - thd->free_list= 0; + thd->lex->unit.cleanup(); + thd->cleanup_after_query(); break; } #endif @@ -1652,43 +1908,43 @@ bool dispatch_command(enum enum_server_command command, THD *thd, char *db=thd->strdup(packet), *alias; HA_CREATE_INFO create_info; - 
statistic_increment(com_stat[SQLCOM_CREATE_DB],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_CREATE_DB], + &LOCK_status); // null test to handle EOM if (!db || !(alias= thd->strdup(db)) || check_db_name(db)) { - net_printf(thd,ER_WRONG_DB_NAME, db ? db : "NULL"); + my_error(ER_WRONG_DB_NAME, MYF(0), db ? db : "NULL"); break; } - if (check_access(thd,CREATE_ACL,db,0,1,0)) + if (check_access(thd,CREATE_ACL,db,0,1,0,is_schema_db(db))) break; mysql_log.write(thd,command,packet); bzero(&create_info, sizeof(create_info)); - if (mysql_create_db(thd, (lower_case_table_names == 2 ? alias : db), - &create_info, 0) < 0) - send_error(thd, thd->killed ? ER_SERVER_SHUTDOWN : 0); + mysql_create_db(thd, (lower_case_table_names == 2 ? alias : db), + &create_info, 0); break; } case COM_DROP_DB: // QQ: To be removed { - statistic_increment(com_stat[SQLCOM_DROP_DB],&LOCK_status); - char *db=thd->strdup(packet), *alias; - // null test to handle EOM - if (!db || !(alias= thd->strdup(db)) || check_db_name(db)) + statistic_increment(thd->status_var.com_stat[SQLCOM_DROP_DB], + &LOCK_status); + char *db=thd->strdup(packet); + /* null test to handle EOM */ + if (!db || check_db_name(db)) { - net_printf(thd,ER_WRONG_DB_NAME, db ? db : "NULL"); + my_error(ER_WRONG_DB_NAME, MYF(0), db ? db : "NULL"); break; } - if (check_access(thd,DROP_ACL,db,0,1,0)) + if (check_access(thd,DROP_ACL,db,0,1,0,is_schema_db(db))) break; if (thd->locked_tables || thd->active_transaction()) { - send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); break; } mysql_log.write(thd,command,db); - if (mysql_rm_db(thd, (lower_case_table_names == 2 ? alias : db), - 0, 0) < 0) - send_error(thd, thd->killed ? 
ER_SERVER_SHUTDOWN : 0); + mysql_rm_db(thd, db, 0, 0); break; } #ifndef EMBEDDED_LIBRARY @@ -1698,7 +1954,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, ushort flags; uint32 slave_server_id; - statistic_increment(com_other,&LOCK_status); + statistic_increment(thd->status_var.com_other,&LOCK_status); thd->enable_slow_log= opt_log_slow_admin_statements; if (check_global_access(thd, REPL_SLAVE_ACL)) break; @@ -1715,29 +1971,29 @@ bool dispatch_command(enum enum_server_command command, THD *thd, (long) pos); mysql_binlog_send(thd, thd->strdup(packet + 10), (my_off_t) pos, flags); unregister_slave(thd,1,1); - // fake COM_QUIT -- if we get here, the thread needs to terminate + /* fake COM_QUIT -- if we get here, the thread needs to terminate */ error = TRUE; net->error = 0; break; } #endif case COM_REFRESH: - { - statistic_increment(com_stat[SQLCOM_FLUSH],&LOCK_status); - ulong options= (ulong) (uchar) packet[0]; - if (check_global_access(thd,RELOAD_ACL)) - break; - mysql_log.write(thd,command,NullS); - if (reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, NULL)) - send_error(thd, 0); - else - send_ok(thd); + { + bool not_used; + statistic_increment(thd->status_var.com_stat[SQLCOM_FLUSH], + &LOCK_status); + ulong options= (ulong) (uchar) packet[0]; + if (check_global_access(thd,RELOAD_ACL)) break; - } + mysql_log.write(thd,command,NullS); + if (!reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, ¬_used)) + send_ok(thd); + break; + } #ifndef EMBEDDED_LIBRARY case COM_SHUTDOWN: { - statistic_increment(com_other,&LOCK_status); + statistic_increment(thd->status_var.com_other, &LOCK_status); if (check_global_access(thd,SHUTDOWN_ACL)) break; /* purecov: inspected */ /* @@ -1754,7 +2010,6 @@ bool dispatch_command(enum enum_server_command command, THD *thd, else if (level != SHUTDOWN_WAIT_ALL_BUFFERS) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "this shutdown level"); - send_error(thd); break; } DBUG_PRINT("quit",("Got shutdown command for level %u", 
level)); @@ -1768,8 +2023,6 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #endif close_connection(thd, 0, 1); close_thread_tables(thd); // Free before kill - free_root(thd->mem_root,MYF(0)); - free_root(&thd->transaction.mem_root,MYF(0)); kill_mysql(); error=TRUE; break; @@ -1778,19 +2031,26 @@ bool dispatch_command(enum enum_server_command command, THD *thd, case COM_STATISTICS: { mysql_log.write(thd,command,NullS); - statistic_increment(com_stat[SQLCOM_SHOW_STATUS],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS], + &LOCK_status); #ifndef EMBEDDED_LIBRARY char buff[200]; #else char *buff= thd->net.last_error; #endif - ulong uptime = (ulong) (thd->start_time - start_time); + + STATUS_VAR current_global_status_var; + calc_sum_of_all_status(¤t_global_status_var); + + ulong uptime = (ulong) (thd->start_time - server_start_time); sprintf((char*) buff, - "Uptime: %ld Threads: %d Questions: %lu Slow queries: %ld Opens: %ld Flush tables: %ld Open tables: %u Queries per second avg: %.3f", + "Uptime: %lu Threads: %d Questions: %lu Slow queries: %lu Opens: %lu Flush tables: %lu Open tables: %u Queries per second avg: %.3f", uptime, - (int) thread_count,thd->query_id,long_query_count, - opened_tables,refresh_version, cached_tables(), - uptime ? (float)thd->query_id/(float)uptime : 0); + (int) thread_count, (ulong) thd->query_id, + current_global_status_var.long_query_count, + current_global_status_var.opened_tables, refresh_version, cached_tables(), + (uptime ? 
(ulonglong2double(thd->query_id) / (double) uptime) : + (double) 0)); #ifdef SAFEMALLOC if (sf_malloc_cur_memory) // Using SAFEMALLOC sprintf(strend(buff), " Memory in use: %ldK Max memory used: %ldK", @@ -1804,49 +2064,53 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; } case COM_PING: - statistic_increment(com_other,&LOCK_status); + statistic_increment(thd->status_var.com_other, &LOCK_status); send_ok(thd); // Tell client we are alive break; case COM_PROCESS_INFO: - statistic_increment(com_stat[SQLCOM_SHOW_PROCESSLIST],&LOCK_status); - if (!thd->priv_user[0] && check_global_access(thd,PROCESS_ACL)) + statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_PROCESSLIST], + &LOCK_status); + if (!thd->security_ctx->priv_user[0] && + check_global_access(thd, PROCESS_ACL)) break; mysql_log.write(thd,command,NullS); mysqld_list_processes(thd, - thd->master_access & PROCESS_ACL ? - NullS : thd->priv_user, 0); + thd->security_ctx->master_access & PROCESS_ACL ? + NullS : thd->security_ctx->priv_user, 0); break; case COM_PROCESS_KILL: { - statistic_increment(com_stat[SQLCOM_KILL],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_KILL], &LOCK_status); ulong id=(ulong) uint4korr(packet); - kill_one_thread(thd,id); + kill_one_thread(thd,id,false); break; } case COM_SET_OPTION: { - statistic_increment(com_stat[SQLCOM_SET_OPTION], &LOCK_status); - enum_mysql_set_option command= (enum_mysql_set_option) uint2korr(packet); - switch (command) { - case MYSQL_OPTION_MULTI_STATEMENTS_ON: + statistic_increment(thd->status_var.com_stat[SQLCOM_SET_OPTION], + &LOCK_status); + uint opt_command= uint2korr(packet); + + switch (opt_command) { + case (int) MYSQL_OPTION_MULTI_STATEMENTS_ON: thd->client_capabilities|= CLIENT_MULTI_STATEMENTS; send_eof(thd); break; - case MYSQL_OPTION_MULTI_STATEMENTS_OFF: + case (int) MYSQL_OPTION_MULTI_STATEMENTS_OFF: thd->client_capabilities&= ~CLIENT_MULTI_STATEMENTS; send_eof(thd); break; default: - 
send_error(thd, ER_UNKNOWN_COM_ERROR); + my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } break; } case COM_DEBUG: - statistic_increment(com_other,&LOCK_status); + statistic_increment(thd->status_var.com_other, &LOCK_status); if (check_global_access(thd, SUPER_ACL)) break; /* purecov: inspected */ - mysql_print_status(thd); + mysql_print_status(); mysql_log.write(thd,command,NullS); send_eof(thd); break; @@ -1856,17 +2120,32 @@ bool dispatch_command(enum enum_server_command command, THD *thd, case COM_DELAYED_INSERT: case COM_END: default: - send_error(thd, ER_UNKNOWN_COM_ERROR); + my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } - if (thd->lock || thd->open_tables || thd->derived_tables) + if (thd->lock || thd->open_tables || thd->derived_tables || + thd->prelocked_mode) { thd->proc_info="closing tables"; close_thread_tables(thd); /* Free tables */ } + /* + assume handlers auto-commit (if some doesn't - transaction handling + in MySQL should be redesigned to support it; it's a big change, + and it's not worth it - better to commit explicitly only writing + transactions, read-only ones should better take care of themselves. + saves some work in 2pc too) + see also sql_base.cc - close_thread_tables() + */ + bzero(&thd->transaction.stmt, sizeof(thd->transaction.stmt)); + if (!thd->active_transaction()) + thd->transaction.xid_state.xid.null(); - if (thd->is_fatal_error) - send_error(thd,0); // End of memory ? 
+ /* report error issued during command execution */ + if (thd->killed_errno() && !thd->net.report_error) + thd->send_kill_message(); + if (thd->net.report_error) + net_send_error(thd); log_slow_statement(thd); @@ -1886,7 +2165,17 @@ bool dispatch_command(enum enum_server_command command, THD *thd, void log_slow_statement(THD *thd) { - time_t start_of_query=thd->start_time; + time_t start_of_query; + + /* + The following should never be true with our current code base, + but better to keep this here so we don't accidently try to log a + statement in a trigger or stored function + */ + if (unlikely(thd->in_sub_stmt)) + return; // Don't set time for sub stmt + + start_of_query= thd->start_time; thd->end_time(); // Set start time /* @@ -1899,11 +2188,13 @@ void log_slow_statement(THD *thd) if ((ulong) (thd->start_time - thd->time_after_lock) > thd->variables.long_query_time || - ((thd->server_status & + (thd->server_status & (SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED)) && - (specialflag & SPECIAL_LOG_QUERIES_NOT_USING_INDEXES))) + (specialflag & SPECIAL_LOG_QUERIES_NOT_USING_INDEXES) && + /* == SQLCOM_END unless this is a SHOW command */ + thd->lex->orig_sql_command == SQLCOM_END) { - long_query_count++; + thd->status_var.long_query_count++; mysql_slow_log.write(thd, thd->query, thd->query_length, start_of_query); } } @@ -1911,8 +2202,152 @@ void log_slow_statement(THD *thd) /* + Create a TABLE_LIST object for an INFORMATION_SCHEMA table. + + SYNOPSIS + prepare_schema_table() + thd thread handle + lex current lex + table_ident table alias if it's used + schema_table_idx the type of the INFORMATION_SCHEMA table to be + created + + DESCRIPTION + This function is used in the parser to convert a SHOW or DESCRIBE + table_name command to a SELECT from INFORMATION_SCHEMA. + It prepares a SELECT_LEX and a TABLE_LIST object to represent the + given command as a SELECT parse tree. 
+ + NOTES + Due to the way this function works with memory and LEX it cannot + be used outside the parser (parse tree transformations outside + the parser break PS and SP). + + RETURN VALUE + 0 success + 1 out of memory or SHOW commands are not allowed + in this version of the server. +*/ + +int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, + enum enum_schema_tables schema_table_idx) +{ + DBUG_ENTER("prepare_schema_table"); + SELECT_LEX *sel= 0; + switch (schema_table_idx) { + case SCH_SCHEMATA: +#if defined(DONT_ALLOW_SHOW_COMMANDS) + my_message(ER_NOT_ALLOWED_COMMAND, + ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */ + DBUG_RETURN(1); +#else + if ((specialflag & SPECIAL_SKIP_SHOW_DB) && + check_global_access(thd, SHOW_DB_ACL)) + DBUG_RETURN(1); + break; +#endif + case SCH_TABLE_NAMES: + case SCH_TABLES: + case SCH_VIEWS: + case SCH_TRIGGERS: +#ifdef DONT_ALLOW_SHOW_COMMANDS + my_message(ER_NOT_ALLOWED_COMMAND, + ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */ + DBUG_RETURN(1); +#else + { + char *db; + if (lex->select_lex.db == NULL && + thd->copy_db_to(&lex->select_lex.db, 0)) + { + DBUG_RETURN(1); + } + db= lex->select_lex.db; + remove_escape(db); // Fix escaped '_' + if (check_db_name(db)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), db); + DBUG_RETURN(1); + } + if (check_access(thd, SELECT_ACL, db, &thd->col_access, 0, 0, + is_schema_db(db))) + DBUG_RETURN(1); /* purecov: inspected */ + if (!thd->col_access && check_grant_db(thd,db)) + { + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + thd->security_ctx->priv_user, thd->security_ctx->priv_host, + db); + DBUG_RETURN(1); + } + break; + } +#endif + case SCH_COLUMNS: + case SCH_STATISTICS: +#ifdef DONT_ALLOW_SHOW_COMMANDS + my_message(ER_NOT_ALLOWED_COMMAND, + ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */ + DBUG_RETURN(1); +#else + if (table_ident) + { + TABLE_LIST **query_tables_last= lex->query_tables_last; + sel= new SELECT_LEX(); + /* 'parent_lex' is 
used in init_query() so it must be before it. */ + sel->parent_lex= lex; + sel->init_query(); + if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ, + (List<String> *) 0, (List<String> *) 0)) + DBUG_RETURN(1); + lex->query_tables_last= query_tables_last; + TABLE_LIST *table_list= (TABLE_LIST*) sel->table_list.first; + char *db= table_list->db; + remove_escape(db); // Fix escaped '_' + remove_escape(table_list->table_name); + if (check_access(thd,SELECT_ACL | EXTRA_ACL,db, + &table_list->grant.privilege, 0, 0, + test(table_list->schema_table))) + DBUG_RETURN(1); /* purecov: inspected */ + if (grant_option && check_grant(thd, SELECT_ACL, table_list, 2, + UINT_MAX, 0)) + DBUG_RETURN(1); + break; + } +#endif + case SCH_OPEN_TABLES: + case SCH_VARIABLES: + case SCH_STATUS: + case SCH_PROCEDURES: + case SCH_CHARSETS: + case SCH_COLLATIONS: + case SCH_COLLATION_CHARACTER_SET_APPLICABILITY: + case SCH_USER_PRIVILEGES: + case SCH_SCHEMA_PRIVILEGES: + case SCH_TABLE_PRIVILEGES: + case SCH_COLUMN_PRIVILEGES: + case SCH_TABLE_CONSTRAINTS: + case SCH_KEY_COLUMN_USAGE: + default: + break; + } + + SELECT_LEX *select_lex= lex->current_select; + if (make_schema_select(thd, select_lex, schema_table_idx)) + { + DBUG_RETURN(1); + } + TABLE_LIST *table_list= (TABLE_LIST*) select_lex->table_list.first; + table_list->schema_select_lex= sel; + table_list->schema_table_reformed= 1; + statistic_increment(thd->status_var.com_stat[lex->orig_sql_command], + &LOCK_status); + DBUG_RETURN(0); +} + + +/* Read query from packet and store in thd->query - Used in COM_QUERY and COM_PREPARE + Used in COM_QUERY and COM_STMT_PREPARE DESCRIPTION Sets the following THD variables: @@ -1920,11 +2355,11 @@ void log_slow_statement(THD *thd) query_length RETURN VALUES - 0 ok - 1 error; In this case thd->fatal_error is set + FALSE ok + TRUE error; In this case thd->fatal_error is set */ -bool alloc_query(THD *thd, char *packet, ulong packet_length) +bool alloc_query(THD *thd, const char *packet, uint 
packet_length) { packet_length--; // Remove end null /* Remove garbage at start and end of query */ @@ -1933,7 +2368,7 @@ bool alloc_query(THD *thd, char *packet, ulong packet_length) packet++; packet_length--; } - char *pos=packet+packet_length; // Point at end null + const char *pos= packet + packet_length; // Point at end null while (packet_length > 0 && (pos[-1] == ';' || my_isspace(thd->charset() ,pos[-1]))) { @@ -1946,7 +2381,7 @@ bool alloc_query(THD *thd, char *packet, ulong packet_length) packet_length, thd->db_length+ 1 + QUERY_CACHE_FLAGS_SIZE))) - return 1; + return TRUE; thd->query[packet_length]=0; thd->query_length= packet_length; @@ -1954,9 +2389,7 @@ bool alloc_query(THD *thd, char *packet, ulong packet_length) thd->packet.shrink(thd->variables.net_buffer_length); thd->convert_buffer.shrink(thd->variables.net_buffer_length); - if (!(specialflag & SPECIAL_NO_PRIOR)) - my_pthread_setprio(pthread_self(),QUERY_PRIOR); - return 0; + return FALSE; } static void reset_one_shot_variables(THD *thd) @@ -1977,50 +2410,125 @@ static void reset_one_shot_variables(THD *thd) } -/**************************************************************************** -** mysql_execute_command -** Execute command saved in thd and current_lex->sql_command -****************************************************************************/ +/* + Execute command saved in thd and lex->sql_command -void + SYNOPSIS + mysql_execute_command() + thd Thread handle + + IMPLEMENTATION + + Before every operation that can request a write lock for a table + wait if a global read lock exists. However do not wait if this + thread has locked tables already. No new locks can be requested + until the other locks are released. The thread that requests the + global read lock waits for write locked tables to become unlocked. + + Note that wait_if_global_read_lock() sets a protection against a new + global read lock when it succeeds. 
This needs to be released by + start_waiting_global_read_lock() after the operation. + + RETURN + FALSE OK + TRUE Error +*/ + +bool mysql_execute_command(THD *thd) { - int res= 0; - LEX *lex= thd->lex; + bool res= FALSE; + bool need_start_waiting= FALSE; // have protection against global read lock + int up_result= 0; + LEX *lex= thd->lex; + /* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */ SELECT_LEX *select_lex= &lex->select_lex; - TABLE_LIST *tables= (TABLE_LIST*) select_lex->table_list.first; + /* first table of first SELECT_LEX */ + TABLE_LIST *first_table= (TABLE_LIST*) select_lex->table_list.first; + /* list of all tables in query */ + TABLE_LIST *all_tables; + /* most outer SELECT_LEX_UNIT of query */ SELECT_LEX_UNIT *unit= &lex->unit; + /* Saved variable value */ DBUG_ENTER("mysql_execute_command"); + thd->net.no_send_error= 0; /* Remember first generated insert id value of the previous - statement. + statement. We remember it here at the beginning of the statement, + and also in Item_func_last_insert_id::fix_fields() and + sys_var_last_insert_id::value_ptr(). Last two places are required + because LAST_INSERT_ID() and @@LAST_INSERT_ID may also be used in + expression that is not executed with mysql_execute_command(). + + And we remember it here because some statements read + @@LAST_INSERT_ID indirectly, like "SELECT * FROM t1 WHERE id IS + NULL", that may replace "id IS NULL" with "id = <LAST_INSERT_ID>". */ thd->current_insert_id= thd->last_insert_id; /* + In many cases first table of main SELECT_LEX have special meaning => + check that it is first table in global list and relink it first in + queries_tables list if it is necessary (we need such relinking only + for queries with subqueries in select list, in this case tables of + subqueries will go to global list first) + + all_tables will differ from first_table only if most upper SELECT_LEX + do not contain tables. 
+ + Because of above in place where should be at least one table in most + outer SELECT_LEX we have following check: + DBUG_ASSERT(first_table == all_tables); + DBUG_ASSERT(first_table == all_tables && first_table != 0); + */ + lex->first_lists_tables_same(); + /* should be assigned after making first tables same */ + all_tables= lex->query_tables; + /* set context for commands which do not use setup_tables */ + select_lex-> + context.resolve_in_table_list_only((TABLE_LIST*)select_lex-> + table_list.first); + + /* Reset warning count for each query that uses tables A better approach would be to reset this for any commands that is not a SHOW command or a select that only access local variables, but for now this is probably good enough. + Don't reset warnings when executing a stored routine. */ - if (tables || &lex->select_lex != lex->all_selects_list || + if ((all_tables || &lex->select_lex != lex->all_selects_list || + lex->sroutines.records) && !thd->spcont || lex->time_zone_tables_used) - mysql_reset_errors(thd); - - /* - When subselects or time_zone info is used in a query - we create a new TABLE_LIST containing all referenced tables - and set local variable 'tables' to point to this list. - */ - if ((&lex->select_lex != lex->all_selects_list || - lex->time_zone_tables_used) && - lex->unit.create_total_list(thd, lex, &tables)) - DBUG_VOID_RETURN; + mysql_reset_errors(thd, 0); #ifdef HAVE_REPLICATION - if (thd->slave_thread) + if (unlikely(thd->slave_thread)) { + if (lex->sql_command == SQLCOM_DROP_TRIGGER) + { + /* + When dropping a trigger, we need to load its table name + before checking slave filter rules. + */ + add_table_for_trigger(thd, thd->lex->spname, 1, &all_tables); + + if (!all_tables) + { + /* + If table name cannot be loaded, + it means the trigger does not exists possibly because + CREATE TRIGGER was previously skipped for this trigger + according to slave filtering rules. + Returning success without producing any errors in this case. 
+ */ + DBUG_RETURN(0); + } + + // force searching in slave.cc:tables_ok() + all_tables->updating= 1; + } + /* Check if statment should be skipped because of slave filtering rules @@ -2038,41 +2546,56 @@ mysql_execute_command(THD *thd) !(lex->sql_command == SQLCOM_SET_OPTION) && !(lex->sql_command == SQLCOM_DROP_TABLE && lex->drop_temporary && lex->drop_if_exists) && - all_tables_not_ok(thd,tables)) + all_tables_not_ok(thd, all_tables)) { /* we warn the slave SQL thread */ - my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); - reset_one_shot_variables(thd); - DBUG_VOID_RETURN; + my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); + if (thd->one_shot_set) + { + /* + It's ok to check thd->one_shot_set here: + + The charsets in a MySQL 5.0 slave can change by both a binlogged + SET ONE_SHOT statement and the event-internal charset setting, + and these two ways to change charsets do not seems to work + together. + + At least there seems to be problems in the rli cache for + charsets if we are using ONE_SHOT. Note that this is normally no + problem because either the >= 5.0 slave reads a 4.1 binlog (with + ONE_SHOT) *or* or 5.0 binlog (without ONE_SHOT) but never both." + */ + reset_one_shot_variables(thd); + } + DBUG_RETURN(0); } -#ifndef TO_BE_DELETED + } + else + { +#endif /* HAVE_REPLICATION */ /* - This is a workaround to deal with the shortcoming in 3.23.44-3.23.46 - masters in RELEASE_LOCK() logging. We re-write SELECT RELEASE_LOCK() - as DO RELEASE_LOCK() + When option readonly is set deny operations which change non-temporary + tables. Except for the replication thread and the 'super' users. 
*/ - if (lex->sql_command == SQLCOM_SELECT) - { - lex->sql_command = SQLCOM_DO; - lex->insert_list = &select_lex->item_list; + if (opt_readonly && + !(thd->security_ctx->master_access & SUPER_ACL) && + uc_update_queries[lex->sql_command] && + !((lex->sql_command == SQLCOM_CREATE_TABLE) && + (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE)) && + !((lex->sql_command == SQLCOM_DROP_TABLE) && lex->drop_temporary) && + ((lex->sql_command != SQLCOM_UPDATE_MULTI) && + some_non_temp_table_to_be_updated(thd, all_tables))) + { + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only"); + DBUG_RETURN(-1); } +#ifdef HAVE_REPLICATION + } /* endif unlikely slave */ #endif - } -#endif /* HAVE_REPLICATION */ + if(lex->orig_sql_command == SQLCOM_END) + statistic_increment(thd->status_var.com_stat[lex->sql_command], + &LOCK_status); - /* - When option readonly is set deny operations which change tables. - Except for the replication thread and the 'super' users. - */ - if (opt_readonly && - !(thd->slave_thread || (thd->master_access & SUPER_ACL)) && - (uc_update_queries[lex->sql_command] > 0)) - { - net_printf(thd, ER_OPTION_PREVENTS_STATEMENT, "--read-only"); - DBUG_VOID_RETURN; - } - - statistic_increment(com_stat[lex->sql_command],&LOCK_status); switch (lex->sql_command) { case SQLCOM_SELECT: { @@ -2080,50 +2603,42 @@ mysql_execute_command(THD *thd) { SELECT_LEX *param= lex->unit.global_parameters; if (!param->explicit_limit) - param->select_limit= thd->variables.select_limit; + param->select_limit= + new Item_int((ulonglong)thd->variables.select_limit); } - select_result *result=lex->result; - if (tables) + select_result *sel_result=lex->result; + if (all_tables) { - res=check_table_access(thd, - lex->exchange ? SELECT_ACL | FILE_ACL : - SELECT_ACL, - tables,0); + if (lex->orig_sql_command != SQLCOM_SHOW_STATUS_PROC && + lex->orig_sql_command != SQLCOM_SHOW_STATUS_FUNC) + res= check_table_access(thd, + lex->exchange ? 
SELECT_ACL | FILE_ACL : + SELECT_ACL, + all_tables, 0); } else - res=check_access(thd, lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL, - any_db,0,0,0); + res= check_access(thd, + lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL, + any_db, 0, 0, 0, 0); if (res) - { - res=0; - break; // Error message is given - } - /* - In case of single SELECT unit->global_parameters points on first SELECT - TODO: move counters to SELECT_LEX - */ - unit->offset_limit_cnt= (ha_rows) unit->global_parameters->offset_limit; - unit->select_limit_cnt= (ha_rows) (unit->global_parameters->select_limit+ - unit->global_parameters->offset_limit); - if (unit->select_limit_cnt < - (ha_rows) unit->global_parameters->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // no limit - if (unit->select_limit_cnt == HA_POS_ERROR && !select_lex->next_select()) - select_lex->options&= ~OPTION_FOUND_ROWS; + goto error; - if (!(res=open_and_lock_tables(thd,tables))) + if (!(res= open_and_lock_tables(thd, all_tables))) { if (lex->describe) { - if (!(result= new select_send())) - { - send_error(thd, ER_OUT_OF_RESOURCES); - DBUG_VOID_RETURN; - } + /* + We always use select_send for EXPLAIN, even if it's an EXPLAIN + for SELECT ... INTO OUTFILE: a user application should be able + to prepend EXPLAIN to any query and receive output for it, + even if the query itself redirects the output. 
+ */ + if (!(sel_result= new select_send())) + goto error; else - thd->send_explain_fields(result); - res= mysql_explain_union(thd, &thd->lex->unit, result); + thd->send_explain_fields(sel_result); + res= mysql_explain_union(thd, &thd->lex->unit, sel_result); if (lex->describe & DESCRIBE_EXTENDED) { char buff[1024]; @@ -2134,147 +2649,42 @@ mysql_execute_command(THD *thd) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_YES, str.ptr()); } - result->send_eof(); - delete result; + sel_result->send_eof(); + delete sel_result; } else { - if (!result && !(result= new select_send())) - { - res= -1; - break; - } - query_cache_store_query(thd, tables); - res= handle_select(thd, lex, result); - if (result != lex->result) - delete result; + if (!sel_result && !(sel_result= new select_send())) + goto error; + query_cache_store_query(thd, all_tables); + res= handle_select(thd, lex, sel_result, 0); + if (sel_result != lex->result) + delete sel_result; } } break; } case SQLCOM_PREPARE: { - char *query_str; - uint query_len; - if (lex->prepared_stmt_code_is_varref) - { - /* This is PREPARE stmt FROM @var. */ - String str; - CHARSET_INFO *to_cs= thd->variables.collation_connection; - bool need_conversion; - user_var_entry *entry; - String *pstr= &str; - uint32 unused; - /* - Convert @var contents to string in connection character set. Although - it is known that int/real/NULL value cannot be a valid query we still - convert it for error messages to uniform. 
- */ - if ((entry= - (user_var_entry*)hash_search(&thd->user_vars, - (byte*)lex->prepared_stmt_code.str, - lex->prepared_stmt_code.length)) - && entry->value) - { - my_bool is_var_null; - pstr= entry->val_str(&is_var_null, &str, NOT_FIXED_DEC); - /* - NULL value of variable checked early as entry->value so here - we can't get NULL in normal conditions - */ - DBUG_ASSERT(!is_var_null); - if (!pstr) - { - res= -1; - break; // EOM (error should be reported by allocator) - } - } - else - { - /* - variable absent or equal to NULL, so we need to set variable to - something reasonable to get readable error message during parsing - */ - str.set("NULL", 4, &my_charset_latin1); - } - - need_conversion= - String::needs_conversion(pstr->length(), pstr->charset(), - to_cs, &unused); - - query_len= need_conversion? (pstr->length() * to_cs->mbmaxlen) : - pstr->length(); - if (!(query_str= alloc_root(thd->mem_root, query_len+1))) - { - res= -1; - break; // EOM (error should be reported by allocator) - } - - if (need_conversion) - { - uint dummy_errors; - query_len= copy_and_convert(query_str, query_len, to_cs, - pstr->ptr(), pstr->length(), - pstr->charset(), &dummy_errors); - } - else - memcpy(query_str, pstr->ptr(), pstr->length()); - query_str[query_len]= 0; - } - else - { - query_str= lex->prepared_stmt_code.str; - query_len= lex->prepared_stmt_code.length; - DBUG_PRINT("info", ("PREPARE: %.*s FROM '%.*s' \n", - lex->prepared_stmt_name.length, - lex->prepared_stmt_name.str, - query_len, query_str)); - } - thd->command= COM_PREPARE; - if (!mysql_stmt_prepare(thd, query_str, query_len + 1, - &lex->prepared_stmt_name)) - send_ok(thd, 0L, 0L, "Statement prepared"); + mysql_sql_stmt_prepare(thd); break; } case SQLCOM_EXECUTE: { - DBUG_PRINT("info", ("EXECUTE: %.*s\n", - lex->prepared_stmt_name.length, - lex->prepared_stmt_name.str)); - mysql_sql_stmt_execute(thd, &lex->prepared_stmt_name); - lex->prepared_stmt_params.empty(); + mysql_sql_stmt_execute(thd); break; } case 
SQLCOM_DEALLOCATE_PREPARE: { - Statement* stmt; - DBUG_PRINT("info", ("DEALLOCATE PREPARE: %.*s\n", - lex->prepared_stmt_name.length, - lex->prepared_stmt_name.str)); - /* We account deallocate in the same manner as mysql_stmt_close */ - statistic_increment(com_stmt_close, &LOCK_status); - if ((stmt= thd->stmt_map.find_by_name(&lex->prepared_stmt_name))) - { - thd->stmt_map.erase(stmt); - send_ok(thd); - } - else - { - res= -1; - my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), - lex->prepared_stmt_name.length, lex->prepared_stmt_name.str, - "DEALLOCATE PREPARE"); - } + mysql_sql_stmt_close(thd); break; } case SQLCOM_DO: - if (tables && ((res= check_table_access(thd, SELECT_ACL, tables,0)) || - (res= open_and_lock_tables(thd,tables)))) - break; + if (check_table_access(thd, SELECT_ACL, all_tables, 0) || + open_and_lock_tables(thd, all_tables)) + goto error; res= mysql_do(thd, *lex->insert_list); - if (thd->net.report_error) - res= -1; break; case SQLCOM_EMPTY_QUERY: @@ -2290,16 +2700,31 @@ mysql_execute_command(THD *thd) { if (check_global_access(thd, SUPER_ACL)) goto error; - // PURGE MASTER LOGS TO 'file' + /* PURGE MASTER LOGS TO 'file' */ res = purge_master_logs(thd, lex->to_log); break; } case SQLCOM_PURGE_BEFORE: { + Item *it; + if (check_global_access(thd, SUPER_ACL)) goto error; - // PURGE MASTER LOGS BEFORE 'data' - res = purge_master_logs_before_date(thd, lex->purge_time); + /* PURGE MASTER LOGS BEFORE 'data' */ + it= (Item *)lex->value_list.head(); + if ((!it->fixed && it->fix_fields(lex->thd, &it)) || + it->check_cols(1)) + { + my_error(ER_WRONG_ARGUMENTS, MYF(0), "PURGE LOGS BEFORE"); + goto error; + } + it= new Item_func_unix_timestamp(it); + /* + it is OK only emulate fix_fieds, because we need only + value of constant + */ + it->quick_fix_field(); + res = purge_master_logs_before_date(thd, (ulong)it->val_int()); break; } #endif @@ -2324,12 +2749,12 @@ mysql_execute_command(THD *thd) goto error; /* This query don't work now. 
See comment in repl_failsafe.cc */ #ifndef WORKING_NEW_MASTER - net_printf(thd, ER_NOT_SUPPORTED_YET, "SHOW NEW MASTER"); - res= 1; + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "SHOW NEW MASTER"); + goto error; #else res = show_new_master(thd); -#endif break; +#endif } #ifdef HAVE_REPLICATION @@ -2344,48 +2769,57 @@ mysql_execute_command(THD *thd) { if (check_global_access(thd, REPL_SLAVE_ACL)) goto error; - res = show_binlog_events(thd); + res = mysql_show_binlog_events(thd); break; } #endif case SQLCOM_BACKUP_TABLE: { - if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL, tables,0) || + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL, all_tables, 0) || check_global_access(thd, FILE_ACL)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; - res = mysql_backup_table(thd, tables); - + res = mysql_backup_table(thd, first_table); + select_lex->table_list.first= (byte*) first_table; + lex->query_tables=all_tables; break; } case SQLCOM_RESTORE_TABLE: { - if (check_db_used(thd,tables) || - check_table_access(thd, INSERT_ACL, tables,0) || + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, INSERT_ACL, all_tables, 0) || check_global_access(thd, FILE_ACL)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; - res = mysql_restore_table(thd, tables); + res = mysql_restore_table(thd, first_table); + select_lex->table_list.first= (byte*) first_table; + lex->query_tables=all_tables; break; } case SQLCOM_ASSIGN_TO_KEYCACHE: { - if (check_db_used(thd, tables) || - check_access(thd, INDEX_ACL, tables->db, - &tables->grant.privilege, 0, 0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_access(thd, INDEX_ACL, first_table->db, + &first_table->grant.privilege, 0, 0, + 
test(first_table->schema_table))) goto error; - res= mysql_assign_to_keycache(thd, tables, &lex->name_and_length); + res= mysql_assign_to_keycache(thd, first_table, &lex->ident); break; } case SQLCOM_PRELOAD_KEYS: { - if (check_db_used(thd, tables) || - check_access(thd, INDEX_ACL, tables->db, - &tables->grant.privilege, 0, 0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_access(thd, INDEX_ACL, first_table->db, + &first_table->grant.privilege, 0, 0, + test(first_table->schema_table))) goto error; - res = mysql_preload_keys(thd, tables); + res = mysql_preload_keys(thd, first_table); break; } #ifdef HAVE_REPLICATION @@ -2421,7 +2855,7 @@ mysql_execute_command(THD *thd) if (check_global_access(thd, SUPER_ACL)) goto error; if (end_active_trans(thd)) - res= -1; + goto error; else res = load_master_data(thd); break; @@ -2441,23 +2875,33 @@ mysql_execute_command(THD *thd) res = innodb_show_status(thd); break; } + case SQLCOM_SHOW_MUTEX_STATUS: + { + if (check_global_access(thd, SUPER_ACL)) + goto error; + res = innodb_mutex_show_status(thd); + break; + } #endif #ifdef HAVE_REPLICATION case SQLCOM_LOAD_MASTER_TABLE: { - if (!tables->db) - tables->db=thd->db; - if (check_access(thd,CREATE_ACL,tables->db,&tables->grant.privilege,0,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + DBUG_ASSERT(first_table->db); /* Must be set in the parser */ + + if (check_access(thd, CREATE_ACL, first_table->db, + &first_table->grant.privilege, 0, 0, + test(first_table->schema_table))) goto error; /* purecov: inspected */ if (grant_option) { /* Check that the first table has CREATE privilege */ - if (check_grant(thd, CREATE_ACL, tables, 0, 1, 0)) + if (check_grant(thd, CREATE_ACL, all_tables, 0, 1, 0)) goto error; } - if (strlen(tables->real_name) > NAME_LEN) + if (strlen(first_table->table_name) > NAME_LEN) { - net_printf(thd,ER_WRONG_TABLE_NAME, tables->real_name); + my_error(ER_WRONG_TABLE_NAME, MYF(0), 
first_table->table_name); break; } pthread_mutex_lock(&LOCK_active_mi); @@ -2465,7 +2909,7 @@ mysql_execute_command(THD *thd) fetch_master_table will send the error to the client on failure. Give error if the table already exists. */ - if (!fetch_master_table(thd, tables->db, tables->real_name, + if (!fetch_master_table(thd, first_table->db, first_table->table_name, active_mi, 0, 0)) { send_ok(thd); @@ -2491,8 +2935,11 @@ mysql_execute_command(THD *thd) /* So that CREATE TEMPORARY TABLE gets to binlog at commit/rollback */ thd->options|= OPTION_STATUS_NO_TRANS_UPDATE; } - /* Skip first table, which is the table we are creating */ - TABLE_LIST *create_table, *create_table_local; + DBUG_ASSERT(first_table == all_tables && first_table != 0); + bool link_to_local; + // Skip first table, which is the table we are creating + TABLE_LIST *create_table= lex->unlink_first_table(&link_to_local); + TABLE_LIST *select_tables= lex->query_tables; /* Code below (especially in mysql_create_table() and select_create methods) may modify HA_CREATE_INFO structure in LEX, so we have to @@ -2507,15 +2954,14 @@ mysql_execute_command(THD *thd) { /* out of memory when creating a copy of alter_info */ res= 1; - goto unsent_create_error; + goto end_with_restore_list; } - tables= lex->unlink_first_table(tables, &create_table, - &create_table_local); - if ((res= create_table_precheck(thd, tables, create_table))) - goto unsent_create_error; + if ((res= create_table_precheck(thd, select_tables, create_table))) + goto end_with_restore_list; create_info.alias= create_table->alias; + #ifndef HAVE_READLINK if (create_info.data_file_name) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0, @@ -2527,16 +2973,13 @@ mysql_execute_command(THD *thd) #else /* Fix names if symlinked tables */ if (append_file_to_dir(thd, &create_info.data_file_name, - create_table->real_name) || + create_table->table_name) || append_file_to_dir(thd, &create_info.index_file_name, - create_table->real_name)) - { - res=-1; - goto 
unsent_create_error; - } + create_table->table_name)) + goto end_with_restore_list; #endif /* - If we are using SET CHARSET without DEFAULT, add an implicite + If we are using SET CHARSET without DEFAULT, add an implicit DEFAULT to not confuse old users. (This may change). */ if ((create_info.used_fields & @@ -2561,70 +3004,91 @@ mysql_execute_command(THD *thd) TABLE in the same way. That way we avoid that a new table is created during a gobal read lock. */ - if (wait_if_global_read_lock(thd, 0, 1)) + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) { - res= -1; - goto unsent_create_error; + res= 1; + goto end_with_restore_list; } if (select_lex->item_list.elements) // With select { - select_result *result; + select_result *sel_result; select_lex->options|= SELECT_NO_UNLOCK; - unit->offset_limit_cnt= select_lex->offset_limit; - unit->select_limit_cnt= select_lex->select_limit+ - select_lex->offset_limit; - if (unit->select_limit_cnt < select_lex->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // No limit + unit->set_limit(select_lex); - if (!(res=open_and_lock_tables(thd,tables))) + if (!(res= open_and_lock_tables(thd, select_tables))) { - res= -1; // If error + /* + Is table which we are changing used somewhere in other parts + of query + */ + if (!(create_info.options & HA_LEX_CREATE_TMP_TABLE)) + { + TABLE_LIST *duplicate; + if ((duplicate= unique_table(thd, create_table, select_tables, 0))) + { + update_non_unique_table_error(create_table, "CREATE", duplicate); + res= 1; + goto end_with_restore_list; + } + } + /* If we create merge table, we have to test tables in merge, too */ + if (create_info.used_fields & HA_CREATE_USED_UNION) + { + TABLE_LIST *tab; + for (tab= (TABLE_LIST*) create_info.merge_list.first; + tab; + tab= tab->next_local) + { + TABLE_LIST *duplicate; + if ((duplicate= unique_table(thd, tab, select_tables, 0))) + { + update_non_unique_table_error(tab, "CREATE", duplicate); + res= 1; + goto 
end_with_restore_list; + } + } + } /* select_create is currently not re-execution friendly and needs to be created for every execution of a PS/SP. */ - if ((result=new select_create(create_table->db, - create_table->real_name, - &create_info, - &alter_info, - select_lex->item_list, lex->duplicates, - lex->ignore))) + if ((sel_result= new select_create(create_table, + &create_info, + &alter_info, + select_lex->item_list, + lex->duplicates, + lex->ignore))) { /* CREATE from SELECT give its SELECT_LEX for SELECT, and item_list belong to SELECT */ - select_lex->resolve_mode= SELECT_LEX::SELECT_MODE; - res=handle_select(thd, lex, result); - select_lex->resolve_mode= SELECT_LEX::NOMATTER_MODE; + res= handle_select(thd, lex, sel_result, 0); + delete sel_result; } } } - else // regular create + else { + /* regular create */ if (lex->name) res= mysql_create_like_table(thd, create_table, &create_info, - (Table_ident *)lex->name); + (Table_ident *)lex->name); else { res= mysql_create_table(thd, create_table->db, - create_table->real_name, &create_info, + create_table->table_name, &create_info, &alter_info, 0, 0); } if (!res) send_ok(thd); } - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. 
- */ - start_waiting_global_read_lock(thd); -unsent_create_error: - // put tables back for PS rexecuting - tables= lex->link_first_table_back(tables, create_table, - create_table_local); + /* put tables back for PS rexecuting */ +end_with_restore_list: + lex->link_first_table_back(create_table, link_to_local); break; } case SQLCOM_CREATE_INDEX: @@ -2649,9 +3113,9 @@ unsent_create_error: if (thd->is_fatal_error) /* out of memory creating a copy of alter_info*/ goto error; - if (check_one_table_access(thd, INDEX_ACL, tables)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_one_table_access(thd, INDEX_ACL, first_table)) goto error; /* purecov: inspected */ - thd->enable_slow_log= opt_log_slow_admin_statements; if (end_active_trans(thd)) goto error; /* @@ -2666,9 +3130,9 @@ unsent_create_error: create_info.row_type= ROW_TYPE_NOT_USED; create_info.default_table_charset= thd->variables.collation_database; - res= mysql_alter_table(thd, tables->db, tables->real_name, - &create_info, tables, &alter_info, - 0, (ORDER*)0, DUP_ERROR, 0); + res= mysql_alter_table(thd, first_table->db, first_table->table_name, + &create_info, first_table, &alter_info, + 0, (ORDER*) 0, 0); break; } #ifdef HAVE_REPLICATION @@ -2695,8 +3159,9 @@ unsent_create_error: */ if (thd->locked_tables || thd->active_transaction() || thd->global_read_lock) { - send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); - break; + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); + goto error; } { pthread_mutex_lock(&LOCK_active_mi); @@ -2707,10 +3172,7 @@ unsent_create_error: #endif /* HAVE_REPLICATION */ case SQLCOM_ALTER_TABLE: -#if defined(DONT_ALLOW_SHOW_COMMANDS) - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - break; -#else + DBUG_ASSERT(first_table == all_tables && first_table != 0); { ulong priv=0; /* @@ -2726,44 +3188,29 @@ unsent_create_error: goto error; if (lex->name && (!lex->name[0] || strlen(lex->name) > NAME_LEN)) { - 
net_printf(thd, ER_WRONG_TABLE_NAME, lex->name); - res= 1; - break; - } - if (!select_lex->db) - { - /* - In the case of ALTER TABLE ... RENAME we should supply the - default database if the new name is not explicitly qualified - by a database. (Bug #11493) - */ - if (alter_info.flags & ALTER_RENAME) - { - if (! thd->db) - { - send_error(thd,ER_NO_DB_ERROR); - goto error; - } - select_lex->db= thd->db; - } - else - select_lex->db=tables->db; + my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name); + goto error; } - if (check_access(thd,ALTER_ACL,tables->db,&tables->grant.privilege,0,0) || - check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv,0,0)|| - check_merge_table_access(thd, tables->db, + /* Must be set in the parser */ + DBUG_ASSERT(select_lex->db); + if (check_access(thd, ALTER_ACL, first_table->db, + &first_table->grant.privilege, 0, 0, + test(first_table->schema_table)) || + check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv,0,0, + is_schema_db(select_lex->db))|| + check_merge_table_access(thd, first_table->db, (TABLE_LIST *) create_info.merge_list.first)) goto error; /* purecov: inspected */ if (grant_option) { - if (check_grant(thd, ALTER_ACL, tables, 0, UINT_MAX, 0)) + if (check_grant(thd, ALTER_ACL, all_tables, 0, UINT_MAX, 0)) goto error; if (lex->name && !test_all_bits(priv,INSERT_ACL | CREATE_ACL)) { // Rename of table TABLE_LIST tmp_table; bzero((char*) &tmp_table,sizeof(tmp_table)); - tmp_table.real_name=lex->name; + tmp_table.table_name=lex->name; tmp_table.db=select_lex->db; tmp_table.grant.privilege=priv; if (check_grant(thd, INSERT_ACL | CREATE_ACL, &tmp_table, 0, @@ -2781,63 +3228,68 @@ unsent_create_error: create_info.data_file_name= create_info.index_file_name= NULL; /* ALTER TABLE ends previous transaction */ if (end_active_trans(thd)) - res= -1; + goto error; else { + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + thd->enable_slow_log= 
opt_log_slow_admin_statements; res= mysql_alter_table(thd, select_lex->db, lex->name, &create_info, - tables, + first_table, &alter_info, select_lex->order_list.elements, (ORDER *) select_lex->order_list.first, - lex->duplicates, lex->ignore); + lex->ignore); } break; } -#endif /*DONT_ALLOW_SHOW_COMMANDS*/ case SQLCOM_RENAME_TABLE: { + DBUG_ASSERT(first_table == all_tables && first_table != 0); TABLE_LIST *table; - if (check_db_used(thd,tables)) + if (check_db_used(thd, all_tables)) goto error; - for (table=tables ; table ; table=table->next->next) + for (table= first_table; table; table= table->next_local->next_local) { if (check_access(thd, ALTER_ACL | DROP_ACL, table->db, - &table->grant.privilege,0,0) || - check_access(thd, INSERT_ACL | CREATE_ACL, table->next->db, - &table->next->grant.privilege,0,0)) + &table->grant.privilege,0,0, test(table->schema_table)) || + check_access(thd, INSERT_ACL | CREATE_ACL, table->next_local->db, + &table->next_local->grant.privilege, 0, 0, + test(table->next_local->schema_table))) goto error; if (grant_option) { - TABLE_LIST old_list,new_list; + TABLE_LIST old_list, new_list; /* we do not need initialize old_list and new_list because we will come table[0] and table->next[0] there */ - old_list=table[0]; - new_list=table->next[0]; - old_list.next=new_list.next=0; - if (check_grant(thd, ALTER_ACL, &old_list, 0, UINT_MAX, 0) || - (!test_all_bits(table->next->grant.privilege, + old_list= table[0]; + new_list= table->next_local[0]; + if (check_grant(thd, ALTER_ACL, &old_list, 0, 1, 0) || + (!test_all_bits(table->next_local->grant.privilege, INSERT_ACL | CREATE_ACL) && - check_grant(thd, INSERT_ACL | CREATE_ACL, &new_list, 0, - UINT_MAX, 0))) + check_grant(thd, INSERT_ACL | CREATE_ACL, &new_list, 0, 1, 0))) goto error; } } - query_cache_invalidate3(thd, tables, 0); - if (end_active_trans(thd)) - res= -1; - else if (mysql_rename_tables(thd,tables)) - res= -1; + query_cache_invalidate3(thd, first_table, 0); + if (end_active_trans(thd) 
|| mysql_rename_tables(thd, first_table)) + goto error; break; } #ifndef EMBEDDED_LIBRARY case SQLCOM_SHOW_BINLOGS: #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), + MYF(0)); /* purecov: inspected */ + goto error; #else { if (check_global_access(thd, SUPER_ACL)) @@ -2848,40 +3300,49 @@ unsent_create_error: #endif #endif /* EMBEDDED_LIBRARY */ case SQLCOM_SHOW_CREATE: + DBUG_ASSERT(first_table == all_tables && first_table != 0); #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), + MYF(0)); /* purecov: inspected */ + goto error; #else { - if (check_db_used(thd, tables) || - check_access(thd, SELECT_ACL | EXTRA_ACL, tables->db, - &tables->grant.privilege,0,0)) + /* Ignore temporary tables if this is "SHOW CREATE VIEW" */ + if (lex->only_view) + first_table->skip_temporary= 1; + + if (check_db_used(thd, all_tables) || + check_access(thd, SELECT_ACL | EXTRA_ACL, first_table->db, + &first_table->grant.privilege, 0, 0, + test(first_table->schema_table))) goto error; - if (grant_option && check_grant(thd, SELECT_ACL, tables, 2, UINT_MAX, 0)) + if (grant_option && check_grant(thd, SELECT_ACL, all_tables, 2, UINT_MAX, 0)) goto error; - res= mysqld_show_create(thd, tables); + res= mysqld_show_create(thd, first_table); break; } #endif case SQLCOM_CHECKSUM: { - if (check_db_used(thd,tables) || - check_table_access(thd, SELECT_ACL | EXTRA_ACL , tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables, 0)) goto error; /* purecov: inspected */ - res = mysql_checksum_table(thd, tables, &lex->check_opt); + res = mysql_checksum_table(thd, first_table, &lex->check_opt); break; } case SQLCOM_REPAIR: { - if 
(check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL | INSERT_ACL, tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; - res = mysql_repair_table(thd, tables, &lex->check_opt); + res= mysql_repair_table(thd, first_table, &lex->check_opt); /* ! we write after unlocking the table */ if (!res && !lex->no_write_to_binlog) { - mysql_update_log.write(thd, thd->query, thd->query_length); + /* Presumably, REPAIR and binlog writing doesn't require synchronization */ if (mysql_bin_log.is_open()) { thd->clear_error(); // No binlog error generated @@ -2889,28 +3350,34 @@ unsent_create_error: mysql_bin_log.write(&qinfo); } } + select_lex->table_list.first= (byte*) first_table; + lex->query_tables=all_tables; break; } case SQLCOM_CHECK: { - if (check_db_used(thd,tables) || - check_table_access(thd, SELECT_ACL | EXTRA_ACL , tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; - res = mysql_check_table(thd, tables, &lex->check_opt); + res = mysql_check_table(thd, first_table, &lex->check_opt); + select_lex->table_list.first= (byte*) first_table; + lex->query_tables=all_tables; break; } case SQLCOM_ANALYZE: { - if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL | INSERT_ACL, tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; - res = mysql_analyze_table(thd, tables, &lex->check_opt); + res = 
mysql_analyze_table(thd, first_table, &lex->check_opt); /* ! we write after unlocking the table */ if (!res && !lex->no_write_to_binlog) { - mysql_update_log.write(thd, thd->query, thd->query_length); + /* Presumably, ANALYZE and binlog writing doesn't require synchronization */ if (mysql_bin_log.is_open()) { thd->clear_error(); // No binlog error generated @@ -2918,22 +3385,25 @@ unsent_create_error: mysql_bin_log.write(&qinfo); } } + select_lex->table_list.first= (byte*) first_table; + lex->query_tables=all_tables; break; } case SQLCOM_OPTIMIZE: { - if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL | INSERT_ACL, tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ? - mysql_recreate_table(thd, tables) : - mysql_optimize_table(thd, tables, &lex->check_opt); + mysql_recreate_table(thd, first_table) : + mysql_optimize_table(thd, first_table, &lex->check_opt); /* ! 
we write after unlocking the table */ if (!res && !lex->no_write_to_binlog) { - mysql_update_log.write(thd, thd->query, thd->query_length); + /* Presumably, OPTIMIZE and binlog writing doesn't require synchronization */ if (mysql_bin_log.is_open()) { thd->clear_error(); // No binlog error generated @@ -2941,141 +3411,186 @@ unsent_create_error: mysql_bin_log.write(&qinfo); } } + select_lex->table_list.first= (byte*) first_table; + lex->query_tables=all_tables; break; } case SQLCOM_UPDATE: - if (update_precheck(thd, tables)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (update_precheck(thd, all_tables)) break; - res= mysql_update(thd,tables, - select_lex->item_list, - lex->value_list, - select_lex->where, - select_lex->order_list.elements, - (ORDER *) select_lex->order_list.first, - select_lex->select_limit, - lex->duplicates, lex->ignore); - if (thd->net.report_error) - res= -1; - break; + DBUG_ASSERT(select_lex->offset_limit == 0); + unit->set_limit(select_lex); + res= (up_result= mysql_update(thd, all_tables, + select_lex->item_list, + lex->value_list, + select_lex->where, + select_lex->order_list.elements, + (ORDER *) select_lex->order_list.first, + unit->select_limit_cnt, + lex->duplicates, lex->ignore)); + /* mysql_update return 2 if we need to switch to multi-update */ + if (up_result != 2) + break; + /* Fall through */ case SQLCOM_UPDATE_MULTI: { - if ((res= multi_update_precheck(thd, tables))) - break; + DBUG_ASSERT(first_table == all_tables && first_table != 0); + /* if we switched from normal update, rights are checked */ + if (up_result != 2) + { + if ((res= multi_update_precheck(thd, all_tables))) + break; + } + else + res= 0; + + res= mysql_multi_update_prepare(thd); - res= mysql_multi_update_lock(thd, tables, &select_lex->item_list, - select_lex); #ifdef HAVE_REPLICATION /* Check slave filtering rules */ - if (thd->slave_thread) - if (all_tables_not_ok(thd,tables)) + if (unlikely(thd->slave_thread)) + { + if 
(all_tables_not_ok(thd, all_tables)) { if (res!= 0) { res= 0; /* don't care of prev failure */ thd->clear_error(); /* filters are of highest prior */ } - /* we warn the slave SQL thread */ - my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); - break; + /* we warn the slave SQL thread */ + my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); + break; } + if (res) + break; + } + else + { #endif /* HAVE_REPLICATION */ - if (res) - break; - - res= mysql_multi_update(thd,tables, - &select_lex->item_list, - &lex->value_list, - select_lex->where, - select_lex->options, - lex->duplicates, lex->ignore, unit, select_lex); + if (res) + break; + if (opt_readonly && + !(thd->security_ctx->master_access & SUPER_ACL) && + some_non_temp_table_to_be_updated(thd, all_tables)) + { + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only"); + break; + } +#ifdef HAVE_REPLICATION + } /* unlikely */ +#endif + + res= mysql_multi_update(thd, all_tables, + &select_lex->item_list, + &lex->value_list, + select_lex->where, + select_lex->options, + lex->duplicates, lex->ignore, unit, select_lex); break; } case SQLCOM_REPLACE: case SQLCOM_INSERT: { - if ((res= insert_precheck(thd, tables))) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if ((res= insert_precheck(thd, all_tables))) break; - res= mysql_insert(thd,tables,lex->field_list,lex->many_values, - lex->update_list, lex->value_list, + + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + + res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values, + lex->update_list, lex->value_list, lex->duplicates, lex->ignore); - if (thd->net.report_error) - res= -1; + + /* + If we have inserted into a VIEW, and the base table has + AUTO_INCREMENT column, but this column is not accessible through + a view, then we should restore LAST_INSERT_ID to the value it + had before the statement. 
+ */ + if (first_table->view && !first_table->contain_auto_increment) + thd->last_insert_id= thd->current_insert_id; + break; } case SQLCOM_REPLACE_SELECT: case SQLCOM_INSERT_SELECT: { - TABLE_LIST *first_local_table= (TABLE_LIST *) select_lex->table_list.first; - TABLE_LIST dup_tables; - TABLE *insert_table; - if ((res= insert_precheck(thd, tables))) + select_result *sel_result; + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if ((res= insert_precheck(thd, all_tables))) break; /* Fix lock for first table */ - if (tables->lock_type == TL_WRITE_DELAYED) - tables->lock_type= TL_WRITE; + if (first_table->lock_type == TL_WRITE_DELAYED) + first_table->lock_type= TL_WRITE; /* Don't unlock tables until command is written to binary log */ select_lex->options|= SELECT_NO_UNLOCK; - select_result *result; - unit->offset_limit_cnt= select_lex->offset_limit; - unit->select_limit_cnt= select_lex->select_limit+select_lex->offset_limit; - if (unit->select_limit_cnt < select_lex->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // No limit - - if ((res= open_and_lock_tables(thd, tables))) - break; + unit->set_limit(select_lex); - insert_table= tables->table; - /* MERGE sub-tables can only be detected after open. */ - if (mysql_lock_have_duplicate(thd, insert_table, tables->next)) + if (! thd->locked_tables && + ! (need_start_waiting= ! 
wait_if_global_read_lock(thd, 0, 1))) { - /* Using same table for INSERT and SELECT */ - select_lex->options |= OPTION_BUFFER_RESULT; + res= 1; + break; } - /* Skip first table, which is the table we are inserting in */ - select_lex->table_list.first= (byte*) first_local_table->next; - tables= (TABLE_LIST *) select_lex->table_list.first; - dup_tables= *first_local_table; - first_local_table->next= 0; - if (select_lex->group_list.elements != 0) - { - /* - When we are using GROUP BY we can't refere to other tables in the - ON DUPLICATE KEY part - */ - dup_tables.next= 0; - } - - if (!(res= mysql_prepare_insert(thd, tables, first_local_table, - &dup_tables, insert_table, - lex->field_list, 0, - lex->update_list, lex->value_list, - lex->duplicates)) && - (result= new select_insert(insert_table, first_local_table, - &dup_tables, &lex->field_list, - &lex->update_list, &lex->value_list, - lex->duplicates, lex->ignore))) - { - /* - insert/replace from SELECT give its SELECT_LEX for SELECT, - and item_list belong to SELECT - */ - lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; - res= handle_select(thd, lex, result); + if (!(res= open_and_lock_tables(thd, all_tables))) + { + /* Skip first table, which is the table we are inserting in */ + TABLE_LIST *second_table= first_table->next_local; + select_lex->table_list.first= (byte*) second_table; + select_lex->context.table_list= + select_lex->context.first_name_resolution_table= second_table; + res= mysql_insert_select_prepare(thd); + if (!res && (sel_result= new select_insert(first_table, + first_table->table, + &lex->field_list, + &lex->update_list, + &lex->value_list, + lex->duplicates, + lex->ignore))) + { + res= handle_select(thd, lex, sel_result, OPTION_SETUP_TABLES_DONE); + /* + Invalidate the table in the query cache if something changed + after unlocking when changes become visible. + TODO: this is workaround. right way will be move invalidating in + the unlock procedure. 
+ */ + if (first_table->lock_type == TL_WRITE_CONCURRENT_INSERT && + thd->lock) + { + /* INSERT ... SELECT should invalidate only the very first table */ + TABLE_LIST *save_table= first_table->next_local; + first_table->next_local= 0; + mysql_unlock_tables(thd, thd->lock); + query_cache_invalidate3(thd, first_table, 1); + first_table->next_local= save_table; + thd->lock=0; + } + delete sel_result; + } /* revert changes for SP */ - lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; - delete result; - if (thd->net.report_error) - res= -1; + select_lex->table_list.first= (byte*) first_table; } - else - res= -1; - insert_table->insert_values= 0; // Set by mysql_prepare_insert() - first_local_table->next= tables; - lex->select_lex.table_list.first= (byte*) first_local_table; + + /* + If we have inserted into a VIEW, and the base table has + AUTO_INCREMENT column, but this column is not accessible through + a view, then we should restore LAST_INSERT_ID to the value it + had before the statement. 
+ */ + if (first_table->view && !first_table->contain_auto_increment) + thd->last_insert_id= thd->current_insert_id; + break; } case SQLCOM_TRUNCATE: @@ -3084,7 +3599,8 @@ unsent_create_error: res= -1; break; } - if (check_one_table_access(thd, DELETE_ACL, tables)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_one_table_access(thd, DELETE_ACL, all_tables)) goto error; /* Don't allow this within a transaction because we want to use @@ -3092,68 +3608,66 @@ unsent_create_error: */ if (thd->locked_tables || thd->active_transaction()) { - send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION,NullS); + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); goto error; } - res=mysql_truncate(thd, tables, 0); + + res= mysql_truncate(thd, first_table, 0); break; case SQLCOM_DELETE: { - if ((res= delete_precheck(thd, tables))) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if ((res= delete_precheck(thd, all_tables))) break; - res = mysql_delete(thd,tables, select_lex->where, + DBUG_ASSERT(select_lex->offset_limit == 0); + unit->set_limit(select_lex); + + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + + res = mysql_delete(thd, all_tables, select_lex->where, &select_lex->order_list, - select_lex->select_limit, select_lex->options); - if (thd->net.report_error) - res= -1; + unit->select_limit_cnt, select_lex->options, + FALSE); break; } case SQLCOM_DELETE_MULTI: { + DBUG_ASSERT(first_table == all_tables && first_table != 0); TABLE_LIST *aux_tables= - (TABLE_LIST *)thd->lex->auxilliary_table_list.first; - TABLE_LIST *target_tbl; - uint table_count; - multi_delete *result; + (TABLE_LIST *)thd->lex->auxiliary_table_list.first; + multi_delete *del_result; - if ((res= multi_delete_precheck(thd, tables, &table_count))) + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + + if 
((res= multi_delete_precheck(thd, all_tables))) break; /* condition will be TRUE on SP re-excuting */ if (select_lex->item_list.elements != 0) select_lex->item_list.empty(); if (add_item_to_list(thd, new Item_null())) - { - res= -1; - break; - } + goto error; thd->proc_info="init"; - if ((res=open_and_lock_tables(thd,tables))) + if ((res= open_and_lock_tables(thd, all_tables))) break; - /* Fix tables-to-be-deleted-from list to point at opened tables */ - for (target_tbl= (TABLE_LIST*) aux_tables; - target_tbl; - target_tbl= target_tbl->next) - { - TABLE_LIST *orig= target_tbl->table_list; - target_tbl->table= orig->table; - /* - Multi-delete can't be constructed over-union => we always have - single SELECT on top and have to check underlying SELECTs of it - */ - if (lex->select_lex.check_updateable_in_subqueries(orig->db, - orig->real_name)) - { - my_error(ER_UPDATE_TABLE_USED, MYF(0), - orig->real_name); - res= -1; - break; - } - } - if (!res && !thd->is_fatal_error && - (result= new multi_delete(thd,aux_tables, table_count))) + if ((res= mysql_multi_delete_prepare(thd))) + goto error; + + if (!thd->is_fatal_error && + (del_result= new multi_delete(aux_tables, lex->table_count))) { res= mysql_select(thd, &select_lex->ref_pointer_array, select_lex->get_table_list(), @@ -3163,28 +3677,24 @@ unsent_create_error: 0, (ORDER *)NULL, (ORDER *)NULL, (Item *)NULL, (ORDER *)NULL, select_lex->options | thd->options | - SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK, - result, unit, select_lex); - if (thd->net.report_error) - res= -1; - delete result; + SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK | + OPTION_SETUP_TABLES_DONE, + del_result, unit, select_lex); + delete del_result; } else - res= -1; // Error is not sent - close_thread_tables(thd); + res= TRUE; // Error break; } case SQLCOM_DROP_TABLE: { + DBUG_ASSERT(first_table == all_tables && first_table != 0); if (!lex->drop_temporary) { - if (check_table_access(thd,DROP_ACL,tables,0)) + if (check_table_access(thd, DROP_ACL, 
all_tables, 0)) goto error; /* purecov: inspected */ if (end_active_trans(thd)) - { - res= -1; - break; - } + goto error; } else { @@ -3202,26 +3712,20 @@ unsent_create_error: /* So that DROP TEMPORARY TABLE gets to binlog at commit/rollback */ thd->options|= OPTION_STATUS_NO_TRANS_UPDATE; } - res= mysql_rm_table(thd,tables,lex->drop_if_exists, lex->drop_temporary); + /* DDL and binlog write order protected by LOCK_open */ + res= mysql_rm_table(thd, first_table, lex->drop_if_exists, + lex->drop_temporary); } break; - case SQLCOM_SHOW_DATABASES: -#if defined(DONT_ALLOW_SHOW_COMMANDS) - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; -#else - if ((specialflag & SPECIAL_SKIP_SHOW_DB) && - check_global_access(thd, SHOW_DB_ACL)) - goto error; - res= mysqld_show_dbs(thd, (lex->wild ? lex->wild->ptr() : NullS)); - break; -#endif case SQLCOM_SHOW_PROCESSLIST: - if (!thd->priv_user[0] && check_global_access(thd,PROCESS_ACL)) + if (!thd->security_ctx->priv_user[0] && + check_global_access(thd,PROCESS_ACL)) break; mysqld_list_processes(thd, - thd->master_access & PROCESS_ACL ? NullS : - thd->priv_user,lex->verbose); + (thd->security_ctx->master_access & PROCESS_ACL ? + NullS : + thd->security_ctx->priv_user), + lex->verbose); break; case SQLCOM_SHOW_STORAGE_ENGINES: res= mysqld_show_storage_engines(thd); @@ -3232,155 +3736,60 @@ unsent_create_error: case SQLCOM_SHOW_COLUMN_TYPES: res= mysqld_show_column_types(thd); break; - case SQLCOM_SHOW_STATUS: - res= mysqld_show(thd,(lex->wild ? lex->wild->ptr() : NullS),status_vars, - OPT_GLOBAL, &LOCK_status); - break; - case SQLCOM_SHOW_VARIABLES: - res= mysqld_show(thd, (lex->wild ? 
lex->wild->ptr() : NullS), - init_vars, lex->option_type, - &LOCK_global_system_variables); - break; case SQLCOM_SHOW_LOGS: #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), + MYF(0)); /* purecov: inspected */ + goto error; #else { - if (grant_option && check_access(thd, FILE_ACL, any_db,0,0,0)) + if (grant_option && check_access(thd, FILE_ACL, any_db,0,0,0,0)) goto error; res= mysqld_show_logs(thd); break; } #endif - case SQLCOM_SHOW_TABLES: - /* FALL THROUGH */ -#ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; -#else - { - char *db=select_lex->db ? select_lex->db : thd->db; - if (!db) - { - send_error(thd,ER_NO_DB_ERROR); /* purecov: inspected */ - goto error; /* purecov: inspected */ - } - remove_escape(db); // Fix escaped '_' - if (check_db_name(db)) - { - net_printf(thd,ER_WRONG_DB_NAME, db); - goto error; - } - if (check_access(thd,SELECT_ACL,db,&thd->col_access,0,0)) - goto error; /* purecov: inspected */ - if (!thd->col_access && check_grant_db(thd,db)) - { - net_printf(thd, ER_DBACCESS_DENIED_ERROR, - thd->priv_user, - thd->priv_host, - db); - goto error; - } - /* grant is checked in mysqld_show_tables */ - if (lex->describe) - res= mysqld_extend_show_tables(thd,db, - (lex->wild ? lex->wild->ptr() : NullS)); - else - res= mysqld_show_tables(thd,db, - (lex->wild ? lex->wild->ptr() : NullS)); - break; - } -#endif - case SQLCOM_SHOW_OPEN_TABLES: - res= mysqld_show_open_tables(thd,(lex->wild ? lex->wild->ptr() : NullS)); - break; - case SQLCOM_SHOW_CHARSETS: - res= mysqld_show_charsets(thd,(lex->wild ? lex->wild->ptr() : NullS)); - break; - case SQLCOM_SHOW_COLLATIONS: - res= mysqld_show_collations(thd,(lex->wild ? 
lex->wild->ptr() : NullS)); - break; - case SQLCOM_SHOW_FIELDS: -#ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; -#else - { - char *db=tables->db; - remove_escape(db); // Fix escaped '_' - remove_escape(tables->real_name); - if (check_access(thd,SELECT_ACL | EXTRA_ACL,db, - &tables->grant.privilege, 0, 0)) - goto error; /* purecov: inspected */ - if (grant_option && check_grant(thd, SELECT_ACL, tables, 2, UINT_MAX, 0)) - goto error; - res= mysqld_show_fields(thd,tables, - (lex->wild ? lex->wild->ptr() : NullS), - lex->verbose); - break; - } -#endif - case SQLCOM_SHOW_KEYS: -#ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; -#else - { - char *db=tables->db; - remove_escape(db); // Fix escaped '_' - remove_escape(tables->real_name); - if (check_access(thd,SELECT_ACL | EXTRA_ACL,db, - &tables->grant.privilege, 0, 0)) - goto error; /* purecov: inspected */ - if (grant_option && check_grant(thd, SELECT_ACL, tables, 2, UINT_MAX, 0)) - goto error; - res= mysqld_show_keys(thd,tables); - break; - } -#endif case SQLCOM_CHANGE_DB: - mysql_change_db(thd,select_lex->db); + if (!mysql_change_db(thd,select_lex->db,FALSE)) + send_ok(thd); break; case SQLCOM_LOAD: { + DBUG_ASSERT(first_table == all_tables && first_table != 0); uint privilege= (lex->duplicates == DUP_REPLACE ? - INSERT_ACL | DELETE_ACL : INSERT_ACL); + INSERT_ACL | DELETE_ACL : INSERT_ACL) | + (lex->local_file ? 0 : FILE_ACL); - if (!lex->local_file) - { - if (check_access(thd,privilege | FILE_ACL,tables->db,0,0,0)) - goto error; - } - else + if (lex->local_file) { if (!(thd->client_capabilities & CLIENT_LOCAL_FILES) || - ! 
opt_local_infile) + !opt_local_infile) { - send_error(thd,ER_NOT_ALLOWED_COMMAND); + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); goto error; } - if (check_one_table_access(thd, privilege, tables)) - goto error; } - res=mysql_load(thd, lex->exchange, tables, lex->field_list, - lex->duplicates, lex->ignore, (bool) lex->local_file, lex->lock_option); + + if (check_one_table_access(thd, privilege, all_tables)) + goto error; + + res= mysql_load(thd, lex->exchange, first_table, lex->field_list, + lex->update_list, lex->value_list, lex->duplicates, + lex->ignore, (bool) lex->local_file); break; } case SQLCOM_SET_OPTION: { List<set_var_base> *lex_var_list= &lex->var_list; - if (tables && ((res= check_table_access(thd, SELECT_ACL, tables,0)) || - (res= open_and_lock_tables(thd,tables)))) - break; + if ((check_table_access(thd, SELECT_ACL, all_tables, 0) || + open_and_lock_tables(thd, all_tables))) + goto error; if (lex->one_shot_set && not_all_support_one_shot(lex_var_list)) { - my_printf_error(0, "The SET ONE_SHOT syntax is reserved for \ -purposes internal to the MySQL server", MYF(0)); - res= -1; - break; + my_error(ER_RESERVED_SYNTAX, MYF(0), "SET ONE_SHOT"); + goto error; } if (!(res= sql_set_variables(thd, lex_var_list))) { @@ -3391,8 +3800,6 @@ purposes internal to the MySQL server", MYF(0)); thd->one_shot_set|= lex->one_shot_set; send_ok(thd); } - if (thd->net.report_error) - res= -1; break; } @@ -3415,17 +3822,18 @@ purposes internal to the MySQL server", MYF(0)); break; case SQLCOM_LOCK_TABLES: unlock_locked_tables(thd); - if (check_db_used(thd,tables) || end_active_trans(thd)) + if (check_db_used(thd, all_tables) || end_active_trans(thd)) goto error; - if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, tables,0)) + if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables, 0)) goto error; thd->in_lock_tables=1; thd->options|= OPTION_TABLE_LOCK; - if (!(res= open_and_lock_tables(thd, tables))) + + if (!(res= 
simple_open_n_lock_tables(thd, all_tables))) { #ifdef HAVE_QUERY_CACHE if (thd->variables.query_cache_wlock_invalidate) - query_cache.invalidate_locked_for_write(tables); + query_cache.invalidate_locked_for_write(first_table); #endif /*HAVE_QUERY_CACHE*/ thd->locked_tables=thd->lock; thd->lock=0; @@ -3451,26 +3859,27 @@ purposes internal to the MySQL server", MYF(0)); char *alias; if (!(alias=thd->strdup(lex->name)) || check_db_name(lex->name)) { - net_printf(thd,ER_WRONG_DB_NAME, lex->name); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); break; } /* If in a slave thread : CREATE DATABASE DB was certainly not preceded by USE DB. - For that reason, db_ok() in sql/slave.cc did not check the + For that reason, db_ok() in sql/slave.cc did not check the do_db/ignore_db. And as this query involves no tables, tables_ok() above was not called. So we have to check rules again here. */ #ifdef HAVE_REPLICATION - if (thd->slave_thread && + if (thd->slave_thread && (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) || !db_ok_with_wild_table(lex->name))) { - my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); + my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); break; } #endif - if (check_access(thd,CREATE_ACL,lex->name,0,1,0)) + + if (check_access(thd,CREATE_ACL,lex->name,0,1,0,is_schema_db(lex->name))) break; res= mysql_create_db(thd,(lower_case_table_names == 2 ? 
alias : lex->name), &create_info, 0); @@ -3483,10 +3892,9 @@ purposes internal to the MySQL server", MYF(0)); res= -1; break; } - char *alias; - if (!(alias=thd->strdup(lex->name)) || check_db_name(lex->name)) + if (check_db_name(lex->name)) { - net_printf(thd, ER_WRONG_DB_NAME, lex->name); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); break; } /* @@ -3501,32 +3909,28 @@ purposes internal to the MySQL server", MYF(0)); (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) || !db_ok_with_wild_table(lex->name))) { - my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); + my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); break; } #endif - if (check_access(thd,DROP_ACL,lex->name,0,1,0)) + if (check_access(thd,DROP_ACL,lex->name,0,1,0,is_schema_db(lex->name))) break; if (thd->locked_tables || thd->active_transaction()) { - send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); goto error; } - res=mysql_rm_db(thd, (lower_case_table_names == 2 ? alias : lex->name), - lex->drop_if_exists, 0); + res= mysql_rm_db(thd, lex->name, lex->drop_if_exists, 0); break; } case SQLCOM_ALTER_DB: { - char *db= lex->name ? 
lex->name : thd->db; - if (!db) - { - send_error(thd, ER_NO_DB_ERROR); - goto error; - } + char *db= lex->name; + DBUG_ASSERT(db); /* Must be set in the parser */ if (!strip_sp(db) || check_db_name(db)) { - net_printf(thd, ER_WRONG_DB_NAME, db); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); break; } /* @@ -3541,15 +3945,16 @@ purposes internal to the MySQL server", MYF(0)); (!db_ok(db, replicate_do_db, replicate_ignore_db) || !db_ok_with_wild_table(db))) { - my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); + my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); break; } #endif - if (check_access(thd, ALTER_ACL, db, 0, 1, 0)) + if (check_access(thd, ALTER_ACL, db, 0, 1, 0, is_schema_db(db))) break; if (thd->locked_tables || thd->active_transaction()) { - send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); goto error; } res= mysql_alter_db(thd, db, &lex->create_info); @@ -3559,161 +3964,179 @@ purposes internal to the MySQL server", MYF(0)); { if (!strip_sp(lex->name) || check_db_name(lex->name)) { - net_printf(thd,ER_WRONG_DB_NAME, lex->name); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); break; } - if (check_access(thd,SELECT_ACL,lex->name,0,1,0)) + if (check_access(thd,SELECT_ACL,lex->name,0,1,0,is_schema_db(lex->name))) break; res=mysqld_show_create_db(thd,lex->name,&lex->create_info); break; } - case SQLCOM_CREATE_FUNCTION: - if (check_access(thd,INSERT_ACL,"mysql",0,1,0)) + case SQLCOM_CREATE_FUNCTION: // UDF function + { + if (check_access(thd,INSERT_ACL,"mysql",0,1,0,0)) break; #ifdef HAVE_DLOPEN - if (!(res = mysql_create_function(thd,&lex->udf))) + if (sp_find_routine(thd, TYPE_ENUM_FUNCTION, lex->spname, + &thd->sp_func_cache, FALSE)) + { + my_error(ER_UDF_EXISTS, MYF(0), lex->spname->m_name.str); + goto error; + } + if (!(res = mysql_create_function(thd, &lex->udf))) send_ok(thd); #else - net_printf(thd, ER_CANT_OPEN_LIBRARY, lex->udf.dl, 0, "feature 
disabled"); - res= -1; + my_error(ER_CANT_OPEN_LIBRARY, MYF(0), lex->udf.dl, 0, "feature disabled"); + res= TRUE; #endif break; - case SQLCOM_DROP_FUNCTION: - if (check_access(thd,DELETE_ACL,"mysql",0,1,0)) + } +#ifndef NO_EMBEDDED_ACCESS_CHECKS + case SQLCOM_CREATE_USER: + { + if (check_access(thd, INSERT_ACL, "mysql", 0, 1, 1, 0) && + check_global_access(thd,CREATE_USER_ACL)) break; -#ifdef HAVE_DLOPEN - if (!(res = mysql_drop_function(thd,&lex->udf.name))) + if (end_active_trans(thd)) + goto error; + /* Conditionally writes to binlog */ + if (!(res= mysql_create_user(thd, lex->users_list))) send_ok(thd); -#else - res= -1; -#endif break; -#ifndef NO_EMBEDDED_ACCESS_CHECKS + } case SQLCOM_DROP_USER: { - if (check_access(thd, GRANT_ACL,"mysql",0,1,0)) + if (check_access(thd, DELETE_ACL, "mysql", 0, 1, 1, 0) && + check_global_access(thd,CREATE_USER_ACL)) break; + if (end_active_trans(thd)) + goto error; + /* Conditionally writes to binlog */ if (!(res= mysql_drop_user(thd, lex->users_list))) - { - mysql_update_log.write(thd, thd->query, thd->query_length); - if (mysql_bin_log.is_open()) - { - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); - mysql_bin_log.write(&qinfo); - } send_ok(thd); - } + break; + } + case SQLCOM_RENAME_USER: + { + if (check_access(thd, UPDATE_ACL, "mysql", 0, 1, 1, 0) && + check_global_access(thd,CREATE_USER_ACL)) + break; + if (end_active_trans(thd)) + goto error; + /* Conditionally writes to binlog */ + if (!(res= mysql_rename_user(thd, lex->users_list))) + send_ok(thd); break; } case SQLCOM_REVOKE_ALL: { - if (check_access(thd, GRANT_ACL ,"mysql",0,1,0)) + if (check_access(thd, UPDATE_ACL, "mysql", 0, 1, 1, 0) && + check_global_access(thd,CREATE_USER_ACL)) break; + /* Conditionally writes to binlog */ if (!(res = mysql_revoke_all(thd, lex->users_list))) - { - mysql_update_log.write(thd, thd->query, thd->query_length); - if (mysql_bin_log.is_open()) - { - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, 
FALSE); - mysql_bin_log.write(&qinfo); - } send_ok(thd); - } break; } case SQLCOM_REVOKE: case SQLCOM_GRANT: { if (check_access(thd, lex->grant | lex->grant_tot_col | GRANT_ACL, - tables ? tables->db : select_lex->db, - tables ? &tables->grant.privilege : 0, - tables ? 0 : 1, 0)) + first_table ? first_table->db : select_lex->db, + first_table ? &first_table->grant.privilege : 0, + first_table ? 0 : 1, 0, + first_table ? (bool) first_table->schema_table : + select_lex->db ? is_schema_db(select_lex->db) : 0)) goto error; - /* - Check that the user isn't trying to change a password for another - user if he doesn't have UPDATE privilege to the MySQL database - */ - - if (thd->user) // If not replication + if (thd->security_ctx->user) // If not replication { - LEX_USER *user; + LEX_USER *user, *tmp_user; + List_iterator <LEX_USER> user_list(lex->users_list); - while ((user=user_list++)) + while ((tmp_user= user_list++)) { - if (user->password.str && - (strcmp(thd->user,user->user.str) || - user->host.str && - my_strcasecmp(&my_charset_latin1, - user->host.str, thd->host_or_ip))) - { - if (check_access(thd, UPDATE_ACL, "mysql", 0, 1, 1)) - { - send_error(thd, ER_PASSWORD_NOT_ALLOWED); - goto error; - } - break; // We are allowed to do global changes - } + if (!(user= get_current_user(thd, tmp_user))) + goto error; + if (specialflag & SPECIAL_NO_RESOLVE && + hostname_requires_resolving(user->host.str)) + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_HOSTNAME_WONT_WORK, + ER(ER_WARN_HOSTNAME_WONT_WORK), + user->host.str); + // Are we trying to change a password of another user + DBUG_ASSERT(user->host.str != 0); + if (strcmp(thd->security_ctx->user, user->user.str) || + my_strcasecmp(system_charset_info, + user->host.str, thd->security_ctx->host_or_ip)) + { + // TODO: use check_change_password() + if (is_acl_user(user->host.str, user->user.str) && + user->password.str && + check_access(thd, UPDATE_ACL,"mysql",0,1,1,0)) + { + 
my_message(ER_PASSWORD_NOT_ALLOWED, + ER(ER_PASSWORD_NOT_ALLOWED), MYF(0)); + goto error; + } + } } } - if (specialflag & SPECIAL_NO_RESOLVE) + if (first_table) { - LEX_USER *user; - List_iterator <LEX_USER> user_list(lex->users_list); - while ((user=user_list++)) + if (lex->type == TYPE_ENUM_PROCEDURE || + lex->type == TYPE_ENUM_FUNCTION) { - if (hostname_requires_resolving(user->host.str)) - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_HOSTNAME_WONT_WORK, - ER(ER_WARN_HOSTNAME_WONT_WORK), - user->host.str); + uint grants= lex->all_privileges + ? (PROC_ACLS & ~GRANT_ACL) | (lex->grant & GRANT_ACL) + : lex->grant; + if (grant_option && + check_grant_routine(thd, grants | GRANT_ACL, all_tables, + lex->type == TYPE_ENUM_PROCEDURE, 0)) + goto error; + /* Conditionally writes to binlog */ + res= mysql_routine_grant(thd, all_tables, + lex->type == TYPE_ENUM_PROCEDURE, + lex->users_list, grants, + lex->sql_command == SQLCOM_REVOKE, 0); } - } - if (tables) - { - if (grant_option && check_grant(thd, - (lex->grant | lex->grant_tot_col | - GRANT_ACL), - tables, 0, UINT_MAX, 0)) - goto error; - if (!(res = mysql_table_grant(thd,tables,lex->users_list, lex->columns, - lex->grant, - lex->sql_command == SQLCOM_REVOKE))) + else { - mysql_update_log.write(thd, thd->query, thd->query_length); - if (mysql_bin_log.is_open()) - { - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); - mysql_bin_log.write(&qinfo); - } + if (grant_option && check_grant(thd, + (lex->grant | lex->grant_tot_col | + GRANT_ACL), + all_tables, 0, UINT_MAX, 0)) + goto error; + /* Conditionally writes to binlog */ + res= mysql_table_grant(thd, all_tables, lex->users_list, + lex->columns, lex->grant, + lex->sql_command == SQLCOM_REVOKE); } } else { - if (lex->columns.elements) + if (lex->columns.elements || lex->type) { - send_error(thd,ER_ILLEGAL_GRANT_FOR_TABLE); - res=1; + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, ER(ER_ILLEGAL_GRANT_FOR_TABLE), + 
MYF(0)); + goto error; } else + /* Conditionally writes to binlog */ res = mysql_grant(thd, select_lex->db, lex->users_list, lex->grant, lex->sql_command == SQLCOM_REVOKE); if (!res) { - mysql_update_log.write(thd, thd->query, thd->query_length); - if (mysql_bin_log.is_open()) - { - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); - mysql_bin_log.write(&qinfo); - } - if (mqh_used && lex->sql_command == SQLCOM_GRANT) + if (lex->sql_command == SQLCOM_GRANT) { List_iterator <LEX_USER> str_list(lex->users_list); - LEX_USER *user; - while ((user=str_list++)) - reset_mqh(thd,user); + LEX_USER *user, *tmp_user; + while ((tmp_user=str_list++)) + { + if (!(user= get_current_user(thd, tmp_user))) + goto error; + reset_mqh(user); + } } } } @@ -3721,31 +4144,30 @@ purposes internal to the MySQL server", MYF(0)); } #endif /*!NO_EMBEDDED_ACCESS_CHECKS*/ case SQLCOM_RESET: - /* - RESET commands are never written to the binary log, so we have to - initialize this variable because RESET shares the same code as FLUSH + /* + RESET commands are never written to the binary log, so we have to + initialize this variable because RESET shares the same code as FLUSH */ lex->no_write_to_binlog= 1; case SQLCOM_FLUSH: { - if (check_global_access(thd,RELOAD_ACL) || check_db_used(thd, tables)) + bool write_to_binlog; + if (check_global_access(thd,RELOAD_ACL)) goto error; + /* reload_acl_and_cache() will tell us if we are allowed to write to the binlog or not. */ - bool write_to_binlog; - if (reload_acl_and_cache(thd, lex->type, tables, &write_to_binlog)) - send_error(thd, 0); - else + if (!reload_acl_and_cache(thd, lex->type, first_table, &write_to_binlog)) { /* We WANT to write and we CAN write. ! we write after unlocking the table. 
*/ + /* Presumably, RESET and binlog writing doesn't require synchronization */ if (!lex->no_write_to_binlog && write_to_binlog) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); @@ -3753,129 +4175,911 @@ purposes internal to the MySQL server", MYF(0)); } } send_ok(thd); - } + } + break; } case SQLCOM_KILL: - kill_one_thread(thd,lex->thread_id); + { + Item *it= (Item *)lex->value_list.head(); + + if ((!it->fixed && it->fix_fields(lex->thd, &it)) || it->check_cols(1)) + { + my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY), + MYF(0)); + goto error; + } + kill_one_thread(thd, (ulong)it->val_int(), lex->type & ONLY_KILL_QUERY); break; + } #ifndef NO_EMBEDDED_ACCESS_CHECKS case SQLCOM_SHOW_GRANTS: - res=0; - if ((thd->priv_user && - !strcmp(thd->priv_user,lex->grant_user->user.str)) || - !check_access(thd, SELECT_ACL, "mysql",0,1,0)) + { + LEX_USER *grant_user= get_current_user(thd, lex->grant_user); + if (!grant_user) + goto error; + if ((thd->security_ctx->priv_user && + !strcmp(thd->security_ctx->priv_user, grant_user->user.str)) || + !check_access(thd, SELECT_ACL, "mysql",0,1,0,0)) { - res = mysql_show_grants(thd,lex->grant_user); + res = mysql_show_grants(thd, grant_user); } break; + } #endif case SQLCOM_HA_OPEN: - if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL, tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL, all_tables, 0)) goto error; - res = mysql_ha_open(thd, tables); + res= mysql_ha_open(thd, first_table, 0); break; case SQLCOM_HA_CLOSE: - if (check_db_used(thd,tables)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables)) goto error; - res = mysql_ha_close(thd, tables); + res= mysql_ha_close(thd, first_table); break; case SQLCOM_HA_READ: + DBUG_ASSERT(first_table == 
all_tables && first_table != 0); /* There is no need to check for table permissions here, because if a user has no permissions to read a table, he won't be able to open it (with SQLCOM_HA_OPEN) in the first place. */ - if (check_db_used(thd,tables)) + if (check_db_used(thd, all_tables)) goto error; - res = mysql_ha_read(thd, tables, lex->ha_read_mode, lex->backup_dir, - lex->insert_list, lex->ha_rkey_mode, select_lex->where, - select_lex->select_limit, select_lex->offset_limit); + unit->set_limit(select_lex); + res= mysql_ha_read(thd, first_table, lex->ha_read_mode, lex->ident.str, + lex->insert_list, lex->ha_rkey_mode, select_lex->where, + unit->select_limit_cnt, unit->offset_limit_cnt); break; case SQLCOM_BEGIN: - if (thd->locked_tables) + if (thd->transaction.xid_state.xa_state != XA_NOTR) { - thd->lock=thd->locked_tables; - thd->locked_tables=0; // Will be automaticly closed - close_thread_tables(thd); // Free tables + my_error(ER_XAER_RMFAIL, MYF(0), + xa_state_names[thd->transaction.xid_state.xa_state]); + break; } - if (end_active_trans(thd)) + if (begin_trans(thd)) + goto error; + send_ok(thd); + break; + case SQLCOM_COMMIT: + if (end_trans(thd, lex->tx_release ? COMMIT_RELEASE : + lex->tx_chain ? COMMIT_AND_CHAIN : COMMIT)) + goto error; + send_ok(thd); + break; + case SQLCOM_ROLLBACK: + if (end_trans(thd, lex->tx_release ? ROLLBACK_RELEASE : + lex->tx_chain ? 
ROLLBACK_AND_CHAIN : ROLLBACK)) + goto error; + send_ok(thd); + break; + case SQLCOM_RELEASE_SAVEPOINT: + { + SAVEPOINT *sv; + for (sv=thd->transaction.savepoints; sv; sv=sv->prev) { - res= -1; + if (my_strnncoll(system_charset_info, + (uchar *)lex->ident.str, lex->ident.length, + (uchar *)sv->name, sv->length) == 0) + break; } - else + if (sv) { - thd->options= ((thd->options & (ulong) ~(OPTION_STATUS_NO_TRANS_UPDATE)) | - OPTION_BEGIN); - thd->server_status|= SERVER_STATUS_IN_TRANS; - if (!(lex->start_transaction_opt & MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT) || - !(res= ha_start_consistent_snapshot(thd))) + if (ha_release_savepoint(thd, sv)) + res= TRUE; // cannot happen + else send_ok(thd); + thd->transaction.savepoints=sv->prev; } + else + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "SAVEPOINT", lex->ident.str); break; - case SQLCOM_COMMIT: - /* - We don't use end_active_trans() here to ensure that this works - even if there is a problem with the OPTION_AUTO_COMMIT flag - (Which of course should never happen...) 
- */ + } + case SQLCOM_ROLLBACK_TO_SAVEPOINT: { - thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE); - thd->server_status&= ~SERVER_STATUS_IN_TRANS; - if (!ha_commit(thd)) + SAVEPOINT *sv; + for (sv=thd->transaction.savepoints; sv; sv=sv->prev) { - send_ok(thd); + if (my_strnncoll(system_charset_info, + (uchar *)lex->ident.str, lex->ident.length, + (uchar *)sv->name, sv->length) == 0) + break; + } + if (sv) + { + if (ha_rollback_to_savepoint(thd, sv)) + res= TRUE; // cannot happen + else + { + if ((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && + !thd->slave_thread) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARNING_NOT_COMPLETE_ROLLBACK, + ER(ER_WARNING_NOT_COMPLETE_ROLLBACK)); + send_ok(thd); + } + thd->transaction.savepoints=sv; } else - res= -1; + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "SAVEPOINT", lex->ident.str); break; } - case SQLCOM_ROLLBACK: - thd->server_status&= ~SERVER_STATUS_IN_TRANS; - if (!ha_rollback(thd)) + case SQLCOM_SAVEPOINT: + if (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN) || + thd->in_sub_stmt) || !opt_using_transactions) + send_ok(thd); + else { + SAVEPOINT **sv, *newsv; + for (sv=&thd->transaction.savepoints; *sv; sv=&(*sv)->prev) + { + if (my_strnncoll(system_charset_info, + (uchar *)lex->ident.str, lex->ident.length, + (uchar *)(*sv)->name, (*sv)->length) == 0) + break; + } + if (*sv) /* old savepoint of the same name exists */ + { + newsv=*sv; + ha_release_savepoint(thd, *sv); // it cannot fail + *sv=(*sv)->prev; + } + else if ((newsv=(SAVEPOINT *) alloc_root(&thd->transaction.mem_root, + savepoint_alloc_size)) == 0) + { + my_error(ER_OUT_OF_RESOURCES, MYF(0)); + break; + } + newsv->name=strmake_root(&thd->transaction.mem_root, + lex->ident.str, lex->ident.length); + newsv->length=lex->ident.length; /* - If a non-transactional table was updated, warn; don't warn if this is a - slave thread (because when a slave thread executes a ROLLBACK, it has - been read from the binary log, so it's 
100% sure and normal to produce - error ER_WARNING_NOT_COMPLETE_ROLLBACK. If we sent the warning to the - slave SQL thread, it would not stop the thread but just be printed in - the error log; but we don't want users to wonder why they have this - message in the error log, so we don't send it. + if we'll get an error here, don't add new savepoint to the list. + we'll lose a little bit of memory in transaction mem_root, but it'll + be free'd when transaction ends anyway */ - if ((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && !thd->slave_thread) - send_warning(thd,ER_WARNING_NOT_COMPLETE_ROLLBACK,0); + if (ha_savepoint(thd, newsv)) + res= TRUE; else + { + newsv->prev=thd->transaction.savepoints; + thd->transaction.savepoints=newsv; + send_ok(thd); + } + } + break; + case SQLCOM_CREATE_PROCEDURE: + case SQLCOM_CREATE_SPFUNCTION: + { + uint namelen; + char *name; + int sp_result= SP_INTERNAL_ERROR; + + DBUG_ASSERT(lex->sphead != 0); + DBUG_ASSERT(lex->sphead->m_db.str); /* Must be initialized in the parser */ + /* + Verify that the database name is allowed, optionally + lowercase it. + */ + if (check_db_name(lex->sphead->m_db.str)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), lex->sphead->m_db.str); + goto create_sp_error; + } + + /* + Check that a database directory with this name + exists. Design note: This won't work on virtual databases + like information_schema. 
+ */ + if (check_db_dir_existence(lex->sphead->m_db.str)) + { + my_error(ER_BAD_DB_ERROR, MYF(0), lex->sphead->m_db.str); + goto create_sp_error; + } + + if (check_access(thd, CREATE_PROC_ACL, lex->sphead->m_db.str, 0, 0, 0, + is_schema_db(lex->sphead->m_db.str))) + goto create_sp_error; + + if (end_active_trans(thd)) + goto create_sp_error; + + name= lex->sphead->name(&namelen); +#ifdef HAVE_DLOPEN + if (lex->sphead->m_type == TYPE_ENUM_FUNCTION) + { + udf_func *udf = find_udf(name, namelen); + + if (udf) + { + my_error(ER_UDF_EXISTS, MYF(0), name); + goto create_sp_error; + } + } +#endif + + /* + If the definer is not specified, this means that CREATE-statement missed + DEFINER-clause. DEFINER-clause can be missed in two cases: + + - The user submitted a statement w/o the clause. This is a normal + case, we should assign CURRENT_USER as definer. + + - Our slave received an updated from the master, that does not + replicate definer for stored rountines. We should also assign + CURRENT_USER as definer here, but also we should mark this routine + as NON-SUID. This is essential for the sake of backward + compatibility. + + The problem is the slave thread is running under "special" user (@), + that actually does not exist. In the older versions we do not fail + execution of a stored routine if its definer does not exist and + continue the execution under the authorization of the invoker + (BUG#13198). And now if we try to switch to slave-current-user (@), + we will fail. + + Actually, this leads to the inconsistent state of master and + slave (different definers, different SUID behaviour), but it seems, + this is the best we can do. 
+ */ + + if (!lex->definer) + { + bool local_res= FALSE; + Query_arena original_arena; + Query_arena *ps_arena = thd->activate_stmt_arena_if_needed(&original_arena); + + if (!(lex->definer= create_default_definer(thd))) + local_res= TRUE; + + if (ps_arena) + thd->restore_active_arena(ps_arena, &original_arena); + + /* Error has been already reported. */ + if (local_res) + goto create_sp_error; + + if (thd->slave_thread) + lex->sphead->m_chistics->suid= SP_IS_NOT_SUID; + } + + /* + If the specified definer differs from the current user, we should check + that the current user has SUPER privilege (in order to create a stored + routine under another user one must have SUPER privilege). + */ + + else if (strcmp(lex->definer->user.str, thd->security_ctx->priv_user) || + my_strcasecmp(system_charset_info, + lex->definer->host.str, + thd->security_ctx->priv_host)) + { + if (check_global_access(thd, SUPER_ACL)) + { + my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); + goto create_sp_error; + } + } + + /* Check that the specified definer exists. Emit a warning if not. 
*/ + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (!is_acl_user(lex->definer->host.str, + lex->definer->user.str)) + { + push_warning_printf(thd, + MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_NO_SUCH_USER, + ER(ER_NO_SUCH_USER), + lex->definer->user.str, + lex->definer->host.str); + } +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + + res= (sp_result= lex->sphead->create(thd)); + switch (sp_result) { + case SP_OK: +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* only add privileges if really neccessary */ + if (sp_automatic_privileges && !opt_noacl && + check_routine_access(thd, DEFAULT_CREATE_PROC_ACLS, + lex->sphead->m_db.str, name, + lex->sql_command == SQLCOM_CREATE_PROCEDURE, 1)) + { + if (sp_grant_privileges(thd, lex->sphead->m_db.str, name, + lex->sql_command == SQLCOM_CREATE_PROCEDURE)) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_PROC_AUTO_GRANT_FAIL, + ER(ER_PROC_AUTO_GRANT_FAIL)); + close_thread_tables(thd); + } +#endif + break; + case SP_WRITE_ROW_FAILED: + my_error(ER_SP_ALREADY_EXISTS, MYF(0), SP_TYPE_STRING(lex), name); + break; + case SP_BAD_IDENTIFIER: + my_error(ER_TOO_LONG_IDENT, MYF(0), name); + break; + case SP_BODY_TOO_LONG: + my_error(ER_TOO_LONG_BODY, MYF(0), name); + break; + default: + my_error(ER_SP_STORE_FAILED, MYF(0), SP_TYPE_STRING(lex), name); + break; + } /* end switch */ + + /* + Capture all errors within this CASE and + clean up the environment. + */ +create_sp_error: + if (sp_result != SP_OK ) + goto error; + send_ok(thd); + break; /* break super switch */ + } /* end case group bracket */ + case SQLCOM_CALL: + { + sp_head *sp; + + /* + This will cache all SP and SF and open and lock all tables + required for execution. + */ + if (check_table_access(thd, SELECT_ACL, all_tables, 0) || + open_and_lock_tables(thd, all_tables)) + goto error; + + /* + By this moment all needed SPs should be in cache so no need to look + into DB. 
+ */ + if (!(sp= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, lex->spname, + &thd->sp_proc_cache, TRUE))) + { + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "PROCEDURE", + lex->spname->m_qname.str); + goto error; + } + else + { + ha_rows select_limit; + /* bits that should be cleared in thd->server_status */ + uint bits_to_be_cleared= 0; + /* + Check that the stored procedure doesn't contain Dynamic SQL + and doesn't return result sets: such stored procedures can't + be called from a function or trigger. + */ + if (thd->in_sub_stmt) + { + const char *where= (thd->in_sub_stmt & SUB_STMT_TRIGGER ? + "trigger" : "function"); + if (sp->is_not_allowed_in_function(where)) + goto error; + } + + my_bool save_no_send_ok= thd->net.no_send_ok; + thd->net.no_send_ok= TRUE; + if (sp->m_flags & sp_head::MULTI_RESULTS) + { + if (! (thd->client_capabilities & CLIENT_MULTI_RESULTS)) + { + /* + The client does not support multiple result sets being sent + back + */ + my_error(ER_SP_BADSELECT, MYF(0), sp->m_qname.str); + thd->net.no_send_ok= save_no_send_ok; + goto error; + } + /* + If SERVER_MORE_RESULTS_EXISTS is not set, + then remember that it should be cleared + */ + bits_to_be_cleared= (~thd->server_status & + SERVER_MORE_RESULTS_EXISTS); + thd->server_status|= SERVER_MORE_RESULTS_EXISTS; + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (check_routine_access(thd, EXECUTE_ACL, + sp->m_db.str, sp->m_name.str, TRUE, FALSE)) + { + thd->net.no_send_ok= save_no_send_ok; + goto error; + } +#endif + select_limit= thd->variables.select_limit; + thd->variables.select_limit= HA_POS_ERROR; + + /* + We never write CALL statements into binlog: + - If the mode is non-prelocked, each statement will be logged + separately. + - If the mode is prelocked, the invoking statement will care + about writing into binlog. + So just execute the statement. 
+ */ + res= sp->execute_procedure(thd, &lex->value_list); + /* + If warnings have been cleared, we have to clear total_warn_count + too, otherwise the clients get confused. + */ + if (thd->warn_list.is_empty()) + thd->total_warn_count= 0; + + thd->variables.select_limit= select_limit; + + thd->net.no_send_ok= save_no_send_ok; + thd->server_status&= ~bits_to_be_cleared; + + if (!res) + send_ok(thd, (ulong) (thd->row_count_func < 0 ? 0 : + thd->row_count_func)); + else + goto error; // Substatement should already have sent error + } + break; + } + case SQLCOM_ALTER_PROCEDURE: + case SQLCOM_ALTER_FUNCTION: + { + int sp_result; + sp_head *sp; + st_sp_chistics chistics; + + memcpy(&chistics, &lex->sp_chistics, sizeof(chistics)); + if (lex->sql_command == SQLCOM_ALTER_PROCEDURE) + sp= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, lex->spname, + &thd->sp_proc_cache, FALSE); + else + sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, lex->spname, + &thd->sp_func_cache, FALSE); + mysql_reset_errors(thd, 0); + if (! 
sp) + { + if (lex->spname->m_db.str) + sp_result= SP_KEY_NOT_FOUND; + else + { + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + goto error; + } + } + else + { + if (check_routine_access(thd, ALTER_PROC_ACL, sp->m_db.str, + sp->m_name.str, + lex->sql_command == SQLCOM_ALTER_PROCEDURE, 0)) + goto error; + + if (end_active_trans(thd)) + goto error; + memcpy(&lex->sp_chistics, &chistics, sizeof(lex->sp_chistics)); + if ((sp->m_type == TYPE_ENUM_FUNCTION) && + !trust_function_creators && mysql_bin_log.is_open() && + !sp->m_chistics->detistic && + (chistics.daccess == SP_CONTAINS_SQL || + chistics.daccess == SP_MODIFIES_SQL_DATA)) + { + my_message(ER_BINLOG_UNSAFE_ROUTINE, + ER(ER_BINLOG_UNSAFE_ROUTINE), MYF(0)); + sp_result= SP_INTERNAL_ERROR; + } + else + { + /* + Note that if you implement the capability of ALTER FUNCTION to + alter the body of the function, this command should be made to + follow the restrictions that log-bin-trust-function-creators=0 + already puts on CREATE FUNCTION. + */ + /* Conditionally writes to binlog */ + if (lex->sql_command == SQLCOM_ALTER_PROCEDURE) + sp_result= sp_update_procedure(thd, lex->spname, + &lex->sp_chistics); + else + sp_result= sp_update_function(thd, lex->spname, &lex->sp_chistics); + } + } + switch (sp_result) + { + case SP_OK: send_ok(thd); + break; + case SP_KEY_NOT_FOUND: + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), + SP_COM_STRING(lex), lex->spname->m_qname.str); + goto error; + default: + my_error(ER_SP_CANT_ALTER, MYF(0), + SP_COM_STRING(lex), lex->spname->m_qname.str); + goto error; + } + break; + } + case SQLCOM_DROP_PROCEDURE: + case SQLCOM_DROP_FUNCTION: + { + int sp_result; + int type= (lex->sql_command == SQLCOM_DROP_PROCEDURE ? 
+ TYPE_ENUM_PROCEDURE : TYPE_ENUM_FUNCTION); + + sp_result= sp_routine_exists_in_table(thd, type, lex->spname); + mysql_reset_errors(thd, 0); + if (sp_result == SP_OK) + { + char *db= lex->spname->m_db.str; + char *name= lex->spname->m_name.str; + + if (check_routine_access(thd, ALTER_PROC_ACL, db, name, + lex->sql_command == SQLCOM_DROP_PROCEDURE, 0)) + goto error; + + if (end_active_trans(thd)) + goto error; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (sp_automatic_privileges && !opt_noacl && + sp_revoke_privileges(thd, db, name, + lex->sql_command == SQLCOM_DROP_PROCEDURE)) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_PROC_AUTO_REVOKE_FAIL, + ER(ER_PROC_AUTO_REVOKE_FAIL)); + } +#endif + /* Conditionally writes to binlog */ + if (lex->sql_command == SQLCOM_DROP_PROCEDURE) + sp_result= sp_drop_procedure(thd, lex->spname); + else + sp_result= sp_drop_function(thd, lex->spname); + } + else + { +#ifdef HAVE_DLOPEN + if (lex->sql_command == SQLCOM_DROP_FUNCTION) + { + udf_func *udf = find_udf(lex->spname->m_name.str, + lex->spname->m_name.length); + if (udf) + { + if (check_access(thd, DELETE_ACL, "mysql", 0, 1, 0, 0)) + goto error; + + /* Does NOT write to binlog */ + if (!(res = mysql_drop_function(thd, &lex->spname->m_name))) + { + send_ok(thd); + break; + } + } + } +#endif + if (lex->spname->m_db.str) + sp_result= SP_KEY_NOT_FOUND; + else + { + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + goto error; + } + } + res= sp_result; + switch (sp_result) { + case SP_OK: + send_ok(thd); + break; + case SP_KEY_NOT_FOUND: + if (lex->drop_if_exists) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), + SP_COM_STRING(lex), lex->spname->m_name.str); + res= FALSE; + send_ok(thd); + break; + } + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), + SP_COM_STRING(lex), lex->spname->m_qname.str); + goto error; + default: + my_error(ER_SP_DROP_FAILED, MYF(0), + SP_COM_STRING(lex), lex->spname->m_qname.str); + goto 
error; + } + break; + } + case SQLCOM_SHOW_CREATE_PROC: + { + if (lex->spname->m_name.length > NAME_LEN) + { + my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str); + goto error; + } + if (sp_show_create_procedure(thd, lex->spname) != SP_OK) + { /* We don't distinguish between errors for now */ + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), + SP_COM_STRING(lex), lex->spname->m_name.str); + goto error; + } + break; + } + case SQLCOM_SHOW_CREATE_FUNC: + { + if (lex->spname->m_name.length > NAME_LEN) + { + my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str); + goto error; + } + if (sp_show_create_function(thd, lex->spname) != SP_OK) + { /* We don't distinguish between errors for now */ + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), + SP_COM_STRING(lex), lex->spname->m_name.str); + goto error; + } + break; + } + case SQLCOM_SHOW_STATUS_PROC: + { + res= sp_show_status_procedure(thd, (lex->wild ? + lex->wild->ptr() : NullS)); + break; + } + case SQLCOM_SHOW_STATUS_FUNC: + { + res= sp_show_status_function(thd, (lex->wild ? 
+ lex->wild->ptr() : NullS)); + break; + } +#ifndef DBUG_OFF + case SQLCOM_SHOW_PROC_CODE: + case SQLCOM_SHOW_FUNC_CODE: + { + sp_head *sp; + + if (lex->spname->m_name.length > NAME_LEN) + { + my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str); + goto error; + } + if (lex->sql_command == SQLCOM_SHOW_PROC_CODE) + sp= sp_find_routine(thd, TYPE_ENUM_PROCEDURE, lex->spname, + &thd->sp_proc_cache, FALSE); + else + sp= sp_find_routine(thd, TYPE_ENUM_FUNCTION, lex->spname, + &thd->sp_func_cache, FALSE); + if (!sp || sp->show_routine_code(thd)) + { + /* We don't distinguish between errors for now */ + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), + SP_COM_STRING(lex), lex->spname->m_name.str); + goto error; + } + break; + } +#endif // ifndef DBUG_OFF + case SQLCOM_CREATE_VIEW: + { + if (end_active_trans(thd)) + goto error; + + res= mysql_create_view(thd, first_table, thd->lex->create_view_mode); + break; + } + case SQLCOM_DROP_VIEW: + { + if (check_table_access(thd, DROP_ACL, all_tables, 0) || + end_active_trans(thd)) + goto error; + /* Conditionally writes to binlog. */ + res= mysql_drop_view(thd, first_table, thd->lex->drop_mode); + break; + } + case SQLCOM_CREATE_TRIGGER: + { + if (end_active_trans(thd)) + goto error; + + /* Conditionally writes to binlog. */ + res= mysql_create_or_drop_trigger(thd, all_tables, 1); + + break; + } + case SQLCOM_DROP_TRIGGER: + { + if (end_active_trans(thd)) + goto error; + + /* Conditionally writes to binlog. */ + res= mysql_create_or_drop_trigger(thd, all_tables, 0); + break; + } + case SQLCOM_XA_START: + if (thd->transaction.xid_state.xa_state == XA_IDLE && + thd->lex->xa_opt == XA_RESUME) + { + if (! thd->transaction.xid_state.xid.eq(thd->lex->xid)) + { + my_error(ER_XAER_NOTA, MYF(0)); + break; + } + thd->transaction.xid_state.xa_state=XA_ACTIVE; + send_ok(thd); + break; + } + if (thd->lex->xa_opt != XA_NONE) + { // JOIN is not supported yet. 
TODO + my_error(ER_XAER_INVAL, MYF(0)); + break; + } + if (thd->transaction.xid_state.xa_state != XA_NOTR) + { + my_error(ER_XAER_RMFAIL, MYF(0), + xa_state_names[thd->transaction.xid_state.xa_state]); + break; + } + if (thd->active_transaction() || thd->locked_tables) + { + my_error(ER_XAER_OUTSIDE, MYF(0)); + break; + } + if (xid_cache_search(thd->lex->xid)) + { + my_error(ER_XAER_DUPID, MYF(0)); + break; + } + DBUG_ASSERT(thd->transaction.xid_state.xid.is_null()); + thd->transaction.xid_state.xa_state=XA_ACTIVE; + thd->transaction.xid_state.xid.set(thd->lex->xid); + xid_cache_insert(&thd->transaction.xid_state); + thd->options= ((thd->options & (ulong) ~(OPTION_STATUS_NO_TRANS_UPDATE)) | + OPTION_BEGIN); + thd->server_status|= SERVER_STATUS_IN_TRANS; + send_ok(thd); + break; + case SQLCOM_XA_END: + /* fake it */ + if (thd->lex->xa_opt != XA_NONE) + { // SUSPEND and FOR MIGRATE are not supported yet. TODO + my_error(ER_XAER_INVAL, MYF(0)); + break; + } + if (thd->transaction.xid_state.xa_state != XA_ACTIVE) + { + my_error(ER_XAER_RMFAIL, MYF(0), + xa_state_names[thd->transaction.xid_state.xa_state]); + break; + } + if (!thd->transaction.xid_state.xid.eq(thd->lex->xid)) + { + my_error(ER_XAER_NOTA, MYF(0)); + break; + } + thd->transaction.xid_state.xa_state=XA_IDLE; + send_ok(thd); + break; + case SQLCOM_XA_PREPARE: + if (thd->transaction.xid_state.xa_state != XA_IDLE) + { + my_error(ER_XAER_RMFAIL, MYF(0), + xa_state_names[thd->transaction.xid_state.xa_state]); + break; + } + if (!thd->transaction.xid_state.xid.eq(thd->lex->xid)) + { + my_error(ER_XAER_NOTA, MYF(0)); + break; + } + if (ha_prepare(thd)) + { + my_error(ER_XA_RBROLLBACK, MYF(0)); + xid_cache_delete(&thd->transaction.xid_state); + thd->transaction.xid_state.xa_state=XA_NOTR; + break; + } + thd->transaction.xid_state.xa_state=XA_PREPARED; + send_ok(thd); + break; + case SQLCOM_XA_COMMIT: + if (!thd->transaction.xid_state.xid.eq(thd->lex->xid)) + { + XID_STATE *xs=xid_cache_search(thd->lex->xid); + if 
(!xs || xs->in_thd) + my_error(ER_XAER_NOTA, MYF(0)); + else + { + ha_commit_or_rollback_by_xid(thd->lex->xid, 1); + xid_cache_delete(xs); + send_ok(thd); + } + break; + } + if (thd->transaction.xid_state.xa_state == XA_IDLE && + thd->lex->xa_opt == XA_ONE_PHASE) + { + int r; + if ((r= ha_commit(thd))) + my_error(r == 1 ? ER_XA_RBROLLBACK : ER_XAER_RMERR, MYF(0)); + else + send_ok(thd); + } + else if (thd->transaction.xid_state.xa_state == XA_PREPARED && + thd->lex->xa_opt == XA_NONE) + { + if (wait_if_global_read_lock(thd, 0, 0)) + { + ha_rollback(thd); + my_error(ER_XAER_RMERR, MYF(0)); + } + else + { + if (ha_commit_one_phase(thd, 1)) + my_error(ER_XAER_RMERR, MYF(0)); + else + send_ok(thd); + start_waiting_global_read_lock(thd); + } } else - res= -1; + { + my_error(ER_XAER_RMFAIL, MYF(0), + xa_state_names[thd->transaction.xid_state.xa_state]); + break; + } thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE); + thd->server_status&= ~SERVER_STATUS_IN_TRANS; + xid_cache_delete(&thd->transaction.xid_state); + thd->transaction.xid_state.xa_state=XA_NOTR; break; - case SQLCOM_ROLLBACK_TO_SAVEPOINT: - if (!ha_rollback_to_savepoint(thd, lex->savepoint_name)) + case SQLCOM_XA_ROLLBACK: + if (!thd->transaction.xid_state.xid.eq(thd->lex->xid)) { - if ((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && !thd->slave_thread) - send_warning(thd, ER_WARNING_NOT_COMPLETE_ROLLBACK, 0); + XID_STATE *xs=xid_cache_search(thd->lex->xid); + if (!xs || xs->in_thd) + my_error(ER_XAER_NOTA, MYF(0)); else - send_ok(thd); + { + ha_commit_or_rollback_by_xid(thd->lex->xid, 0); + xid_cache_delete(xs); + send_ok(thd); + } + break; + } + if (thd->transaction.xid_state.xa_state != XA_IDLE && + thd->transaction.xid_state.xa_state != XA_PREPARED) + { + my_error(ER_XAER_RMFAIL, MYF(0), + xa_state_names[thd->transaction.xid_state.xa_state]); + break; } + if (ha_rollback(thd)) + my_error(ER_XAER_RMERR, MYF(0)); else - res= -1; - break; - case SQLCOM_SAVEPOINT: - if 
(!ha_savepoint(thd, lex->savepoint_name)) send_ok(thd); - else - res= -1; + thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE); + thd->server_status&= ~SERVER_STATUS_IN_TRANS; + xid_cache_delete(&thd->transaction.xid_state); + thd->transaction.xid_state.xa_state=XA_NOTR; break; - default: /* Impossible */ + case SQLCOM_XA_RECOVER: + res= mysql_xa_recover(thd); + break; + default: +#ifndef EMBEDDED_LIBRARY + DBUG_ASSERT(0); /* Impossible */ +#endif send_ok(thd); break; } - thd->proc_info="query end"; // QQ + thd->proc_info="query end"; + /* Two binlog-related cleanups: */ /* Reset system variables temporarily modified by SET ONE SHOT. @@ -3889,43 +5093,119 @@ purposes internal to the MySQL server", MYF(0)); if (thd->one_shot_set && lex->sql_command != SQLCOM_SET_OPTION) reset_one_shot_variables(thd); - if (res < 0) - send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN : 0); + /* + The return value for ROW_COUNT() is "implementation dependent" if the + statement is not DELETE, INSERT or UPDATE, but -1 is what JDBC and ODBC + wants. + + We do not change the value for a CALL or EXECUTE statement, so the value + generated by the last called (or executed) statement is preserved. + */ + if (lex->sql_command != SQLCOM_CALL && lex->sql_command != SQLCOM_EXECUTE && + uc_update_queries[lex->sql_command]<2) + thd->row_count_func= -1; + + goto end; error: - DBUG_VOID_RETURN; + res= TRUE; + +end: + if (need_start_waiting) + { + /* + Release the protection against the global read lock and wake + everyone, who might want to set a global read lock. + */ + start_waiting_global_read_lock(thd); + } + DBUG_RETURN(res || thd->net.report_error); } /* + Check grants for commands which work only with one table. 
+ + SYNOPSIS + check_single_table_access() + thd Thread handler + privilege requested privilege + all_tables global table list of query + + RETURN + 0 - OK + 1 - access denied, error is sent to client +*/ + +bool check_single_table_access(THD *thd, ulong privilege, + TABLE_LIST *all_tables) +{ + Security_context * backup_ctx= thd->security_ctx; + + /* we need to switch to the saved context (if any) */ + if (all_tables->security_ctx) + thd->security_ctx= all_tables->security_ctx; + + const char *db_name; + if ((all_tables->view || all_tables->field_translation) && + !all_tables->schema_table) + db_name= all_tables->view_db.str; + else + db_name= all_tables->db; + + if (check_access(thd, privilege, db_name, + &all_tables->grant.privilege, 0, 0, + test(all_tables->schema_table))) + goto deny; + + /* Show only 1 table for check_grant */ + if (grant_option && check_grant(thd, privilege, all_tables, 0, 1, 0)) + goto deny; + + thd->security_ctx= backup_ctx; + return 0; + +deny: + thd->security_ctx= backup_ctx; + return 1; +} + +/* Check grants for commands which work only with one table and all other tables belonging to subselects or implicitly opened tables. 
SYNOPSIS check_one_table_access() thd Thread handler - privilege requested privelage - tables table list of command + privilege requested privilege + all_tables global table list of query RETURN 0 - OK 1 - access denied, error is sent to client */ -int check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables) +bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables) { - if (check_access(thd, privilege, tables->db, &tables->grant.privilege,0,0)) - return 1; - - /* Show only 1 table for check_grant */ - if (grant_option && check_grant(thd, privilege, tables, 0, 1, 0)) + if (check_single_table_access (thd,privilege,all_tables)) return 1; /* Check rights on tables of subselects and implictly opened tables */ - TABLE_LIST *subselects_tables; - if ((subselects_tables= tables->next)) + TABLE_LIST *subselects_tables, *view= all_tables->view ? all_tables : 0; + if ((subselects_tables= all_tables->next_global)) { - if ((check_table_access(thd, SELECT_ACL, subselects_tables,0))) + /* + Access rights asked for the first table of a view should be the same + as for the view + */ + if (view && subselects_tables->belong_to_view == view) + { + if (check_single_table_access (thd, privilege, subselects_tables)) + return 1; + subselects_tables= subselects_tables->next_global; + } + if (subselects_tables && + (check_table_access(thd, SELECT_ACL, subselects_tables, 0))) return 1; } return 0; @@ -3953,16 +5233,17 @@ int check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables) bool check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, - bool dont_check_global_grants, bool no_errors) + bool dont_check_global_grants, bool no_errors, bool schema_db) { - DBUG_ENTER("check_access"); - DBUG_PRINT("enter",("db: '%s' want_access: %lu master_access: %lu", - db ? 
db : "", want_access, thd->master_access)); + Security_context *sctx= thd->security_ctx; #ifndef NO_EMBEDDED_ACCESS_CHECKS ulong db_access; bool db_is_pattern= test(want_access & GRANT_ACL); #endif ulong dummy; + DBUG_ENTER("check_access"); + DBUG_PRINT("enter",("db: %s want_access: %lu master_access: %lu", + db ? db : "", want_access, sctx->master_access)); if (save_priv) *save_priv=0; else @@ -3970,36 +5251,61 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, if ((!db || !db[0]) && !thd->db && !dont_check_global_grants) { + DBUG_PRINT("error",("No database")); if (!no_errors) - send_error(thd,ER_NO_DB_ERROR); /* purecov: tested */ + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), + MYF(0)); /* purecov: tested */ DBUG_RETURN(TRUE); /* purecov: tested */ } + if (schema_db) + { + if (want_access & ~(SELECT_ACL | EXTRA_ACL)) + { + if (!no_errors) + { + const char *db_name= db ? db : thd->db; + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + sctx->priv_user, sctx->priv_host, db_name); + } + DBUG_RETURN(TRUE); + } + else + { + *save_priv= SELECT_ACL; + DBUG_RETURN(FALSE); + } + } + #ifdef NO_EMBEDDED_ACCESS_CHECKS DBUG_RETURN(0); #else - if ((thd->master_access & want_access) == want_access) + if ((sctx->master_access & want_access) == want_access) { /* If we don't have a global SELECT privilege, we have to get the database specific access rights to be able to handle queries of type UPDATE t1 SET a=1 WHERE b > 0 */ - db_access= thd->db_access; - if (!(thd->master_access & SELECT_ACL) && + db_access= sctx->db_access; + if (!(sctx->master_access & SELECT_ACL) && (db && (!thd->db || db_is_pattern || strcmp(db,thd->db)))) - db_access=acl_get(thd->host, thd->ip, thd->priv_user, db, db_is_pattern); - *save_priv=thd->master_access | db_access; + db_access=acl_get(sctx->host, sctx->ip, sctx->priv_user, db, + db_is_pattern); + *save_priv=sctx->master_access | db_access; DBUG_RETURN(FALSE); } - if (((want_access & ~thd->master_access) & ~(DB_ACLS | 
EXTRA_ACL)) || + if (((want_access & ~sctx->master_access) & ~(DB_ACLS | EXTRA_ACL)) || ! db && dont_check_global_grants) { // We can never grant this + DBUG_PRINT("error",("No possible access")); if (!no_errors) - net_printf(thd,ER_ACCESS_DENIED_ERROR, - thd->priv_user, - thd->priv_host, - thd->password ? ER(ER_YES) : ER(ER_NO));/* purecov: tested */ + my_error(ER_ACCESS_DENIED_ERROR, MYF(0), + sctx->priv_user, + sctx->priv_host, + (thd->password ? + ER(ER_YES) : + ER(ER_NO))); /* purecov: tested */ DBUG_RETURN(TRUE); /* purecov: tested */ } @@ -4007,24 +5313,30 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, DBUG_RETURN(FALSE); // Allow select on anything if (db && (!thd->db || db_is_pattern || strcmp(db,thd->db))) - db_access=acl_get(thd->host, thd->ip, thd->priv_user, db, db_is_pattern); + db_access= acl_get(sctx->host, sctx->ip, sctx->priv_user, db, + db_is_pattern); else - db_access=thd->db_access; + db_access= sctx->db_access; DBUG_PRINT("info",("db_access: %lu", db_access)); /* Remove SHOW attribute and access rights we already have */ - want_access &= ~(thd->master_access | EXTRA_ACL); - db_access= ((*save_priv=(db_access | thd->master_access)) & want_access); + want_access &= ~(sctx->master_access | EXTRA_ACL); + DBUG_PRINT("info",("db_access: %lu want_access: %lu", + db_access, want_access)); + db_access= ((*save_priv=(db_access | sctx->master_access)) & want_access); /* grant_option is set if there exists a single table or column grant */ if (db_access == want_access || - ((grant_option && !dont_check_global_grants) && - !(want_access & ~(db_access | TABLE_ACLS)))) + (grant_option && !dont_check_global_grants && + !(want_access & ~(db_access | TABLE_ACLS | PROC_ACLS)))) DBUG_RETURN(FALSE); /* Ok */ + + DBUG_PRINT("error",("Access denied")); if (!no_errors) - net_printf(thd,ER_DBACCESS_DENIED_ERROR, - thd->priv_user, - thd->priv_host, - db ? db : thd->db ? 
thd->db : "unknown"); /* purecov: tested */ + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + sctx->priv_user, sctx->priv_host, + (db ? db : (thd->db ? + thd->db : + "unknown"))); /* purecov: tested */ DBUG_RETURN(TRUE); /* purecov: tested */ #endif /* NO_EMBEDDED_ACCESS_CHECKS */ } @@ -4039,7 +5351,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, want_access Use should have any of these global rights WARNING - One gets access rigth if one has ANY of the rights in want_access + One gets access right if one has ANY of the rights in want_access This is useful as one in most cases only need one global right, but in some case we want to check if the user has SUPER or REPL_CLIENT_ACL rights. @@ -4055,19 +5367,36 @@ bool check_global_access(THD *thd, ulong want_access) return 0; #else char command[128]; - if ((thd->master_access & want_access)) + if ((thd->security_ctx->master_access & want_access)) return 0; get_privilege_desc(command, sizeof(command), want_access); - net_printf(thd,ER_SPECIFIC_ACCESS_DENIED_ERROR, - command); + my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command); return 1; #endif /* NO_EMBEDDED_ACCESS_CHECKS */ } /* - Check the privilege for all used tables. Table privileges are cached - in the table list for GRANT checking + Check the privilege for all used tables. + + SYNOPSYS + check_table_access() + thd Thread context + want_access Privileges requested + tables List of tables to be checked + no_errors FALSE/TRUE - report/don't report error to + the client (using my_error() call). + + NOTES + Table privileges are cached in the table list for GRANT checking. + This functions assumes that table list used and + thd->lex->query_tables_own_last value correspond to each other + (the latter should be either 0 or point to next_global member + of one of elements of this table list). 
+ + RETURN VALUE + FALSE - OK + TRUE - Access denied */ bool @@ -4076,40 +5405,173 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, { uint found=0; ulong found_access=0; - TABLE_LIST *org_tables=tables; - for (; tables ; tables=tables->next) +#ifndef NO_EMBEDDED_ACCESS_CHECKS + TABLE_LIST *org_tables= tables; +#endif + TABLE_LIST *first_not_own_table= thd->lex->first_not_own_table(); + Security_context *sctx= thd->security_ctx, *backup_ctx= thd->security_ctx; + /* + The check that first_not_own_table is not reached is for the case when + the given table list refers to the list for prelocking (contains tables + of other queries). For simple queries first_not_own_table is 0. + */ + for (; tables != first_not_own_table; tables= tables->next_global) { - if (tables->derived || - (tables->table && (int)tables->table->tmp_table) || + if (tables->security_ctx) + sctx= tables->security_ctx; + else + sctx= backup_ctx; + + if (tables->schema_table && + (want_access & ~(SELECT_ACL | EXTRA_ACL | FILE_ACL))) + { + if (!no_errors) + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + sctx->priv_user, sctx->priv_host, + information_schema_name.str); + return TRUE; + } + /* + Register access for view underlying table. 
+ Remove SHOW_VIEW_ACL, because it will be checked during making view + */ + tables->grant.orig_want_privilege= (want_access & ~SHOW_VIEW_ACL); + if (tables->derived || tables->schema_table || + (tables->table && (int)tables->table->s->tmp_table) || my_tz_check_n_skip_implicit_tables(&tables, thd->lex->time_zone_tables_used)) continue; - if ((thd->master_access & want_access) == (want_access & ~EXTRA_ACL) && + thd->security_ctx= sctx; + if ((sctx->master_access & want_access) == + (want_access & ~EXTRA_ACL) && thd->db) tables->grant.privilege= want_access; - else if (tables->db && tables->db == thd->db) + else if (tables->db && thd->db && strcmp(tables->db, thd->db) == 0) { if (found && !grant_option) // db already checked tables->grant.privilege=found_access; else { if (check_access(thd,want_access,tables->db,&tables->grant.privilege, - 0, no_errors)) - return TRUE; // Access denied + 0, no_errors, test(tables->schema_table))) + goto deny; // Access denied found_access=tables->grant.privilege; found=1; } } else if (check_access(thd,want_access,tables->db,&tables->grant.privilege, - 0, no_errors)) - return TRUE; + 0, no_errors, test(tables->schema_table))) + goto deny; } + thd->security_ctx= backup_ctx; if (grant_option) return check_grant(thd,want_access & ~EXTRA_ACL,org_tables, test(want_access & EXTRA_ACL), UINT_MAX, no_errors); return FALSE; +deny: + thd->security_ctx= backup_ctx; + return TRUE; +} + + +bool +check_routine_access(THD *thd, ulong want_access,char *db, char *name, + bool is_proc, bool no_errors) +{ + TABLE_LIST tables[1]; + + bzero((char *)tables, sizeof(TABLE_LIST)); + tables->db= db; + tables->table_name= tables->alias= name; + + /* + The following test is just a shortcut for check_access() (to avoid + calculating db_access) under the assumption that it's common to + give persons global right to execute all stored SP (but not + necessary to create them). 
+ */ + if ((thd->security_ctx->master_access & want_access) == want_access) + tables->grant.privilege= want_access; + else if (check_access(thd,want_access,db,&tables->grant.privilege, + 0, no_errors, 0)) + return TRUE; + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (grant_option) + return check_grant_routine(thd, want_access, tables, is_proc, no_errors); +#endif + + return FALSE; +} + + +/* + Check if the routine has any of the routine privileges + + SYNOPSIS + check_some_routine_access() + thd Thread handler + db Database name + name Routine name + + RETURN + 0 ok + 1 error +*/ + +bool check_some_routine_access(THD *thd, const char *db, const char *name, + bool is_proc) +{ + ulong save_priv; + if (thd->security_ctx->master_access & SHOW_PROC_ACLS) + return FALSE; + /* + There are no routines in information_schema db. So we can safely + pass zero to last paramter of check_access function + */ + if (!check_access(thd, SHOW_PROC_ACLS, db, &save_priv, 0, 1, 0) || + (save_priv & SHOW_PROC_ACLS)) + return FALSE; + return check_routine_level_acl(thd, db, name, is_proc); +} + + +/* + Check if the given table has any of the asked privileges + + SYNOPSIS + check_some_access() + thd Thread handler + want_access Bitmap of possible privileges to check for + + RETURN + 0 ok + 1 error +*/ + + +bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table) +{ + ulong access; + DBUG_ENTER("check_some_access"); + + /* This loop will work as long as we have less than 32 privileges */ + for (access= 1; access < want_access ; access<<= 1) + { + if (access & want_access) + { + if (!check_access(thd, access, table->db, + &table->grant.privilege, 0, 1, + test(table->schema_table)) && + !grant_option || !check_grant(thd, access, table, 0, 1, 1)) + DBUG_RETURN(0); + } + } + DBUG_PRINT("exit",("no matching access rights")); + DBUG_RETURN(1); } + bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *table_list) { @@ -4118,7 +5580,7 @@ bool check_merge_table_access(THD *thd, char 
*db, { /* Check that all tables use the current database */ TABLE_LIST *tmp; - for (tmp=table_list; tmp ; tmp=tmp->next) + for (tmp= table_list; tmp; tmp= tmp->next_local) { if (!tmp->db || !tmp->db[0]) tmp->db=db; @@ -4132,15 +5594,20 @@ bool check_merge_table_access(THD *thd, char *db, static bool check_db_used(THD *thd,TABLE_LIST *tables) { - for (; tables ; tables=tables->next) + char *current_db= NULL; + for (; tables; tables= tables->next_global) { - if (!tables->db) + if (tables->db == NULL) { - if (!(tables->db=thd->db)) - { - send_error(thd,ER_NO_DB_ERROR); /* purecov: tested */ - return TRUE; /* purecov: tested */ - } + /* + This code never works and should be removed in 5.1. All tables + that are added to the list of tables should already have its + database field initialized properly (see st_lex::add_table_to_list). + */ + DBUG_ASSERT(0); + if (thd->copy_db_to(¤t_db, 0)) + return TRUE; + tables->db= current_db; } } return FALSE; @@ -4161,14 +5628,23 @@ long max_stack_used; #endif #ifndef EMBEDDED_LIBRARY -bool check_stack_overrun(THD *thd,char *buf __attribute__((unused))) +/* + Note: The 'buf' parameter is necessary, even if it is unused here. + - fix_fields functions has a "dummy" buffer large enough for the + corresponding exec. (Thus we only have to check in fix_fields.) + - Passing to check_stack_overrun() prevents the compiler from removing it. 
+ */ +bool check_stack_overrun(THD *thd, long margin, + char *buf __attribute__((unused))) { long stack_used; + DBUG_ASSERT(thd == current_thd); if ((stack_used=used_stack(thd->thread_stack,(char*) &stack_used)) >= - (long) thread_stack_min) + (long) (thread_stack - margin)) { - sprintf(errbuff[0],ER(ER_STACK_OVERRUN),stack_used,thread_stack); - my_message(ER_STACK_OVERRUN,errbuff[0],MYF(0)); + sprintf(errbuff[0],ER(ER_STACK_OVERRUN_NEED_MORE), + stack_used,thread_stack,margin); + my_message(ER_STACK_OVERRUN_NEED_MORE,errbuff[0],MYF(0)); thd->fatal_error(); return 1; } @@ -4184,7 +5660,7 @@ bool check_stack_overrun(THD *thd,char *buf __attribute__((unused))) bool my_yyoverflow(short **yyss, YYSTYPE **yyvs, ulong *yystacksize) { - LEX *lex=current_lex; + LEX *lex= current_thd->lex; ulong old_info=0; if ((uint) *yystacksize >= MY_YACC_MAX) return 1; @@ -4210,6 +5686,7 @@ bool my_yyoverflow(short **yyss, YYSTYPE **yyvs, ulong *yystacksize) return 0; } + /**************************************************************************** Initialize global thd variables needed for query ****************************************************************************/ @@ -4242,17 +5719,26 @@ void mysql_reset_thd_for_next_command(THD *thd) DBUG_ENTER("mysql_reset_thd_for_next_command"); thd->free_list= 0; thd->select_number= 1; - thd->total_warn_count= 0; // Warnings for this query - thd->last_insert_id_used= thd->query_start_used= thd->insert_id_used=0; - thd->sent_row_count= thd->examined_row_count= 0; - thd->is_fatal_error= thd->rand_used= thd->time_zone_used= 0; + thd->query_start_used= thd->insert_id_used=0; + thd->last_insert_id_used_bin_log= FALSE; + thd->is_fatal_error= thd->time_zone_used= 0; thd->server_status&= ~ (SERVER_MORE_RESULTS_EXISTS | - SERVER_QUERY_NO_INDEX_USED | - SERVER_QUERY_NO_GOOD_INDEX_USED); + SERVER_QUERY_NO_INDEX_USED | + SERVER_QUERY_NO_GOOD_INDEX_USED); + DBUG_ASSERT(thd->security_ctx== &thd->main_security_ctx); thd->tmp_table_used= 0; - if 
(opt_bin_log) - reset_dynamic(&thd->user_var_events); - thd->clear_error(); + if (!thd->in_sub_stmt) + { + if (opt_bin_log) + { + reset_dynamic(&thd->user_var_events); + thd->user_var_events_alloc= thd->mem_root; + } + thd->clear_error(); + thd->total_warn_count=0; // Warnings for this query + thd->rand_used= 0; + thd->sent_row_count= thd->examined_row_count= 0; + } DBUG_VOID_RETURN; } @@ -4262,7 +5748,7 @@ mysql_init_select(LEX *lex) { SELECT_LEX *select_lex= lex->current_select; select_lex->init_select(); - select_lex->select_limit= HA_POS_ERROR; + lex->wild= 0; if (select_lex == &lex->select_lex) { DBUG_ASSERT(lex->result == 0); @@ -4275,50 +5761,70 @@ bool mysql_new_select(LEX *lex, bool move_down) { SELECT_LEX *select_lex; - if (!(select_lex= new(lex->thd->mem_root) SELECT_LEX())) - return 1; - select_lex->select_number= ++lex->thd->select_number; + THD *thd= lex->thd; + DBUG_ENTER("mysql_new_select"); + + if (!(select_lex= new (thd->mem_root) SELECT_LEX())) + DBUG_RETURN(1); + select_lex->select_number= ++thd->select_number; + select_lex->parent_lex= lex; /* Used in init_query. */ select_lex->init_query(); select_lex->init_select(); + lex->nest_level++; + select_lex->nest_level= lex->nest_level; /* Don't evaluate this subquery during statement prepare even if it's a constant one. The flag is switched off in the end of mysql_stmt_prepare. 
*/ - if (lex->thd->current_arena->is_stmt_prepare()) + if (thd->stmt_arena->is_stmt_prepare()) select_lex->uncacheable|= UNCACHEABLE_PREPARE; - if (move_down) { + SELECT_LEX_UNIT *unit; lex->subqueries= TRUE; /* first select_lex of subselect or derived table */ - SELECT_LEX_UNIT *unit; - if (!(unit= new(lex->thd->mem_root) SELECT_LEX_UNIT())) - return 1; + if (!(unit= new (thd->mem_root) SELECT_LEX_UNIT())) + DBUG_RETURN(1); unit->init_query(); unit->init_select(); - unit->thd= lex->thd; + unit->thd= thd; unit->include_down(lex->current_select); unit->link_next= 0; unit->link_prev= 0; unit->return_to= lex->current_select; select_lex->include_down(unit); - // TODO: assign resolve_mode for fake subquery after merging with new tree + /* + By default we assume that it is usual subselect and we have outer name + resolution context, if no we will assign it to 0 later + */ + select_lex->context.outer_context= &select_lex->outer_select()->context; } else { + if (lex->current_select->order_list.first && !lex->current_select->braces) + { + my_error(ER_WRONG_USAGE, MYF(0), "UNION", "ORDER BY"); + DBUG_RETURN(1); + } select_lex->include_neighbour(lex->current_select); - if (!select_lex->master_unit()->fake_select_lex && - select_lex->master_unit()->add_fake_select_lex(lex->thd)) - return 1; + SELECT_LEX_UNIT *unit= select_lex->master_unit(); + if (!unit->fake_select_lex && unit->add_fake_select_lex(lex->thd)) + DBUG_RETURN(1); + select_lex->context.outer_context= + unit->first_select()->context.outer_context; } select_lex->master_unit()->global_parameters= select_lex; select_lex->include_global((st_select_lex_node**)&lex->all_selects_list); lex->current_select= select_lex; - select_lex->resolve_mode= SELECT_LEX::SELECT_MODE; - return 0; + /* + in subquery is SELECT query and we allow resolution of names in SELECT + list + */ + select_lex->context.resolve_in_select_list= TRUE; + DBUG_RETURN(0); } /* @@ -4363,52 +5869,19 @@ void create_select_for_variable(const char *var_name) 
DBUG_VOID_RETURN; } -static TABLE_LIST* get_table_by_alias(TABLE_LIST* tl, const char* db, - const char* alias) -{ - for (;tl;tl= tl->next) - { - if (!strcmp(db,tl->db) && - tl->alias && !my_strcasecmp(table_alias_charset,tl->alias,alias)) - return tl; - } - - return 0; -} - -/* Sets up lex->auxilliary_table_list */ -void fix_multi_delete_lex(LEX* lex) -{ - TABLE_LIST *tl; - TABLE_LIST *good_list= (TABLE_LIST*)lex->select_lex.table_list.first; - - for (tl= (TABLE_LIST*)lex->auxilliary_table_list.first; tl; tl= tl->next) - { - TABLE_LIST* good_table= get_table_by_alias(good_list,tl->db,tl->alias); - if (good_table && !good_table->derived) - { - /* - real_name points to a member of Table_ident which is - allocated via thd->strmake() from THD memroot - */ - tl->real_name= good_table->real_name; - tl->real_name_length= good_table->real_name_length; - good_table->updating= tl->updating; - } - } -} void mysql_init_multi_delete(LEX *lex) { lex->sql_command= SQLCOM_DELETE_MULTI; mysql_init_select(lex); - lex->select_lex.select_limit= lex->unit.select_limit_cnt= - HA_POS_ERROR; - lex->select_lex.table_list.save_and_clear(&lex->auxilliary_table_list); + lex->select_lex.select_limit= 0; + lex->unit.select_limit_cnt= HA_POS_ERROR; + lex->select_lex.table_list.save_and_clear(&lex->auxiliary_table_list); lex->lock_option= using_update_log ? TL_READ_NO_INSERT : TL_READ; + lex->query_tables= 0; + lex->query_tables_last= &lex->query_tables; } - /* When you modify mysql_parse(), you may need to mofify mysql_test_parse_for_slave() in this same file. @@ -4418,11 +5891,17 @@ void mysql_parse(THD *thd, char *inBuf, uint length) { DBUG_ENTER("mysql_parse"); + DBUG_EXECUTE_IF("parser_debug", turn_parser_debug_on();); + mysql_init_query(thd, (uchar*) inBuf, length); if (query_cache_send_result_to_client(thd, inBuf, length) <= 0) { LEX *lex= thd->lex; - if (!yyparse((void *)thd) && ! 
thd->is_fatal_error) + + sp_cache_flush_obsolete(&thd->sp_proc_cache); + sp_cache_flush_obsolete(&thd->sp_func_cache); + + if (!MYSQLparse((void *)thd) && ! thd->is_fatal_error) { #ifndef NO_EMBEDDED_ACCESS_CHECKS if (mqh_used && thd->user_connect && @@ -4433,9 +5912,7 @@ void mysql_parse(THD *thd, char *inBuf, uint length) else #endif { - if (thd->net.report_error) - send_error(thd, 0, NullS); - else + if (! thd->net.report_error) { /* Binlog logs a string starting from thd->query and having length @@ -4447,8 +5924,8 @@ void mysql_parse(THD *thd, char *inBuf, uint length) PROCESSLIST. Note that we don't need LOCK_thread_count to modify query_length. */ - if (lex->found_colon && - (thd->query_length= (ulong)(lex->found_colon - thd->query))) + if (lex->found_semicolon && + (thd->query_length= (ulong)(lex->found_semicolon - thd->query))) thd->query_length--; /* Actually execute the query */ mysql_execute_command(thd); @@ -4458,12 +5935,21 @@ void mysql_parse(THD *thd, char *inBuf, uint length) } else { + DBUG_ASSERT(thd->net.report_error); DBUG_PRINT("info",("Command aborted. Fatal_error: %d", thd->is_fatal_error)); + query_cache_abort(&thd->net); } + if (thd->lex->sphead) + { + delete thd->lex->sphead; + thd->lex->sphead= 0; + } + lex->unit.cleanup(); thd->proc_info="freeing items"; thd->end_statement(); + thd->cleanup_after_query(); DBUG_ASSERT(thd->change_list.is_empty()); } DBUG_VOID_RETURN; @@ -4484,17 +5970,20 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length) { LEX *lex= thd->lex; bool error= 0; + DBUG_ENTER("mysql_test_parse_for_slave"); mysql_init_query(thd, (uchar*) inBuf, length); - if (!yyparse((void*) thd) && ! thd->is_fatal_error && + if (!MYSQLparse((void*) thd) && ! 
thd->is_fatal_error && all_tables_not_ok(thd,(TABLE_LIST*) lex->select_lex.table_list.first)) - error= 1; /* Ignore question */ + error= 1; /* Ignore question */ thd->end_statement(); - return error; + thd->cleanup_after_query(); + DBUG_RETURN(error); } #endif + /***************************************************************************** ** Store field definition for create ** Return 0 if ok @@ -4511,13 +6000,11 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, { register create_field *new_field; LEX *lex= thd->lex; - uint allowed_type_modifier=0; - char warn_buff[MYSQL_ERRMSG_SIZE]; DBUG_ENTER("add_field_to_list"); if (strlen(field_name) > NAME_LEN) { - net_printf(thd, ER_TOO_LONG_IDENT, field_name); /* purecov: inspected */ + my_error(ER_TOO_LONG_IDENT, MYF(0), field_name); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } if (type_modifier & PRI_KEY_FLAG) @@ -4550,7 +6037,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, !(((Item_func*)default_value)->functype() == Item_func::NOW_FUNC && type == FIELD_TYPE_TIMESTAMP)) { - net_printf(thd, ER_INVALID_DEFAULT, field_name); + my_error(ER_INVALID_DEFAULT, MYF(0), field_name); DBUG_RETURN(1); } else if (default_value->type() == Item::NULL_ITEM) @@ -4559,323 +6046,54 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, if ((type_modifier & (NOT_NULL_FLAG | AUTO_INCREMENT_FLAG)) == NOT_NULL_FLAG) { - net_printf(thd,ER_INVALID_DEFAULT,field_name); + my_error(ER_INVALID_DEFAULT, MYF(0), field_name); DBUG_RETURN(1); } } else if (type_modifier & AUTO_INCREMENT_FLAG) { - net_printf(thd, ER_INVALID_DEFAULT, field_name); + my_error(ER_INVALID_DEFAULT, MYF(0), field_name); DBUG_RETURN(1); } } if (on_update_value && type != FIELD_TYPE_TIMESTAMP) { - net_printf(thd, ER_INVALID_ON_UPDATE, field_name); - DBUG_RETURN(1); - } - - if (!(new_field=new create_field())) + my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name); DBUG_RETURN(1); 
- new_field->field=0; - new_field->field_name=field_name; - new_field->def= default_value; - new_field->flags= type_modifier; - new_field->unireg_check= (type_modifier & AUTO_INCREMENT_FLAG ? - Field::NEXT_NUMBER : Field::NONE); - new_field->decimals= decimals ? (uint) set_zone(atoi(decimals),0, - NOT_FIXED_DEC-1) : 0; - new_field->sql_type=type; - new_field->length=0; - new_field->char_length= 0; - new_field->change=change; - new_field->interval=0; - new_field->pack_length=0; - new_field->charset=cs; - new_field->geom_type= (Field::geometry_type) uint_geom_type; - - if (!comment) - { - new_field->comment.str=0; - new_field->comment.length=0; - } - else - { - /* In this case comment is always of type Item_string */ - new_field->comment.str= (char*) comment->str; - new_field->comment.length=comment->length; - } - if (length && !(new_field->length= (uint) atoi(length))) - length=0; /* purecov: inspected */ - uint sign_len=type_modifier & UNSIGNED_FLAG ? 0 : 1; - - if (new_field->length && new_field->decimals && - new_field->length < new_field->decimals+1 && - new_field->decimals != NOT_FIXED_DEC) - new_field->length=new_field->decimals+1; /* purecov: inspected */ - - switch (type) { - case FIELD_TYPE_TINY: - if (!length) new_field->length=MAX_TINYINT_WIDTH+sign_len; - allowed_type_modifier= AUTO_INCREMENT_FLAG; - break; - case FIELD_TYPE_SHORT: - if (!length) new_field->length=MAX_SMALLINT_WIDTH+sign_len; - allowed_type_modifier= AUTO_INCREMENT_FLAG; - break; - case FIELD_TYPE_INT24: - if (!length) new_field->length=MAX_MEDIUMINT_WIDTH+sign_len; - allowed_type_modifier= AUTO_INCREMENT_FLAG; - break; - case FIELD_TYPE_LONG: - if (!length) new_field->length=MAX_INT_WIDTH+sign_len; - allowed_type_modifier= AUTO_INCREMENT_FLAG; - break; - case FIELD_TYPE_LONGLONG: - if (!length) new_field->length=MAX_BIGINT_WIDTH; - allowed_type_modifier= AUTO_INCREMENT_FLAG; - break; - case FIELD_TYPE_NULL: - break; - case FIELD_TYPE_DECIMAL: - if (!length) - { - if ((new_field->length= 
new_field->decimals)) - new_field->length++; - else - new_field->length= 10; // Default length for DECIMAL - } - if (new_field->length < MAX_FIELD_WIDTH) // Skip wrong argument - { - new_field->length+=sign_len; - if (new_field->decimals) - new_field->length++; - } - break; - case FIELD_TYPE_VAR_STRING: - if (new_field->length < 4) - { - new_field->sql_type= FIELD_TYPE_STRING; - break; - } - /* fall through */ - case FIELD_TYPE_STRING: - if (new_field->length <= MAX_FIELD_CHARLENGTH || default_value) - break; - /* Convert long CHAR() and VARCHAR columns to TEXT or BLOB */ - new_field->sql_type= FIELD_TYPE_BLOB; - sprintf(warn_buff, ER(ER_AUTO_CONVERT), field_name, "CHAR", - (cs == &my_charset_bin) ? "BLOB" : "TEXT"); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_AUTO_CONVERT, - warn_buff); - /* fall through */ - case FIELD_TYPE_BLOB: - case FIELD_TYPE_TINY_BLOB: - case FIELD_TYPE_LONG_BLOB: - case FIELD_TYPE_MEDIUM_BLOB: - case FIELD_TYPE_GEOMETRY: - if (new_field->length) - { - /* The user has given a length to the blob column */ - if (new_field->length < 256) - type= FIELD_TYPE_TINY_BLOB; - else if (new_field->length < 65536) - type= FIELD_TYPE_BLOB; - else if (new_field->length < 256L*256L*256L) - type= FIELD_TYPE_MEDIUM_BLOB; - else - type= FIELD_TYPE_LONG_BLOB; - new_field->length= 0; - } - new_field->sql_type= type; - if (default_value) // Allow empty as default value - { - String str,*res; - res=default_value->val_str(&str); - if (res->length()) - { - net_printf(thd,ER_BLOB_CANT_HAVE_DEFAULT,field_name); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ - } - new_field->def=0; - } - new_field->flags|=BLOB_FLAG; - break; - case FIELD_TYPE_YEAR: - if (!length || new_field->length != 2) - new_field->length=4; // Default length - new_field->flags|= ZEROFILL_FLAG | UNSIGNED_FLAG; - break; - case FIELD_TYPE_FLOAT: - /* change FLOAT(precision) to FLOAT or DOUBLE */ - allowed_type_modifier= AUTO_INCREMENT_FLAG; - if (length && !decimals) - { 
- uint tmp_length=new_field->length; - if (tmp_length > PRECISION_FOR_DOUBLE) - { - net_printf(thd,ER_WRONG_FIELD_SPEC,field_name); - DBUG_RETURN(1); - } - else if (tmp_length > PRECISION_FOR_FLOAT) - { - new_field->sql_type=FIELD_TYPE_DOUBLE; - new_field->length=DBL_DIG+7; // -[digits].E+### - } - else - new_field->length=FLT_DIG+6; // -[digits].E+## - new_field->decimals= NOT_FIXED_DEC; - break; - } - if (!length) - { - new_field->length = FLT_DIG+6; - new_field->decimals= NOT_FIXED_DEC; - } - break; - case FIELD_TYPE_DOUBLE: - allowed_type_modifier= AUTO_INCREMENT_FLAG; - if (!length) - { - new_field->length = DBL_DIG+7; - new_field->decimals=NOT_FIXED_DEC; - } - break; - case FIELD_TYPE_TIMESTAMP: - if (!length) - new_field->length= 14; // Full date YYYYMMDDHHMMSS - else if (new_field->length != 19) - { - /* - We support only even TIMESTAMP lengths less or equal than 14 - and 19 as length of 4.1 compatible representation. - */ - new_field->length=((new_field->length+1)/2)*2; /* purecov: inspected */ - new_field->length= min(new_field->length,14); /* purecov: inspected */ - } - new_field->flags|= ZEROFILL_FLAG | UNSIGNED_FLAG; - if (default_value) - { - /* Grammar allows only NOW() value for ON UPDATE clause */ - if (default_value->type() == Item::FUNC_ITEM && - ((Item_func*)default_value)->functype() == Item_func::NOW_FUNC) - { - new_field->unireg_check= (on_update_value?Field::TIMESTAMP_DNUN_FIELD: - Field::TIMESTAMP_DN_FIELD); - /* - We don't need default value any longer moreover it is dangerous. - Everything handled by unireg_check further. 
- */ - new_field->def= 0; - } - else - new_field->unireg_check= (on_update_value?Field::TIMESTAMP_UN_FIELD: - Field::NONE); - } - else - { - /* - If we have default TIMESTAMP NOT NULL column without explicit DEFAULT - or ON UPDATE values then for the sake of compatiblity we should treat - this column as having DEFAULT NOW() ON UPDATE NOW() (when we don't - have another TIMESTAMP column with auto-set option before this one) - or DEFAULT 0 (in other cases). - So here we are setting TIMESTAMP_OLD_FIELD only temporary, and will - replace this value by TIMESTAMP_DNUN_FIELD or NONE later when - information about all TIMESTAMP fields in table will be availiable. - - If we have TIMESTAMP NULL column without explicit DEFAULT value - we treat it as having DEFAULT NULL attribute. - */ - new_field->unireg_check= on_update_value ? - Field::TIMESTAMP_UN_FIELD : - (new_field->flags & NOT_NULL_FLAG ? - Field::TIMESTAMP_OLD_FIELD: - Field::NONE); - } - break; - case FIELD_TYPE_DATE: // Old date type - if (protocol_version != PROTOCOL_VERSION-1) - new_field->sql_type=FIELD_TYPE_NEWDATE; - /* fall trough */ - case FIELD_TYPE_NEWDATE: - new_field->length=10; - break; - case FIELD_TYPE_TIME: - new_field->length=10; - break; - case FIELD_TYPE_DATETIME: - new_field->length=19; - break; - case FIELD_TYPE_SET: - { - if (interval_list->elements > sizeof(longlong)*8) - { - net_printf(thd,ER_TOO_BIG_SET,field_name); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ - } - new_field->pack_length= get_set_pack_length(interval_list->elements); - - List_iterator<String> it(*interval_list); - String *tmp; - while ((tmp= it++)) - new_field->interval_list.push_back(tmp); - /* - Set fake length to 1 to pass the below conditions. 
- Real length will be set in mysql_prepare_table() - when we know the character set of the column - */ - new_field->length= 1; - } - break; - case FIELD_TYPE_ENUM: - { - // Should be safe - new_field->pack_length= get_enum_pack_length(interval_list->elements); - - List_iterator<String> it(*interval_list); - String *tmp; - while ((tmp= it++)) - new_field->interval_list.push_back(tmp); - new_field->length= 1; // See comment for FIELD_TYPE_SET above. - } - break; } - if ((new_field->length > MAX_FIELD_CHARLENGTH && type != FIELD_TYPE_SET && - type != FIELD_TYPE_ENUM) || - (!new_field->length && !(new_field->flags & BLOB_FLAG) && - type != FIELD_TYPE_STRING && - type != FIELD_TYPE_VAR_STRING && type != FIELD_TYPE_GEOMETRY)) - { - net_printf(thd,ER_TOO_BIG_FIELDLENGTH,field_name, - MAX_FIELD_CHARLENGTH); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ - } - type_modifier&= AUTO_INCREMENT_FLAG; - if ((~allowed_type_modifier) & type_modifier) + if (type == FIELD_TYPE_TIMESTAMP && length) { - net_printf(thd,ER_WRONG_FIELD_SPEC,field_name); + /* Display widths are no longer supported for TIMSTAMP as of MySQL 4.1. + In other words, for declarations such as TIMESTAMP(2), TIMESTAMP(4), + and so on, the display width is ignored. + */ + char buf[32]; + my_snprintf(buf, sizeof(buf), "TIMESTAMP(%s)", length); + push_warning_printf(thd,MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DEPRECATED_SYNTAX, + ER(ER_WARN_DEPRECATED_SYNTAX), + buf, "TIMESTAMP"); + } + + if (!(new_field= new create_field()) || + new_field->init(thd, field_name, type, length, decimals, type_modifier, + default_value, on_update_value, comment, change, + interval_list, cs, uint_geom_type)) DBUG_RETURN(1); - } - if (!new_field->pack_length) - new_field->pack_length=calc_pack_length(new_field->sql_type == - FIELD_TYPE_VAR_STRING ? 
- FIELD_TYPE_STRING : - new_field->sql_type, - new_field->length); - new_field->char_length= new_field->length; + lex->alter_info.create_list.push_back(new_field); lex->last_field=new_field; DBUG_RETURN(0); } + /* Store position for column in ALTER TABLE .. ADD column */ void store_position_for_column(const char *name) { - current_lex->last_field->after=my_const_cast(char*) (name); + current_thd->lex->last_field->after=my_const_cast(char*) (name); } bool @@ -4909,7 +6127,6 @@ static void remove_escape(char *name) { #ifdef USE_MB int l; -/* if ((l = ismbchar(name, name+MBMAXLEN))) { Wei He: I think it's wrong */ if (use_mb(system_charset_info) && (l = my_ismbchar(system_charset_info, name, strend))) { @@ -4942,6 +6159,7 @@ bool add_to_list(THD *thd, SQL_LIST &list,Item *item,bool asc) order->asc = asc; order->free_me=0; order->used=0; + order->counter_used= 0; list.link_in_list((byte*) order,(byte**) &order->next); DBUG_RETURN(0); } @@ -4977,17 +6195,19 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, LEX_STRING *option) { register TABLE_LIST *ptr; + TABLE_LIST *previous_table_ref; /* The table preceding the current one. */ char *alias_str; + LEX *lex= thd->lex; DBUG_ENTER("add_table_to_list"); + LINT_INIT(previous_table_ref); if (!table) DBUG_RETURN(0); // End of memory alias_str= alias ? 
alias->str : table->table.str; - if (!test(table_options & TL_OPTION_ALIAS) && - check_table_name(table->table.str,table->table.length) || - table->db.str && check_db_name(table->db.str)) + if (!test(table_options & TL_OPTION_ALIAS) && + check_table_name(table->table.str, table->table.length)) { - net_printf(thd, ER_WRONG_TABLE_NAME, table->table.str); + my_error(ER_WRONG_TABLE_NAME, MYF(0), table->table.str); DBUG_RETURN(0); } @@ -4995,7 +6215,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, { if (table->sel) { - net_printf(thd,ER_DERIVED_MUST_HAVE_ALIAS); + my_message(ER_DERIVED_MUST_HAVE_ALIAS, + ER(ER_DERIVED_MUST_HAVE_ALIAS), MYF(0)); DBUG_RETURN(0); } if (!(alias_str=thd->memdup(alias_str,table->table.length+1))) @@ -5005,33 +6226,43 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, DBUG_RETURN(0); /* purecov: inspected */ if (table->db.str) { + if (table->is_derived_table() == FALSE && check_db_name(table->db.str)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), table->db.str); + DBUG_RETURN(0); + } ptr->db= table->db.str; ptr->db_length= table->db.length; } - else if (thd->db) - { - ptr->db= thd->db; - ptr->db_length= thd->db_length; - } - else - { - /* The following can't be "" as we may do 'casedn_str()' on it */ - ptr->db= empty_c_string; - ptr->db_length= 0; - } - if (thd->current_arena->is_stmt_prepare()) - ptr->db= thd->strdup(ptr->db); + else if (thd->copy_db_to(&ptr->db, &ptr->db_length)) + DBUG_RETURN(0); ptr->alias= alias_str; if (lower_case_table_names && table->table.length) - my_casedn_str(files_charset_info, table->table.str); - ptr->real_name=table->table.str; - ptr->real_name_length=table->table.length; + table->table.length= my_casedn_str(files_charset_info, table->table.str); + ptr->table_name=table->table.str; + ptr->table_name_length=table->table.length; ptr->lock_type= lock_type; ptr->updating= test(table_options & TL_OPTION_UPDATING); ptr->force_index= test(table_options & TL_OPTION_FORCE_INDEX); ptr->ignore_leaves= 
test(table_options & TL_OPTION_IGNORE_LEAVES); ptr->derived= table->sel; + if (!ptr->derived && !my_strcasecmp(system_charset_info, ptr->db, + information_schema_name.str)) + { + ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, ptr->table_name); + if (!schema_table || + (schema_table->hidden && + lex->orig_sql_command == SQLCOM_END)) // not a 'show' command + { + my_error(ER_UNKNOWN_TABLE, MYF(0), + ptr->table_name, information_schema_name.str); + DBUG_RETURN(0); + } + ptr->schema_table_name= ptr->table_name; + ptr->schema_table= schema_table; + } + ptr->select_lex= lex->current_select; ptr->cacheable_table= 1; if (use_index_arg) ptr->use_index=(List<String> *) thd->memdup((gptr) use_index_arg, @@ -5043,24 +6274,275 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, /* check that used name is unique */ if (lock_type != TL_IGNORE) { - for (TABLE_LIST *tables=(TABLE_LIST*) table_list.first ; + TABLE_LIST *first_table= (TABLE_LIST*) table_list.first; + if (lex->sql_command == SQLCOM_CREATE_VIEW) + first_table= first_table ? first_table->next_local : NULL; + for (TABLE_LIST *tables= first_table ; tables ; - tables=tables->next) + tables=tables->next_local) { if (!my_strcasecmp(table_alias_charset, alias_str, tables->alias) && !strcmp(ptr->db, tables->db)) { - net_printf(thd,ER_NONUNIQ_TABLE,alias_str); /* purecov: tested */ + my_error(ER_NONUNIQ_TABLE, MYF(0), alias_str); /* purecov: tested */ DBUG_RETURN(0); /* purecov: tested */ } } } - table_list.link_in_list((byte*) ptr, (byte**) &ptr->next); + /* Store the table reference preceding the current one. 
*/ + if (table_list.elements > 0) + { + /* + table_list.next points to the last inserted TABLE_LIST->next_local' + element + We don't use the offsetof() macro here to avoid warnings from gcc + */ + previous_table_ref= (TABLE_LIST*) ((char*) table_list.next - + ((char*) &(ptr->next_local) - + (char*) ptr)); + /* + Set next_name_resolution_table of the previous table reference to point + to the current table reference. In effect the list + TABLE_LIST::next_name_resolution_table coincides with + TABLE_LIST::next_local. Later this may be changed in + store_top_level_join_columns() for NATURAL/USING joins. + */ + previous_table_ref->next_name_resolution_table= ptr; + } + + /* + Link the current table reference in a local list (list for current select). + Notice that as a side effect here we set the next_local field of the + previous table reference to 'ptr'. Here we also add one element to the + list 'table_list'. + */ + table_list.link_in_list((byte*) ptr, (byte**) &ptr->next_local); + ptr->next_name_resolution_table= NULL; + /* Link table in global list (all used tables) */ + lex->add_to_query_tables(ptr); + DBUG_RETURN(ptr); +} + + +/* + Initialize a new table list for a nested join + + SYNOPSIS + init_nested_join() + thd current thread + + DESCRIPTION + The function initializes a structure of the TABLE_LIST type + for a nested join. It sets up its nested join list as empty. + The created structure is added to the front of the current + join list in the st_select_lex object. Then the function + changes the current nest level for joins to refer to the newly + created empty list after having saved the info on the old level + in the initialized structure. 
+ + RETURN VALUE + 0, if success + 1, otherwise +*/ + +bool st_select_lex::init_nested_join(THD *thd) +{ + TABLE_LIST *ptr; + NESTED_JOIN *nested_join; + DBUG_ENTER("init_nested_join"); + + if (!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+ + sizeof(NESTED_JOIN)))) + DBUG_RETURN(1); + nested_join= ptr->nested_join= + ((NESTED_JOIN*) ((byte*) ptr + ALIGN_SIZE(sizeof(TABLE_LIST)))); + + join_list->push_front(ptr); + ptr->embedding= embedding; + ptr->join_list= join_list; + embedding= ptr; + join_list= &nested_join->join_list; + join_list->empty(); + DBUG_RETURN(0); +} + + +/* + End a nested join table list + + SYNOPSIS + end_nested_join() + thd current thread + + DESCRIPTION + The function returns to the previous join nest level. + If the current level contains only one member, the function + moves it one level up, eliminating the nest. + + RETURN VALUE + Pointer to TABLE_LIST element added to the total table list, if success + 0, otherwise +*/ + +TABLE_LIST *st_select_lex::end_nested_join(THD *thd) +{ + TABLE_LIST *ptr; + NESTED_JOIN *nested_join; + DBUG_ENTER("end_nested_join"); + + DBUG_ASSERT(embedding); + ptr= embedding; + join_list= ptr->join_list; + embedding= ptr->embedding; + nested_join= ptr->nested_join; + if (nested_join->join_list.elements == 1) + { + TABLE_LIST *embedded= nested_join->join_list.head(); + join_list->pop(); + embedded->join_list= join_list; + embedded->embedding= embedding; + join_list->push_front(embedded); + ptr= embedded; + } + else if (nested_join->join_list.elements == 0) + { + join_list->pop(); + ptr= 0; // return value + } + DBUG_RETURN(ptr); +} + + +/* + Nest last join operation + + SYNOPSIS + nest_last_join() + thd current thread + + DESCRIPTION + The function nest last join operation as if it was enclosed in braces. 
+ + RETURN VALUE + 0 Error + # Pointer to TABLE_LIST element created for the new nested join + +*/ + +TABLE_LIST *st_select_lex::nest_last_join(THD *thd) +{ + TABLE_LIST *ptr; + NESTED_JOIN *nested_join; + List<TABLE_LIST> *embedded_list; + DBUG_ENTER("nest_last_join"); + + if (!(ptr= (TABLE_LIST*) thd->calloc(ALIGN_SIZE(sizeof(TABLE_LIST))+ + sizeof(NESTED_JOIN)))) + DBUG_RETURN(0); + nested_join= ptr->nested_join= + ((NESTED_JOIN*) ((byte*) ptr + ALIGN_SIZE(sizeof(TABLE_LIST)))); + + ptr->embedding= embedding; + ptr->join_list= join_list; + embedded_list= &nested_join->join_list; + embedded_list->empty(); + + for (uint i=0; i < 2; i++) + { + TABLE_LIST *table= join_list->pop(); + table->join_list= embedded_list; + table->embedding= ptr; + embedded_list->push_back(table); + if (table->natural_join) + { + ptr->is_natural_join= TRUE; + /* + If this is a JOIN ... USING, move the list of joined fields to the + table reference that describes the join. + */ + if (prev_join_using) + ptr->join_using_fields= prev_join_using; + } + } + join_list->push_front(ptr); + nested_join->used_tables= nested_join->not_null_tables= (table_map) 0; DBUG_RETURN(ptr); } /* + Add a table to the current join list + + SYNOPSIS + add_joined_table() + table the table to add + + DESCRIPTION + The function puts a table in front of the current join list + of st_select_lex object. + Thus, joined tables are put into this list in the reverse order + (the most outer join operation follows first). + + RETURN VALUE + None +*/ + +void st_select_lex::add_joined_table(TABLE_LIST *table) +{ + DBUG_ENTER("add_joined_table"); + join_list->push_front(table); + table->join_list= join_list; + table->embedding= embedding; + DBUG_VOID_RETURN; +} + + +/* + Convert a right join into equivalent left join + + SYNOPSIS + convert_right_join() + thd current thread + + DESCRIPTION + The function takes the current join list t[0],t[1] ... and + effectively converts it into the list t[1],t[0] ... 
+ Although the outer_join flag for the new nested table contains + JOIN_TYPE_RIGHT, it will be handled as the inner table of a left join + operation. + + EXAMPLES + SELECT * FROM t1 RIGHT JOIN t2 ON on_expr => + SELECT * FROM t2 LEFT JOIN t1 ON on_expr + + SELECT * FROM t1,t2 RIGHT JOIN t3 ON on_expr => + SELECT * FROM t1,t3 LEFT JOIN t2 ON on_expr + + SELECT * FROM t1,t2 RIGHT JOIN (t3,t4) ON on_expr => + SELECT * FROM t1,(t3,t4) LEFT JOIN t2 ON on_expr + + SELECT * FROM t1 LEFT JOIN t2 ON on_expr1 RIGHT JOIN t3 ON on_expr2 => + SELECT * FROM t3 LEFT JOIN (t1 LEFT JOIN t2 ON on_expr2) ON on_expr1 + + RETURN + Pointer to the table representing the inner table, if success + 0, otherwise +*/ + +TABLE_LIST *st_select_lex::convert_right_join() +{ + TABLE_LIST *tab2= join_list->pop(); + TABLE_LIST *tab1= join_list->pop(); + DBUG_ENTER("convert_right_join"); + + join_list->push_front(tab2); + join_list->push_front(tab1); + tab1->outer_join|= JOIN_TYPE_RIGHT; + + DBUG_RETURN(tab1); +} + +/* Set lock for all tables in current select level SYNOPSIS: @@ -5080,9 +6562,9 @@ void st_select_lex::set_lock_for_tables(thr_lock_type lock_type) DBUG_PRINT("enter", ("lock_type: %d for_update: %d", lock_type, for_update)); - for (TABLE_LIST *tables= (TABLE_LIST*) table_list.first ; - tables ; - tables=tables->next) + for (TABLE_LIST *tables= (TABLE_LIST*) table_list.first; + tables; + tables= tables->next_local) { tables->lock_type= lock_type; tables->updating= for_update; @@ -5115,20 +6597,26 @@ void st_select_lex::set_lock_for_tables(thr_lock_type lock_type) 0 on success */ -bool st_select_lex_unit::add_fake_select_lex(THD *thd) +bool st_select_lex_unit::add_fake_select_lex(THD *thd_arg) { SELECT_LEX *first_sl= first_select(); DBUG_ENTER("add_fake_select_lex"); DBUG_ASSERT(!fake_select_lex); - - if (!(fake_select_lex= new (thd->mem_root) SELECT_LEX())) + + if (!(fake_select_lex= new (thd_arg->mem_root) SELECT_LEX())) DBUG_RETURN(1); fake_select_lex->include_standalone(this, 
(SELECT_LEX_NODE**)&fake_select_lex); fake_select_lex->select_number= INT_MAX; + fake_select_lex->parent_lex= thd_arg->lex; /* Used in init_query. */ fake_select_lex->make_empty_select(); fake_select_lex->linkage= GLOBAL_OPTIONS_TYPE; - fake_select_lex->select_limit= HA_POS_ERROR; + fake_select_lex->select_limit= 0; + + fake_select_lex->context.outer_context=first_sl->context.outer_context; + /* allow item list resolving in fake select for ORDER BY */ + fake_select_lex->context.resolve_in_select_list= TRUE; + fake_select_lex->context.select_lex= fake_select_lex; if (!first_sl->next_select()) { @@ -5140,21 +6628,80 @@ bool st_select_lex_unit::add_fake_select_lex(THD *thd) */ global_parameters= fake_select_lex; fake_select_lex->no_table_names_allowed= 1; - thd->lex->current_select= fake_select_lex; + thd_arg->lex->current_select= fake_select_lex; } + thd_arg->lex->pop_context(); DBUG_RETURN(0); } -void add_join_on(TABLE_LIST *b,Item *expr) + +/* + Push a new name resolution context for a JOIN ... ON clause to the + context stack of a query block. + + SYNOPSIS + push_new_name_resolution_context() + thd pointer to current thread + left_op left operand of the JOIN + right_op rigth operand of the JOIN + + DESCRIPTION + Create a new name resolution context for a JOIN ... ON clause, + set the first and last leaves of the list of table references + to be used for name resolution, and push the newly created + context to the stack of contexts of the query. 
+ + RETURN + FALSE if all is OK + TRUE if a memory allocation error occured +*/ + +bool +push_new_name_resolution_context(THD *thd, + TABLE_LIST *left_op, TABLE_LIST *right_op) +{ + Name_resolution_context *on_context; + if (!(on_context= new (thd->mem_root) Name_resolution_context)) + return TRUE; + on_context->init(); + on_context->first_name_resolution_table= + left_op->first_leaf_for_name_resolution(); + on_context->last_name_resolution_table= + right_op->last_leaf_for_name_resolution(); + return thd->lex->push_context(on_context); +} + + +/* + Add an ON condition to the second operand of a JOIN ... ON. + + SYNOPSIS + add_join_on + b the second operand of a JOIN ... ON + expr the condition to be added to the ON clause + + DESCRIPTION + Add an ON condition to the right operand of a JOIN ... ON clause. + + RETURN + FALSE if there was some error + TRUE if all is OK +*/ + +void add_join_on(TABLE_LIST *b, Item *expr) { if (expr) { if (!b->on_expr) - b->on_expr=expr; + b->on_expr= expr; else { - // This only happens if you have both a right and left join - b->on_expr=new Item_cond_and(b->on_expr,expr); + /* + If called from the parser, this happens if you have both a + right and left join. If called later, it happens if we add more + than one condition to the ON clause. + */ + b->on_expr= new Item_cond_and(b->on_expr,expr); } b->on_expr->top_level_item(); } @@ -5162,46 +6709,69 @@ void add_join_on(TABLE_LIST *b,Item *expr) /* - Mark that we have a NATURAL JOIN between two tables + Mark that there is a NATURAL JOIN or JOIN ... USING between two + tables. SYNOPSIS add_join_natural() - a Table to do normal join with - b Do normal join with this table + a Left join argument + b Right join argument + using_fields Field names from USING clause + lex The current st_select_lex IMPLEMENTATION - This function just marks that table b should be joined with a. - The function setup_cond() will create in b->on_expr a list - of equal condition between all fields of the same name. 
- + This function marks that table b should be joined with a either via + a NATURAL JOIN or via JOIN ... USING. Both join types are special + cases of each other, so we treat them together. The function + setup_conds() creates a list of equal condition between all fields + of the same name for NATURAL JOIN or the fields in 'using_fields' + for JOIN ... USING. The list of equality conditions is stored + either in b->on_expr, or in JOIN::conds, depending on whether there + was an outer join. + + EXAMPLE SELECT * FROM t1 NATURAL LEFT JOIN t2 <=> SELECT * FROM t1 LEFT JOIN t2 ON (t1.i=t2.i and t1.j=t2.j ... ) + + SELECT * FROM t1 NATURAL JOIN t2 WHERE <some_cond> + <=> + SELECT * FROM t1, t2 WHERE (t1.i=t2.i and t1.j=t2.j and <some_cond>) + + SELECT * FROM t1 JOIN t2 USING(j) WHERE <some_cond> + <=> + SELECT * FROM t1, t2 WHERE (t1.j=t2.j and <some_cond>) + + RETURN + None */ -void add_join_natural(TABLE_LIST *a,TABLE_LIST *b) +void add_join_natural(TABLE_LIST *a, TABLE_LIST *b, List<String> *using_fields, + SELECT_LEX *lex) { - b->natural_join=a; + b->natural_join= a; + lex->prev_join_using= using_fields; } + /* Reload/resets privileges and the different caches. SYNOPSIS reload_acl_and_cache() - thd Thread handler + thd Thread handler (can be NULL!) options What should be reset/reloaded (tables, privileges, slave...) tables Tables to flush (if any) write_to_binlog Depending on 'options', it may be very bad to write the query to the binlog (e.g. FLUSH SLAVE); this is a - pointer where, if it is not NULL, reload_acl_and_cache() - will put 0 if it thinks we really should not write to - the binlog. Otherwise it will put 1. + pointer where reload_acl_and_cache() will put 0 if + it thinks we really should not write to the binlog. + Otherwise it will put 1. RETURN 0 ok - !=0 error + !=0 error. 
thd->killed or thd->net.report_error is set */ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, @@ -5210,6 +6780,9 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, bool result=0; select_errors=0; /* Write if more errors */ bool tmp_write_to_binlog= 1; + + DBUG_ASSERT(!thd || !thd->in_sub_stmt); + #ifndef NO_EMBEDDED_ACCESS_CHECKS if (options & REFRESH_GRANT) { @@ -5219,13 +6792,14 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, allocate temporary THD for execution of acl_reload()/grant_reload(). */ if (!thd && (thd= (tmp_thd= new THD))) + { + thd->thread_stack= (char*) &tmp_thd; thd->store_globals(); + } if (thd) { (void)acl_reload(thd); (void)grant_reload(thd); - if (mqh_used) - reset_mqh(thd, (LEX_USER *) NULL, TRUE); } if (tmp_thd) { @@ -5234,6 +6808,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, my_pthread_setspecific_ptr(THR_THD, 0); thd= 0; } + reset_mqh((LEX_USER *)NULL, TRUE); } #endif if (options & REFRESH_LOG) @@ -5244,23 +6819,19 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, */ /* - Writing this command to the binlog may result in infinite loops when - doing mysqlbinlog|mysql, and anyway it does not really make sense to - log it automatically (would cause more trouble to users than it would - help them) + Writing this command to the binlog may result in infinite loops + when doing mysqlbinlog|mysql, and anyway it does not really make + sense to log it automatically (would cause more trouble to users + than it would help them) */ tmp_write_to_binlog= 0; mysql_log.new_file(1); - mysql_update_log.new_file(1); - mysql_bin_log.new_file(1); mysql_slow_log.new_file(1); -#ifdef HAVE_REPLICATION - if (mysql_bin_log.is_open() && expire_logs_days) + if( mysql_bin_log.is_open() ) { - long purge_time= time(0) - expire_logs_days*24*60*60; - if (purge_time >= 0) - mysql_bin_log.purge_logs_before_date(purge_time); + 
mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); } +#ifdef HAVE_REPLICATION pthread_mutex_lock(&LOCK_active_mi); rotate_relay_log(active_mi); pthread_mutex_unlock(&LOCK_active_mi); @@ -5274,7 +6845,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, if (options & REFRESH_QUERY_CACHE_FREE) { query_cache.pack(); // FLUSH QUERY CACHE - options &= ~REFRESH_QUERY_CACHE; //don't flush all cache, just free memory + options &= ~REFRESH_QUERY_CACHE; // Don't flush cache, just free memory } if (options & (REFRESH_TABLES | REFRESH_QUERY_CACHE)) { @@ -5314,10 +6885,15 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, */ tmp_write_to_binlog= 0; if (lock_global_read_lock(thd)) - return 1; + return 1; // Killed result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1, tables); - make_global_read_lock_block_commit(thd); + if (make_global_read_lock_block_commit(thd)) // Killed + { + /* Don't leave things in a half-locked state */ + unlock_global_read_lock(thd); + return 1; + } } else result=close_cached_tables(thd,(options & REFRESH_FAST) ? 
0 : 1, tables); @@ -5325,16 +6901,20 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, } if (options & REFRESH_HOSTS) hostname_cache_refresh(); - if (options & REFRESH_STATUS) - refresh_status(); + if (thd && (options & REFRESH_STATUS)) + refresh_status(thd); if (options & REFRESH_THREADS) flush_thread_cache(); #ifdef HAVE_REPLICATION if (options & REFRESH_MASTER) { + DBUG_ASSERT(thd); tmp_write_to_binlog= 0; if (reset_master(thd)) + { result=1; + thd->fatal_error(); // Ensure client get error + } } #endif #ifdef OPENSSL @@ -5355,9 +6935,8 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, } #endif if (options & REFRESH_USER_RESOURCES) - reset_mqh(thd,(LEX_USER *) NULL); - if (write_to_binlog) - *write_to_binlog= tmp_write_to_binlog; + reset_mqh((LEX_USER *) NULL); + *write_to_binlog= tmp_write_to_binlog; return result; } @@ -5373,7 +6952,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, This is written such that we have a short lock on LOCK_thread_count */ -void kill_one_thread(THD *thd, ulong id) +void kill_one_thread(THD *thd, ulong id, bool only_kill_query) { THD *tmp; uint error=ER_NO_SUCH_THREAD; @@ -5390,10 +6969,10 @@ void kill_one_thread(THD *thd, ulong id) VOID(pthread_mutex_unlock(&LOCK_thread_count)); if (tmp) { - if ((thd->master_access & SUPER_ACL) || - !strcmp(thd->user,tmp->user)) + if ((thd->security_ctx->master_access & SUPER_ACL) || + !strcmp(thd->security_ctx->user, tmp->security_ctx->user)) { - tmp->awake(1 /*prepare to die*/); + tmp->awake(only_kill_query ? 
THD::KILL_QUERY : THD::KILL_CONNECTION); error=0; } else @@ -5404,23 +6983,7 @@ void kill_one_thread(THD *thd, ulong id) if (!error) send_ok(thd); else - net_printf(thd,error,id); -} - - -/* Clear most status variables */ - -static void refresh_status(void) -{ - pthread_mutex_lock(&LOCK_status); - for (struct show_var_st *ptr=status_vars; ptr->name; ptr++) - { - if (ptr->type == SHOW_LONG) - *(ulong*) ptr->value= 0; - } - /* Reset the counters of all key caches (default and named). */ - process_key_caches(reset_key_cache_counters); - pthread_mutex_unlock(&LOCK_status); + my_error(error, MYF(0), id); } @@ -5465,12 +7028,13 @@ static bool append_file_to_dir(THD *thd, const char **filename_ptr, bool check_simple_select() { THD *thd= current_thd; - if (thd->lex->current_select != &thd->lex->select_lex) + LEX *lex= thd->lex; + if (lex->current_select != &lex->select_lex) { char command[80]; - strmake(command, thd->lex->yylval->symbol.str, - min(thd->lex->yylval->symbol.length, sizeof(command)-1)); - net_printf(thd, ER_CANT_USE_OPTION_HERE, command); + strmake(command, lex->yylval->symbol.str, + min(lex->yylval->symbol.length, sizeof(command)-1)); + my_error(ER_CANT_USE_OPTION_HERE, MYF(0), command); return 1; } return 0; @@ -5552,51 +7116,46 @@ Item * all_any_subquery_creator(Item *left_expr, SYNOPSIS multi_update_precheck() thd Thread handler - tables Global table list + tables Global/local table list (have to be the same) RETURN VALUE - 0 OK - 1 Error (message is sent to user) - -1 Error (message is not sent to user) + FALSE OK + TRUE Error */ -int multi_update_precheck(THD *thd, TABLE_LIST *tables) +bool multi_update_precheck(THD *thd, TABLE_LIST *tables) { - DBUG_ENTER("multi_update_precheck"); const char *msg= 0; TABLE_LIST *table; LEX *lex= thd->lex; SELECT_LEX *select_lex= &lex->select_lex; - TABLE_LIST *update_list= (TABLE_LIST*)select_lex->table_list.first; + DBUG_ENTER("multi_update_precheck"); if (select_lex->item_list.elements != lex->value_list.elements) { 
- my_error(ER_WRONG_VALUE_COUNT, MYF(0)); - DBUG_RETURN(-1); + my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); + DBUG_RETURN(TRUE); } /* Ensure that we have UPDATE or SELECT privilege for each table The exact privilege is checked in mysql_multi_update() */ - for (table= update_list; table; table= table->next) + for (table= tables; table; table= table->next_local) { if (table->derived) table->grant.privilege= SELECT_ACL; else if ((check_access(thd, UPDATE_ACL, table->db, - &table->grant.privilege, 0, 1) || + &table->grant.privilege, 0, 1, + test(table->schema_table)) || grant_option && check_grant(thd, UPDATE_ACL, table, 0, 1, 1)) && - (check_access(thd, SELECT_ACL, table->db, - &table->grant.privilege, 0, 0) || - grant_option && check_grant(thd, SELECT_ACL, table, 0, 1, 0))) - DBUG_RETURN(1); + (check_access(thd, SELECT_ACL, table->db, + &table->grant.privilege, 0, 0, + test(table->schema_table)) || + grant_option && check_grant(thd, SELECT_ACL, table, 0, 1, 0))) + DBUG_RETURN(TRUE); - /* - We assign following flag only to copy of table, because it will - be checked only if query contains subqueries i.e. only if copy exists - */ - if (table->table_list) - table->table_list->table_in_update_from_clause= 1; + table->table_in_first_from_clause= 1; } /* Is there tables of subqueries? @@ -5604,42 +7163,31 @@ int multi_update_precheck(THD *thd, TABLE_LIST *tables) if (&lex->select_lex != lex->all_selects_list || lex->time_zone_tables_used) { DBUG_PRINT("info",("Checking sub query list")); - for (table= tables; table; table= table->next) + for (table= tables; table; table= table->next_global) { - if (my_tz_check_n_skip_implicit_tables(&table, - lex->time_zone_tables_used)) - continue; - else if (table->table_in_update_from_clause) - { - /* - If we check table by local TABLE_LIST copy then we should copy - grants to global table list, because it will be used for table - opening. 
- */ - if (table->table_list) - table->grant= table->table_list->grant; - } - else if (!table->derived) + if (!my_tz_check_n_skip_implicit_tables(&table, + lex->time_zone_tables_used) && + !table->table_in_first_from_clause) { if (check_access(thd, SELECT_ACL, table->db, - &table->grant.privilege, 0, 0) || + &table->grant.privilege, 0, 0, + test(table->schema_table)) || grant_option && check_grant(thd, SELECT_ACL, table, 0, 1, 0)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } } } if (select_lex->order_list.elements) msg= "ORDER BY"; - else if (select_lex->select_limit && select_lex->select_limit != - HA_POS_ERROR) + else if (select_lex->select_limit) msg= "LIMIT"; if (msg) { my_error(ER_WRONG_USAGE, MYF(0), "UPDATE", msg); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } /* @@ -5648,67 +7196,100 @@ int multi_update_precheck(THD *thd, TABLE_LIST *tables) SYNOPSIS multi_delete_precheck() thd Thread handler - tables Global table list - table_count Pointer to table counter + tables Global/local table list RETURN VALUE - 0 OK - 1 error (message is sent to user) - -1 error (message is not sent to user) + FALSE OK + TRUE error */ -int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count) +bool multi_delete_precheck(THD *thd, TABLE_LIST *tables) { - DBUG_ENTER("multi_delete_precheck"); SELECT_LEX *select_lex= &thd->lex->select_lex; TABLE_LIST *aux_tables= - (TABLE_LIST *)thd->lex->auxilliary_table_list.first; - TABLE_LIST *delete_tables= (TABLE_LIST *)select_lex->table_list.first; - TABLE_LIST *target_tbl; - - *table_count= 0; + (TABLE_LIST *)thd->lex->auxiliary_table_list.first; + TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last; + DBUG_ENTER("multi_delete_precheck"); /* sql_yacc guarantees that tables and aux_tables are not zero */ DBUG_ASSERT(aux_tables != 0); if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) || - check_table_access(thd,SELECT_ACL, tables,0) || - 
check_table_access(thd,DELETE_ACL, aux_tables,0)) - DBUG_RETURN(1); + check_table_access(thd, SELECT_ACL, tables, 0)) + DBUG_RETURN(TRUE); + + /* + Since aux_tables list is not part of LEX::query_tables list we + have to juggle with LEX::query_tables_own_last value to be able + call check_table_access() safely. + */ + thd->lex->query_tables_own_last= 0; + if (check_table_access(thd, DELETE_ACL, aux_tables, 0)) + { + thd->lex->query_tables_own_last= save_query_tables_own_last; + DBUG_RETURN(TRUE); + } + thd->lex->query_tables_own_last= save_query_tables_own_last; + if ((thd->options & OPTION_SAFE_UPDATES) && !select_lex->where) { - my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, MYF(0)); - DBUG_RETURN(-1); + my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, + ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0)); + DBUG_RETURN(TRUE); } - for (target_tbl= aux_tables; target_tbl; target_tbl= target_tbl->next) + DBUG_RETURN(FALSE); +} + + +/* + Link tables in auxilary table list of multi-delete with corresponding + elements in main table list, and set proper locks for them. 
+ + SYNOPSIS + multi_delete_set_locks_and_link_aux_tables() + lex - pointer to LEX representing multi-delete + + RETURN VALUE + FALSE - success + TRUE - error +*/ + +bool multi_delete_set_locks_and_link_aux_tables(LEX *lex) +{ + TABLE_LIST *tables= (TABLE_LIST*)lex->select_lex.table_list.first; + TABLE_LIST *target_tbl; + DBUG_ENTER("multi_delete_set_locks_and_link_aux_tables"); + + lex->table_count= 0; + + for (target_tbl= (TABLE_LIST *)lex->auxiliary_table_list.first; + target_tbl; target_tbl= target_tbl->next_local) { - (*table_count)++; + lex->table_count++; /* All tables in aux_tables must be found in FROM PART */ TABLE_LIST *walk; - walk= get_table_by_alias(delete_tables,target_tbl->db,target_tbl->alias); - if (!walk) + for (walk= tables; walk; walk= walk->next_local) { - my_error(ER_UNKNOWN_TABLE, MYF(0), target_tbl->real_name, - "MULTI DELETE"); - DBUG_RETURN(-1); + if (!my_strcasecmp(table_alias_charset, + target_tbl->alias, walk->alias) && + !strcmp(walk->db, target_tbl->db)) + break; } - if (walk->derived) + if (!walk) { - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), target_tbl->real_name, - "DELETE"); - DBUG_RETURN(-1); + my_error(ER_UNKNOWN_TABLE, MYF(0), + target_tbl->table_name, "MULTI DELETE"); + DBUG_RETURN(TRUE); } - walk->lock_type= target_tbl->lock_type; - target_tbl->table_list= walk; // Remember corresponding table - - /* in case of subselects, we need to set lock_type in - * corresponding table in list of all tables */ - if (walk->table_list) + if (!walk->derived) { - target_tbl->table_list= walk->table_list; - walk->table_list->lock_type= walk->lock_type; + target_tbl->table_name= walk->table_name; + target_tbl->table_name_length= walk->table_name_length; } + walk->updating= target_tbl->updating; + walk->lock_type= target_tbl->lock_type; + target_tbl->correspondent_table= walk; // Remember corresponding table } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -5721,21 +7302,20 @@ int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint 
*table_count) tables Global table list RETURN VALUE - 0 OK - 1 Error (message is sent to user) - -1 Error (message is not sent to user) + FALSE OK + TRUE Error */ -int update_precheck(THD *thd, TABLE_LIST *tables) +bool update_precheck(THD *thd, TABLE_LIST *tables) { DBUG_ENTER("update_precheck"); if (thd->lex->select_lex.item_list.elements != thd->lex->value_list.elements) { - my_error(ER_WRONG_VALUE_COUNT, MYF(0)); - DBUG_RETURN(-1); + my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); + DBUG_RETURN(TRUE); } - DBUG_RETURN((check_db_used(thd, tables) || - check_one_table_access(thd, UPDATE_ACL, tables)) ? 1 : 0); + DBUG_RETURN(check_db_used(thd, tables) || + check_one_table_access(thd, UPDATE_ACL, tables)); } @@ -5748,19 +7328,18 @@ int update_precheck(THD *thd, TABLE_LIST *tables) tables Global table list RETURN VALUE - 0 OK - 1 error (message is sent to user) - -1 error (message is not sent to user) + FALSE OK + TRUE error */ -int delete_precheck(THD *thd, TABLE_LIST *tables) +bool delete_precheck(THD *thd, TABLE_LIST *tables) { DBUG_ENTER("delete_precheck"); if (check_one_table_access(thd, DELETE_ACL, tables)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); /* Set privilege for the WHERE clause */ tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -5773,12 +7352,11 @@ int delete_precheck(THD *thd, TABLE_LIST *tables) tables Global table list RETURN VALUE - 0 OK - 1 error (message is sent to user) - -1 error (message is not sent to user) + FALSE OK + TRUE error */ -int insert_precheck(THD *thd, TABLE_LIST *tables) +bool insert_precheck(THD *thd, TABLE_LIST *tables) { LEX *lex= thd->lex; DBUG_ENTER("insert_precheck"); @@ -5787,19 +7365,21 @@ int insert_precheck(THD *thd, TABLE_LIST *tables) Check that we have modify privileges for the first table and select privileges for the rest */ - ulong privilege= INSERT_ACL | - (lex->duplicates == DUP_REPLACE ? 
DELETE_ACL : 0) | - (lex->duplicates == DUP_UPDATE ? UPDATE_ACL : 0); + ulong privilege= (INSERT_ACL | + (lex->duplicates == DUP_REPLACE ? DELETE_ACL : 0) | + (lex->value_list.elements ? UPDATE_ACL : 0)); if (check_one_table_access(thd, privilege, tables)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); if (lex->update_list.elements != lex->value_list.elements) { - my_error(ER_WRONG_VALUE_COUNT, MYF(0)); - DBUG_RETURN(-1); + my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); + DBUG_RETURN(TRUE); } - DBUG_RETURN(0); + if (check_db_used(thd, tables)) + DBUG_RETURN(TRUE); + DBUG_RETURN(FALSE); } @@ -5813,67 +7393,61 @@ int insert_precheck(THD *thd, TABLE_LIST *tables) create_table Table which will be created RETURN VALUE - 0 OK - 1 Error (message is sent to user) + FALSE OK + TRUE Error */ -int create_table_precheck(THD *thd, TABLE_LIST *tables, - TABLE_LIST *create_table) +bool create_table_precheck(THD *thd, TABLE_LIST *tables, + TABLE_LIST *create_table) { LEX *lex= thd->lex; SELECT_LEX *select_lex= &lex->select_lex; ulong want_priv; - int error= 1; // Error message is given + bool error= TRUE; // Error message is given DBUG_ENTER("create_table_precheck"); want_priv= ((lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) ? CREATE_TMP_ACL : CREATE_ACL); if (check_access(thd, want_priv, create_table->db, - &create_table->grant.privilege, 0, 0) || + &create_table->grant.privilege, 0, 0, + test(create_table->schema_table)) || check_merge_table_access(thd, create_table->db, (TABLE_LIST *) lex->create_info.merge_list.first)) goto err; if (grant_option && want_priv != CREATE_TMP_ACL && - check_grant(thd, want_priv, create_table, 0, UINT_MAX, 0)) + check_grant(thd, want_priv, create_table, 0, 1, 0)) goto err; if (select_lex->item_list.elements) { /* Check permissions for used tables in CREATE TABLE ... 
SELECT */ +#ifdef NOT_NECESSARY_TO_CHECK_CREATE_TABLE_EXIST_WHEN_PREPARING_STATEMENT + /* This code throws an ill error for CREATE TABLE t1 SELECT * FROM t1 */ /* - For temporary tables or PREPARED STATEMETNS we don't have to check - if the created table exists + Only do the check for PS, becasue we on execute we have to check that + against the opened tables to ensure we don't use a table that is part + of the view (which can only be done after the table has been opened). */ - if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) && - ! thd->current_arena->is_stmt_prepare() && - find_real_table_in_list(tables, create_table->db, - create_table->real_name)) + if (thd->stmt_arena->is_stmt_prepare_or_first_sp_execute()) { - net_printf(thd,ER_UPDATE_TABLE_USED, create_table->real_name); - - goto err; - } - if (lex->create_info.used_fields & HA_CREATE_USED_UNION) - { - TABLE_LIST *tab; - for (tab= tables; tab; tab= tab->next) + /* + For temporary tables we don't have to check if the created table exists + */ + if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) && + find_table_in_global_list(tables, create_table->db, + create_table->table_name)) { - if (find_real_table_in_list((TABLE_LIST*) lex->create_info. - merge_list.first, - tables->db, tab->real_name)) - { - net_printf(thd, ER_UPDATE_TABLE_USED, tab->real_name); - goto err; - } - } - } - + error= FALSE; + goto err; + } + } +#endif if (tables && check_table_access(thd, SELECT_ACL, tables,0)) goto err; } - error= 0; + error= FALSE; err: DBUG_RETURN(error); @@ -5885,7 +7459,7 @@ err: SYNOPSIS negate_expression() - thd therad handler + thd thread handler expr expression for negation RETURN @@ -5914,3 +7488,130 @@ Item *negate_expression(THD *thd, Item *expr) return negated; return new Item_func_not(expr); } + +/* + Set the specified definer to the default value, which is the current user in + the thread. 
+ + SYNOPSIS + get_default_definer() + thd [in] thread handler + definer [out] definer +*/ + +void get_default_definer(THD *thd, LEX_USER *definer) +{ + const Security_context *sctx= thd->security_ctx; + + definer->user.str= (char *) sctx->priv_user; + definer->user.length= strlen(definer->user.str); + + definer->host.str= (char *) sctx->priv_host; + definer->host.length= strlen(definer->host.str); +} + + +/* + Create default definer for the specified THD. + + SYNOPSIS + create_default_definer() + thd [in] thread handler + + RETURN + On success, return a valid pointer to the created and initialized + LEX_USER, which contains definer information. + On error, return 0. +*/ + +LEX_USER *create_default_definer(THD *thd) +{ + LEX_USER *definer; + + if (! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER)))) + return 0; + + get_default_definer(thd, definer); + + return definer; +} + + +/* + Create definer with the given user and host names. + + SYNOPSIS + create_definer() + thd [in] thread handler + user_name [in] user name + host_name [in] host name + + RETURN + On success, return a valid pointer to the created and initialized + LEX_USER, which contains definer information. + On error, return 0. +*/ + +LEX_USER *create_definer(THD *thd, LEX_STRING *user_name, LEX_STRING *host_name) +{ + LEX_USER *definer; + + /* Create and initialize. */ + + if (! (definer= (LEX_USER*) thd->alloc(sizeof(LEX_USER)))) + return 0; + + definer->user= *user_name; + definer->host= *host_name; + + return definer; +} + + +/* + Retuns information about user or current user. + + SYNOPSIS + get_current_user() + thd [in] thread handler + user [in] user + + RETURN + On success, return a valid pointer to initialized + LEX_USER, which contains user information. + On error, return 0. 
+*/ + +LEX_USER *get_current_user(THD *thd, LEX_USER *user) +{ + if (!user->user.str) // current_user + return create_default_definer(thd); + + return user; +} + + +/* + Check that length of a string does not exceed some limit. + + SYNOPSIS + check_string_length() + str string to be checked + err_msg error message to be displayed if the string is too long + max_length max length + + RETURN + FALSE the passed string is not longer than max_length + TRUE the passed string is longer than max_length +*/ + +bool check_string_length(LEX_STRING *str, const char *err_msg, + uint max_length) +{ + if (str->length <= max_length) + return FALSE; + + my_error(ER_WRONG_STRING_LENGTH, MYF(0), str->str, err_msg, max_length); + + return TRUE; +} diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index b5aed0bbc4e..85092f14624 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -15,77 +14,113 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /********************************************************************** -This file contains the implementation of prepare and executes. +This file contains the implementation of prepared statements. -Prepare: +When one prepares a statement: - - Server gets the query from client with command 'COM_PREPARE'; + - Server gets the query from client with command 'COM_STMT_PREPARE'; in the following format: - [COM_PREPARE:1] [query] - - Parse the query and recognize any parameter markers '?' 
and + [COM_STMT_PREPARE:1] [query] + - Parse the query and recognize any parameter markers '?' and store its information list in lex->param_list - - Allocate a new statement for this prepare; and keep this in - 'thd->prepared_statements' pool. - - Without executing the query, return back to client the total + - Allocate a new statement for this prepare; and keep this in + 'thd->stmt_map'. + - Without executing the query, return back to client the total number of parameters along with result-set metadata information (if any) in the following format: [STMT_ID:4] [Column_count:2] [Param_count:2] + [Params meta info (stubs only for now)] (if Param_count > 0) [Columns meta info] (if Column_count > 0) - [Params meta info] (if Param_count > 0 ) (TODO : 4.1.1) - -Prepare-execute: - - - Server gets the command 'COM_EXECUTE' to execute the - previously prepared query. If there is any param markers; then client - will send the data in the following format: - [COM_EXECUTE:1] + +When one executes a statement: + + - Server gets the command 'COM_STMT_EXECUTE' to execute the + previously prepared query. If there are any parameter markers, then the + client will send the data in the following format: + [COM_STMT_EXECUTE:1] [STMT_ID:4] [NULL_BITS:(param_count+7)/8)] [TYPES_SUPPLIED_BY_CLIENT(0/1):1] [[length]data] - [[length]data] .. [[length]data]. - (Note: Except for string/binary types; all other types will not be + [[length]data] .. [[length]data]. + (Note: Except for string/binary types; all other types will not be supplied with length field) - - Replace the param items with this new data. If it is a first execute - or types altered by client; then setup the conversion routines. - - Execute the query without re-parsing and send back the results + - If it is a first execute or types of parameters were altered by client, + then setup the conversion routines. + - Assign parameter items from the supplied data. 
+ - Execute the query without re-parsing and send back the results to client -Long data handling: +When one supplies long data for a placeholder: - - Server gets the long data in pieces with command type 'COM_LONG_DATA'. + - Server gets the long data in pieces with command type + 'COM_STMT_SEND_LONG_DATA'. - The packet recieved will have the format as: - [COM_LONG_DATA:1][STMT_ID:4][parameter_number:2][data] - - data from the packet is appended to long data value buffer for this + [COM_STMT_SEND_LONG_DATA:1][STMT_ID:4][parameter_number:2][data] + - data from the packet is appended to the long data value buffer for this placeholder. - - It's up to the client to check for read data ended. The server doesn't - care; and also server doesn't notify to the client that it got the - data or not; if there is any error; then during execute; the error - will be returned + - It's up to the client to stop supplying data chunks at any point. The + server doesn't care; also, the server doesn't notify the client whether + it got the data or not; if there is any error, then it will be returned + at statement execute. ***********************************************************************/ #include "mysql_priv.h" #include "sql_select.h" // for JOIN -#include <m_ctype.h> // for isspace() +#include "sql_cursor.h" +#include "sp_head.h" +#include "sp.h" +#include "sp_cache.h" #ifdef EMBEDDED_LIBRARY /* include MYSQL_BIND headers */ #include <mysql.h> +#else +#include <mysql_com.h> #endif -/****************************************************************************** - Prepared_statement: statement which can contain placeholders -******************************************************************************/ +/* A result class used to send cursor rows using the binary protocol. 
*/ + +class Select_fetch_protocol_prep: public select_send +{ + Protocol_prep protocol; +public: + Select_fetch_protocol_prep(THD *thd); + virtual bool send_fields(List<Item> &list, uint flags); + virtual bool send_data(List<Item> &items); + virtual bool send_eof(); +#ifdef EMBEDDED_LIBRARY + void begin_dataset() + { + protocol.begin_dataset(); + } +#endif +}; + +/****************************************************************************/ + +/** + @class Prepared_statement + @brief Prepared_statement: a statement that can contain placeholders +*/ class Prepared_statement: public Statement { public: + enum flag_values + { + IS_IN_USE= 1 + }; + THD *thd; + Select_fetch_protocol_prep result; + Protocol *protocol; Item_param **param_array; uint param_count; uint last_errno; + uint flags; char last_error[MYSQL_ERRMSG_SIZE]; #ifndef EMBEDDED_LIBRARY bool (*set_params)(Prepared_statement *st, uchar *data, uchar *data_end, @@ -93,18 +128,34 @@ public: #else bool (*set_params_data)(Prepared_statement *st, String *expanded_query); #endif - bool (*set_params_from_vars)(Prepared_statement *stmt, + bool (*set_params_from_vars)(Prepared_statement *stmt, List<LEX_STRING>& varnames, String *expanded_query); public: - Prepared_statement(THD *thd_arg); + Prepared_statement(THD *thd_arg, Protocol *protocol_arg); virtual ~Prepared_statement(); void setup_set_params(); - virtual Item_arena::Type type() const; + virtual Query_arena::Type type() const; + virtual void cleanup_stmt(); + bool set_name(LEX_STRING *name); + inline void close_cursor() { delete cursor; cursor= 0; } + + bool prepare(const char *packet, uint packet_length); + bool execute(String *expanded_query, bool open_cursor); + /* Destroy this statement */ + bool deallocate(); +private: + /** + Store the parsed tree of a prepared statement here. + */ + LEX main_lex; + /** + The memory root to allocate parsed tree elements (instances of Item, + SELECT_LEX and other classes). 
+ */ + MEM_ROOT main_mem_root; }; -static void execute_stmt(THD *thd, Prepared_statement *stmt, - String *expanded_query, bool set_context); /****************************************************************************** Implementation @@ -116,27 +167,38 @@ inline bool is_param_null(const uchar *pos, ulong param_no) return pos[param_no/8] & (1 << (param_no & 7)); } -enum { STMT_QUERY_LOG_LENGTH= 8192 }; +/* + Find a prepared statement in the statement map by id. -enum enum_send_error { DONT_SEND_ERROR= 0, SEND_ERROR }; + SYNOPSIS + find_prepared_statement() + thd thread handle + id statement id + where the place from which this function is called (for + error reporting). -/* - Seek prepared statement in statement map by id: returns zero if statement - was not found, pointer otherwise. + DESCRIPTION + Try to find a prepared statement and set THD error if it's not found. + + RETURN VALUE + 0 if the statement was not found, a pointer otherwise. */ static Prepared_statement * -find_prepared_statement(THD *thd, ulong id, const char *where, - enum enum_send_error se) +find_prepared_statement(THD *thd, ulong id, const char *where) { + /* + To strictly separate namespaces of SQL prepared statements and C API + prepared statements find() will return 0 if there is a named prepared + statement with such id. + */ Statement *stmt= thd->stmt_map.find(id); - if (stmt == 0 || stmt->type() != Item_arena::PREPARED_STATEMENT) + if (stmt == 0 || stmt->type() != Query_arena::PREPARED_STATEMENT) { char llbuf[22]; - my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), 22, llstr(id, llbuf), where); - if (se == SEND_ERROR) - send_error(thd); + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), sizeof(llbuf), llstr(id, llbuf), + where); return 0; } return (Prepared_statement *) stmt; @@ -144,29 +206,40 @@ find_prepared_statement(THD *thd, ulong id, const char *where, /* - Send prepared stmt info to client after prepare + Send prepared statement id and metadata to the client after prepare. 
+ + SYNOPSIS + send_prep_stmt() + + RETURN VALUE + 0 in case of success, 1 otherwise */ #ifndef EMBEDDED_LIBRARY static bool send_prep_stmt(Prepared_statement *stmt, uint columns) { NET *net= &stmt->thd->net; - char buff[9]; + char buff[12]; + uint tmp; DBUG_ENTER("send_prep_stmt"); buff[0]= 0; /* OK packet indicator */ int4store(buff+1, stmt->id); int2store(buff+5, columns); int2store(buff+7, stmt->param_count); + buff[9]= 0; // Guard against a 4.1 client + tmp= min(stmt->thd->total_warn_count, 65535); + int2store(buff+10, tmp); + /* Send types and names of placeholders to the client XXX: fix this nasty upcast from List<Item_param> to List<Item> */ - DBUG_RETURN(my_net_write(net, buff, sizeof(buff)) || + DBUG_RETURN(my_net_write(net, buff, sizeof(buff)) || (stmt->param_count && stmt->thd->protocol_simple.send_fields((List<Item> *) &stmt->lex->param_list, - 0))); + Protocol::SEND_EOF))); } #else static bool send_prep_stmt(Prepared_statement *stmt, @@ -176,7 +249,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, thd->client_stmt_id= stmt->id; thd->client_param_count= stmt->param_count; - thd->net.last_errno= 0; + thd->clear_error(); return 0; } @@ -184,8 +257,20 @@ static bool send_prep_stmt(Prepared_statement *stmt, /* - Read the length of the parameter data and return back to - caller by positing the pointer to param data. + Read the length of the parameter data and return it back to + the caller. + + SYNOPSIS + get_param_length() + packet a pointer to the data + len remaining packet length + + DESCRIPTION + Read data length, position the packet to the first byte after it, + and return the length to the caller. + + RETURN VALUE + Length of data piece. */ #ifndef EMBEDDED_LIBRARY @@ -215,7 +300,7 @@ static ulong get_param_length(uchar **packet, ulong len) } if (len < 5) return 0; - (*packet)+=9; // Must be 254 when here + (*packet)+=9; // Must be 254 when here /* In our client-server protocol all numbers bigger than 2^24 stored as 8 bytes with uint8korr. 
Here we always know that @@ -230,19 +315,21 @@ static ulong get_param_length(uchar **packet, ulong len) #endif /*!EMBEDDED_LIBRARY*/ /* - Data conversion routines + Data conversion routines. + SYNOPSIS - set_param_xx() - param parameter item - pos input data buffer - len length of data in the buffer + set_param_xx() + param parameter item + pos input data buffer + len length of data in the buffer - All these functions read the data from pos, convert it to requested type - and assign to param; pos is advanced to predefined length. + DESCRIPTION + All these functions read the data from pos, convert it to requested + type and assign to param; pos is advanced to predefined length. - Make a note that the NULL handling is examined at first execution - (i.e. when input types altered) and for all subsequent executions - we don't read any values for this. + Make a note that the NULL handling is examined at first execution + (i.e. when input types altered) and for all subsequent executions + we don't read any values for this. RETURN VALUE none @@ -255,7 +342,7 @@ static void set_param_tiny(Item_param *param, uchar **pos, ulong len) return; #endif int8 value= (int8) **pos; - param->set_int(param->unsigned_flag ? (longlong) ((uint8) value) : + param->set_int(param->unsigned_flag ? 
(longlong) ((uint8) value) : (longlong) value, 4); *pos+= 1; } @@ -332,6 +419,13 @@ static void set_param_double(Item_param *param, uchar **pos, ulong len) *pos+= 8; } +static void set_param_decimal(Item_param *param, uchar **pos, ulong len) +{ + ulong length= get_param_length(pos, len); + param->set_decimal((char*)*pos, length); + *pos+= length; +} + #ifndef EMBEDDED_LIBRARY /* @@ -403,6 +497,7 @@ static void set_param_datetime(Item_param *param, uchar **pos, ulong len) *pos+= length; } + static void set_param_date(Item_param *param, uchar **pos, ulong len) { MYSQL_TIME tm; @@ -447,9 +542,10 @@ void set_param_time(Item_param *param, uchar **pos, ulong len) void set_param_datetime(Item_param *param, uchar **pos, ulong len) { - MYSQL_TIME *to= (MYSQL_TIME*)*pos; + MYSQL_TIME tm= *((MYSQL_TIME*)*pos); + tm.neg= 0; - param->set_time(to, MYSQL_TIMESTAMP_DATETIME, + param->set_time(&tm, MYSQL_TIMESTAMP_DATETIME, MAX_DATETIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); } @@ -471,7 +567,7 @@ static void set_param_str(Item_param *param, uchar **pos, ulong len) } -#undef get_param_length +#undef get_param_length static void setup_one_conversion_function(THD *thd, Item_param *param, uchar param_type) @@ -507,6 +603,12 @@ static void setup_one_conversion_function(THD *thd, Item_param *param, param->item_type= Item::REAL_ITEM; param->item_result_type= REAL_RESULT; break; + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_NEWDECIMAL: + param->set_param_func= set_param_decimal; + param->item_type= Item::DECIMAL_ITEM; + param->item_result_type= DECIMAL_RESULT; + break; case MYSQL_TYPE_TIME: param->set_param_func= set_param_time; param->item_type= Item::STRING_ITEM; @@ -571,25 +673,52 @@ static void setup_one_conversion_function(THD *thd, Item_param *param, #ifndef EMBEDDED_LIBRARY /* - Update the parameter markers by reading data from client packet - and if binary/update log is set, generate the valid query. + Routines to assign parameters from data supplied by the client. 
+ + DESCRIPTION + Update the parameter markers by reading data from the packet and + and generate a valid query for logging. + + NOTES + This function, along with other _withlog functions is called when one of + binary, slow or general logs is open. Logging of prepared statements in + all cases is performed by means of conventional queries: if parameter + data was supplied from C API, each placeholder in the query is + replaced with its actual value; if we're logging a [Dynamic] SQL + prepared statement, parameter markers are replaced with variable names. + Example: + mysql_stmt_prepare("UPDATE t1 SET a=a*1.25 WHERE a=?") + --> general logs gets [Prepare] UPDATE t1 SET a*1.25 WHERE a=?" + mysql_stmt_execute(stmt); + --> general and binary logs get + [Execute] UPDATE t1 SET a*1.25 WHERE a=1" + If a statement has been prepared using SQL syntax: + PREPARE stmt FROM "UPDATE t1 SET a=a*1.25 WHERE a=?" + --> general log gets + [Query] PREPARE stmt FROM "UPDATE ..." + EXECUTE stmt USING @a + --> general log gets + [Query] EXECUTE stmt USING @a; + + RETURN VALUE + 0 if success, 1 otherwise */ static bool insert_params_withlog(Prepared_statement *stmt, uchar *null_array, - uchar *read_pos, uchar *data_end, + uchar *read_pos, uchar *data_end, String *query) { THD *thd= stmt->thd; Item_param **begin= stmt->param_array; Item_param **end= begin + stmt->param_count; uint32 length= 0; - String str; + String str; const String *res; - DBUG_ENTER("insert_params_withlog"); + DBUG_ENTER("insert_params_withlog"); if (query->copy(stmt->query, stmt->query_length, default_charset_info)) DBUG_RETURN(1); - + for (Item_param **it= begin; it < end; ++it) { Item_param *param= *it; @@ -610,7 +739,7 @@ static bool insert_params_withlog(Prepared_statement *stmt, uchar *null_array, if (query->replace(param->pos_in_query+length, 1, *res)) DBUG_RETURN(1); - + length+= res->length()-1; } DBUG_RETURN(0); @@ -618,13 +747,13 @@ static bool insert_params_withlog(Prepared_statement *stmt, uchar 
*null_array, static bool insert_params(Prepared_statement *stmt, uchar *null_array, - uchar *read_pos, uchar *data_end, + uchar *read_pos, uchar *data_end, String *expanded_query) { Item_param **begin= stmt->param_array; Item_param **end= begin + stmt->param_count; - DBUG_ENTER("insert_params"); + DBUG_ENTER("insert_params"); for (Item_param **it= begin; it < end; ++it) { @@ -658,7 +787,7 @@ static bool setup_conversion_functions(Prepared_statement *stmt, if (*read_pos++) //types supplied / first execute { /* - First execute or types altered by the client, setup the + First execute or types altered by the client, setup the conversion routines for all parameters (one time) */ Item_param **it= stmt->param_array; @@ -684,6 +813,17 @@ static bool setup_conversion_functions(Prepared_statement *stmt, #else +/* + Embedded counterparts of parameter assignment routines. + + DESCRIPTION + The main difference between the embedded library and the server is + that in embedded case we don't serialize/deserialize parameters data. + Additionally, for unknown reason, the client-side flag raised for + changed types of placeholders is ignored and we simply setup conversion + functions at each execute (TODO: fix). +*/ + static bool emb_insert_params(Prepared_statement *stmt, String *expanded_query) { THD *thd= stmt->thd; @@ -706,8 +846,8 @@ static bool emb_insert_params(Prepared_statement *stmt, String *expanded_query) uchar *buff= (uchar*) client_param->buffer; param->unsigned_flag= client_param->is_unsigned; param->set_param_func(param, &buff, - client_param->length ? - *client_param->length : + client_param->length ? 
+ *client_param->length : client_param->buffer_length); } } @@ -733,7 +873,7 @@ static bool emb_insert_params_withlog(Prepared_statement *stmt, String *query) if (query->copy(stmt->query, stmt->query_length, default_charset_info)) DBUG_RETURN(1); - + for (; it < end; ++it, ++client_param) { Item_param *param= *it; @@ -745,10 +885,10 @@ static bool emb_insert_params_withlog(Prepared_statement *stmt, String *query) else { uchar *buff= (uchar*)client_param->buffer; - param->unsigned_flag= client_param->is_unsigned; + param->unsigned_flag= client_param->is_unsigned; param->set_param_func(param, &buff, - client_param->length ? - *client_param->length : + client_param->length ? + *client_param->length : client_param->buffer_length); } } @@ -768,7 +908,8 @@ static bool emb_insert_params_withlog(Prepared_statement *stmt, String *query) /* - Set prepared statement parameters from user variables. + Assign prepared statement parameters from user variables. + SYNOPSIS insert_params_from_vars() stmt Statement @@ -806,12 +947,14 @@ static bool insert_params_from_vars(Prepared_statement *stmt, /* Do the same as insert_params_from_vars but also construct query text for binary log. + SYNOPSIS insert_params_from_vars() - stmt Statement + stmt Prepared statement varnames List of variables. Caller must ensure that number of variables in the list is equal to number of statement parameters - query The query with parameter markers replaced with their values + query The query with parameter markers replaced with corresponding + user variables that were used to execute the query. 
*/ static bool insert_params_from_vars_with_log(Prepared_statement *stmt, @@ -822,12 +965,13 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt, Item_param **end= begin + stmt->param_count; user_var_entry *entry; LEX_STRING *varname; - DBUG_ENTER("insert_params_from_vars"); - List_iterator<LEX_STRING> var_it(varnames); String buf; const String *val; uint32 length= 0; + + DBUG_ENTER("insert_params_from_vars"); + if (query->copy(stmt->query, stmt->query_length, default_charset_info)) DBUG_RETURN(1); @@ -835,7 +979,8 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt, { Item_param *param= *it; varname= var_it++; - if (get_var_with_binlog(stmt->thd, *varname, &entry)) + if (get_var_with_binlog(stmt->thd, stmt->lex->sql_command, + *varname, &entry)) DBUG_RETURN(1); if (param->set_from_user_var(stmt->thd, entry)) @@ -843,18 +988,19 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt, /* Insert @'escaped-varname' instead of parameter in the query */ if (entry) { - char *begin, *ptr; + char *start, *ptr; buf.length(0); if (buf.reserve(entry->name.length*2+3)) DBUG_RETURN(1); - begin= ptr= buf.c_ptr_quick(); + start= ptr= buf.c_ptr_quick(); *ptr++= '@'; *ptr++= '\''; ptr+= escape_string_for_mysql(&my_charset_utf8_general_ci, - ptr, entry->name.str, entry->name.length); + ptr, 0, entry->name.str, + entry->name.length); *ptr++= '\''; - buf.length(ptr - begin); + buf.length(ptr - start); val= &buf; } else @@ -871,86 +1017,90 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt, } /* - Validate INSERT statement: + Validate INSERT statement. 
SYNOPSIS mysql_test_insert() - stmt prepared statemen handler - tables list of tables queries + stmt prepared statement + tables global/local table list RETURN VALUE - 0 ok - 1 error, sent to the client - -1 error, not sent to client + FALSE success + TRUE error, error message is set in THD */ -static int mysql_test_insert(Prepared_statement *stmt, - TABLE_LIST *table_list, - List<Item> &fields, - List<List_item> &values_list, - List<Item> &update_fields, - List<Item> &update_values, - enum_duplicates duplic) + +static bool mysql_test_insert(Prepared_statement *stmt, + TABLE_LIST *table_list, + List<Item> &fields, + List<List_item> &values_list, + List<Item> &update_fields, + List<Item> &update_values, + enum_duplicates duplic) { THD *thd= stmt->thd; - LEX *lex= stmt->lex; List_iterator_fast<List_item> its(values_list); List_item *values; - int res= -1; - TABLE_LIST *insert_table_list= - (TABLE_LIST*) lex->select_lex.table_list.first; DBUG_ENTER("mysql_test_insert"); - if ((res= insert_precheck(thd, table_list))) - DBUG_RETURN(res); + if (insert_precheck(thd, table_list)) + goto error; /* - open temporary memory pool for temporary data allocated by derived - tables & preparation procedure - Note that this is done without locks (should not be needed as we will not - access any data here) - If we would use locks, then we have to ensure we are not using - TL_WRITE_DELAYED as having two such locks can cause table corruption. + open temporary memory pool for temporary data allocated by derived + tables & preparation procedure + Note that this is done without locks (should not be needed as we will not + access any data here) + If we would use locks, then we have to ensure we are not using + TL_WRITE_DELAYED as having two such locks can cause table corruption. 
*/ - if (open_normal_and_derived_tables(thd, table_list)) - { - DBUG_RETURN(-1); - } + if (open_normal_and_derived_tables(thd, table_list, 0)) + goto error; if ((values= its++)) { uint value_count; ulong counter= 0; + Item *unused_conds= 0; - table_list->table->insert_values=(byte *)1; // don't allocate insert_values - if ((res= mysql_prepare_insert(thd, table_list, insert_table_list, - insert_table_list, - table_list->table, fields, values, - update_fields, update_values, duplic))) + if (table_list->table) + { + // don't allocate insert_values + table_list->table->insert_values=(byte *)1; + } + + if (mysql_prepare_insert(thd, table_list, table_list->table, + fields, values, update_fields, update_values, + duplic, &unused_conds, FALSE, FALSE, FALSE)) goto error; value_count= values->elements; its.rewind(); + if (table_list->lock_type == TL_WRITE_DELAYED && + !(table_list->table->file->table_flags() & HA_CAN_INSERT_DELAYED)) + { + my_error(ER_ILLEGAL_HA, MYF(0), (table_list->view ? + table_list->view_name.str : + table_list->table_name)); + goto error; + } while ((values= its++)) { counter++; if (values->elements != value_count) { - my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, - ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0), counter); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); goto error; } - if (setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0)) - goto error; + if (setup_fields(thd, 0, *values, 0, 0, 0)) + goto error; } } + DBUG_RETURN(FALSE); - res= 0; error: - lex->unit.cleanup(); - table_list->table->insert_values=0; - DBUG_RETURN(res); + /* insert_values is cleared in open_table */ + DBUG_RETURN(TRUE); } @@ -959,209 +1109,240 @@ error: SYNOPSIS mysql_test_update() - stmt prepared statemen handler - tables list of tables queries + stmt prepared statement + tables list of tables used in this query RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + 0 success + 1 error, error message is set in THD + 2 convert to 
multi_update */ + static int mysql_test_update(Prepared_statement *stmt, - TABLE_LIST *table_list) + TABLE_LIST *table_list) { int res; THD *thd= stmt->thd; + uint table_count= 0; SELECT_LEX *select= &stmt->lex->select_lex; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint want_privilege; +#endif + bool need_reopen; DBUG_ENTER("mysql_test_update"); - if ((res= update_precheck(thd, table_list))) - DBUG_RETURN(res); + if (update_precheck(thd, table_list)) + goto error; - if (open_and_lock_tables(thd, table_list)) - res= -1; - else + for ( ; ; ) { - TABLE_LIST *update_table_list= (TABLE_LIST *)select->table_list.first; - if (!(res= mysql_prepare_update(thd, table_list, - update_table_list, - &select->where, - select->order_list.elements, - (ORDER *) select->order_list.first))) + if (open_tables(thd, &table_list, &table_count, 0)) + goto error; + + if (table_list->multitable_view) { - if (setup_fields(thd, 0, update_table_list, - select->item_list, 1, 0, 0) || - setup_fields(thd, 0, update_table_list, - stmt->lex->value_list, 0, 0, 0)) - res= -1; + DBUG_ASSERT(table_list->view != 0); + DBUG_PRINT("info", ("Switch to multi-update")); + /* pass counter value */ + thd->lex->table_count= table_count; + /* convert to multiupdate */ + DBUG_RETURN(2); } - stmt->lex->unit.cleanup(); + + if (!lock_tables(thd, table_list, table_count, &need_reopen)) + break; + if (!need_reopen) + goto error; + close_tables_for_reopen(thd, &table_list); } - /* TODO: here we should send types of placeholders to the client. */ - DBUG_RETURN(res); + + /* + thd->fill_derived_tables() is false here for sure (because it is + preparation of PS, so we even do not check it). 
+ */ + if (mysql_handle_derived(thd->lex, &mysql_derived_prepare)) + goto error; + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* TABLE_LIST contain right privilages request */ + want_privilege= table_list->grant.want_privilege; +#endif + + if (mysql_prepare_update(thd, table_list, &select->where, + select->order_list.elements, + (ORDER *) select->order_list.first)) + goto error; + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + table_list->grant.want_privilege= want_privilege; + table_list->table->grant.want_privilege= want_privilege; + table_list->register_want_access(want_privilege); +#endif + thd->lex->select_lex.no_wrap_view_item= TRUE; + res= setup_fields(thd, 0, select->item_list, 1, 0, 0); + thd->lex->select_lex.no_wrap_view_item= FALSE; + if (res) + goto error; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* Check values */ + table_list->grant.want_privilege= + table_list->table->grant.want_privilege= + (SELECT_ACL & ~table_list->table->grant.privilege); + table_list->register_want_access(SELECT_ACL); +#endif + if (setup_fields(thd, 0, stmt->lex->value_list, 0, 0, 0)) + goto error; + /* TODO: here we should send types of placeholders to the client. */ + DBUG_RETURN(0); +error: + DBUG_RETURN(1); } /* - Validate DELETE statement + Validate DELETE statement. 
SYNOPSIS mysql_test_delete() - stmt prepared statemen handler - tables list of tables queries + stmt prepared statement + tables list of tables used in this query RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error, error message is set in THD */ -static int mysql_test_delete(Prepared_statement *stmt, - TABLE_LIST *table_list) + +static bool mysql_test_delete(Prepared_statement *stmt, + TABLE_LIST *table_list) { - int res; THD *thd= stmt->thd; LEX *lex= stmt->lex; DBUG_ENTER("mysql_test_delete"); - if ((res= delete_precheck(thd, table_list))) - DBUG_RETURN(res); + if (delete_precheck(thd, table_list) || + open_and_lock_tables(thd, table_list)) + goto error; - if (open_and_lock_tables(thd, table_list)) - res= -1; - else + if (!table_list->table) { - res= mysql_prepare_delete(thd, table_list, &lex->select_lex.where); - lex->unit.cleanup(); + my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0), + table_list->view_db.str, table_list->view_name.str); + goto error; } - /* TODO: here we should send types of placeholders to the client. */ - DBUG_RETURN(res); + + DBUG_RETURN(mysql_prepare_delete(thd, table_list, &lex->select_lex.where)); +error: + DBUG_RETURN(TRUE); } /* Validate SELECT statement. - In case of success, if this query is not EXPLAIN, send column list info - back to client. SYNOPSIS mysql_test_select() - stmt prepared statemen handler - tables list of tables queries + stmt prepared statement + tables list of tables used in the query + + DESCRIPTION + In case of success, if this query is not EXPLAIN, send column list info + back to the client. 
RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + 0 success + 1 error, error message is set in THD + 2 success, and statement metadata has been sent */ static int mysql_test_select(Prepared_statement *stmt, - TABLE_LIST *tables, bool text_protocol) + TABLE_LIST *tables, bool text_protocol) { THD *thd= stmt->thd; LEX *lex= stmt->lex; SELECT_LEX_UNIT *unit= &lex->unit; - int result= 1; DBUG_ENTER("mysql_test_select"); + lex->select_lex.context.resolve_in_select_list= TRUE; + ulong privilege= lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL; if (tables) { if (check_table_access(thd, privilege, tables,0)) - DBUG_RETURN(1); + goto error; } - else if (check_access(thd, privilege, any_db,0,0,0)) - DBUG_RETURN(1); + else if (check_access(thd, privilege, any_db,0,0,0,0)) + goto error; if (!lex->result && !(lex->result= new (stmt->mem_root) select_send)) { - send_error(thd); - goto err; + my_error(ER_OUTOFMEMORY, MYF(0), sizeof(select_send)); + goto error; } if (open_and_lock_tables(thd, tables)) - { - send_error(thd); - goto err; - } + goto error; thd->used_tables= 0; // Updated by setup_fields - // JOIN::prepare calls - if (unit->prepare(thd, 0, 0, "")) - { - send_error(thd); - goto err_prep; - } - if (!text_protocol) + /* + JOIN::prepare calls + It is not SELECT COMMAND for sure, so setup_tables will be called as + usual, and we pass 0 as setup_tables_done_option + */ + if (unit->prepare(thd, 0, 0)) + goto error; + if (!lex->describe && !text_protocol) { - if (lex->describe) - { - if (send_prep_stmt(stmt, 0) || thd->protocol->flush()) - goto err_prep; - } - else - { - /* Make copy of item list, as change_columns may change it */ - List<Item> fields(lex->select_lex.item_list); + /* Make copy of item list, as change_columns may change it */ + List<Item> fields(lex->select_lex.item_list); - /* Change columns if a procedure like analyse() */ - if (unit->last_procedure && - unit->last_procedure->change_columns(fields)) - goto err_prep; + /* 
Change columns if a procedure like analyse() */ + if (unit->last_procedure && unit->last_procedure->change_columns(fields)) + goto error; - /* - We can use lex->result as it should've been - prepared in unit->prepare call above. - */ - if (send_prep_stmt(stmt, lex->result->field_count(fields)) || - lex->result->send_fields(fields, 0) || - thd->protocol->flush()) - goto err_prep; - } + /* + We can use lex->result as it should've been prepared in + unit->prepare call above. + */ + if (send_prep_stmt(stmt, lex->result->field_count(fields)) || + lex->result->send_fields(fields, Protocol::SEND_EOF) || + thd->protocol->flush()) + goto error; + DBUG_RETURN(2); } - result= 0; // ok - -err_prep: - unit->cleanup(); -err: - DBUG_RETURN(result); + DBUG_RETURN(0); +error: + DBUG_RETURN(1); } /* - Validate and prepare for execution DO statement expressions + Validate and prepare for execution DO statement expressions. SYNOPSIS mysql_test_do_fields() - stmt prepared statemen handler - tables list of tables queries - values list of expressions + stmt prepared statement + tables list of tables used in this query + values list of expressions RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error, error message is set in THD */ -static int mysql_test_do_fields(Prepared_statement *stmt, - TABLE_LIST *tables, - List<Item> *values) +static bool mysql_test_do_fields(Prepared_statement *stmt, + TABLE_LIST *tables, + List<Item> *values) { - DBUG_ENTER("mysql_test_do_fields"); THD *thd= stmt->thd; - int res= 0; - if (tables && (res= check_table_access(thd, SELECT_ACL, tables, 0))) - DBUG_RETURN(res); - if (tables && (res= open_and_lock_tables(thd, tables))) - { - DBUG_RETURN(res); - } - res= setup_fields(thd, 0, 0, *values, 0, 0, 0); - stmt->lex->unit.cleanup(); - if (res) - DBUG_RETURN(-1); - DBUG_RETURN(0); + DBUG_ENTER("mysql_test_do_fields"); + if (tables && check_table_access(thd, SELECT_ACL, tables, 0)) + DBUG_RETURN(TRUE); + + 
if (open_and_lock_tables(thd, tables)) + DBUG_RETURN(TRUE); + DBUG_RETURN(setup_fields(thd, 0, *values, 0, 0, 0)); } @@ -1170,42 +1351,36 @@ static int mysql_test_do_fields(Prepared_statement *stmt, SYNOPSIS mysql_test_set_fields() - stmt prepared statemen handler - tables list of tables queries - values list of expressions + stmt prepared statement + tables list of tables used in this query + values list of expressions RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error, error message is set in THD */ -static int mysql_test_set_fields(Prepared_statement *stmt, - TABLE_LIST *tables, - List<set_var_base> *var_list) + +static bool mysql_test_set_fields(Prepared_statement *stmt, + TABLE_LIST *tables, + List<set_var_base> *var_list) { DBUG_ENTER("mysql_test_set_fields"); List_iterator_fast<set_var_base> it(*var_list); THD *thd= stmt->thd; set_var_base *var; - int res= 0; - - if (tables && (res= check_table_access(thd, SELECT_ACL, tables, 0))) - DBUG_RETURN(res); - if (tables && (res= open_and_lock_tables(thd, tables))) + if (tables && check_table_access(thd, SELECT_ACL, tables, 0) || + open_and_lock_tables(thd, tables)) goto error; + while ((var= it++)) { if (var->light_check(thd)) - { - stmt->lex->unit.cleanup(); - res= -1; goto error; - } } + DBUG_RETURN(FALSE); error: - stmt->lex->unit.cleanup(); - DBUG_RETURN(res); + DBUG_RETURN(TRUE); } @@ -1213,36 +1388,76 @@ error: Check internal SELECT of the prepared command SYNOPSIS - select_like_statement_test() - stmt - prepared table handler - tables - global list of tables + select_like_stmt_test() + stmt prepared statement + specific_prepare function of command specific prepare + setup_tables_done_option options to be passed to LEX::unit.prepare() + + NOTE + This function won't directly open tables used in select. 
They should + be opened either by calling function (and in this case you probably + should use select_like_stmt_test_with_open_n_lock()) or by + "specific_prepare" call (like this happens in case of multi-update). RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error, error message is set in THD */ -static int select_like_statement_test(Prepared_statement *stmt, - TABLE_LIST *tables) + +static bool select_like_stmt_test(Prepared_statement *stmt, + bool (*specific_prepare)(THD *thd), + ulong setup_tables_done_option) { - DBUG_ENTER("select_like_statement_test"); + DBUG_ENTER("select_like_stmt_test"); THD *thd= stmt->thd; LEX *lex= stmt->lex; - int res= 0; - if (tables && (res= open_and_lock_tables(thd, tables))) - goto end; + lex->select_lex.context.resolve_in_select_list= TRUE; + + if (specific_prepare && (*specific_prepare)(thd)) + DBUG_RETURN(TRUE); thd->used_tables= 0; // Updated by setup_fields - // JOIN::prepare calls - if (lex->unit.prepare(thd, 0, 0, "")) - { - res= thd->net.report_error ? -1 : 1; - } -end: - lex->unit.cleanup(); - DBUG_RETURN(res); + /* Calls JOIN::prepare */ + DBUG_RETURN(lex->unit.prepare(thd, 0, setup_tables_done_option)); +} + +/* + Check internal SELECT of the prepared command (with opening and + locking of used tables). 
+ + SYNOPSIS + select_like_stmt_test_with_open_n_lock() + stmt prepared statement + tables list of tables to be opened and locked + before calling specific_prepare function + specific_prepare function of command specific prepare + setup_tables_done_option options to be passed to LEX::unit.prepare() + + RETURN VALUE + FALSE success + TRUE error +*/ + +static bool +select_like_stmt_test_with_open_n_lock(Prepared_statement *stmt, + TABLE_LIST *tables, + bool (*specific_prepare)(THD *thd), + ulong setup_tables_done_option) +{ + DBUG_ENTER("select_like_stmt_test_with_open_n_lock"); + + /* + We should not call LEX::unit.cleanup() after this open_and_lock_tables() + call because we don't allow prepared EXPLAIN yet so derived tables will + clean up after themself. + */ + if (open_and_lock_tables(stmt->thd, tables)) + DBUG_RETURN(TRUE); + + DBUG_RETURN(select_like_stmt_test(stmt, specific_prepare, + setup_tables_done_option)); } @@ -1251,168 +1466,235 @@ end: SYNOPSIS mysql_test_create_table() - stmt prepared statemen handler - tables list of tables queries + stmt prepared statement + tables list of tables used in this query RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error, error message is set in THD */ -static int mysql_test_create_table(Prepared_statement *stmt, - TABLE_LIST *tables) + +static bool mysql_test_create_table(Prepared_statement *stmt) { DBUG_ENTER("mysql_test_create_table"); THD *thd= stmt->thd; LEX *lex= stmt->lex; SELECT_LEX *select_lex= &lex->select_lex; - int res= 0; - + bool res= FALSE; /* Skip first table, which is the table we are creating */ - TABLE_LIST *create_table, *create_table_local; - tables= lex->unlink_first_table(tables, &create_table, - &create_table_local); + bool link_to_local; + TABLE_LIST *create_table= lex->unlink_first_table(&link_to_local); + TABLE_LIST *tables= lex->query_tables; - if (!(res= create_table_precheck(thd, tables, create_table)) && - 
select_lex->item_list.elements) + if (create_table_precheck(thd, tables, create_table)) + DBUG_RETURN(TRUE); + + if (select_lex->item_list.elements) { - select_lex->resolve_mode= SELECT_LEX::SELECT_MODE; - res= select_like_statement_test(stmt, tables); - select_lex->resolve_mode= SELECT_LEX::NOMATTER_MODE; + select_lex->context.resolve_in_select_list= TRUE; + res= select_like_stmt_test_with_open_n_lock(stmt, tables, 0, 0); } /* put tables back for PS rexecuting */ - tables= lex->link_first_table_back(tables, create_table, - create_table_local); + lex->link_first_table_back(create_table, link_to_local); DBUG_RETURN(res); } /* - Validate and prepare for execution multi update statement + Validate and prepare for execution a multi update statement. SYNOPSIS mysql_test_multiupdate() - stmt prepared statemen handler - tables list of tables queries + stmt prepared statement + tables list of tables used in this query + converted converted to multi-update from usual update RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error, error message is set in THD */ -static int mysql_test_multiupdate(Prepared_statement *stmt, - TABLE_LIST *tables) + +static bool mysql_test_multiupdate(Prepared_statement *stmt, + TABLE_LIST *tables, + bool converted) { - int res; - if ((res= multi_update_precheck(stmt->thd, tables))) - return res; - return select_like_statement_test(stmt, tables); + /* if we switched from normal update, rights are checked */ + if (!converted && multi_update_precheck(stmt->thd, tables)) + return TRUE; + + return select_like_stmt_test(stmt, &mysql_multi_update_prepare, + OPTION_SETUP_TABLES_DONE); } /* - Validate and prepare for execution multi delete statement + Validate and prepare for execution a multi delete statement. 
SYNOPSIS mysql_test_multidelete() - stmt prepared statemen handler - tables list of tables queries + stmt prepared statement + tables list of tables used in this query RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error, error message in THD is set. */ -static int mysql_test_multidelete(Prepared_statement *stmt, - TABLE_LIST *tables) + +static bool mysql_test_multidelete(Prepared_statement *stmt, + TABLE_LIST *tables) { - int res; stmt->thd->lex->current_select= &stmt->thd->lex->select_lex; if (add_item_to_list(stmt->thd, new Item_null())) - return -1; + { + my_error(ER_OUTOFMEMORY, MYF(0), 0); + goto error; + } - uint fake_counter; - if ((res= multi_delete_precheck(stmt->thd, tables, &fake_counter))) - return res; - return select_like_statement_test(stmt, tables); + if (multi_delete_precheck(stmt->thd, tables) || + select_like_stmt_test_with_open_n_lock(stmt, tables, + &mysql_multi_delete_prepare, + OPTION_SETUP_TABLES_DONE)) + goto error; + if (!tables->table) + { + my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0), + tables->view_db.str, tables->view_name.str); + goto error; + } + return FALSE; +error: + return TRUE; } /* - Validate and prepare for execution INSERT ... SELECT statement + Wrapper for mysql_insert_select_prepare, to make change of local tables + after open_and_lock_tables() call. + + SYNOPSIS + mysql_insert_select_prepare_tester() + thd thread handle + + NOTE + We need to remove the first local table after open_and_lock_tables, + because mysql_handle_derived uses local tables lists. 
+*/ + +static bool mysql_insert_select_prepare_tester(THD *thd) +{ + SELECT_LEX *first_select= &thd->lex->select_lex; + TABLE_LIST *second_table= ((TABLE_LIST*)first_select->table_list.first)-> + next_local; + + /* Skip first table, which is the table we are inserting in */ + first_select->table_list.first= (byte *) second_table; + thd->lex->select_lex.context.table_list= + thd->lex->select_lex.context.first_name_resolution_table= second_table; + + return mysql_insert_select_prepare(thd); +} + + +/* + Validate and prepare for execution INSERT ... SELECT statement. SYNOPSIS mysql_test_insert_select() - stmt prepared statemen handler - tables list of tables queries + stmt prepared statement + tables list of tables used in this query RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error, error message is set in THD */ -static int mysql_test_insert_select(Prepared_statement *stmt, - TABLE_LIST *tables) + +static bool mysql_test_insert_select(Prepared_statement *stmt, + TABLE_LIST *tables) { int res; LEX *lex= stmt->lex; - if ((res= insert_precheck(stmt->thd, tables))) - return res; - TABLE_LIST *first_local_table= - (TABLE_LIST *)lex->select_lex.table_list.first; - /* Skip first table, which is the table we are inserting in */ - lex->select_lex.table_list.first= (byte*) first_local_table->next; - /* - insert/replace from SELECT give its SELECT_LEX for SELECT, - and item_list belong to SELECT - */ - lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; - res= select_like_statement_test(stmt, tables); - /* revert changes*/ + TABLE_LIST *first_local_table; + + if (tables->table) + { + // don't allocate insert_values + tables->table->insert_values=(byte *)1; + } + + if (insert_precheck(stmt->thd, tables)) + return 1; + + /* store it, because mysql_insert_select_prepare_tester change it */ + first_local_table= (TABLE_LIST *)lex->select_lex.table_list.first; + DBUG_ASSERT(first_local_table != 0); + + res= + 
select_like_stmt_test_with_open_n_lock(stmt, tables, + &mysql_insert_select_prepare_tester, + OPTION_SETUP_TABLES_DONE); + /* revert changes made by mysql_insert_select_prepare_tester */ lex->select_lex.table_list.first= (byte*) first_local_table; - lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; return res; } /* - Send the prepare query results back to client + Perform semantic analysis of the parsed tree and send a response packet + to the client. + SYNOPSIS - send_prepare_results() - stmt prepared statement + check_prepared_statement() + stmt prepared statement + + DESCRIPTION + This function + - opens all tables and checks access rights + - validates semantics of statement columns and SQL functions + by calling fix_fields. + RETURN VALUE - 0 success - 1 error, sent to client + FALSE success, statement metadata is sent to client + TRUE error, error message is set in THD (but not sent) */ -static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) -{ + +static bool check_prepared_statement(Prepared_statement *stmt, + bool text_protocol) +{ THD *thd= stmt->thd; LEX *lex= stmt->lex; SELECT_LEX *select_lex= &lex->select_lex; - TABLE_LIST *tables=(TABLE_LIST*) select_lex->table_list.first; + TABLE_LIST *tables; enum enum_sql_command sql_command= lex->sql_command; int res= 0; - DBUG_ENTER("send_prepare_results"); - DBUG_PRINT("enter",("command: %d, param_count: %ld", + DBUG_ENTER("check_prepared_statement"); + DBUG_PRINT("enter",("command: %d, param_count: %u", sql_command, stmt->param_count)); - if ((&lex->select_lex != lex->all_selects_list || - lex->time_zone_tables_used) && - lex->unit.create_total_list(thd, lex, &tables)) - DBUG_RETURN(1); + lex->first_lists_tables_same(); + tables= lex->query_tables; + + /* set context for commands which do not use setup_tables */ + lex->select_lex.context.resolve_in_table_list_only(select_lex-> + get_table_list()); switch (sql_command) { case SQLCOM_REPLACE: case SQLCOM_INSERT: res= 
mysql_test_insert(stmt, tables, lex->field_list, - lex->many_values, - select_lex->item_list, lex->value_list, - lex->duplicates); + lex->many_values, + lex->update_list, lex->value_list, + lex->duplicates); break; case SQLCOM_UPDATE: res= mysql_test_update(stmt, tables); + /* mysql_test_update returns 2 if we need to switch to multi-update */ + if (res != 2) + break; + + case SQLCOM_UPDATE_MULTI: + res= mysql_test_multiupdate(stmt, tables, res == 2); break; case SQLCOM_DELETE: @@ -1420,15 +1702,17 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) break; case SQLCOM_SELECT: - if ((res= mysql_test_select(stmt, tables, text_protocol))) - goto error; - /* Statement and field info has already been sent */ - DBUG_RETURN(0); - + res= mysql_test_select(stmt, tables, text_protocol); + if (res == 2) + { + /* Statement and field info has already been sent */ + DBUG_RETURN(FALSE); + } + break; case SQLCOM_CREATE_TABLE: - res= mysql_test_create_table(stmt, tables); + res= mysql_test_create_table(stmt); break; - + case SQLCOM_DO: res= mysql_test_do_fields(stmt, tables, lex->insert_list); break; @@ -1440,10 +1724,6 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) case SQLCOM_DELETE_MULTI: res= mysql_test_multidelete(stmt, tables); break; - - case SQLCOM_UPDATE_MULTI: - res= mysql_test_multiupdate(stmt, tables); - break; case SQLCOM_INSERT_SELECT: case SQLCOM_REPLACE_SELECT: @@ -1468,23 +1748,30 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) case SQLCOM_SHOW_GRANTS: case SQLCOM_DROP_TABLE: case SQLCOM_RENAME_TABLE: + case SQLCOM_ALTER_TABLE: + case SQLCOM_COMMIT: + case SQLCOM_CREATE_INDEX: + case SQLCOM_DROP_INDEX: + case SQLCOM_ROLLBACK: + case SQLCOM_TRUNCATE: + case SQLCOM_CALL: + case SQLCOM_CREATE_VIEW: + case SQLCOM_DROP_VIEW: + case SQLCOM_REPAIR: + case SQLCOM_ANALYZE: + case SQLCOM_OPTIMIZE: break; default: - /* - All other is not supported yet - */ - res= -1; - 
my_error(ER_UNSUPPORTED_PS, MYF(0)); + /* All other statements are not supported yet. */ + my_message(ER_UNSUPPORTED_PS, ER(ER_UNSUPPORTED_PS), MYF(0)); goto error; } if (res == 0) - DBUG_RETURN(text_protocol? 0 : (send_prep_stmt(stmt, 0) || - thd->protocol->flush())); + DBUG_RETURN(text_protocol? FALSE : (send_prep_stmt(stmt, 0) || + thd->protocol->flush())); error: - if (res < 0) - send_error(thd, thd->killed ? ER_SERVER_SHUTDOWN : 0); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } /* @@ -1496,15 +1783,13 @@ error: static bool init_param_array(Prepared_statement *stmt) { LEX *lex= stmt->lex; - THD *thd= stmt->thd; if ((stmt->param_count= lex->param_list.elements)) { if (stmt->param_count > (uint) UINT_MAX16) { /* Error code to be defined in 5.0 */ - send_error(thd, ER_UNKNOWN_ERROR, - "Prepared statement contains too many placeholders."); - return 1; + my_message(ER_PS_MANY_PARAM, ER(ER_PS_MANY_PARAM), MYF(0)); + return TRUE; } Item_param **to; List_iterator<Item_param> param_iterator(lex->param_list); @@ -1513,10 +1798,7 @@ static bool init_param_array(Prepared_statement *stmt) alloc_root(stmt->thd->mem_root, sizeof(Item_param*) * stmt->param_count); if (!stmt->param_array) - { - send_error(thd, ER_OUT_OF_RESOURCES); - return 1; - } + return TRUE; for (to= stmt->param_array; to < stmt->param_array + stmt->param_count; ++to) @@ -1524,241 +1806,378 @@ static bool init_param_array(Prepared_statement *stmt) *to= param_iterator++; } } - return 0; + return FALSE; } + /* - Given a query string with parameter markers, create a Prepared Statement - from it and send PS info back to the client. - + COM_STMT_PREPARE handler. + SYNOPSIS mysql_stmt_prepare() - packet query to be prepared - packet_length query string length, including ignored trailing NULL or - quote char. - name NULL or statement name. For unnamed statements binary PS - protocol is used, for named statements text protocol is - used. 
- RETURN - 0 OK, statement prepared successfully - other Error - + packet query to be prepared + packet_length query string length, including ignored + trailing NULL or quote char. + + DESCRIPTION + Given a query string with parameter markers, create a prepared + statement from it and send PS info back to the client. + NOTES - This function parses the query and sends the total number of parameters - and resultset metadata information back to client (if any), without - executing the query i.e. without any log/disk writes. This allows the - queries to be re-executed without re-parsing during execute. + This function parses the query and sends the total number of parameters + and resultset metadata information back to client (if any), without + executing the query i.e. without any log/disk writes. This allows the + queries to be re-executed without re-parsing during execute. If parameter markers are found in the query, then store the information - using Item_param along with maintaining a list in lex->param_array, so - that a fast and direct retrieval can be made without going through all + using Item_param along with maintaining a list in lex->param_array, so + that a fast and direct retrieval can be made without going through all field items. - + + RETURN VALUE + none: in case of success a new statement id and metadata is sent + to the client, otherwise an error message is set in THD. */ -int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, - LEX_STRING *name) +void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length) { - LEX *lex; - Prepared_statement *stmt= new Prepared_statement(thd); - int error; + Prepared_statement *stmt; + bool error; DBUG_ENTER("mysql_stmt_prepare"); DBUG_PRINT("prep_query", ("%s", packet)); - /* - If this is an SQLCOM_PREPARE, we also increase Com_prepare_sql. - However, it seems handy if com_stmt_prepare is increased always, - no matter what kind of prepare is processed. 
- */ - statistic_increment(com_stmt_prepare, &LOCK_status); - - if (stmt == 0) - { - send_error(thd, ER_OUT_OF_RESOURCES); - DBUG_RETURN(1); - } + /* First of all clear possible warnings from the previous command */ + mysql_reset_thd_for_next_command(thd); - if (name) - { - stmt->name.length= name->length; - if (!(stmt->name.str= memdup_root(stmt->mem_root, (char*)name->str, - name->length))) - { - delete stmt; - send_error(thd, ER_OUT_OF_RESOURCES); - DBUG_RETURN(1); - } - } + if (! (stmt= new Prepared_statement(thd, &thd->protocol_prep))) + DBUG_VOID_RETURN; /* out of memory: error is set in Sql_alloc */ if (thd->stmt_map.insert(thd, stmt)) { /* - The error is sent in the insert. The statement itself + The error is set in the insert. The statement itself will be also deleted there (this is how the hash works). */ - DBUG_RETURN(1); - } - - thd->set_n_backup_statement(stmt, &thd->stmt_backup); - thd->set_n_backup_item_arena(stmt, &thd->stmt_backup); - - if (alloc_query(thd, packet, packet_length)) - { - thd->restore_backup_statement(stmt, &thd->stmt_backup); - thd->restore_backup_item_arena(stmt, &thd->stmt_backup); - /* Statement map deletes statement on erase */ - thd->stmt_map.erase(stmt); - send_error(thd, ER_OUT_OF_RESOURCES); - DBUG_RETURN(1); + DBUG_VOID_RETURN; } - mysql_log.write(thd, thd->command, "[%lu] %s", stmt->id, packet); - - thd->current_arena= stmt; - mysql_init_query(thd, (uchar *) thd->query, thd->query_length); /* Reset warnings from previous command */ - mysql_reset_errors(thd); - lex= thd->lex; - lex->safe_to_cache_query= 0; + mysql_reset_errors(thd, 0); + sp_cache_flush_obsolete(&thd->sp_proc_cache); + sp_cache_flush_obsolete(&thd->sp_func_cache); - error= yyparse((void *)thd) || thd->is_fatal_error || - thd->net.report_error || init_param_array(stmt); - /* - While doing context analysis of the query (in send_prepare_results) we - allocate a lot of additional memory: for open tables, JOINs, derived - tables, etc. 
Let's save a snapshot of current parse tree to the - statement and restore original THD. In cases when some tree - transformation can be reused on execute, we set again thd->mem_root from - stmt->mem_root (see setup_wild for one place where we do that). - */ - thd->restore_backup_item_arena(stmt, &thd->stmt_backup); + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(),QUERY_PRIOR); - if (!error) - error= send_prepare_results(stmt, test(name)); + error= stmt->prepare(packet, packet_length); - /* restore to WAIT_PRIOR: QUERY_PRIOR is set inside alloc_query */ if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(),WAIT_PRIOR); - lex_end(lex); - thd->restore_backup_statement(stmt, &thd->stmt_backup); - cleanup_items(stmt->free_list); - close_thread_tables(thd); - free_items(thd->free_list); - thd->rollback_item_tree_changes(); - thd->free_list= 0; - thd->current_arena= thd; if (error) { /* Statement map deletes statement on erase */ thd->stmt_map.erase(stmt); - stmt= NULL; - if (thd->net.report_error) - send_error(thd); - /* otherwise the error is sent inside yyparse/send_prepare_results */ } else { - stmt->setup_set_params(); - SELECT_LEX *sl= stmt->lex->all_selects_list; - for (; sl; sl= sl->next_select_in_list()) + const char *format= "[%lu] %.*b"; + mysql_log.write(thd, COM_STMT_PREPARE, format, stmt->id, + stmt->query_length, stmt->query); + + } + /* check_prepared_statemnt sends the metadata packet in case of success */ + DBUG_VOID_RETURN; +} + +/* + SYNOPSIS + get_dynamic_sql_string() + lex in main lex + query_len out length of the SQL statement (is set only + in case of success) + + DESCRIPTION + Get an SQL statement text from a user variable or from plain + text. If the statement is plain text, just assign the + pointers, otherwise allocate memory in thd->mem_root and copy + the contents of the variable, possibly with character + set conversion. 
+ + RETURN VALUE + non-zero success, 0 in case of error (out of memory) +*/ + +static const char *get_dynamic_sql_string(LEX *lex, uint *query_len) +{ + THD *thd= lex->thd; + char *query_str= 0; + + if (lex->prepared_stmt_code_is_varref) + { + /* This is PREPARE stmt FROM or EXECUTE IMMEDIATE @var. */ + String str; + CHARSET_INFO *to_cs= thd->variables.collation_connection; + bool needs_conversion; + user_var_entry *entry; + String *var_value= &str; + uint32 unused, len; + /* + Convert @var contents to string in connection character set. Although + it is known that int/real/NULL value cannot be a valid query we still + convert it for error messages to be uniform. + */ + if ((entry= + (user_var_entry*)hash_search(&thd->user_vars, + (byte*)lex->prepared_stmt_code.str, + lex->prepared_stmt_code.length)) + && entry->value) { + my_bool is_var_null; + var_value= entry->val_str(&is_var_null, &str, NOT_FIXED_DEC); /* - Save WHERE, HAVING clause pointers, because they may be changed - during query optimisation. + NULL value of variable checked early as entry->value so here + we can't get NULL in normal conditions */ - sl->prep_where= sl->where; - sl->prep_having= sl->having; + DBUG_ASSERT(!is_var_null); + if (!var_value) + goto end; + } + else + { /* - Switch off a temporary flag that prevents evaluation of - subqueries in statement prepare. + variable absent or equal to NULL, so we need to set variable to + something reasonable to get a readable error message during parsing */ - sl->uncacheable&= ~UNCACHEABLE_PREPARE; + str.set(STRING_WITH_LEN("NULL"), &my_charset_latin1); } - stmt->state= Item_arena::PREPARED; - } - DBUG_RETURN(!stmt); + needs_conversion= String::needs_conversion(var_value->length(), + var_value->charset(), to_cs, + &unused); + + len= (needs_conversion ? 
var_value->length() * to_cs->mbmaxlen : + var_value->length()); + if (!(query_str= alloc_root(thd->mem_root, len+1))) + goto end; + + if (needs_conversion) + { + uint dummy_errors; + len= copy_and_convert(query_str, len, to_cs, var_value->ptr(), + var_value->length(), var_value->charset(), + &dummy_errors); + } + else + memcpy(query_str, var_value->ptr(), var_value->length()); + query_str[len]= '\0'; // Safety (mostly for debug) + *query_len= len; + } + else + { + query_str= lex->prepared_stmt_code.str; + *query_len= lex->prepared_stmt_code.length; + } +end: + return query_str; } -/* Reinit statement before execution */ -static void reset_stmt_for_execute(Prepared_statement *stmt) +/* Init PS/SP specific parse tree members. */ + +static void init_stmt_after_parse(LEX *lex) { - THD *thd= stmt->thd; - LEX *lex= stmt->lex; SELECT_LEX *sl= lex->all_selects_list; - + /* + Switch off a temporary flag that prevents evaluation of + subqueries in statement prepare. + */ for (; sl; sl= sl->next_select_in_list()) + sl->uncacheable&= ~UNCACHEABLE_PREPARE; +} + +/* + SQLCOM_PREPARE implementation. + + SYNOPSIS + mysql_sql_stmt_prepare() + thd thread handle + + DESCRIPTION + Prepare an SQL prepared statement. This is called from + mysql_execute_command and should therefore behave like an + ordinary query (e.g. should not reset any global THD data). 
+ + RETURN VALUE + none: in case of success, OK packet is sent to the client, + otherwise an error message is set in THD +*/ + +void mysql_sql_stmt_prepare(THD *thd) +{ + LEX *lex= thd->lex; + LEX_STRING *name= &lex->prepared_stmt_name; + Prepared_statement *stmt; + const char *query; + uint query_len; + DBUG_ENTER("mysql_sql_stmt_prepare"); + DBUG_ASSERT(thd->protocol == &thd->protocol_simple); + LINT_INIT(query_len); + + if ((stmt= (Prepared_statement*) thd->stmt_map.find_by_name(name))) { - /* remove option which was put by mysql_explain_union() */ - sl->options&= ~SELECT_DESCRIBE; /* - Copy WHERE, HAVING clause pointers to avoid damaging they by optimisation + If there is a statement with the same name, remove it. It is ok to + remove old and fail to insert a new one at the same time. */ - if (sl->prep_where) - { - sl->where= sl->prep_where->copy_andor_structure(thd); - sl->where->cleanup(); - } - if (sl->prep_having) - { - sl->having= sl->prep_having->copy_andor_structure(thd); - sl->having->cleanup(); - } - DBUG_ASSERT(sl->join == 0); - ORDER *order; - /* Fix GROUP list */ - for (order= (ORDER *)sl->group_list.first; order; order= order->next) - order->item= &order->item_ptr; - /* Fix ORDER list */ - for (order= (ORDER *)sl->order_list.first; order; order= order->next) - order->item= &order->item_ptr; + if (stmt->deallocate()) + DBUG_VOID_RETURN; + } - /* - TODO: When the new table structure is ready, then have a status bit - to indicate the table is altered, and re-do the setup_* - and open the tables back. - */ - for (TABLE_LIST *tables= (TABLE_LIST*) sl->table_list.first; - tables; - tables= tables->next) + if (! (query= get_dynamic_sql_string(lex, &query_len)) || + ! 
(stmt= new Prepared_statement(thd, &thd->protocol_simple))) + { + DBUG_VOID_RETURN; /* out of memory */ + } + + /* Set the name first, insert should know that this statement has a name */ + if (stmt->set_name(name)) + { + delete stmt; + DBUG_VOID_RETURN; + } + + if (thd->stmt_map.insert(thd, stmt)) + { + /* The statement is deleted and an error is set if insert fails */ + DBUG_VOID_RETURN; + } + + if (stmt->prepare(query, query_len+1)) + { + /* Statement map deletes the statement on erase */ + thd->stmt_map.erase(stmt); + } + else + send_ok(thd, 0L, 0L, "Statement prepared"); + + DBUG_VOID_RETURN; +} + +/* Reinit prepared statement/stored procedure before execution */ + +void reinit_stmt_before_use(THD *thd, LEX *lex) +{ + SELECT_LEX *sl= lex->all_selects_list; + DBUG_ENTER("reinit_stmt_before_use"); + + /* + We have to update "thd" pointer in LEX, all its units and in LEX::result, + since statements which belong to trigger body are associated with TABLE + object and because of this can be used in different threads. 
+ */ + lex->thd= thd; + + if (lex->empty_field_list_on_rset) + { + lex->empty_field_list_on_rset= 0; + lex->field_list.empty(); + } + for (; sl; sl= sl->next_select_in_list()) + { + if (!sl->first_execution) { - tables->reinit_before_use(thd); - } + /* remove option which was put by mysql_explain_union() */ + sl->options&= ~SELECT_DESCRIBE; + + /* see unique_table() */ + sl->exclude_from_table_unique_test= FALSE; + /* + Copy WHERE, HAVING clause pointers to avoid damaging them + by optimisation + */ + if (sl->prep_where) + { + sl->where= sl->prep_where->copy_andor_structure(thd); + sl->where->cleanup(); + } + if (sl->prep_having) + { + sl->having= sl->prep_having->copy_andor_structure(thd); + sl->having->cleanup(); + } + DBUG_ASSERT(sl->join == 0); + ORDER *order; + /* Fix GROUP list */ + for (order= (ORDER *)sl->group_list.first; order; order= order->next) + order->item= &order->item_ptr; + /* Fix ORDER list */ + for (order= (ORDER *)sl->order_list.first; order; order= order->next) + order->item= &order->item_ptr; + } { SELECT_LEX_UNIT *unit= sl->master_unit(); unit->unclean(); unit->types.empty(); /* for derived tables & PS (which can't be reset by Item_subquery) */ unit->reinit_exec_mechanism(); + unit->set_thd(thd); } } + + /* + TODO: When the new table structure is ready, then have a status bit + to indicate the table is altered, and re-do the setup_* + and open the tables back. + */ + /* + NOTE: We should reset whole table list here including all tables added + by prelocking algorithm (it is not a problem for substatements since + they have their own table list). + */ + for (TABLE_LIST *tables= lex->query_tables; + tables; + tables= tables->next_global) + { + tables->reinit_before_use(thd); + } /* Cleanup of the special case of DELETE t1, t2 FROM t1, t2, t3 ... (multi-delete). We do a full clean up, although at the moment all we need to clean in the tables of MULTI-DELETE list is 'table' member. 
*/ - for (TABLE_LIST *tables= (TABLE_LIST*) lex->auxilliary_table_list.first; + for (TABLE_LIST *tables= (TABLE_LIST*) lex->auxiliary_table_list.first; tables; - tables= tables->next) + tables= tables->next_global) { tables->reinit_before_use(thd); } lex->current_select= &lex->select_lex; + + /* restore original list used in INSERT ... SELECT */ + if (lex->leaf_tables_insert) + lex->select_lex.leaf_tables= lex->leaf_tables_insert; + if (lex->result) + { lex->result->cleanup(); + lex->result->set_thd(thd); + } + lex->allow_sum_func= 0; + lex->in_sum_func= NULL; + DBUG_VOID_RETURN; } -/* - Clears parameters from data left from previous execution or long data - +/* + Clears parameters from data left from previous execution or long data + SYNOPSIS reset_stmt_params() - stmt - prepared statement for which parameters should be reset + stmt prepared statement for which parameters should + be reset */ static void reset_stmt_params(Prepared_statement *stmt) @@ -1771,49 +2190,53 @@ static void reset_stmt_params(Prepared_statement *stmt) /* - Executes previously prepared query. - If there is any parameters, then replace markers with the data supplied - from client, and then execute the query. + COM_STMT_EXECUTE handler: execute a previously prepared statement. + SYNOPSIS mysql_stmt_execute() - thd Current thread - packet Query string - packet_length Query string length, including terminator character. + thd current thread + packet parameter types and data, if any + packet_length packet length, including the terminator character. + + DESCRIPTION + If there are any parameters, then replace parameter markers with the + data supplied from the client, and then execute the statement. + This function uses binary protocol to send a possible result set + to the client. + + RETURN VALUE + none: in case of success OK packet or a result set is sent to the + client, otherwise an error message is set in THD. 
*/ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length) { uchar *packet= (uchar*)packet_arg; // GCC 4.0.1 workaround ulong stmt_id= uint4korr(packet); - /* - Query text for binary log, or empty string if the query is not put into - binary log. - */ + ulong flags= (ulong) packet[4]; + /* Query text for binary, general or slow log, if any of them is open */ String expanded_query; #ifndef EMBEDDED_LIBRARY uchar *packet_end= packet + packet_length - 1; #endif Prepared_statement *stmt; + bool error; DBUG_ENTER("mysql_stmt_execute"); packet+= 9; /* stmt_id + 5 bytes of flags */ - statistic_increment(com_stmt_execute, &LOCK_status); - if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_execute", - SEND_ERROR))) + /* First of all clear possible warnings from the previous command */ + mysql_reset_thd_for_next_command(thd); + + if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_execute"))) DBUG_VOID_RETURN; - DBUG_PRINT("exec_query:", ("%s", stmt->query)); + DBUG_PRINT("exec_query", ("%s", stmt->query)); + DBUG_PRINT("info",("stmt: %p", stmt)); - /* Check if we got an error when sending long data */ - if (stmt->state == Item_arena::ERROR) - { - send_error(thd, stmt->last_errno, stmt->last_error); - DBUG_VOID_RETURN; - } + sp_cache_flush_obsolete(&thd->sp_proc_cache); + sp_cache_flush_obsolete(&thd->sp_func_cache); - DBUG_ASSERT(thd->free_list == NULL); - mysql_reset_thd_for_next_command(thd); #ifndef EMBEDDED_LIBRARY if (stmt->param_count) { @@ -1825,138 +2248,164 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length) } #else /* - In embedded library we re-install conversion routines each time - we set params, and also we don't need to parse packet. + In embedded library we re-install conversion routines each time + we set params, and also we don't need to parse packet. So we do it in one function. 
*/ if (stmt->param_count && stmt->set_params_data(stmt, &expanded_query)) goto set_params_data_err; #endif - thd->protocol= &thd->protocol_prep; // Switch to binary protocol - execute_stmt(thd, stmt, &expanded_query, TRUE); - thd->protocol= &thd->protocol_simple; // Use normal protocol + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(),QUERY_PRIOR); + + /* + If the free_list is not empty, we'll wrongly free some externally + allocated items when cleaning up after validation of the prepared + statement. + */ + DBUG_ASSERT(thd->free_list == NULL); + + error= stmt->execute(&expanded_query, + test(flags & (ulong) CURSOR_TYPE_READ_ONLY)); + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(), WAIT_PRIOR); + if (error == 0) + { + const char *format= "[%lu] %.*b"; + mysql_log.write(thd, COM_STMT_EXECUTE, format, stmt->id, + thd->query_length, thd->query); + } + DBUG_VOID_RETURN; set_params_data_err: - reset_stmt_params(stmt); my_error(ER_WRONG_ARGUMENTS, MYF(0), "mysql_stmt_execute"); - send_error(thd); + reset_stmt_params(stmt); DBUG_VOID_RETURN; } /* - Execute prepared statement using parameter values from - lex->prepared_stmt_params and send result to the client using text protocol. + SQLCOM_EXECUTE implementation. + + SYNOPSIS + mysql_sql_stmt_execute() + thd thread handle + + DESCRIPTION + Execute prepared statement using parameter values from + lex->prepared_stmt_params and send result to the client using + text protocol. This is called from mysql_execute_command and + therefore should behave like an ordinary query (e.g. not change + global THD data, such as warning count, server status, etc). + This function uses text protocol to send a possible result set. 
+ + RETURN + none: in case of success, OK (or result set) packet is sent to the + client, otherwise an error is set in THD */ -void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name) +void mysql_sql_stmt_execute(THD *thd) { + LEX *lex= thd->lex; Prepared_statement *stmt; - /* - Query text for binary log, or empty string if the query is not put into - binary log. - */ + LEX_STRING *name= &lex->prepared_stmt_name; + /* Query text for binary, general or slow log, if any of them is open */ String expanded_query; DBUG_ENTER("mysql_sql_stmt_execute"); + DBUG_PRINT("info", ("EXECUTE: %.*s\n", name->length, name->str)); - /* See comment for statistics_increment in mysql_stmt_prepare */ - statistic_increment(com_stmt_execute, &LOCK_status); - if (!(stmt= (Prepared_statement*)thd->stmt_map.find_by_name(stmt_name))) + if (!(stmt= (Prepared_statement*) thd->stmt_map.find_by_name(name))) { - my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), stmt_name->length, - stmt_name->str, "EXECUTE"); - send_error(thd); + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), + name->length, name->str, "EXECUTE"); DBUG_VOID_RETURN; } - if (stmt->param_count != thd->lex->prepared_stmt_params.elements) + if (stmt->param_count != lex->prepared_stmt_params.elements) { my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE"); - send_error(thd); DBUG_VOID_RETURN; } + DBUG_PRINT("info",("stmt: %p", stmt)); + + /* + If the free_list is not empty, we'll wrongly free some externally + allocated items when cleaning up after validation of the prepared + statement. 
+ */ DBUG_ASSERT(thd->free_list == NULL); - /* Must go before setting variables, as it clears thd->user_var_events */ - mysql_reset_thd_for_next_command(thd); - thd->set_n_backup_statement(stmt, &thd->stmt_backup); - if (stmt->set_params_from_vars(stmt, - thd->stmt_backup.lex->prepared_stmt_params, + + if (stmt->set_params_from_vars(stmt, lex->prepared_stmt_params, &expanded_query)) - { - my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE"); - send_error(thd); - } - thd->command= COM_EXECUTE; /* For nice messages in general log */ - execute_stmt(thd, stmt, &expanded_query, FALSE); + goto set_params_data_err; + + (void) stmt->execute(&expanded_query, FALSE); + + DBUG_VOID_RETURN; + +set_params_data_err: + my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE"); + reset_stmt_params(stmt); DBUG_VOID_RETURN; } /* - Execute prepared statement. + COM_STMT_FETCH handler: fetches requested amount of rows from cursor + SYNOPSIS - execute_stmt() - thd Current thread - stmt Statement to execute - expanded_query If binary log is enabled, query string with parameter - placeholders replaced with actual values. Otherwise empty - string. - NOTES - Caller must set parameter values and thd::protocol. - thd->free_list is assumed to be garbage. 
+ mysql_stmt_fetch() + thd Thread handle + packet Packet from client (with stmt_id & num_rows) + packet_length Length of packet */ -static void execute_stmt(THD *thd, Prepared_statement *stmt, - String *expanded_query, bool set_context) +void mysql_stmt_fetch(THD *thd, char *packet, uint packet_length) { - DBUG_ENTER("execute_stmt"); - if (set_context) - thd->set_n_backup_statement(stmt, &thd->stmt_backup); - reset_stmt_for_execute(stmt); + /* assume there is always place for 8-16 bytes */ + ulong stmt_id= uint4korr(packet); + ulong num_rows= uint4korr(packet+4); + Prepared_statement *stmt; + Statement stmt_backup; + Server_side_cursor *cursor; + DBUG_ENTER("mysql_stmt_fetch"); - if (expanded_query->length() && - alloc_query(thd, (char *)expanded_query->ptr(), - expanded_query->length()+1)) + /* First of all clear possible warnings from the previous command */ + mysql_reset_thd_for_next_command(thd); + statistic_increment(thd->status_var.com_stmt_fetch, &LOCK_status); + if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_fetch"))) + DBUG_VOID_RETURN; + + cursor= stmt->cursor; + if (!cursor) { - my_error(ER_OUTOFMEMORY, 0, expanded_query->length()); + my_error(ER_STMT_HAS_NO_OPEN_CURSOR, MYF(0), stmt_id); DBUG_VOID_RETURN; } - mysql_log.write(thd, thd->command, "[%lu] %s", stmt->id, thd->query); - /* - At first execution of prepared statement we will perform logical - transformations of the query tree (i.e. negations elimination). - This should be done permanently on the parse tree of this statement. 
- */ - thd->current_arena= stmt; + + thd->stmt_arena= stmt; + thd->set_n_backup_statement(stmt, &stmt_backup); if (!(specialflag & SPECIAL_NO_PRIOR)) - my_pthread_setprio(pthread_self(),QUERY_PRIOR); - mysql_execute_command(thd); - thd->lex->unit.cleanup(); + my_pthread_setprio(pthread_self(), QUERY_PRIOR); + + cursor->fetch(num_rows); + if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(), WAIT_PRIOR); - /* - 'start_time' is set in dispatch_command, but THD::query will - be freed when we return from this function. So let's log the slow - query here. - */ - log_slow_statement(thd); - /* Prevent from second logging in the end of dispatch_command */ - thd->enable_slow_log= FALSE; - - /* Free Items that were created during this execution of the PS. */ - free_items(thd->free_list); - thd->free_list= 0; - if (stmt->state == Item_arena::PREPARED) - stmt->state= Item_arena::EXECUTED; - thd->current_arena= thd; - cleanup_items(stmt->free_list); - thd->rollback_item_tree_changes(); - reset_stmt_params(stmt); - close_thread_tables(thd); // to close derived tables - thd->set_statement(&thd->stmt_backup); + + if (!cursor->is_open()) + { + stmt->close_cursor(); + thd->cursor= 0; + reset_stmt_params(stmt); + } + + thd->restore_backup_statement(stmt, &stmt_backup); + thd->stmt_arena= thd; + DBUG_VOID_RETURN; } @@ -1965,8 +2414,8 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt, Reset a prepared statement in case there was a recoverable error. SYNOPSIS mysql_stmt_reset() - thd Thread handle - packet Packet with stmt id + thd Thread handle + packet Packet with stmt id DESCRIPTION This function resets statement to the state it was right after prepare. @@ -1974,6 +2423,7 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt, - clear an error happened during mysql_stmt_send_long_data - cancel long data stream for all placeholders without having to call mysql_stmt_execute. 
+ - close an open cursor Sends 'OK' packet in case of success (statement was reset) or 'ERROR' packet (unrecoverable error/statement not found/etc). */ @@ -1983,70 +2433,102 @@ void mysql_stmt_reset(THD *thd, char *packet) /* There is always space for 4 bytes in buffer */ ulong stmt_id= uint4korr(packet); Prepared_statement *stmt; - DBUG_ENTER("mysql_stmt_reset"); - statistic_increment(com_stmt_reset, &LOCK_status); - if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_reset", - SEND_ERROR))) + /* First of all clear possible warnings from the previous command */ + mysql_reset_thd_for_next_command(thd); + + statistic_increment(thd->status_var.com_stmt_reset, &LOCK_status); + if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_reset"))) DBUG_VOID_RETURN; - stmt->state= Item_arena::PREPARED; + stmt->close_cursor(); - /* - Clear parameters from data which could be set by + /* + Clear parameters from data which could be set by mysql_stmt_send_long_data() call. */ reset_stmt_params(stmt); - mysql_reset_thd_for_next_command(thd); + stmt->state= Query_arena::PREPARED; + send_ok(thd); - + DBUG_VOID_RETURN; } /* Delete a prepared statement from memory. - Note: we don't send any reply to that command. + Note: we don't send any reply to this command. */ -void mysql_stmt_free(THD *thd, char *packet) +void mysql_stmt_close(THD *thd, char *packet) { /* There is always space for 4 bytes in packet buffer */ ulong stmt_id= uint4korr(packet); Prepared_statement *stmt; + DBUG_ENTER("mysql_stmt_close"); - DBUG_ENTER("mysql_stmt_free"); - - statistic_increment(com_stmt_close, &LOCK_status); - if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_close", - DONT_SEND_ERROR))) + if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_close"))) DBUG_VOID_RETURN; - /* Statement map deletes statement on erase */ - thd->stmt_map.erase(stmt); + /* + The only way currently a statement can be deallocated when it's + in use is from within Dynamic SQL. 
+ */ + DBUG_ASSERT(! (stmt->flags & (uint) Prepared_statement::IS_IN_USE)); + (void) stmt->deallocate(); + DBUG_VOID_RETURN; } /* - Long data in pieces from client + SQLCOM_DEALLOCATE implementation. + + DESCRIPTION + Close an SQL prepared statement. As this can be called from Dynamic + SQL, we should be careful to not close a statement that is currently + being executed. + + RETURN VALUE + none: OK packet is sent in case of success, otherwise an error + message is set in THD +*/ + +void mysql_sql_stmt_close(THD *thd) +{ + Prepared_statement* stmt; + LEX_STRING *name= &thd->lex->prepared_stmt_name; + DBUG_PRINT("info", ("DEALLOCATE PREPARE: %.*s\n", name->length, name->str)); + + if (! (stmt= (Prepared_statement*) thd->stmt_map.find_by_name(name))) + { + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), + name->length, name->str, "DEALLOCATE PREPARE"); + return; + } + + if (stmt->deallocate() == 0) + send_ok(thd); +} + +/* + Handle long data in pieces from client. SYNOPSIS mysql_stmt_get_longdata() - thd Thread handle - pos String to append - packet_length Length of string + thd Thread handle + packet String to append + packet_length Length of string (including end \0) DESCRIPTION - Get a part of a long data. - To make the protocol efficient, we are not sending any return packages - here. - If something goes wrong, then we will send the error on 'execute' - - We assume that the client takes care of checking that all parts are sent - to the server. (No checking that we get a 'end of column' in the server) + Get a part of a long data. To make the protocol efficient, we are + not sending any return packets here. If something goes wrong, then + we will send the error on 'execute' We assume that the client takes + care of checking that all parts are sent to the server. (No checking + that we get a 'end of column' in the server is performed). 
*/ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) @@ -2055,14 +2537,15 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) uint param_number; Prepared_statement *stmt; Item_param *param; +#ifndef EMBEDDED_LIBRARY char *packet_end= packet + packet_length - 1; - +#endif DBUG_ENTER("mysql_stmt_get_longdata"); - statistic_increment(com_stmt_send_long_data, &LOCK_status); + statistic_increment(thd->status_var.com_stmt_send_long_data, &LOCK_status); #ifndef EMBEDDED_LIBRARY /* Minimal size of long data packet is 6 bytes */ - if ((ulong) (packet_end - packet) < MYSQL_LONG_DATA_HEADER) + if (packet_length <= MYSQL_LONG_DATA_HEADER) { my_error(ER_WRONG_ARGUMENTS, MYF(0), "mysql_stmt_send_long_data"); DBUG_VOID_RETURN; @@ -2072,8 +2555,8 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) stmt_id= uint4korr(packet); packet+= 4; - if (!(stmt=find_prepared_statement(thd, stmt_id, "mysql_stmt_send_long_data", - DONT_SEND_ERROR))) + if (!(stmt=find_prepared_statement(thd, stmt_id, + "mysql_stmt_send_long_data"))) DBUG_VOID_RETURN; param_number= uint2korr(packet); @@ -2082,7 +2565,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) if (param_number >= stmt->param_count) { /* Error will be sent in execute call */ - stmt->state= Item_arena::ERROR; + stmt->state= Query_arena::ERROR; stmt->last_errno= ER_WRONG_ARGUMENTS; sprintf(stmt->last_error, ER(ER_WRONG_ARGUMENTS), "mysql_stmt_send_long_data"); @@ -2098,7 +2581,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) if (param->set_longdata(thd->extra_data, thd->extra_length)) #endif { - stmt->state= Item_arena::ERROR; + stmt->state= Query_arena::ERROR; stmt->last_errno= ER_OUTOFMEMORY; sprintf(stmt->last_error, ER(ER_OUTOFMEMORY), 0); } @@ -2106,13 +2589,72 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) } -Prepared_statement::Prepared_statement(THD *thd_arg) - 
:Statement(thd_arg), +/*************************************************************************** + Select_fetch_protocol_prep +****************************************************************************/ + +Select_fetch_protocol_prep::Select_fetch_protocol_prep(THD *thd_arg) + :protocol(thd_arg) +{} + +bool Select_fetch_protocol_prep::send_fields(List<Item> &list, uint flags) +{ + bool rc; + Protocol *save_protocol= thd->protocol; + + /* + Protocol::send_fields caches the information about column types: + this information is later used to send data. Therefore, the same + dedicated Protocol object must be used for all operations with + a cursor. + */ + thd->protocol= &protocol; + rc= select_send::send_fields(list, flags); + thd->protocol= save_protocol; + + return rc; +} + +bool Select_fetch_protocol_prep::send_eof() +{ + Protocol *save_protocol= thd->protocol; + + thd->protocol= &protocol; + ::send_eof(thd); + thd->protocol= save_protocol; + return FALSE; +} + + +bool +Select_fetch_protocol_prep::send_data(List<Item> &fields) +{ + Protocol *save_protocol= thd->protocol; + bool rc; + + thd->protocol= &protocol; + rc= select_send::send_data(fields); + thd->protocol= save_protocol; + return rc; +} + +/*************************************************************************** + Prepared_statement +****************************************************************************/ + +Prepared_statement::Prepared_statement(THD *thd_arg, Protocol *protocol_arg) + :Statement(&main_lex, &main_mem_root, + INITIALIZED, ++thd_arg->statement_id_counter), thd(thd_arg), + result(thd_arg), + protocol(protocol_arg), param_array(0), param_count(0), - last_errno(0) + last_errno(0), + flags((uint) IS_IN_USE) { + init_alloc_root(&main_mem_root, thd_arg->variables.query_alloc_block_size, + thd_arg->variables.query_prealloc_size); *last_error= '\0'; } @@ -2142,15 +2684,331 @@ void Prepared_statement::setup_set_params() } +/* + DESCRIPTION + Destroy this prepared statement, cleaning up all 
used memory + and resources. This is called from ::deallocate() to + handle COM_STMT_CLOSE and DEALLOCATE PREPARE or when + THD ends and all prepared statements are freed. +*/ + Prepared_statement::~Prepared_statement() { - free_items(free_list); + DBUG_ENTER("Prepared_statement::~Prepared_statement"); + DBUG_PRINT("enter",("stmt: %p cursor: %p", this, cursor)); + delete cursor; + /* + We have to call free on the items even if cleanup is called as some items, + like Item_param, don't free everything until free_items() + */ + free_items(); delete lex->result; + free_root(&main_mem_root, MYF(0)); + DBUG_VOID_RETURN; } -Item_arena::Type Prepared_statement::type() const +Query_arena::Type Prepared_statement::type() const { return PREPARED_STATEMENT; } + +void Prepared_statement::cleanup_stmt() +{ + DBUG_ENTER("Prepared_statement::cleanup_stmt"); + DBUG_PRINT("enter",("stmt: %p", this)); + + DBUG_ASSERT(lex->sphead == 0); + /* The order is important */ + lex->unit.cleanup(); + cleanup_items(free_list); + thd->cleanup_after_query(); + close_thread_tables(thd); + thd->rollback_item_tree_changes(); + + DBUG_VOID_RETURN; +} + + +bool Prepared_statement::set_name(LEX_STRING *name_arg) +{ + name.length= name_arg->length; + name.str= memdup_root(mem_root, (char*) name_arg->str, name_arg->length); + return name.str == 0; +} + +/************************************************************************** + Common parts of mysql_[sql]_stmt_prepare, mysql_[sql]_stmt_execute. + Essentially, these functions do all the magic of preparing/executing + a statement, leaving network communication, input data handling and + global THD state management to the caller. +***************************************************************************/ + +/* + Parse statement text, validate the statement, and prepare it for execution. 
+ + SYNOPSIS + Prepared_statement::prepare() + packet statement text + packet_len + + DESCRIPTION + You should not change global THD state in this function, if at all + possible: it may be called from any context, e.g. when executing + a COM_* command, and SQLCOM_* command, or a stored procedure. + + NOTES + Precondition. + ------------- + The caller must ensure that thd->change_list and thd->free_list + is empty: this function will not back them up but will free + in the end of its execution. + + Postcondition. + -------------- + thd->mem_root contains unused memory allocated during validation. +*/ + +bool Prepared_statement::prepare(const char *packet, uint packet_len) +{ + bool error; + Statement stmt_backup; + Query_arena *old_stmt_arena; + DBUG_ENTER("Prepared_statement::prepare"); + /* + If this is an SQLCOM_PREPARE, we also increase Com_prepare_sql. + However, it seems handy if com_stmt_prepare is increased always, + no matter what kind of prepare is processed. + */ + statistic_increment(thd->status_var.com_stmt_prepare, &LOCK_status); + + /* + alloc_query() uses thd->memroot && thd->query, so we should call + both of backup_statement() and backup_query_arena() here. + */ + thd->set_n_backup_statement(this, &stmt_backup); + thd->set_n_backup_active_arena(this, &stmt_backup); + + if (alloc_query(thd, packet, packet_len)) + { + thd->restore_backup_statement(this, &stmt_backup); + thd->restore_active_arena(this, &stmt_backup); + DBUG_RETURN(TRUE); + } + + old_stmt_arena= thd->stmt_arena; + thd->stmt_arena= this; + lex_start(thd, (uchar*) thd->query, thd->query_length); + lex->safe_to_cache_query= FALSE; + lex->stmt_prepare_mode= TRUE; + + error= MYSQLparse((void *)thd) || thd->is_fatal_error || + thd->net.report_error || init_param_array(this); + + /* + While doing context analysis of the query (in check_prepared_statement) + we allocate a lot of additional memory: for open tables, JOINs, derived + tables, etc. 
Let's save a snapshot of current parse tree to the + statement and restore original THD. In cases when some tree + transformation can be reused on execute, we set again thd->mem_root from + stmt->mem_root (see setup_wild for one place where we do that). + */ + thd->restore_active_arena(this, &stmt_backup); + + /* + If called from a stored procedure, ensure that we won't rollback + external changes when cleaning up after validation. + */ + DBUG_ASSERT(thd->change_list.is_empty()); + + /* + The only case where we should have items in the thd->free_list is + after stmt->set_params_from_vars(), which may in some cases create + Item_null objects. + */ + + if (error == 0) + error= check_prepared_statement(this, name.str != 0); + + /* + Currently CREATE PROCEDURE/TRIGGER/EVENT are prohibited in prepared + statements: ensure we have no memory leak here if by someone tries + to PREPARE stmt FROM "CREATE PROCEDURE ..." + */ + DBUG_ASSERT(lex->sphead == NULL || error != 0); + if (lex->sphead) + { + delete lex->sphead; + lex->sphead= NULL; + } + + lex_end(lex); + cleanup_stmt(); + thd->restore_backup_statement(this, &stmt_backup); + thd->stmt_arena= old_stmt_arena; + + if (error == 0) + { + setup_set_params(); + init_stmt_after_parse(lex); + state= Query_arena::PREPARED; + flags&= ~ (uint) IS_IN_USE; + } + DBUG_RETURN(error); +} + +/* + Execute a prepared statement. + + SYNOPSIS + Prepared_statement::execute() + expanded_query A query for binlogging which has all parameter + markers ('?') replaced with their actual values. + open_cursor True if an attempt to open a cursor should be made. + Currenlty used only in the binary protocol. + + DESCRIPTION + You should not change global THD state in this function, if at all + possible: it may be called from any context, e.g. when executing + a COM_* command, and SQLCOM_* command, or a stored procedure. + + NOTES + Preconditions, postconditions. + ------------------------------ + See the comment for Prepared_statement::prepare(). 
+ + RETURN + FALSE ok + TRUE Error +*/ + +bool Prepared_statement::execute(String *expanded_query, bool open_cursor) +{ + Statement stmt_backup; + Query_arena *old_stmt_arena; + bool error= TRUE; + + statistic_increment(thd->status_var.com_stmt_execute, &LOCK_status); + + /* Check if we got an error when sending long data */ + if (state == Query_arena::ERROR) + { + my_message(last_errno, last_error, MYF(0)); + return TRUE; + } + if (flags & (uint) IS_IN_USE) + { + my_error(ER_PS_NO_RECURSION, MYF(0)); + return TRUE; + } + + /* + For SHOW VARIABLES lex->result is NULL, as it's a non-SELECT + command. For such queries we don't return an error and don't + open a cursor -- the client library will recognize this case and + materialize the result set. + For SELECT statements lex->result is created in + check_prepared_statement. lex->result->simple_select() is FALSE + in INSERT ... SELECT and similar commands. + */ + + if (open_cursor && lex->result && lex->result->check_simple_select()) + { + DBUG_PRINT("info",("Cursor asked for not SELECT stmt")); + return TRUE; + } + + /* In case the command has a call to SP which re-uses this statement name */ + flags|= IS_IN_USE; + + close_cursor(); + + /* + If the free_list is not empty, we'll wrongly free some externally + allocated items when cleaning up after execution of this statement. + */ + DBUG_ASSERT(thd->change_list.is_empty()); + + /* + The only case where we should have items in the thd->free_list is + after stmt->set_params_from_vars(), which may in some cases create + Item_null objects. + */ + + thd->set_n_backup_statement(this, &stmt_backup); + if (expanded_query->length() && + alloc_query(thd, (char*) expanded_query->ptr(), + expanded_query->length()+1)) + { + my_error(ER_OUTOFMEMORY, 0, expanded_query->length()); + goto error; + } + /* + Expanded query is needed for slow logging, so we want thd->query + to point at it even after we restore from backup. This is ok, as + expanded query was allocated in thd->mem_root. 
+ */ + stmt_backup.query= thd->query; + stmt_backup.query_length= thd->query_length; + + /* + Save orig_sql_command as we use it to disable slow logging for SHOW + commands (see log_slow_statement()). + */ + stmt_backup.lex->orig_sql_command= thd->lex->orig_sql_command; + + /* + At first execution of prepared statement we may perform logical + transformations of the query tree. Such changes should be performed + on the parse tree of current prepared statement and new items should + be allocated in its memory root. Set the appropriate pointer in THD + to the arena of the statement. + */ + old_stmt_arena= thd->stmt_arena; + thd->stmt_arena= this; + reinit_stmt_before_use(thd, lex); + + thd->protocol= protocol; /* activate stmt protocol */ + error= (open_cursor ? + mysql_open_cursor(thd, (uint) ALWAYS_MATERIALIZED_CURSOR, + &result, &cursor) : + mysql_execute_command(thd)); + thd->protocol= &thd->protocol_simple; /* use normal protocol */ + + /* Assert that if an error, no cursor is open */ + DBUG_ASSERT(! (error && cursor)); + + if (! 
cursor) + { + cleanup_stmt(); + reset_stmt_params(this); + } + + thd->set_statement(&stmt_backup); + thd->stmt_arena= old_stmt_arena; + + if (state == Query_arena::PREPARED) + state= Query_arena::EXECUTED; + +error: + flags&= ~ (uint) IS_IN_USE; + return error; +} + + +/* Common part of DEALLOCATE PREPARE and mysql_stmt_close */ + +bool Prepared_statement::deallocate() +{ + /* We account deallocate in the same manner as mysql_stmt_close */ + statistic_increment(thd->status_var.com_stmt_close, &LOCK_status); + if (flags & (uint) IS_IN_USE) + { + my_error(ER_PS_NO_RECURSION, MYF(0)); + return TRUE; + } + /* Statement map calls delete stmt on erase */ + thd->stmt_map.erase(this); + return FALSE; +} diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index 388034e0f1a..f6766aec285 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -19,6 +18,7 @@ */ #include "mysql_priv.h" +#include "sql_trigger.h" static TABLE_LIST *rename_tables(THD *thd, TABLE_LIST *table_list, @@ -44,7 +44,8 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) if (thd->locked_tables || thd->active_transaction()) { - my_error(ER_LOCK_OR_ACTIVE_TRANSACTION,MYF(0)); + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); DBUG_RETURN(1); } @@ -64,10 +65,10 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) table_list= reverse_table_list(table_list); /* Find the last renamed table */ - for (table=table_list ; - table->next != ren_table ; - table=table->next->next) ; - table=table->next->next; // Skip error table + for (table= table_list; + table->next_local != ren_table ; + table= table->next_local->next_local) ; + table= table->next_local->next_local; // Skip error table /* Revert to old names */ rename_tables(thd, table, 1); @@ -80,7 +81,6 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) /* Lets hope this doesn't fail as the result will be messy */ if (!error) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -90,7 +90,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) send_ok(thd); } - unlock_table_names(thd,table_list); + unlock_table_names(thd, table_list, (TABLE_LIST*) 0); err: pthread_mutex_unlock(&LOCK_open); @@ -115,8 +115,8 @@ static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list) while (table_list) { - TABLE_LIST *next= table_list->next; - table_list->next= prev; + TABLE_LIST *next= table_list->next_local; + table_list->next_local= prev; prev= table_list; table_list= next; } @@ -125,58 +125,147 @@ static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list) /* + Rename a single table or a view + + SYNPOSIS + do_rename() + thd Thread 
handle + ren_table A table/view to be renamed + new_db The database to which the table to be moved to + new_table_name The new table/view name + new_table_alias The new table/view alias + skip_error Whether to skip error + + DESCRIPTION + Rename a single table or a view. + + RETURN + false Ok + true rename failed +*/ + +bool +do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name, + char *new_table_alias, bool skip_error) +{ + int rc= 1; + char name[FN_REFLEN]; + const char *new_alias, *old_alias; + frm_type_enum frm_type; + db_type table_type; + + DBUG_ENTER("do_rename"); + + if (lower_case_table_names == 2) + { + old_alias= ren_table->alias; + new_alias= new_table_alias; + } + else + { + old_alias= ren_table->table_name; + new_alias= new_table_name; + } + sprintf(name,"%s/%s/%s%s",mysql_data_home, + new_db, new_alias, reg_ext); + unpack_filename(name, name); + if (!access(name,F_OK)) + { + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias); + DBUG_RETURN(1); // This can't be skipped + } + sprintf(name,"%s/%s/%s%s",mysql_data_home, + ren_table->db, old_alias, + reg_ext); + unpack_filename(name, name); + + frm_type= mysql_frm_type(thd, name, &table_type); + switch (frm_type) + { + case FRMTYPE_TABLE: + { + if (table_type == DB_TYPE_UNKNOWN) + my_error(ER_FILE_NOT_FOUND, MYF(0), name, my_errno); + else + { + if (!(rc= mysql_rename_table(table_type, ren_table->db, old_alias, + new_db, new_alias))) + { + if ((rc= Table_triggers_list::change_table_name(thd, ren_table->db, + old_alias, + new_db, + new_alias))) + { + /* + We've succeeded in renaming table's .frm and in updating + corresponding handler data, but have failed to update table's + triggers appropriately. So let us revert operations on .frm + and handler's data and report about failure to rename table. 
+ */ + (void) mysql_rename_table(table_type, new_db, new_alias, + ren_table->db, old_alias); + } + } + } + break; + } + case FRMTYPE_VIEW: + /* change of schema is not allowed */ + if (strcmp(ren_table->db, new_db)) + my_error(ER_FORBID_SCHEMA_CHANGE, MYF(0), ren_table->db, + new_db); + else + rc= mysql_rename_view(thd, new_alias, ren_table); + break; + default: + DBUG_ASSERT(0); // should never happen + case FRMTYPE_ERROR: + my_error(ER_FILE_NOT_FOUND, MYF(0), name, my_errno); + break; + } + if (rc && !skip_error) + DBUG_RETURN(1); + + DBUG_RETURN(0); + +} +/* Rename all tables in list; Return pointer to wrong entry if something goes wrong. Note that the table_list may be empty! */ +/* + Rename tables/views in the list + + SYNPOSIS + rename_tables() + thd Thread handle + table_list List of tables to rename + skip_error Whether to skip errors + + DESCRIPTION + Take a table/view name from and odd list element and rename it to a + the name taken from list element+1. Note that the table_list may be + empty. 
+ + RETURN + false Ok + true rename failed +*/ + static TABLE_LIST * rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error) { - TABLE_LIST *ren_table,*new_table; + TABLE_LIST *ren_table, *new_table; + DBUG_ENTER("rename_tables"); - for (ren_table=table_list ; ren_table ; ren_table=new_table->next) + for (ren_table= table_list; ren_table; ren_table= new_table->next_local) { - db_type table_type; - char name[FN_REFLEN]; - const char *new_alias, *old_alias; - - new_table=ren_table->next; - if (lower_case_table_names == 2) - { - old_alias= ren_table->alias; - new_alias= new_table->alias; - } - else - { - old_alias= ren_table->real_name; - new_alias= new_table->real_name; - } - sprintf(name,"%s/%s/%s%s",mysql_data_home, - new_table->db, new_alias, reg_ext); - unpack_filename(name, name); - if (!access(name,F_OK)) - { - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),new_alias); - DBUG_RETURN(ren_table); // This can't be skipped - } - sprintf(name,"%s/%s/%s%s",mysql_data_home, - ren_table->db, old_alias, - reg_ext); - unpack_filename(name, name); - if ((table_type=get_table_type(name)) == DB_TYPE_UNKNOWN) - { - my_error(ER_FILE_NOT_FOUND, MYF(0), name, my_errno); - if (!skip_error) - DBUG_RETURN(ren_table); - } - else if (mysql_rename_table(table_type, - ren_table->db, old_alias, - new_table->db, new_alias)) - { - if (!skip_error) - DBUG_RETURN(ren_table); - } + new_table= ren_table->next_local; + if (do_rename(thd, ren_table, new_table->db, new_table->table_name, + new_table->alias, skip_error)) + DBUG_RETURN(ren_table); } DBUG_RETURN(0); } diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index f83313a8fd8..b451c612398 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB & Sasha +/* Copyright (C) 2000-2006 MySQL AB & Sasha This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software 
Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -23,45 +22,50 @@ int max_binlog_dump_events = 0; // unlimited my_bool opt_sporadic_binlog_dump_fail = 0; +#ifndef DBUG_OFF static int binlog_dump_count = 0; +#endif -int check_binlog_magic(IO_CACHE* log, const char** errmsg) -{ - char magic[4]; - DBUG_ASSERT(my_b_tell(log) == 0); - - if (my_b_read(log, (byte*) magic, sizeof(magic))) - { - *errmsg = "I/O error reading the header from the binary log"; - sql_print_error("%s, errno=%d, io cache code=%d", *errmsg, my_errno, - log->error); - return 1; - } - if (memcmp(magic, BINLOG_MAGIC, sizeof(magic))) - { - *errmsg = "Binlog has bad magic number; It's not a binary log file that can be used by this version of MySQL"; - return 1; - } - return 0; -} +/* + fake_rotate_event() builds a fake (=which does not exist physically in any + binlog) Rotate event, which contains the name of the binlog we are going to + send to the slave (because the slave may not know it if it just asked for + MASTER_LOG_FILE='', MASTER_LOG_POS=4). + < 4.0.14, fake_rotate_event() was called only if the requested pos was 4. + After this version we always call it, so that a 3.23.58 slave can rely on + it to detect if the master is 4.0 (and stop) (the _fake_ Rotate event has + zeros in the good positions which, by chance, make it possible for the 3.23 + slave to detect that this event is unexpected) (this is luck which happens + because the master and slave disagree on the size of the header of + Log_event). + + Relying on the event length of the Rotate event instead of these + well-placed zeros was not possible as Rotate events have a variable-length + part. 
+*/ static int fake_rotate_event(NET* net, String* packet, char* log_file_name, - ulonglong position, const char**errmsg) + ulonglong position, const char** errmsg) { - char header[LOG_EVENT_HEADER_LEN], buf[ROTATE_HEADER_LEN]; - memset(header, 0, 4); // when does not matter + DBUG_ENTER("fake_rotate_event"); + char header[LOG_EVENT_HEADER_LEN], buf[ROTATE_HEADER_LEN+100]; + /* + 'when' (the timestamp) is set to 0 so that slave could distinguish between + real and fake Rotate events (if necessary) + */ + memset(header, 0, 4); header[EVENT_TYPE_OFFSET] = ROTATE_EVENT; char* p = log_file_name+dirname_length(log_file_name); uint ident_len = (uint) strlen(p); - ulong event_len = ident_len + ROTATE_EVENT_OVERHEAD; + ulong event_len = ident_len + LOG_EVENT_HEADER_LEN + ROTATE_HEADER_LEN; int4store(header + SERVER_ID_OFFSET, server_id); int4store(header + EVENT_LEN_OFFSET, event_len); int2store(header + FLAGS_OFFSET, 0); - + // TODO: check what problems this may cause and fix them int4store(header + LOG_POS_OFFSET, 0); - + packet->append(header, sizeof(header)); int8store(buf+R_POS_OFFSET,position); packet->append(buf, ROTATE_HEADER_LEN); @@ -69,9 +73,9 @@ static int fake_rotate_event(NET* net, String* packet, char* log_file_name, if (my_net_write(net, (char*)packet->ptr(), packet->length())) { *errmsg = "failed on my_net_write()"; - return -1; + DBUG_RETURN(-1); } - return 0; + DBUG_RETURN(0); } static int send_file(THD *thd) @@ -146,42 +150,6 @@ static int send_file(THD *thd) } -File open_binlog(IO_CACHE *log, const char *log_file_name, - const char **errmsg) -{ - File file; - DBUG_ENTER("open_binlog"); - - if ((file = my_open(log_file_name, O_RDONLY | O_BINARY | O_SHARE, - MYF(MY_WME))) < 0) - { - sql_print_error("Failed to open log (\ -file '%s', errno %d)", log_file_name, my_errno); - *errmsg = "Could not open log file"; // This will not be sent - goto err; - } - if (init_io_cache(log, file, IO_SIZE*2, READ_CACHE, 0, 0, - MYF(MY_WME | MY_DONT_CHECK_FILESIZE))) - { - 
sql_print_error("Failed to create a cache on log (\ -file '%s')", log_file_name); - *errmsg = "Could not open log file"; // This will not be sent - goto err; - } - if (check_binlog_magic(log,errmsg)) - goto err; - DBUG_RETURN(file); - -err: - if (file >= 0) - { - my_close(file,MYF(0)); - end_io_cache(log); - } - DBUG_RETURN(-1); -} - - /* Adjust the position pointer in the binary log file for all running slaves @@ -258,41 +226,39 @@ bool log_in_use(const char* log_name) return result; } -int purge_error_message(THD* thd, int res) +bool purge_error_message(THD* thd, int res) { - const char *errmsg= 0; + uint errmsg= 0; switch (res) { case 0: break; - case LOG_INFO_EOF: errmsg= "Target log not found in binlog index"; break; - case LOG_INFO_IO: errmsg= "I/O error reading log index file"; break; - case LOG_INFO_INVALID: - errmsg= "Server configuration does not permit binlog purge"; break; - case LOG_INFO_SEEK: errmsg= "Failed on fseek()"; break; - case LOG_INFO_MEM: errmsg= "Out of memory"; break; - case LOG_INFO_FATAL: errmsg= "Fatal error during purge"; break; - case LOG_INFO_IN_USE: errmsg= "A purgeable log is in use, will not purge"; - break; - default: errmsg= "Unknown error during purge"; break; + case LOG_INFO_EOF: errmsg= ER_UNKNOWN_TARGET_BINLOG; break; + case LOG_INFO_IO: errmsg= ER_IO_ERR_LOG_INDEX_READ; break; + case LOG_INFO_INVALID:errmsg= ER_BINLOG_PURGE_PROHIBITED; break; + case LOG_INFO_SEEK: errmsg= ER_FSEEK_FAIL; break; + case LOG_INFO_MEM: errmsg= ER_OUT_OF_RESOURCES; break; + case LOG_INFO_FATAL: errmsg= ER_BINLOG_PURGE_FATAL_ERR; break; + case LOG_INFO_IN_USE: errmsg= ER_LOG_IN_USE; break; + default: errmsg= ER_LOG_PURGE_UNKNOWN_ERR; break; } if (errmsg) { - send_error(thd, 0, errmsg); - return 1; + my_message(errmsg, ER(errmsg), MYF(0)); + return TRUE; } send_ok(thd); - return 0; + return FALSE; } -int purge_master_logs(THD* thd, const char* to_log) +bool purge_master_logs(THD* thd, const char* to_log) { char search_file_name[FN_REFLEN]; if 
(!mysql_bin_log.is_open()) { send_ok(thd); - return 0; + return FALSE; } mysql_bin_log.make_log_name(search_file_name, to_log); @@ -302,7 +268,7 @@ int purge_master_logs(THD* thd, const char* to_log) } -int purge_master_logs_before_date(THD* thd, time_t purge_time) +bool purge_master_logs_before_date(THD* thd, time_t purge_time) { if (!mysql_bin_log.is_open()) { @@ -313,6 +279,36 @@ int purge_master_logs_before_date(THD* thd, time_t purge_time) mysql_bin_log.purge_logs_before_date(purge_time)); } +int test_for_non_eof_log_read_errors(int error, const char **errmsg) +{ + if (error == LOG_READ_EOF) + return 0; + my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; + switch (error) { + case LOG_READ_BOGUS: + *errmsg = "bogus data in log event"; + break; + case LOG_READ_TOO_LARGE: + *errmsg = "log event entry exceeded max_allowed_packet; \ +Increase max_allowed_packet on master"; + break; + case LOG_READ_IO: + *errmsg = "I/O error reading log event"; + break; + case LOG_READ_MEM: + *errmsg = "memory allocation failed reading log event"; + break; + case LOG_READ_TRUNC: + *errmsg = "binlog truncated in the middle of event"; + break; + default: + *errmsg = "unknown error reading log event on the master"; + break; + } + return error; +} + + /* TODO: Clean up loop to only have one call to send_file() */ @@ -329,6 +325,8 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, int error; const char *errmsg = "Unknown error"; NET* net = &thd->net; + pthread_mutex_t *log_lock; + bool binlog_can_be_corrupted= FALSE; #ifndef DBUG_OFF int left_events = max_binlog_dump_events; #endif @@ -388,34 +386,47 @@ impossible position"; goto err; } - my_b_seek(&log, pos); // Seek will done on next read /* We need to start a packet with something other than 255 - to distiquish it from error + to distinguish it from error */ - packet->set("\0", 1, &my_charset_bin); + packet->set("\0", 1, &my_charset_bin); /* This is the start of a new packet */ /* - Before 4.0.14 we called 
fake_rotate_event below only if - (pos == BIN_LOG_HEADER_SIZE), because if this is false then the slave + Tell the client about the log name with a fake Rotate event; + this is needed even if we also send a Format_description_log_event + just after, because that event does not contain the binlog's name. + Note that as this Rotate event is sent before + Format_description_log_event, the slave cannot have any info to + understand this event's format, so the header len of + Rotate_log_event is FROZEN (so in 5.0 it will have a header shorter + than other events except FORMAT_DESCRIPTION_EVENT). + Before 4.0.14 we called fake_rotate_event below only if (pos == + BIN_LOG_HEADER_SIZE), because if this is false then the slave already knows the binlog's name. - Now we always call fake_rotate_event; if the slave already knew the log's - name (ex: CHANGE MASTER TO MASTER_LOG_FILE=...) this is useless but does - not harm much. It is nice for 3.23 (>=.58) slaves which test Rotate events - to see if the master is 4.0 (then they choose to stop because they can't - replicate 4.0); by always calling fake_rotate_event we are sure that - 3.23.58 and newer will detect the problem as soon as replication starts - (BUG#198). + Since, we always call fake_rotate_event; if the slave already knew + the log's name (ex: CHANGE MASTER TO MASTER_LOG_FILE=...) this is + useless but does not harm much. It is nice for 3.23 (>=.58) slaves + which test Rotate events to see if the master is 4.0 (then they + choose to stop because they can't replicate 4.0); by always calling + fake_rotate_event we are sure that 3.23.58 and newer will detect the + problem as soon as replication starts (BUG#198). Always calling fake_rotate_event makes sending of normal - (=from-binlog) Rotate events a priori unneeded, but it is not so simple: - the 2 Rotate events are not equivalent, the normal one is before the Stop - event, the fake one is after. 
If we don't send the normal one, then the - Stop event will be interpreted (by existing 4.0 slaves) as "the master - stopped", which is wrong. So for safety, given that we want minimum - modification of 4.0, we send the normal and fake Rotates. + (=from-binlog) Rotate events a priori unneeded, but it is not so + simple: the 2 Rotate events are not equivalent, the normal one is + before the Stop event, the fake one is after. If we don't send the + normal one, then the Stop event will be interpreted (by existing 4.0 + slaves) as "the master stopped", which is wrong. So for safety, + given that we want minimum modification of 4.0, we send the normal + and fake Rotates. */ if (fake_rotate_event(net, packet, log_file_name, pos, &errmsg)) { + /* + This error code is not perfect, as fake_rotate_event() does not + read anything from the binlog; if it fails it's because of an + error in my_net_write(), fortunately it will say so in errmsg. + */ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; goto err; } @@ -427,10 +438,82 @@ impossible position"; */ thd->variables.max_allowed_packet+= MAX_LOG_EVENT_HEADER; - while (!net->error && net->vio != 0 && !thd->killed) + /* + We can set log_lock now, it does not move (it's a member of + mysql_bin_log, and it's already inited, and it will be destroyed + only at shutdown). + */ + log_lock = mysql_bin_log.get_log_lock(); + if (pos > BIN_LOG_HEADER_SIZE) { - pthread_mutex_t *log_lock = mysql_bin_log.get_log_lock(); + /* + Try to find a Format_description_log_event at the beginning of + the binlog + */ + if (!(error = Log_event::read_log_event(&log, packet, log_lock))) + { + /* + The packet has offsets equal to the normal offsets in a binlog + event +1 (the first character is \0). 
+ */ + DBUG_PRINT("info", + ("Looked for a Format_description_log_event, found event type %d", + (*packet)[EVENT_TYPE_OFFSET+1])); + if ((*packet)[EVENT_TYPE_OFFSET+1] == FORMAT_DESCRIPTION_EVENT) + { + binlog_can_be_corrupted= test((*packet)[FLAGS_OFFSET+1] & + LOG_EVENT_BINLOG_IN_USE_F); + (*packet)[FLAGS_OFFSET+1] &= ~LOG_EVENT_BINLOG_IN_USE_F; + /* + mark that this event with "log_pos=0", so the slave + should not increment master's binlog position + (rli->group_master_log_pos) + */ + int4store((char*) packet->ptr()+LOG_POS_OFFSET+1, 0); + /* + if reconnect master sends FD event with `created' as 0 + to avoid destroying temp tables. + */ + int4store((char*) packet->ptr()+LOG_EVENT_MINIMAL_HEADER_LEN+ + ST_CREATED_OFFSET+1, (ulong) 0); + /* send it */ + if (my_net_write(net, (char*)packet->ptr(), packet->length())) + { + errmsg = "Failed on my_net_write()"; + my_errno= ER_UNKNOWN_ERROR; + goto err; + } + + /* + No need to save this event. We are only doing simple reads + (no real parsing of the events) so we don't need it. And so + we don't need the artificial Format_description_log_event of + 3.23&4.x. + */ + } + } + else + { + if (test_for_non_eof_log_read_errors(error, &errmsg)) + goto err; + /* + It's EOF, nothing to do, go on reading next events, the + Format_description_log_event will be found naturally if it is written. + */ + } + /* reset the packet as we wrote to it in any case */ + packet->set("\0", 1, &my_charset_bin); + } /* end of if (pos > BIN_LOG_HEADER_SIZE); */ + else + { + /* The Format_description_log_event event will be found naturally. 
*/ + } + /* seek to the requested position, to start the requested dump */ + my_b_seek(&log, pos); // Seek will done on next read + + while (!net->error && net->vio != 0 && !thd->killed) + { while (!(error = Log_event::read_log_event(&log, packet, log_lock))) { #ifndef DBUG_OFF @@ -442,12 +525,23 @@ impossible position"; goto err; } #endif - if (my_net_write(net, (char*)packet->ptr(), packet->length()) ) + + if ((*packet)[EVENT_TYPE_OFFSET+1] == FORMAT_DESCRIPTION_EVENT) + { + binlog_can_be_corrupted= test((*packet)[FLAGS_OFFSET+1] & + LOG_EVENT_BINLOG_IN_USE_F); + (*packet)[FLAGS_OFFSET+1] &= ~LOG_EVENT_BINLOG_IN_USE_F; + } + else if ((*packet)[EVENT_TYPE_OFFSET+1] == STOP_EVENT) + binlog_can_be_corrupted= FALSE; + + if (my_net_write(net, (char*)packet->ptr(), packet->length())) { errmsg = "Failed on my_net_write()"; my_errno= ER_UNKNOWN_ERROR; goto err; } + DBUG_PRINT("info", ("log event code %d", (*packet)[LOG_EVENT_OFFSET+1] )); if ((*packet)[LOG_EVENT_OFFSET+1] == LOAD_EVENT) @@ -461,39 +555,25 @@ impossible position"; } packet->set("\0", 1, &my_charset_bin); } + + /* + here we were reading binlog that was not closed properly (as a result + of a crash ?). treat any corruption as EOF + */ + if (binlog_can_be_corrupted && error != LOG_READ_MEM) + error=LOG_READ_EOF; /* TODO: now that we are logging the offset, check to make sure - the recorded offset and the actual match + the recorded offset and the actual match. + Guilhem 2003-06: this is not true if this master is a slave + <4.0.15 running with --log-slave-updates, because then log_pos may + be the offset in the-master-of-this-master's binlog. 
*/ - if (error != LOG_READ_EOF) - { - my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; - switch (error) { - case LOG_READ_BOGUS: - errmsg = "bogus data in log event"; - break; - case LOG_READ_TOO_LARGE: - errmsg = "log event entry exceeded max_allowed_packet; \ -Increase max_allowed_packet on master"; - break; - case LOG_READ_IO: - errmsg = "I/O error reading log event"; - break; - case LOG_READ_MEM: - errmsg = "memory allocation failed reading log event"; - break; - case LOG_READ_TRUNC: - errmsg = "binlog truncated in the middle of event"; - break; - default: - errmsg = "unknown error reading log event on the master"; - break; - } + if (test_for_non_eof_log_read_errors(error, &errmsg)) goto err; - } if (!(flags & BINLOG_DUMP_NON_BLOCK) && - mysql_bin_log.is_active(log_file_name)) + mysql_bin_log.is_active(log_file_name)) { /* Block until there is more data in the log @@ -529,9 +609,9 @@ Increase max_allowed_packet on master"; now, but we'll be quick and just read one record TODO: - Add an counter that is incremented for each time we update - the binary log. We can avoid the following read if the counter - has not been updated since last read. + Add an counter that is incremented for each time we update the + binary log. We can avoid the following read if the counter + has not been updated since last read. */ pthread_mutex_lock(log_lock); @@ -603,7 +683,8 @@ Increase max_allowed_packet on master"; else { bool loop_breaker = 0; - // need this to break out of the for loop from switch + /* need this to break out of the for loop from switch */ + thd->proc_info = "Finished reading one binlog; switching to next binlog"; switch (mysql_bin_log.find_next_log(&linfo, 1)) { case LOG_INFO_EOF: @@ -618,21 +699,28 @@ Increase max_allowed_packet on master"; } if (loop_breaker) - break; + break; end_io_cache(&log); (void) my_close(file, MYF(MY_WME)); /* - Even if the previous log contained a Rotate_log_event, we still fake - one. 
+ Call fake_rotate_event() in case the previous log (the one which + we have just finished reading) did not contain a Rotate event + (for example (I don't know any other example) the previous log + was the last one before the master was shutdown & restarted). + This way we tell the slave about the new log's name and + position. If the binlog is 5.0, the next event we are going to + read and send is Format_description_log_event. */ if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0 || - fake_rotate_event(net, packet, log_file_name, BIN_LOG_HEADER_SIZE, &errmsg)) + fake_rotate_event(net, packet, log_file_name, BIN_LOG_HEADER_SIZE, + &errmsg)) { my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; goto err; } + packet->length(0); packet->append('\0'); } @@ -664,7 +752,8 @@ err: pthread_mutex_unlock(&LOCK_thread_count); if (file >= 0) (void) my_close(file, MYF(MY_WME)); - send_error(thd, my_errno, errmsg); + + my_message(my_errno, errmsg, MYF(0)); DBUG_VOID_RETURN; } @@ -673,17 +762,17 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report) int slave_errno= 0; int thread_mask; DBUG_ENTER("start_slave"); - - if (check_access(thd, SUPER_ACL, any_db,0,0,0)) + + if (check_access(thd, SUPER_ACL, any_db,0,0,0,0)) DBUG_RETURN(1); lock_slave_threads(mi); // this allows us to cleanly read slave_running // Get a mask of _stopped_ threads init_thread_mask(&thread_mask,mi,1 /* inverse */); /* - Below we will start all stopped threads. - But if the user wants to start only one thread, do as if the other thread - was running (as we don't wan't to touch the other thread), so set the - bit to 0 for the other thread + Below we will start all stopped threads. 
But if the user wants to + start only one thread, do as if the other thread was running (as we + don't wan't to touch the other thread), so set the bit to 0 for the + other thread */ if (thd->lex->slave_thd_opt) thread_mask&= thd->lex->slave_thd_opt; @@ -694,10 +783,10 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report) slave_errno=ER_MASTER_INFO; else if (server_id_supplied && *mi->host) { - /* - If we will start SQL thread we will care about UNTIL options - If not and they are specified we will ignore them and warn user - about this fact. + /* + If we will start SQL thread we will care about UNTIL options If + not and they are specified we will ignore them and warn user + about this fact. */ if (thread_mask & SLAVE_SQL) { @@ -707,19 +796,19 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report) { mi->rli.until_condition= RELAY_LOG_INFO::UNTIL_MASTER_POS; mi->rli.until_log_pos= thd->lex->mi.pos; - /* - We don't check thd->lex->mi.log_file_name for NULL here + /* + We don't check thd->lex->mi.log_file_name for NULL here since it is checked in sql_yacc.yy */ strmake(mi->rli.until_log_name, thd->lex->mi.log_file_name, - sizeof(mi->rli.until_log_name)-1); - } + sizeof(mi->rli.until_log_name)-1); + } else if (thd->lex->mi.relay_log_pos) { mi->rli.until_condition= RELAY_LOG_INFO::UNTIL_RELAY_POS; mi->rli.until_log_pos= thd->lex->mi.relay_log_pos; strmake(mi->rli.until_log_name, thd->lex->mi.relay_log_name, - sizeof(mi->rli.until_log_name)-1); + sizeof(mi->rli.until_log_name)-1); } else clear_until_condition(&mi->rli); @@ -737,30 +826,30 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report) p_end points to the first invalid character. If it equals to p, no digits were found, error. If it contains '\0' it means conversion went ok. 
- */ + */ if (p_end==p || *p_end) slave_errno=ER_BAD_SLAVE_UNTIL_COND; } else slave_errno=ER_BAD_SLAVE_UNTIL_COND; - + /* mark the cached result of the UNTIL comparison as "undefined" */ - mi->rli.until_log_names_cmp_result= + mi->rli.until_log_names_cmp_result= RELAY_LOG_INFO::UNTIL_LOG_NAMES_CMP_UNKNOWN; /* Issuing warning then started without --skip-slave-start */ if (!opt_skip_slave_start) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_MISSING_SKIP_SLAVE, + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_MISSING_SKIP_SLAVE, ER(ER_MISSING_SKIP_SLAVE)); } - + pthread_mutex_unlock(&mi->rli.data_lock); } else if (thd->lex->mi.pos || thd->lex->mi.relay_log_pos) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_UNTIL_COND_IGNORED, - ER(ER_UNTIL_COND_IGNORED)); - - + ER(ER_UNTIL_COND_IGNORED)); + if (!slave_errno) slave_errno = start_slave_threads(0 /*no mutex */, 1 /* wait for start */, @@ -772,16 +861,18 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report) slave_errno = ER_BAD_SLAVE; } else - //no error if all threads are already started, only a warning + { + /* no error if all threads are already started, only a warning */ push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_RUNNING, ER(ER_SLAVE_WAS_RUNNING)); - + } + unlock_slave_threads(mi); - + if (slave_errno) { if (net_report) - send_error(thd, slave_errno); + my_message(slave_errno, ER(slave_errno), MYF(0)); DBUG_RETURN(1); } else if (net_report) @@ -793,12 +884,14 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report) int stop_slave(THD* thd, MASTER_INFO* mi, bool net_report ) { + DBUG_ENTER("stop_slave"); + int slave_errno; if (!thd) thd = current_thd; - if (check_access(thd, SUPER_ACL, any_db,0,0,0)) - return 1; + if (check_access(thd, SUPER_ACL, any_db,0,0,0,0)) + DBUG_RETURN(1); thd->proc_info = "Killing slave"; int thread_mask; lock_slave_threads(mi); @@ -831,13 +924,13 @@ int stop_slave(THD* thd, MASTER_INFO* mi, bool net_report ) if (slave_errno) { if 
(net_report) - send_error(thd, slave_errno); - return 1; + my_message(slave_errno, ER(slave_errno), MYF(0)); + DBUG_RETURN(1); } else if (net_report) send_ok(thd); - return 0; + DBUG_RETURN(0); } @@ -877,7 +970,7 @@ int reset_slave(THD *thd, MASTER_INFO* mi) 1 /* just reset */, &errmsg))) goto err; - + /* Clear master's log coordinates and reset host/user/etc to the values specified in mysqld's options (only for good display of SHOW SLAVE STATUS; @@ -886,13 +979,13 @@ int reset_slave(THD *thd, MASTER_INFO* mi) STATUS; before doing START SLAVE; */ init_master_info_with_options(mi); - /* + /* Reset errors (the idea is that we forget about the old master). */ clear_slave_error(&mi->rli); clear_until_condition(&mi->rli); - + // close master_info_file, relay_log_info_file, set mi->inited=rli->inited=0 end_master_info(mi); // and delete these two files @@ -912,7 +1005,7 @@ int reset_slave(THD *thd, MASTER_INFO* mi) err: unlock_slave_threads(mi); - if (error) + if (error) my_error(sql_errno, MYF(0), errmsg); DBUG_RETURN(error); } @@ -935,7 +1028,7 @@ err: slave_server_id the slave's server id */ - + void kill_zombie_dump_threads(uint32 slave_server_id) { @@ -960,13 +1053,13 @@ void kill_zombie_dump_threads(uint32 slave_server_id) it will be slow because it will iterate through the list again. We just to do kill the thread ourselves. 
*/ - tmp->awake(1/*prepare to die*/); + tmp->awake(THD::KILL_QUERY); pthread_mutex_unlock(&tmp->LOCK_delete); } } -int change_master(THD* thd, MASTER_INFO* mi) +bool change_master(THD* thd, MASTER_INFO* mi) { int thread_mask; const char* errmsg= 0; @@ -977,9 +1070,9 @@ int change_master(THD* thd, MASTER_INFO* mi) init_thread_mask(&thread_mask,mi,0 /*not inverse*/); if (thread_mask) // We refuse if any slave thread is running { - net_printf(thd,ER_SLAVE_MUST_STOP); + my_message(ER_SLAVE_MUST_STOP, ER(ER_SLAVE_MUST_STOP), MYF(0)); unlock_slave_threads(mi); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } thd->proc_info = "Changing master"; @@ -988,9 +1081,9 @@ int change_master(THD* thd, MASTER_INFO* mi) if (init_master_info(mi, master_info_file, relay_log_info_file, 0, thread_mask)) { - send_error(thd, ER_MASTER_INFO); + my_message(ER_MASTER_INFO, ER(ER_MASTER_INFO), MYF(0)); unlock_slave_threads(mi); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } /* @@ -1000,9 +1093,9 @@ int change_master(THD* thd, MASTER_INFO* mi) */ /* - If the user specified host or port without binlog or position, + If the user specified host or port without binlog or position, reset binlog's name to FIRST and position to 4. 
- */ + */ if ((lex_mi->host || lex_mi->port) && !lex_mi->log_file_name && !lex_mi->pos) { @@ -1017,7 +1110,7 @@ int change_master(THD* thd, MASTER_INFO* mi) { mi->master_log_pos= lex_mi->pos; } - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); if (lex_mi->host) strmake(mi->host, lex_mi->host, sizeof(mi->host)-1); @@ -1029,7 +1122,7 @@ int change_master(THD* thd, MASTER_INFO* mi) mi->port = lex_mi->port; if (lex_mi->connect_retry) mi->connect_retry = lex_mi->connect_retry; - + if (lex_mi->ssl != LEX_MASTER_INFO::SSL_UNCHANGED) mi->ssl= (lex_mi->ssl == LEX_MASTER_INFO::SSL_ENABLE); if (lex_mi->ssl_ca) @@ -1045,7 +1138,7 @@ int change_master(THD* thd, MASTER_INFO* mi) #ifndef HAVE_OPENSSL if (lex_mi->ssl || lex_mi->ssl_ca || lex_mi->ssl_capath || lex_mi->ssl_cert || lex_mi->ssl_cipher || lex_mi->ssl_key ) - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_IGNORED_SSL_PARAMS, ER(ER_SLAVE_IGNORED_SSL_PARAMS)); #endif @@ -1098,7 +1191,12 @@ int change_master(THD* thd, MASTER_INFO* mi) Relay log's IO_CACHE may not be inited, if rli->inited==0 (server was never a slave before). 
*/ - flush_master_info(mi, 0); + if (flush_master_info(mi, 0)) + { + my_error(ER_RELAY_LOG_INIT, MYF(0), "Failed to flush master info file"); + unlock_slave_threads(mi); + DBUG_RETURN(TRUE); + } if (need_relay_log_purge) { relay_log_purge= 1; @@ -1107,9 +1205,9 @@ int change_master(THD* thd, MASTER_INFO* mi) 0 /* not only reset, but also reinit */, &errmsg)) { - net_printf(thd, 0, "Failed purging old relay logs: %s",errmsg); + my_error(ER_RELAY_LOG_FAIL, MYF(0), errmsg); unlock_slave_threads(mi); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } } else @@ -1121,15 +1219,15 @@ int change_master(THD* thd, MASTER_INFO* mi) mi->rli.group_relay_log_name, mi->rli.group_relay_log_pos, 0 /*no data lock*/, - &msg)) + &msg, 0)) { - net_printf(thd,0,"Failed initializing relay log position: %s",msg); + my_error(ER_RELAY_LOG_INIT, MYF(0), msg); unlock_slave_threads(mi); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } } mi->rli.group_master_log_pos = mi->master_log_pos; - DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + DBUG_PRINT("info", ("master_log_pos: %lu", (ulong) mi->master_log_pos)); /* Coordinates in rli were spoilt by the 'if (need_relay_log_purge)' block, @@ -1167,14 +1265,15 @@ int change_master(THD* thd, MASTER_INFO* mi) unlock_slave_threads(mi); thd->proc_info = 0; send_ok(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } int reset_master(THD* thd) { if (!mysql_bin_log.is_open()) { - my_error(ER_FLUSH_MASTER_BINLOG_CLOSED, MYF(ME_BELL+ME_WAITTANG)); + my_message(ER_FLUSH_MASTER_BINLOG_CLOSED, + ER(ER_FLUSH_MASTER_BINLOG_CLOSED), MYF(ME_BELL+ME_WAITTANG)); return 1; } return mysql_bin_log.reset_logs(thd); @@ -1198,22 +1297,28 @@ int cmp_master_pos(const char* log_file_name1, ulonglong log_pos1, } -int show_binlog_events(THD* thd) +bool mysql_show_binlog_events(THD* thd) { Protocol *protocol= thd->protocol; - DBUG_ENTER("show_binlog_events"); + DBUG_ENTER("mysql_show_binlog_events"); List<Item> field_list; const char *errmsg = 0; + bool ret = TRUE; IO_CACHE 
log; File file = -1; Log_event::init_show_field_list(&field_list); - if (protocol-> send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); + + Format_description_log_event *description_event= new + Format_description_log_event(3); /* MySQL 4.0 by default */ if (mysql_bin_log.is_open()) { LEX_MASTER_INFO *lex_mi= &thd->lex->mi; + SELECT_LEX_UNIT *unit= &thd->lex->unit; ha_rows event_count, limit_start, limit_end; my_off_t pos = max(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly char search_file_name[FN_REFLEN], *name; @@ -1221,9 +1326,10 @@ int show_binlog_events(THD* thd) pthread_mutex_t *log_lock = mysql_bin_log.get_log_lock(); LOG_INFO linfo; Log_event* ev; - - limit_start= thd->lex->current_select->offset_limit; - limit_end= thd->lex->current_select->select_limit + limit_start; + + unit->set_limit(thd->lex->current_select); + limit_start= unit->offset_limit_cnt; + limit_end= unit->select_limit_cnt; name= search_file_name; if (log_file_name) @@ -1244,10 +1350,38 @@ int show_binlog_events(THD* thd) goto err; pthread_mutex_lock(log_lock); + + /* + open_binlog() sought to position 4. + Read the first event in case it's a Format_description_log_event, to + know the format. If there's no such event, we are 3.23 or 4.x. This + code, like before, can't read 3.23 binlogs. + This code will fail on a mixed relay log (one which has Format_desc then + Rotate then Format_desc). 
+ */ + + ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,description_event); + if (ev) + { + if (ev->get_type_code() == FORMAT_DESCRIPTION_EVENT) + { + delete description_event; + description_event= (Format_description_log_event*) ev; + } + else + delete ev; + } + my_b_seek(&log, pos); + if (!description_event->is_valid()) + { + errmsg="Invalid Format_description event; could be out of memory"; + goto err; + } + for (event_count = 0; - (ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,0)); ) + (ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,description_event)); ) { if (event_count >= limit_start && ev->net_send(protocol, linfo.log_file_name, pos)) @@ -1275,7 +1409,10 @@ int show_binlog_events(THD* thd) pthread_mutex_unlock(log_lock); } + ret= FALSE; + err: + delete description_event; if (file >= 0) { end_io_cache(&log); @@ -1285,19 +1422,19 @@ err: if (errmsg) { my_error(ER_ERROR_WHEN_EXECUTING_COMMAND, MYF(0), - "SHOW BINLOG EVENTS", errmsg); - DBUG_RETURN(-1); + "SHOW BINLOG EVENTS", errmsg); + DBUG_RETURN(TRUE); } send_eof(thd); pthread_mutex_lock(&LOCK_thread_count); thd->current_linfo = 0; pthread_mutex_unlock(&LOCK_thread_count); - DBUG_RETURN(0); + DBUG_RETURN(ret); } -int show_binlog_info(THD* thd) +bool show_binlog_info(THD* thd) { Protocol *protocol= thd->protocol; DBUG_ENTER("show_binlog_info"); @@ -1308,8 +1445,9 @@ int show_binlog_info(THD* thd) field_list.push_back(new Item_empty_string("Binlog_Do_DB",255)); field_list.push_back(new Item_empty_string("Binlog_Ignore_DB",255)); - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); protocol->prepare_for_resend(); if (mysql_bin_log.is_open()) @@ -1322,10 +1460,10 @@ int show_binlog_info(THD* thd) protocol->store(&binlog_do_db); protocol->store(&binlog_ignore_db); if (protocol->write()) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } send_eof(thd); - DBUG_RETURN(0); + 
DBUG_RETURN(FALSE); } @@ -1337,11 +1475,11 @@ int show_binlog_info(THD* thd) thd Thread specific variable RETURN VALUES - 0 ok - 1 error (Error message sent to client) + FALSE OK + TRUE error */ -int show_binlogs(THD* thd) +bool show_binlogs(THD* thd) { IO_CACHE *index_file; LOG_INFO cur; @@ -1355,16 +1493,16 @@ int show_binlogs(THD* thd) if (!mysql_bin_log.is_open()) { - //TODO: Replace with ER() error message - send_error(thd, 0, "You are not using binary logging"); + my_message(ER_NO_BINARY_LOGGING, ER(ER_NO_BINARY_LOGGING), MYF(0)); return 1; } field_list.push_back(new Item_empty_string("Log_name", 255)); - field_list.push_back(new Item_return_int("File_size", 20, + field_list.push_back(new Item_return_int("File_size", 20, MYSQL_TYPE_LONGLONG)); - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); pthread_mutex_lock(mysql_bin_log.get_log_lock()); mysql_bin_log.lock_index(); @@ -1407,11 +1545,11 @@ int show_binlogs(THD* thd) } mysql_bin_log.unlock_index(); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: mysql_bin_log.unlock_index(); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } @@ -1431,17 +1569,16 @@ int log_loaded_block(IO_CACHE* file) lf_info->last_pos_in_file = file->pos_in_file; if (lf_info->wrote_create_file) { - Append_block_log_event a(lf_info->thd, lf_info->db, buffer, block_len, - lf_info->log_delayed); + Append_block_log_event a(lf_info->thd, lf_info->thd->db, buffer, + block_len, lf_info->log_delayed); mysql_bin_log.write(&a); } else { - Create_file_log_event c(lf_info->thd,lf_info->ex,lf_info->db, - lf_info->table_name, *lf_info->fields, - lf_info->handle_dup, lf_info->ignore, buffer, - block_len, lf_info->log_delayed); - mysql_bin_log.write(&c); + Begin_load_query_log_event b(lf_info->thd, lf_info->thd->db, + buffer, block_len, + lf_info->log_delayed); + mysql_bin_log.write(&b); lf_info->wrote_create_file = 1; 
DBUG_SYNC_POINT("debug_lock.created_file_event",10); } diff --git a/sql/sql_repl.h b/sql/sql_repl.h index 21b3d2955f7..1fbc6eb30cf 100644 --- a/sql/sql_repl.h +++ b/sql/sql_repl.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB & Sasha +/* Copyright (C) 2000-2006 MySQL AB & Sasha This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -36,24 +35,25 @@ extern I_List<i_string> binlog_do_db, binlog_ignore_db; extern int max_binlog_dump_events; extern my_bool opt_sporadic_binlog_dump_fail; -#define KICK_SLAVE(thd) { pthread_mutex_lock(&(thd)->LOCK_delete); (thd)->awake(0 /* do not prepare to die*/); pthread_mutex_unlock(&(thd)->LOCK_delete); } - -File open_binlog(IO_CACHE *log, const char *log_file_name, - const char **errmsg); +#define KICK_SLAVE(thd) do { \ + pthread_mutex_lock(&(thd)->LOCK_delete); \ + (thd)->awake(THD::NOT_KILLED); \ + pthread_mutex_unlock(&(thd)->LOCK_delete); \ + } while(0) int start_slave(THD* thd, MASTER_INFO* mi, bool net_report); int stop_slave(THD* thd, MASTER_INFO* mi, bool net_report); -int change_master(THD* thd, MASTER_INFO* mi); -int show_binlog_events(THD* thd); +bool change_master(THD* thd, MASTER_INFO* mi); +bool mysql_show_binlog_events(THD* thd); int cmp_master_pos(const char* log_file_name1, ulonglong log_pos1, const char* log_file_name2, ulonglong log_pos2); int reset_slave(THD *thd, MASTER_INFO* mi); int reset_master(THD* thd); -int purge_master_logs(THD* thd, const char* to_log); -int purge_master_logs_before_date(THD* thd, time_t purge_time); +bool purge_master_logs(THD* thd, const char* to_log); +bool 
purge_master_logs_before_date(THD* thd, time_t purge_time); bool log_in_use(const char* log_name); void adjust_linfo_offsets(my_off_t purge_offset); -int show_binlogs(THD* thd); +bool show_binlogs(THD* thd); extern int init_master_info(MASTER_INFO* mi); void kill_zombie_dump_threads(uint32 slave_server_id); int check_binlog_magic(IO_CACHE* log, const char** errmsg); @@ -62,12 +62,7 @@ typedef struct st_load_file_info { THD* thd; my_off_t last_pos_in_file; - sql_exchange* ex; - List <Item> *fields; - enum enum_duplicates handle_dup; - char* db; - char* table_name; - bool wrote_create_file, log_delayed, ignore; + bool wrote_create_file, log_delayed; } LOAD_FILE_INFO; int log_loaded_block(IO_CACHE* file); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index af3ad782ee3..08780efbedb 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000-2004 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -23,6 +22,7 @@ #include "mysql_priv.h" #include "sql_select.h" +#include "sql_cursor.h" #include <m_ctype.h> #include <hash.h> @@ -30,21 +30,45 @@ const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref", "MAYBE_REF","ALL","range","index","fulltext", - "ref_or_null","unique_subquery","index_subquery" + "ref_or_null","unique_subquery","index_subquery", + "index_merge" }; +struct st_sargable_param; + static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array); -static bool make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, +static bool make_join_statistics(JOIN *join, TABLE_LIST *leaves, COND *conds, DYNAMIC_ARRAY *keyuse); static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse, - JOIN_TAB *join_tab, + JOIN_TAB *join_tab, uint tables, COND *conds, - table_map table_map, SELECT_LEX *select_lex); + COND_EQUAL *cond_equal, + table_map table_map, SELECT_LEX *select_lex, + st_sargable_param **sargables); static int sort_keyuse(KEYUSE *a,KEYUSE *b); static void set_position(JOIN *join,uint index,JOIN_TAB *table,KEYUSE *key); static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, table_map used_tables); -static void find_best_combination(JOIN *join,table_map rest_tables); +static void choose_plan(JOIN *join,table_map join_tables); + +static void best_access_path(JOIN *join, JOIN_TAB *s, THD *thd, + table_map remaining_tables, uint idx, + double record_count, double read_time); +static void optimize_straight_join(JOIN *join, table_map join_tables); +static void greedy_search(JOIN *join, table_map remaining_tables, + uint depth, uint prune_level); +static void best_extension_by_limited_search(JOIN *join, + table_map remaining_tables, + uint idx, double record_count, + double read_time, uint depth, + uint prune_level); +static uint determine_search_depth(JOIN* join); +static int 
join_tab_cmp(const void* ptr1, const void* ptr2); +static int join_tab_cmp_straight(const void* ptr1, const void* ptr2); +/* + TODO: 'find_best' is here only temporarily until 'greedy_search' is + tested and approved. +*/ static void find_best(JOIN *join,table_map rest_tables,uint index, double record_count,double read_time); static uint cache_record_length(JOIN *join,uint index); @@ -55,38 +79,64 @@ static store_key *get_store_key(THD *thd, KEY_PART_INFO *key_part, char *key_buff, uint maybe_null); static bool make_simple_join(JOIN *join,TABLE *tmp_table); +static void make_outerjoin_info(JOIN *join); static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *item); -static void make_join_readinfo(JOIN *join,uint options); +static void make_join_readinfo(JOIN *join, ulonglong options); static bool only_eq_ref_tables(JOIN *join, ORDER *order, table_map tables); static void update_depend_map(JOIN *join); static void update_depend_map(JOIN *join, ORDER *order); static ORDER *remove_const(JOIN *join,ORDER *first_order,COND *cond, bool change_list, bool *simple_order); static int return_zero_rows(JOIN *join, select_result *res,TABLE_LIST *tables, - List<Item> &fields, bool send_row, - uint select_options, const char *info, - Item *having, Procedure *proc, - SELECT_LEX_UNIT *unit); -static COND *optimize_cond(THD *thd, COND *conds, + List<Item> &fields, bool send_row, + ulonglong select_options, const char *info, + Item *having); +static COND *build_equal_items(THD *thd, COND *cond, + COND_EQUAL *inherited, + List<TABLE_LIST> *join_list, + COND_EQUAL **cond_equal_ref); +static COND* substitute_for_best_equal_field(COND *cond, + COND_EQUAL *cond_equal, + void *table_join_idx); +static COND *simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, + COND *conds, bool top); +static bool check_interleaving_with_nj(JOIN_TAB *last, JOIN_TAB *next); +static void restore_prev_nj_state(JOIN_TAB *last); +static void reset_nj_counters(List<TABLE_LIST> *join_list); +static 
uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list, + uint first_unused); + +static COND *optimize_cond(JOIN *join, COND *conds, + List<TABLE_LIST> *join_list, Item::cond_result *cond_value); static bool const_expression_in_where(COND *conds,Item *item, Item **comp_item); static bool open_tmp_table(TABLE *table); static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, - ulong options); + ulonglong options); static int do_select(JOIN *join,List<Item> *fields,TABLE *tmp_table, Procedure *proc); -static int sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records); -static int sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records); -static int flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last); -static int end_send(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); -static int end_send_group(JOIN *join, JOIN_TAB *join_tab,bool end_of_records); -static int end_write(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); -static int end_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); -static int end_unique_update(JOIN *join,JOIN_TAB *join_tab, - bool end_of_records); -static int end_write_group(JOIN *join, JOIN_TAB *join_tab, - bool end_of_records); -static int test_if_group_changed(List<Item_buff> &list); + +static enum_nested_loop_state +evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, + int error, my_bool *report_error); +static enum_nested_loop_state +evaluate_null_complemented_join_record(JOIN *join, JOIN_TAB *join_tab); +static enum_nested_loop_state +flush_cached_records(JOIN *join, JOIN_TAB *join_tab, bool skip_last); +static enum_nested_loop_state +end_send(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); +static enum_nested_loop_state +end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); +static enum_nested_loop_state +end_write(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); +static enum_nested_loop_state +end_update(JOIN *join, JOIN_TAB *join_tab, bool 
end_of_records); +static enum_nested_loop_state +end_unique_update(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); +static enum_nested_loop_state +end_write_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records); + +static int test_if_group_changed(List<Cached_item> &list); static int join_read_const_table(JOIN_TAB *tab, POSITION *pos); static int join_read_system(JOIN_TAB *tab); static int join_read_const(JOIN_TAB *tab); @@ -106,12 +156,12 @@ static int join_read_prev_same(READ_RECORD *info); static int join_read_prev(READ_RECORD *info); static int join_ft_read_first(JOIN_TAB *tab); static int join_ft_read_next(READ_RECORD *info); -static int join_read_always_key_or_null(JOIN_TAB *tab); -static int join_read_next_same_or_null(READ_RECORD *info); +int join_read_always_key_or_null(JOIN_TAB *tab); +int join_read_next_same_or_null(READ_RECORD *info); static COND *make_cond_for_table(COND *cond,table_map table, table_map used_table); static Item* part_of_refkey(TABLE *form,Field *field); -static uint find_shortest_key(TABLE *table, const key_map *usable_keys); +uint find_shortest_key(TABLE *table, const key_map *usable_keys); static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order, ha_rows select_limit, bool no_changes); static bool list_contains_unique_index(TABLE *table, @@ -126,6 +176,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, ulong offset,Item *having); static int remove_dup_with_hash_index(THD *thd,TABLE *table, uint field_count, Field **first_field, + ulong key_length,Item *having); static int join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count); static ulong used_blob_length(CACHE_FIELD **ptr); @@ -134,8 +185,8 @@ static void reset_cache_read(JOIN_CACHE *cache); static void reset_cache_write(JOIN_CACHE *cache); static void read_cached_record(JOIN_TAB *tab); static bool cmp_buffer_with_ref(JOIN_TAB *tab); -static bool setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields, - List<Item> 
&all_fields,ORDER *new_order); +static bool setup_new_fields(THD *thd, List<Item> &fields, + List<Item> &all_fields, ORDER *new_order); static ORDER *create_distinct_group(THD *thd, Item **ref_pointer_array, ORDER *order, List<Item> &fields, bool *all_order_by_fields_used); @@ -158,26 +209,37 @@ static void init_tmptable_sum_functions(Item_sum **func); static void update_tmptable_sum_func(Item_sum **func,TABLE *tmp_table); static void copy_sum_funcs(Item_sum **func_ptr, Item_sum **end); static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab); +static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr); static bool init_sum_functions(Item_sum **func, Item_sum **end); static bool update_sum_func(Item_sum **func); static void select_describe(JOIN *join, bool need_tmp_table,bool need_order, bool distinct, const char *message=NullS); static Item *remove_additional_cond(Item* conds); +static void add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab); /* This handles SELECT with and without UNION */ -int handle_select(THD *thd, LEX *lex, select_result *result) +bool handle_select(THD *thd, LEX *lex, select_result *result, + ulong setup_tables_done_option) { - int res; + bool res; register SELECT_LEX *select_lex = &lex->select_lex; DBUG_ENTER("handle_select"); if (select_lex->next_select() || select_lex->master_unit()->fake_select_lex) - res=mysql_union(thd, lex, result, &lex->unit); + res= mysql_union(thd, lex, result, &lex->unit, setup_tables_done_option); else + { + SELECT_LEX_UNIT *unit= &lex->unit; + unit->set_limit(unit->global_parameters); + /* + 'options' of mysql_select will be set in JOIN, as far as JOIN for + every PS/SP execution new, we will not need reset this flag if + setup_tables_done_option changed for next rexecution + */ res= mysql_select(thd, &select_lex->ref_pointer_array, (TABLE_LIST*) select_lex->table_list.first, select_lex->with_wild, select_lex->item_list, @@ -188,45 +250,113 @@ int handle_select(THD *thd, LEX *lex, select_result 
*result) (ORDER*) select_lex->group_list.first, select_lex->having, (ORDER*) lex->proc_list.first, - select_lex->options | thd->options, - result, &(lex->unit), &(lex->select_lex)); - - /* Don't set res if it's -1 as we may want this later */ + select_lex->options | thd->options | + setup_tables_done_option, + result, unit, select_lex); + } DBUG_PRINT("info",("res: %d report_error: %d", res, thd->net.report_error)); - if (thd->net.report_error || res<0) + res|= thd->net.report_error; + if (unlikely(res)) { - result->send_error(0, NullS); + /* If we had a another error reported earlier then this will be ignored */ + result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR)); result->abort(); - res= 1; // Error sent to client } DBUG_RETURN(res); } /* + Fix fields referenced from inner selects. + + SYNOPSIS + fix_inner_refs() + thd Thread handle + all_fields List of all fields used in select + select Current select + ref_pointer_array Array of references to Items used in current select + + DESCRIPTION + The function fixes fields referenced from inner selects and + also fixes references (Item_ref objects) to these fields. Each field + is fixed as a usual hidden field of the current select - it is added + to the all_fields list and the pointer to it is saved in the + ref_pointer_array if latter is provided. + After the field has been fixed we proceed with fixing references + (Item_ref objects) to this field from inner subqueries. If the + ref_pointer_array is provided then Item_ref objects is set to + reference element in that array with the pointer to the field. + + RETURN + TRUE an error occured + FALSE ok +*/ + +bool +fix_inner_refs(THD *thd, List<Item> &all_fields, SELECT_LEX *select, + Item **ref_pointer_array) +{ + Item_outer_ref *ref; + bool res= FALSE; + List_iterator<Item_outer_ref> ref_it(select->inner_refs_list); + while ((ref= ref_it++)) + { + Item_field *item= ref->outer_field; + /* + TODO: this field item already might be present in the select list. 
+ In this case instead of adding new field item we could use an + existing one. The change will lead to less operations for copying fields, + smaller temporary tables and less data passed through filesort. + */ + if (ref_pointer_array) + { + int el= all_fields.elements; + ref_pointer_array[el]= (Item*)item; + /* Add the field item to the select list of the current select. */ + all_fields.push_front((Item*)item); + /* + If it's needed reset each Item_ref item that refers this field with + a new reference taken from ref_pointer_array. + */ + ref->ref= ref_pointer_array + el; + } + if (!ref->fixed && ref->fix_fields(thd, 0)) + { + res= TRUE; + break; + } + thd->used_tables|= item->used_tables(); + } + return res; +} + +/* Function to setup clauses without sum functions */ inline int setup_without_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + TABLE_LIST *leaves, List<Item> &fields, List<Item> &all_fields, COND **conds, ORDER *order, ORDER *group, bool *hidden_group_fields) { - bool save_allow_sum_func; int res; + nesting_map save_allow_sum_func=thd->lex->allow_sum_func ; DBUG_ENTER("setup_without_group"); - save_allow_sum_func= thd->allow_sum_func; - thd->allow_sum_func= 0; - res= (setup_conds(thd, tables, conds) || - setup_order(thd, ref_pointer_array, tables, fields, all_fields, - order) || - setup_group(thd, ref_pointer_array, tables, fields, all_fields, - group, hidden_group_fields)); - thd->allow_sum_func= save_allow_sum_func; + thd->lex->allow_sum_func&= ~(1 << thd->lex->current_select->nest_level); + res= setup_conds(thd, tables, leaves, conds); + + thd->lex->allow_sum_func|= 1 << thd->lex->current_select->nest_level; + res= res || setup_order(thd, ref_pointer_array, tables, fields, all_fields, + order); + thd->lex->allow_sum_func&= ~(1 << thd->lex->current_select->nest_level); + res= res || setup_group(thd, ref_pointer_array, tables, fields, all_fields, + group, hidden_group_fields); + thd->lex->allow_sum_func= save_allow_sum_func; 
DBUG_RETURN(res); } @@ -263,18 +393,39 @@ JOIN::prepare(Item ***rref_pointer_array, tables_list= tables_init; select_lex= select_lex_arg; select_lex->join= this; + join_list= &select_lex->top_join_list; union_part= (unit_arg->first_select()->next_select() != 0); thd->lex->current_select->is_item_list_lookup= 1; + /* + If we have already executed SELECT, then it have not sense to prevent + its table from update (see unique_table()) + */ + if (thd->derived_tables_processing) + select_lex->exclude_from_table_unique_test= TRUE; + /* Check that all tables, fields, conds and order are ok */ - if (setup_tables(tables_list) || - setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || + if (!(select_options & OPTION_SETUP_TABLES_DONE) && + setup_tables_and_check_access(thd, &select_lex->context, join_list, + tables_list, &conds, + &select_lex->leaf_tables, FALSE, + SELECT_ACL, SELECT_ACL)) + DBUG_RETURN(-1); + + TABLE_LIST *table_ptr; + for (table_ptr= select_lex->leaf_tables; + table_ptr; + table_ptr= table_ptr->next_leaf) + tables++; + + if (setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || select_lex->setup_ref_array(thd, og_num) || - setup_fields(thd, (*rref_pointer_array), tables_list, fields_list, 1, + setup_fields(thd, (*rref_pointer_array), fields_list, 1, &all_fields, 1) || - setup_without_group(thd, (*rref_pointer_array), tables_list, fields_list, - all_fields, &conds, order, group_list, + setup_without_group(thd, (*rref_pointer_array), tables_list, + select_lex->leaf_tables, fields_list, + all_fields, &conds, order, group_list, &hidden_group_fields)) DBUG_RETURN(-1); /* purecov: inspected */ @@ -282,31 +433,66 @@ JOIN::prepare(Item ***rref_pointer_array, if (having) { + nesting_map save_allow_sum_func= thd->lex->allow_sum_func; thd->where="having clause"; - thd->allow_sum_func=1; + thd->lex->allow_sum_func|= 1 << select_lex_arg->nest_level; select_lex->having_fix_field= 1; bool having_fix_rc= (!having->fixed && - 
(having->fix_fields(thd, tables_list, &having) || + (having->fix_fields(thd, &having) || having->check_cols(1))); select_lex->having_fix_field= 0; if (having_fix_rc || thd->net.report_error) DBUG_RETURN(-1); /* purecov: inspected */ + thd->lex->allow_sum_func= save_allow_sum_func; } - // Is it subselect + if (!thd->lex->view_prepare_mode) { Item_subselect *subselect; + /* Is it subselect? */ if ((subselect= select_lex->master_unit()->item)) { Item_subselect::trans_res res; if ((res= subselect->select_transformer(this)) != Item_subselect::RES_OK) + { + select_lex->fix_prepare_information(thd, &conds, &having); DBUG_RETURN((res == Item_subselect::RES_ERROR)); + } + } + } + + select_lex->fix_prepare_information(thd, &conds, &having); + + if (order) + { + ORDER *ord; + for (ord= order; ord; ord= ord->next) + { + Item *item= *ord->item; + if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) + item->split_sum_func(thd, ref_pointer_array, all_fields); } } if (having && having->with_sum_func) - having->split_sum_func2(thd, ref_pointer_array, all_fields, &having); + having->split_sum_func2(thd, ref_pointer_array, all_fields, + &having, TRUE); + if (select_lex->inner_refs_list.elements && + fix_inner_refs(thd, all_fields, select_lex, ref_pointer_array)) + DBUG_RETURN(-1); + + if (select_lex->inner_sum_func_list) + { + Item_sum *end=select_lex->inner_sum_func_list; + Item_sum *item_sum= end; + do + { + item_sum= item_sum->next; + item_sum->split_sum_func2(thd, ref_pointer_array, + all_fields, item_sum->ref_by, FALSE); + } while (item_sum != end); + } if (setup_ftfuncs(select_lex)) /* should be after having->fix_fields */ DBUG_RETURN(-1); @@ -333,13 +519,11 @@ JOIN::prepare(Item ***rref_pointer_array, } if (flag == 3) { - my_error(ER_MIX_OF_GROUP_FUNC_AND_FIELDS,MYF(0)); + my_message(ER_MIX_OF_GROUP_FUNC_AND_FIELDS, + ER(ER_MIX_OF_GROUP_FUNC_AND_FIELDS), MYF(0)); DBUG_RETURN(-1); } } - TABLE_LIST *table_ptr; - for (table_ptr= tables_list ; table_ptr ; table_ptr= 
table_ptr->next) - tables++; } { /* Caclulate the number of groups */ @@ -353,43 +537,40 @@ JOIN::prepare(Item ***rref_pointer_array, goto err; /* purecov: inspected */ if (procedure) { - if (setup_new_fields(thd, tables_list, fields_list, all_fields, + if (setup_new_fields(thd, fields_list, all_fields, procedure->param_fields)) goto err; /* purecov: inspected */ if (procedure->group) { if (!test_if_subpart(procedure->group,group_list)) { /* purecov: inspected */ - my_message(0,"Can't handle procedures with differents groups yet", - MYF(0)); /* purecov: inspected */ + my_message(ER_DIFF_GROUPS_PROC, ER(ER_DIFF_GROUPS_PROC), + MYF(0)); /* purecov: inspected */ goto err; /* purecov: inspected */ } } #ifdef NOT_NEEDED else if (!group_list && procedure->flags & PROC_GROUP) { - my_message(0,"Select must have a group with this procedure",MYF(0)); + my_message(ER_NO_GROUP_FOR_PROC, MYF(0)); goto err; } #endif if (order && (procedure->flags & PROC_NO_SORT)) { /* purecov: inspected */ - my_message(0,"Can't use order with this procedure",MYF(0)); /* purecov: inspected */ + my_message(ER_ORDER_WITH_PROC, ER(ER_ORDER_WITH_PROC), + MYF(0)); /* purecov: inspected */ goto err; /* purecov: inspected */ } } + if (!procedure && result && result->prepare(fields_list, unit_arg)) + goto err; /* purecov: inspected */ + /* Init join struct */ count_field_types(&tmp_table_param, all_fields, 0); ref_pointer_array_size= all_fields.elements*sizeof(Item*); this->group= group_list != 0; - row_limit= ((select_distinct || order || group_list) ? HA_POS_ERROR : - unit_arg->select_limit_cnt); - /* select_limit is used to decide if we are likely to scan the whole table */ - select_limit= unit_arg->select_limit_cnt; - if (having || (select_options & OPTION_FOUND_ROWS)) - select_limit= HA_POS_ERROR; - do_send_rows = (unit_arg->select_limit_cnt) ? 
1 : 0; unit= unit_arg; #ifdef RESTRICTED_GROUP @@ -399,18 +580,6 @@ JOIN::prepare(Item ***rref_pointer_array, goto err; } #endif - /* - We must not yet prepare the result table if it is the same as one of the - source tables (INSERT SELECT). This is checked in mysql_execute_command() - and OPTION_BUFFER_RESULT is added to the select_options. A temporary - table is then used to hold the result. The preparation may disable - indexes on the result table, which may be used during the select, if it - is the same table (Bug #6034). Do the preparation after the select phase. - */ - if (! procedure && ! test(select_options & OPTION_BUFFER_RESULT) && - result && result->prepare(fields_list, unit_arg)) - goto err; /* purecov: inspected */ - if (select_lex->olap == ROLLUP_TYPE && rollup_init()) goto err; if (alloc_func_list()) @@ -424,41 +593,87 @@ err: DBUG_RETURN(-1); /* purecov: inspected */ } + /* - test if it is known for optimisation IN subquery + Remove the predicates pushed down into the subquery - SYNOPSYS - JOIN::test_in_subselect - where - pointer for variable in which conditions should be - stored if subquery is known + SYNOPSIS + JOIN::remove_subq_pushed_predicates() + where IN Must be NULL + OUT The remaining WHERE condition, or NULL - RETURN - 1 - known - 0 - unknown + DESCRIPTION + Given that this join will be executed using (unique|index)_subquery, + without "checking NULL", remove the predicates that were pushed down + into the subquery. + + We can remove the equalities that will be guaranteed to be true by the + fact that subquery engine will be using index lookup. 
+ + If the subquery compares scalar values, we can remove the condition that + was wrapped into trig_cond (it will be checked when needed by the subquery + engine) + + If the subquery compares row values, we need to keep the wrapped + equalities in the WHERE clause: when the left (outer) tuple has both NULL + and non-NULL values, we'll do a full table scan and will rely on the + equalities corresponding to non-NULL parts of left tuple to filter out + non-matching records. */ -bool JOIN::test_in_subselect(Item **where) +void JOIN::remove_subq_pushed_predicates(Item **where) { if (conds->type() == Item::FUNC_ITEM && ((Item_func *)this->conds)->functype() == Item_func::EQ_FUNC && ((Item_func *)conds)->arguments()[0]->type() == Item::REF_ITEM && ((Item_func *)conds)->arguments()[1]->type() == Item::FIELD_ITEM) { - join_tab->info= "Using index"; *where= 0; - return 1; + return; } if (conds->type() == Item::COND_ITEM && ((class Item_func *)this->conds)->functype() == Item_func::COND_AND_FUNC) { - if ((*where= remove_additional_cond(conds))) - join_tab->info= "Using index; Using where"; - else - join_tab->info= "Using index"; - return 1; + *where= remove_additional_cond(conds); + } +} + + +/* + Index lookup-based subquery: save some flags for EXPLAIN output + + SYNOPSIS + save_index_subquery_explain_info() + join_tab Subquery's join tab (there is only one as index lookup is + only used for subqueries that are single-table SELECTs) + where Subquery's WHERE clause + + DESCRIPTION + For index lookup-based subquery (i.e. one executed with + subselect_uniquesubquery_engine or subselect_indexsubquery_engine), + check its EXPLAIN output row should contain + "Using index" (TAB_INFO_FULL_SCAN_ON_NULL) + "Using Where" (TAB_INFO_USING_WHERE) + "Full scan on NULL key" (TAB_INFO_FULL_SCAN_ON_NULL) + and set appropriate flags in join_tab->packed_info. 
+*/ + +static void save_index_subquery_explain_info(JOIN_TAB *join_tab, Item* where) +{ + join_tab->packed_info= TAB_INFO_HAVE_VALUE; + if (join_tab->table->used_keys.is_set(join_tab->ref.key)) + join_tab->packed_info |= TAB_INFO_USING_INDEX; + if (where) + join_tab->packed_info |= TAB_INFO_USING_WHERE; + for (uint i = 0; i < join_tab->ref.key_parts; i++) + { + if (join_tab->ref.cond_guards[i]) + { + join_tab->packed_info |= TAB_INFO_FULL_SCAN_ON_NULL; + break; + } } - return 0; } @@ -468,6 +683,7 @@ bool JOIN::test_in_subselect(Item **where) 1 - error error code saved in field 'error' */ + int JOIN::optimize() { @@ -477,6 +693,16 @@ JOIN::optimize() DBUG_RETURN(0); optimized= 1; + if (thd->lex->orig_sql_command != SQLCOM_SHOW_STATUS) + thd->status_var.last_query_cost= 0.0; + + row_limit= ((select_distinct || order || group_list) ? HA_POS_ERROR : + unit->select_limit_cnt); + /* select_limit is used to decide if we are likely to scan the whole table */ + select_limit= unit->select_limit_cnt; + if (having || (select_options & OPTION_FOUND_ROWS)) + select_limit= HA_POS_ERROR; + do_send_rows = (unit->select_limit_cnt) ? 1 : 0; // Ignore errors of execution if option IGNORE present if (thd->lex->ignore) thd->lex->current_select->no_error= 1; @@ -491,15 +717,44 @@ JOIN::optimize() } else if ((conds=new Item_cond_and(conds,having))) { - conds->fix_fields(thd, tables_list, &conds); + /* + Item_cond_and can't be fixed after creation, so we do not check + conds->fixed + */ + conds->fix_fields(thd, &conds); conds->change_ref_to_fields(thd, tables_list); conds->top_level_item(); having= 0; } } #endif + SELECT_LEX *sel= thd->lex->current_select; + if (sel->first_cond_optimization) + { + /* + The following code will allocate the new items in a permanent + MEMROOT for prepared statements and stored procedures. 
+ */ - conds= optimize_cond(thd, conds, &cond_value); + Query_arena *arena= thd->stmt_arena, backup; + if (arena->is_conventional()) + arena= 0; // For easier test + else + thd->set_n_backup_active_arena(arena, &backup); + + sel->first_cond_optimization= 0; + + /* Convert all outer joins to inner joins if possible */ + conds= simplify_joins(this, join_list, conds, TRUE); + build_bitmap_for_nested_joins(join_list, 0); + + sel->prep_where= conds ? conds->copy_andor_structure(thd) : 0; + + if (arena) + thd->restore_active_arena(arena, &backup); + } + + conds= optimize_cond(this, conds, join_list, &cond_value); if (thd->net.report_error) { error= 1; @@ -508,19 +763,24 @@ JOIN::optimize() } { - Item::cond_result having_value; - having= optimize_cond(thd, having, &having_value); + having= optimize_cond(this, having, join_list, &having_value); if (thd->net.report_error) { error= 1; DBUG_PRINT("error",("Error from optimize_cond")); DBUG_RETURN(1); } + if (select_lex->where) + select_lex->cond_value= cond_value; + if (select_lex->having) + select_lex->having_value= having_value; if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE || - (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS))) + (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS))) { /* Impossible cond */ - zero_result_cause= having_value == Item::COND_FALSE ? + DBUG_PRINT("info", (having_value == Item::COND_FALSE ? + "Impossible HAVING" : "Impossible WHERE")); + zero_result_cause= having_value == Item::COND_FALSE ? "Impossible HAVING" : "Impossible WHERE"; error= 0; DBUG_RETURN(0); @@ -537,10 +797,11 @@ JOIN::optimize() or 1 if all items were resolved, or 0, or an error number HA_ERR_... 
*/ - if ((res=opt_sum_query(tables_list, all_fields, conds))) + if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds))) { if (res == HA_ERR_KEY_NOT_FOUND) { + DBUG_PRINT("info",("No matching min/max row")); zero_result_cause= "No matching min/max row"; error=0; DBUG_RETURN(0); @@ -549,8 +810,10 @@ JOIN::optimize() { thd->fatal_error(); error= res; + DBUG_PRINT("error",("Error from opt_sum_query")); DBUG_RETURN(1); } + DBUG_PRINT("info",("Select tables optimized away")); zero_result_cause= "Select tables optimized away"; tables_list= 0; // All tables resolved /* @@ -561,8 +824,9 @@ JOIN::optimize() Notice that make_cond_for_table() will always succeed to remove all computed conditions, because opt_sum_query() is applicable only to conjunctions. + Preserve conditions for EXPLAIN. */ - if (conds) + if (conds && !(thd->lex->describe & DESCRIBE_EXTENDED)) { COND *table_independent_conds= make_cond_for_table(conds, PSEUDO_TABLE_BITS, 0); @@ -575,15 +839,16 @@ JOIN::optimize() } if (!tables_list) { + DBUG_PRINT("info",("No tables")); error= 0; DBUG_RETURN(0); } error= -1; // Error is sent to client - sort_by_table= get_sort_by_table(order, group_list, tables_list); + sort_by_table= get_sort_by_table(order, group_list, select_lex->leaf_tables); /* Calculate how to do the join */ thd->proc_info= "statistics"; - if (make_join_statistics(this, tables_list, conds, &keyuse) || + if (make_join_statistics(this, select_lex->leaf_tables, conds, &keyuse) || thd->is_fatal_error) { DBUG_PRINT("error",("Error: make_join_statistics() failed")); @@ -620,20 +885,51 @@ JOIN::optimize() if (const_tables && !thd->locked_tables && !(select_options & SELECT_NO_UNLOCK)) mysql_unlock_some_tables(thd, table, const_tables); - if (!conds && outer_join) { /* Handle the case where we have an OUTER JOIN without a WHERE */ conds=new Item_int((longlong) 1,1); // Always true } - select=make_select(*table, const_table_map, - const_table_map, conds, &error); + select= make_select(*table, 
const_table_map, + const_table_map, conds, 1, &error); if (error) { /* purecov: inspected */ error= -1; /* purecov: inspected */ DBUG_PRINT("error",("Error: make_select() failed")); DBUG_RETURN(1); } + + reset_nj_counters(join_list); + make_outerjoin_info(this); + + /* + Among the equal fields belonging to the same multiple equality + choose the one that is to be retrieved first and substitute + all references to these in where condition for a reference for + the selected field. + */ + if (conds) + { + conds= substitute_for_best_equal_field(conds, cond_equal, map2table); + conds->update_used_tables(); + DBUG_EXECUTE("where", print_where(conds, "after substitute_best_equal");); + } + + /* + Permorm the the optimization on fields evaluation mentioned above + for all on expressions. + */ + for (JOIN_TAB *tab= join_tab + const_tables; tab < join_tab + tables ; tab++) + { + if (*tab->on_expr_ref) + { + *tab->on_expr_ref= substitute_for_best_equal_field(*tab->on_expr_ref, + tab->cond_equal, + map2table); + (*tab->on_expr_ref)->update_used_tables(); + } + } + if (make_join_select(this, select, conds)) { zero_result_cause= @@ -663,17 +959,22 @@ JOIN::optimize() } /* Check if we can optimize away GROUP BY/DISTINCT. - We can do that if there are no aggregate functions and the + We can do that if there are no aggregate functions, the fields in DISTINCT clause (if present) and/or columns in GROUP BY (if present) contain direct references to all key parts of - an unique index (in whatever order). + an unique index (in whatever order) and if the key parts of the + unique index cannot contain NULLs. Note that the unique keys for DISTINCT and GROUP BY should not be the same (as long as they are unique). The FROM clause must contain a single non-constant table. 
*/ if (tables - const_tables == 1 && (group_list || select_distinct) && - !tmp_table_param.sum_func_count) + !tmp_table_param.sum_func_count && + (!join_tab[const_tables].select || + !join_tab[const_tables].select->quick || + join_tab[const_tables].select->quick->get_type() != + QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) { if (group_list && list_contains_unique_index(join_tab[const_tables].table, @@ -840,53 +1141,47 @@ JOIN::optimize() if (join_tab[0].type == JT_EQ_REF && join_tab[0].ref.items[0]->name == in_left_expr_name) { - if (test_in_subselect(&where)) - { - join_tab[0].type= JT_UNIQUE_SUBQUERY; - error= 0; - DBUG_RETURN(unit->item-> - change_engine(new - subselect_uniquesubquery_engine(thd, - join_tab, - unit->item, - where))); - } + remove_subq_pushed_predicates(&where); + save_index_subquery_explain_info(join_tab, where); + join_tab[0].type= JT_UNIQUE_SUBQUERY; + error= 0; + DBUG_RETURN(unit->item-> + change_engine(new + subselect_uniquesubquery_engine(thd, + join_tab, + unit->item, + where))); } else if (join_tab[0].type == JT_REF && join_tab[0].ref.items[0]->name == in_left_expr_name) { - if (test_in_subselect(&where)) - { - join_tab[0].type= JT_INDEX_SUBQUERY; - error= 0; - DBUG_RETURN(unit->item-> - change_engine(new - subselect_indexsubquery_engine(thd, - join_tab, - unit->item, - where, - 0))); - } + remove_subq_pushed_predicates(&where); + save_index_subquery_explain_info(join_tab, where); + join_tab[0].type= JT_INDEX_SUBQUERY; + error= 0; + DBUG_RETURN(unit->item-> + change_engine(new + subselect_indexsubquery_engine(thd, + join_tab, + unit->item, + where, + NULL, + 0))); } } else if (join_tab[0].type == JT_REF_OR_NULL && join_tab[0].ref.items[0]->name == in_left_expr_name && - having->type() == Item::FUNC_ITEM && - ((Item_func *) having)->functype() == - Item_func::ISNOTNULLTEST_FUNC) + having->name == in_having_cond) { join_tab[0].type= JT_INDEX_SUBQUERY; error= 0; - - if ((conds= remove_additional_cond(conds))) - join_tab->info= "Using index; Using 
where"; - else - join_tab->info= "Using index"; - + conds= remove_additional_cond(conds); + save_index_subquery_explain_info(join_tab, conds); DBUG_RETURN(unit->item-> change_engine(new subselect_indexsubquery_engine(thd, join_tab, unit->item, conds, + having, 1))); } @@ -911,32 +1206,52 @@ JOIN::optimize() #endif DBUG_EXECUTE("info",TEST_join(this);); - /* - Because filesort always does a full table scan or a quick range scan - we must add the removed reference to the select for the table. - We only need to do this when we have a simple_order or simple_group - as in other cases the join is done before the sort. - */ - if (const_tables != tables && - (order || group_list) && - join_tab[const_tables].type != JT_ALL && - join_tab[const_tables].type != JT_FT && - join_tab[const_tables].type != JT_REF_OR_NULL && - (order && simple_order || group_list && simple_group)) - { - if (add_ref_to_table_cond(thd,&join_tab[const_tables])) - DBUG_RETURN(1); - } - if (!(select_options & SELECT_BIG_RESULT) && - ((group_list && const_tables != tables && - (!simple_group || - !test_if_skip_sort_order(&join_tab[const_tables], group_list, - unit->select_limit_cnt, 0))) || - select_distinct) && - tmp_table_param.quick_group && !procedure) + if (const_tables != tables) { - need_tmp=1; simple_order=simple_group=0; // Force tmp table without sort + /* + Because filesort always does a full table scan or a quick range scan + we must add the removed reference to the select for the table. + We only need to do this when we have a simple_order or simple_group + as in other cases the join is done before the sort. 
+ */ + if ((order || group_list) && + join_tab[const_tables].type != JT_ALL && + join_tab[const_tables].type != JT_FT && + join_tab[const_tables].type != JT_REF_OR_NULL && + (order && simple_order || group_list && simple_group)) + { + if (add_ref_to_table_cond(thd,&join_tab[const_tables])) + DBUG_RETURN(1); + } + + if (!(select_options & SELECT_BIG_RESULT) && + ((group_list && + (!simple_group || + !test_if_skip_sort_order(&join_tab[const_tables], group_list, + unit->select_limit_cnt, 0))) || + select_distinct) && + tmp_table_param.quick_group && !procedure) + { + need_tmp=1; simple_order=simple_group=0; // Force tmp table without sort + } + if (order) + { + /* + Force using of tmp table if sorting by a SP or UDF function due to + their expensive and probably non-deterministic nature. + */ + for (ORDER *tmp_order= order; tmp_order ; tmp_order=tmp_order->next) + { + Item *item= *tmp_order->item; + if (item->walk(&Item::is_expensive_processor,(byte*)0)) + { + /* Force tmp table without sort */ + need_tmp=1; simple_order=simple_group=0; + break; + } + } + } } tmp_having= having; @@ -947,6 +1262,20 @@ JOIN::optimize() } having= 0; + /* + The loose index scan access method guarantees that all grouping or + duplicate row elimination (for distinct) is already performed + during data retrieval, and that all MIN/MAX functions are already + computed for each group. Thus all MIN/MAX functions should be + treated as regular functions, and there is no need to perform + grouping in the main execution loop. + Notice that currently loose index scan is applicable only for + single table queries, thus it is sufficient to test only the first + join_tab element of the plan for its access method. 
+ */ + if (join_tab->is_using_loose_index_scan()) + tmp_table_param.precomputed_group_by= TRUE; + /* Create a tmp table if distinct or if the sort is too complicated */ if (need_tmp) { @@ -1004,13 +1333,15 @@ JOIN::optimize() if (create_sort_index(thd, this, group_list, HA_POS_ERROR, HA_POS_ERROR) || alloc_group_fields(this, group_list) || - make_sum_func_list(all_fields, fields_list, 1)) + make_sum_func_list(all_fields, fields_list, 1) || + setup_sum_funcs(thd, sum_funcs)) DBUG_RETURN(1); group_list=0; } else { - if (make_sum_func_list(all_fields, fields_list, 0)) + if (make_sum_func_list(all_fields, fields_list, 0) || + setup_sum_funcs(thd, sum_funcs)) DBUG_RETURN(1); if (!group_list && ! exec_tmp_table1->distinct && order && simple_order) { @@ -1048,9 +1379,10 @@ JOIN::optimize() order=0; } } - - if (thd->lex->subqueries) + + if (select_lex->uncacheable && !is_top_level_join()) { + /* If this join belongs to an uncacheable subquery */ if (!(tmp_join= (JOIN*)thd->alloc(sizeof(JOIN)))) DBUG_RETURN(-1); error= 0; // Ensure that tmp_join.error= 0 @@ -1076,18 +1408,11 @@ int JOIN::reinit() { DBUG_ENTER("JOIN::reinit"); - /* TODO move to unit reinit */ - unit->offset_limit_cnt =select_lex->offset_limit; - unit->select_limit_cnt =select_lex->select_limit+select_lex->offset_limit; - if (unit->select_limit_cnt < select_lex->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // no limit - if (unit->select_limit_cnt == HA_POS_ERROR) - select_lex->options&= ~OPTION_FOUND_ROWS; - - if (!optimized && setup_tables(tables_list)) - DBUG_RETURN(1); - - /* Reset of sum functions */ + + unit->offset_limit_cnt= (ha_rows)(select_lex->offset_limit ? 
+ select_lex->offset_limit->val_uint() : + ULL(0)); + first_record= 0; if (exec_tmp_table1) @@ -1095,14 +1420,14 @@ JOIN::reinit() exec_tmp_table1->file->extra(HA_EXTRA_RESET_STATE); exec_tmp_table1->file->delete_all_rows(); free_io_cache(exec_tmp_table1); - filesort_free_buffers(exec_tmp_table1); + filesort_free_buffers(exec_tmp_table1,0); } if (exec_tmp_table2) { exec_tmp_table2->file->extra(HA_EXTRA_RESET_STATE); exec_tmp_table2->file->delete_all_rows(); free_io_cache(exec_tmp_table2); - filesort_free_buffers(exec_tmp_table2); + filesort_free_buffers(exec_tmp_table2,0); } if (items0) set_items_ref_array(items0); @@ -1113,6 +1438,7 @@ JOIN::reinit() if (tmp_join) restore_tmp(); + /* Reset of sum functions */ if (sum_funcs) { Item_sum *func, **func_ptr= sum_funcs; @@ -1146,7 +1472,7 @@ JOIN::exec() List<Item> *columns_list= &fields_list; int tmp_error; DBUG_ENTER("JOIN::exec"); - + error= 0; if (procedure) { @@ -1159,13 +1485,7 @@ JOIN::exec() } columns_list= &procedure_fields_list; } - else if (test(select_options & OPTION_BUFFER_RESULT) && - result && result->prepare(fields_list, unit)) - { - error= 1; - thd->limit_found_rows= thd->examined_row_count= 0; - DBUG_VOID_RETURN; - } + (void) result->prepare2(); // Currently, this cannot fail. 
if (!tables_list && (tables || !select_lex->with_sum_func)) { // Only test of functions @@ -1174,7 +1494,8 @@ JOIN::exec() (zero_result_cause?zero_result_cause:"No tables used")); else { - result->send_fields(*columns_list, 1); + result->send_fields(*columns_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); /* We have to test for 'conds' here as the WHERE may not be constant even if we don't have any tables for prepared statements or if @@ -1216,12 +1537,12 @@ JOIN::exec() if (zero_result_cause) { - (void) return_zero_rows(this, result, tables_list, *columns_list, + (void) return_zero_rows(this, result, select_lex->leaf_tables, + *columns_list, send_row_on_empty_set(), select_options, zero_result_cause, - having, procedure, - unit); + having); DBUG_VOID_RETURN; } @@ -1242,7 +1563,8 @@ JOIN::exec() simple_order= simple_group; skip_sort_order= 0; } - if (order && + if (order && + (order != group_list || !(select_options & SELECT_BIG_RESULT)) && (const_tables == tables || ((simple_order || skip_sort_order) && test_if_skip_sort_order(&join_tab[const_tables], order, @@ -1267,16 +1589,31 @@ JOIN::exec() */ curr_join->examined_rows= 0; + if ((curr_join->select_lex->options & OPTION_SCHEMA_TABLE) && + !thd->lex->describe && + get_schema_tables_result(curr_join, PROCESSED_BY_JOIN_EXEC)) + { + DBUG_VOID_RETURN; + } + /* Create a tmp table if distinct or if the sort is too complicated */ if (need_tmp) { if (tmp_join) + { + /* + We are in a non cacheable sub query. Get the saved join structure + after optimization. 
+ (curr_join may have been modified during last exection and we need + to reset it) + */ curr_join= tmp_join; + } curr_tmp_table= exec_tmp_table1; /* Copy data to the temporary table */ thd->proc_info= "Copying to tmp table"; - + DBUG_PRINT("info", ("%s", thd->proc_info)); if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0))) { error= tmp_error; @@ -1357,7 +1694,7 @@ JOIN::exec() DBUG_PRINT("info",("Creating group table")); /* Free first data from old join */ - curr_join->join_free(0); + curr_join->join_free(); if (make_simple_join(curr_join, curr_tmp_table)) DBUG_VOID_RETURN; calc_group_buffer(curr_join, group_list); @@ -1374,6 +1711,15 @@ JOIN::exec() else { /* group data to new table */ + + /* + If the access method is loose index scan then all MIN/MAX + functions are precomputed, and should be treated as regular + functions. See extended comment in JOIN::exec. + */ + if (curr_join->join_tab->is_using_loose_index_scan()) + curr_join->tmp_table_param.precomputed_group_by= TRUE; + if (!(curr_tmp_table= exec_tmp_table2= create_tmp_table(thd, &curr_join->tmp_table_param, @@ -1400,9 +1746,11 @@ JOIN::exec() { DBUG_VOID_RETURN; } + sortorder= curr_join->sortorder; } thd->proc_info="Copying to group table"; + DBUG_PRINT("info", ("%s", thd->proc_info)); tmp_error= -1; if (curr_join != this) { @@ -1419,17 +1767,18 @@ JOIN::exec() } } if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list, - 1)) + 1, TRUE)) DBUG_VOID_RETURN; curr_join->group_list= 0; - if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, + if (setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) || + (tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0))) { error= tmp_error; DBUG_VOID_RETURN; } end_read_record(&curr_join->join_tab->read_record); - curr_join->const_tables= curr_join->tables; // Mark free for join_free() + curr_join->const_tables= curr_join->tables; // Mark free for cleanup() curr_join->join_tab[0].table= 0; // Table is 
freed // No sum funcs anymore @@ -1453,7 +1802,7 @@ JOIN::exec() if (curr_tmp_table->distinct) curr_join->select_distinct=0; /* Each row is unique */ - curr_join->join_free(0); /* Free quick selects */ + curr_join->join_free(); /* Free quick selects */ if (curr_join->select_distinct && ! curr_join->group_list) { thd->proc_info="Removing duplicates"; @@ -1509,7 +1858,9 @@ JOIN::exec() curr_join->set_items_ref_array(items3); if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list, - 1) || thd->is_fatal_error) + 1, TRUE) || + setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) || + thd->is_fatal_error) DBUG_VOID_RETURN; } if (curr_join->group_list || curr_join->order) @@ -1575,11 +1926,9 @@ JOIN::exec() /* table->keyuse is set in the case there was an original WHERE clause on the table that was optimized away. - table->on_expr tells us that it was a LEFT JOIN and there will be - at least one row generated from the table. */ if (curr_table->select_cond || - (curr_table->keyuse && !curr_table->on_expr)) + (curr_table->keyuse && !curr_table->first_inner)) { /* We have to sort all rows */ curr_join->select_limit= HA_POS_ERROR; @@ -1607,13 +1956,49 @@ JOIN::exec() (select_options & OPTION_FOUND_ROWS ? HA_POS_ERROR : unit->select_limit_cnt))) DBUG_VOID_RETURN; + sortorder= curr_join->sortorder; } } + /* XXX: When can we have here thd->net.report_error not zero? */ + if (thd->net.report_error) + { + error= thd->net.report_error; + DBUG_VOID_RETURN; + } curr_join->having= curr_join->tmp_having; - thd->proc_info="Sending data"; - error= thd->net.report_error ? -1 : - do_select(curr_join, curr_fields_list, NULL, procedure); - thd->limit_found_rows= curr_join->send_records; + curr_join->fields= curr_fields_list; + curr_join->procedure= procedure; + + if (is_top_level_join() && thd->cursor && tables != const_tables) + { + /* + We are here if this is JOIN::exec for the last select of the main unit + and the client requested to open a cursor. 
+ We check that not all tables are constant because this case is not + handled by do_select() separately, and this case is not implemented + for cursors yet. + */ + DBUG_ASSERT(error == 0); + /* + curr_join is used only for reusable joins - that is, + to perform SELECT for each outer row (like in subselects). + This join is main, so we know for sure that curr_join == join. + */ + DBUG_ASSERT(curr_join == this); + /* Open cursor for the last join sweep */ + error= thd->cursor->open(this); + } + else + { + thd->proc_info="Sending data"; + DBUG_PRINT("info", ("%s", thd->proc_info)); + result->send_fields((procedure ? curr_join->procedure_fields_list : + *curr_fields_list), + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); + error= do_select(curr_join, curr_fields_list, NULL, procedure); + thd->limit_found_rows= curr_join->send_records; + } + /* Accumulate the counts from all join iterations of all join parts. */ thd->examined_row_count+= curr_join->examined_rows; DBUG_PRINT("counts", ("thd->examined_row_count: %lu", @@ -1627,9 +2012,9 @@ JOIN::exec() */ int -JOIN::cleanup() +JOIN::destroy() { - DBUG_ENTER("JOIN::cleanup"); + DBUG_ENTER("JOIN::destroy"); select_lex->join= 0; if (tmp_join) @@ -1644,11 +2029,11 @@ JOIN::cleanup() } tmp_join->tmp_join= 0; tmp_table_param.copy_field=0; - DBUG_RETURN(tmp_join->cleanup()); + DBUG_RETURN(tmp_join->destroy()); } + cond_equal= 0; - lock=0; // It's faster to unlock later - join_free(1); + cleanup(1); if (exec_tmp_table1) free_tmp_table(thd, exec_tmp_table1); if (exec_tmp_table2) @@ -1656,33 +2041,75 @@ JOIN::cleanup() delete select; delete_dynamic(&keyuse); delete procedure; - for (SELECT_LEX_UNIT *lex_unit= select_lex->first_inner_unit(); - lex_unit != 0; - lex_unit= lex_unit->next_unit()) - { - error|= lex_unit->cleanup(); - } DBUG_RETURN(error); } +/* + An entry point to single-unit select (a select without UNION). 
-int + SYNOPSIS + mysql_select() + + thd thread handler + rref_pointer_array a reference to ref_pointer_array of + the top-level select_lex for this query + tables list of all tables used in this query. + The tables have been pre-opened. + wild_num number of wildcards used in the top level + select of this query. + For example statement + SELECT *, t1.*, catalog.t2.* FROM t0, t1, t2; + has 3 wildcards. + fields list of items in SELECT list of the top-level + select + e.g. SELECT a, b, c FROM t1 will have Item_field + for a, b and c in this list. + conds top level item of an expression representing + WHERE clause of the top level select + og_num total number of ORDER BY and GROUP BY clauses + arguments + order linked list of ORDER BY agruments + group linked list of GROUP BY arguments + having top level item of HAVING expression + proc_param list of PROCEDUREs + select_options select options (BIG_RESULT, etc) + result an instance of result set handling class. + This object is responsible for send result + set rows to the client or inserting them + into a table. + select_lex the only SELECT_LEX of this query + unit top-level UNIT of this query + UNIT is an artificial object created by the parser + for every SELECT clause. + e.g. SELECT * FROM t1 WHERE a1 IN (SELECT * FROM t2) + has 2 unions. 
+ + RETURN VALUE + FALSE success + TRUE an error +*/ + +bool mysql_select(THD *thd, Item ***rref_pointer_array, TABLE_LIST *tables, uint wild_num, List<Item> &fields, COND *conds, uint og_num, ORDER *order, ORDER *group, - Item *having, ORDER *proc_param, ulong select_options, + Item *having, ORDER *proc_param, ulonglong select_options, select_result *result, SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) { - int err; + bool err; bool free_join= 1; DBUG_ENTER("mysql_select"); + select_lex->context.resolve_in_select_list= TRUE; JOIN *join; if (select_lex->join != 0) { join= select_lex->join; - // is it single SELECT in derived table, called in derived table creation + /* + is it single SELECT in derived table, called in derived table + creation + */ if (select_lex->linkage != DERIVED_TABLE_TYPE || (select_options & SELECT_DESCRIBE)) { @@ -1691,14 +2118,14 @@ mysql_select(THD *thd, Item ***rref_pointer_array, //here is EXPLAIN of subselect or derived table if (join->change_result(result)) { - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } else { - if (join->prepare(rref_pointer_array, tables, wild_num, - conds, og_num, order, group, having, proc_param, - select_lex, unit)) + if (err= join->prepare(rref_pointer_array, tables, wild_num, + conds, og_num, order, group, having, proc_param, + select_lex, unit)) { goto err; } @@ -1710,12 +2137,12 @@ mysql_select(THD *thd, Item ***rref_pointer_array, else { if (!(join= new JOIN(thd, fields, select_options, result))) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); thd->proc_info="init"; thd->used_tables=0; // Updated by setup_fields - if (join->prepare(rref_pointer_array, tables, wild_num, - conds, og_num, order, group, having, proc_param, - select_lex, unit)) + if (err= join->prepare(rref_pointer_array, tables, wild_num, + conds, og_num, order, group, having, proc_param, + select_lex, unit)) { goto err; } @@ -1737,6 +2164,16 @@ mysql_select(THD *thd, Item ***rref_pointer_array, join->exec(); + if (thd->cursor && thd->cursor->is_open()) + 
{ + /* + A cursor was opened for the last sweep in exec(). + We are here only if this is mysql_select for top-level SELECT_LEX_UNIT + and there were no error. + */ + free_join= 0; + } + if (thd->lex->describe & DESCRIBE_EXTENDED) { select_lex->where= join->conds_history; @@ -1747,11 +2184,8 @@ err: if (free_join) { thd->proc_info="end"; - err= join->cleanup(); - if (thd->net.report_error) - err= -1; - delete join; - DBUG_RETURN(err); + err|= select_lex->cleanup(); + DBUG_RETURN(err || thd->net.report_error); } DBUG_RETURN(join->error); } @@ -1784,6 +2218,19 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select, DBUG_RETURN(HA_POS_ERROR); /* This shouldn't happend */ } +/* + This structure is used to collect info on potentially sargable + predicates in order to check whether they become sargable after + reading const tables. + We form a bitmap of indexes that can be used for sargable predicates. + Only such indexes are involved in range analysis. +*/ +typedef struct st_sargable_param +{ + Field *field; /* field against which to check sargability */ + Item **arg_value; /* values of potential keys for lookups */ + uint num_values; /* number of values in the above array */ +} SARGABLE_PARAM; /* Calculate the best possible join and initialize the join structure @@ -1794,10 +2241,11 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select, */ static bool -make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, +make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds, DYNAMIC_ARRAY *keyuse_array) { int error; + TABLE *table; uint i,table_count,const_count,key; table_map found_const_table_map, all_table_map, found_ref, refs; key_map const_ref, eq_part; @@ -1805,6 +2253,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, JOIN_TAB *stat,*stat_end,*s,**stat_ref; KEYUSE *keyuse,*start_keyuse; table_map outer_join=0; + SARGABLE_PARAM *sargables= 0; JOIN_TAB *stat_vector[MAX_TABLES+1]; 
DBUG_ENTER("make_join_statistics"); @@ -1821,15 +2270,18 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, found_const_table_map= all_table_map=0; const_count=0; - for (s=stat,i=0 ; tables ; s++,tables=tables->next,i++) + for (s= stat, i= 0; + tables; + s++, tables= tables->next_leaf, i++) { - TABLE *table; + TABLE_LIST *embedding= tables->embedding; stat_vector[i]=s; s->keys.init(); s->const_keys.init(); s->checked_keys.init(); s->needed_reg.init(); table_vector[i]=s->table=table=tables->table; + table->pos_in_table_list= tables; error= table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); if(error) { @@ -1839,34 +2291,49 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, table->quick_keys.clear_all(); table->reginfo.join_tab=s; table->reginfo.not_exists_optimize=0; - bzero((char*) table->const_key_parts, sizeof(key_part_map)*table->keys); + bzero((char*) table->const_key_parts, sizeof(key_part_map)*table->s->keys); all_table_map|= table->map; s->join=join; s->info=0; // For describe - if ((s->on_expr=tables->on_expr)) + + s->dependent= tables->dep_tables; + s->key_dependent= 0; + if (tables->schema_table) + table->file->records= 2; + + s->on_expr_ref= &tables->on_expr; + if (*s->on_expr_ref) { - /* Left join */ - if (!table->file->records) + /* s is the only inner table of an outer join */ + if (!table->file->records && !embedding) { // Empty table - s->key_dependent=s->dependent=0; // Ignore LEFT JOIN depend. + s->dependent= 0; // Ignore LEFT JOIN depend. 
set_position(join,const_count++,s,(KEYUSE*) 0); continue; } - s->key_dependent=s->dependent= - s->on_expr->used_tables() & ~(table->map); - if (table->outer_join & JOIN_TYPE_LEFT) - s->dependent|=stat_vector[i-1]->dependent | table_vector[i-1]->map; - if (tables->outer_join & JOIN_TYPE_RIGHT) - s->dependent|=tables->next->table->map; - outer_join|=table->map; + outer_join|= table->map; + s->embedding_map= 0; + for (;embedding; embedding= embedding->embedding) + s->embedding_map|= embedding->nested_join->nj_map; continue; } - if (tables->straight) // We don't have to move this - s->dependent= table_vector[i-1]->map | stat_vector[i-1]->dependent; - else - s->dependent=(table_map) 0; - s->key_dependent=(table_map) 0; - if ((table->system || table->file->records <= 1) && ! s->dependent && + if (embedding) + { + /* s belongs to a nested join, maybe to several embedded joins */ + s->embedding_map= 0; + do + { + NESTED_JOIN *nested_join= embedding->nested_join; + s->embedding_map|=nested_join->nj_map; + s->dependent|= embedding->dep_tables; + embedding= embedding->embedding; + outer_join|= nested_join->used_tables; + } + while (embedding); + continue; + } + + if ((table->s->system || table->file->records <= 1) && ! s->dependent && !(table->file->table_flags() & HA_NOT_EXACT_COUNT) && !table->fulltext_searched) { @@ -1876,45 +2343,44 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, stat_vector[i]=0; join->outer_join=outer_join; - /* - If outer join: Re-arrange tables in stat_vector so that outer join - tables are after all tables it is dependent of. - For example: SELECT * from A LEFT JOIN B ON B.c=C.c, C WHERE A.C=C.C - Will shift table B after table C. - */ - if (outer_join) + if (join->outer_join) { - table_map used_tables=0L; - for (i=0 ; i < join->tables-1 ; i++) + /* + Build transitive closure for relation 'to be dependent on'. 
+ This will speed up the plan search for many cases with outer joins, + as well as allow us to catch illegal cross references/ + Warshall's algorithm is used to build the transitive closure. + As we use bitmaps to represent the relation the complexity + of the algorithm is O((number of tables)^2). + */ + for (i= 0, s= stat ; i < table_count ; i++, s++) { - if (stat_vector[i]->dependent & ~used_tables) + for (uint j= 0 ; j < table_count ; j++) { - JOIN_TAB *save= stat_vector[i]; - uint j; - for (j=i+1; - j < join->tables && stat_vector[j]->dependent & ~used_tables; - j++) - { - JOIN_TAB *tmp=stat_vector[j]; // Move element up - stat_vector[j]=save; - save=tmp; - } - if (j == join->tables) - { - join->tables=0; // Don't use join->table - my_error(ER_WRONG_OUTER_JOIN,MYF(0)); - DBUG_RETURN(1); - } - stat_vector[i]=stat_vector[j]; - stat_vector[j]=save; + table= stat[j].table; + if (s->dependent & table->map) + s->dependent |= table->reginfo.join_tab->dependent; + } + if (s->dependent) + s->table->maybe_null= 1; + } + /* Catch illegal cross references for outer joins */ + for (i= 0, s= stat ; i < table_count ; i++, s++) + { + if (s->dependent & s->table->map) + { + join->tables=0; // Don't use join->table + my_message(ER_WRONG_OUTER_JOIN, ER(ER_WRONG_OUTER_JOIN), MYF(0)); + DBUG_RETURN(1); } - used_tables|= stat_vector[i]->table->map; + s->key_dependent= s->dependent; } } if (conds || outer_join) if (update_ref_and_keys(join->thd, keyuse_array, stat, join->tables, - conds, ~outer_join, join->select_lex)) + conds, join->cond_equal, + ~outer_join, join->select_lex, &sargables)) DBUG_RETURN(1); /* Read tables with 0 or 1 rows (system tables) */ @@ -1941,6 +2407,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, int ref_changed; do { + more_const_tables_found: ref_changed = 0; found_ref=0; @@ -1951,20 +2418,55 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, for (JOIN_TAB **pos=stat_vector+const_count ; (s= *pos) ; pos++) { - TABLE 
*table=s->table; + table=s->table; + + /* + If equi-join condition by a key is null rejecting and after a + substitution of a const table the key value happens to be null + then we can state that there are no matches for this equi-join. + */ + if ((keyuse= s->keyuse) && *s->on_expr_ref && !s->embedding_map) + { + /* + When performing an outer join operation if there are no matching rows + for the single row of the outer table all the inner tables are to be + null complemented and thus considered as constant tables. + Here we apply this consideration to the case of outer join operations + with a single inner table only because the case with nested tables + would require a more thorough analysis. + TODO. Apply single row substitution to null complemented inner tables + for nested outer join operations. + */ + while (keyuse->table == table) + { + if (!(keyuse->val->used_tables() & ~join->const_table_map) && + keyuse->val->is_null() && keyuse->null_rejecting) + { + s->type= JT_CONST; + mark_as_null_row(table); + found_const_table_map|= table->map; + join->const_table_map|= table->map; + set_position(join,const_count++,s,(KEYUSE*) 0); + goto more_const_tables_found; + } + keyuse++; + } + } + if (s->dependent) // If dependent on some table { // All dep. 
must be constants if (s->dependent & ~(found_const_table_map)) continue; if (table->file->records <= 1L && - !(table->file->table_flags() & HA_NOT_EXACT_COUNT)) + !(table->file->table_flags() & HA_NOT_EXACT_COUNT) && + !table->pos_in_table_list->embedding) { // system table int tmp= 0; s->type=JT_SYSTEM; join->const_table_map|=table->map; set_position(join,const_count++,s,(KEYUSE*) 0); - if ((tmp= join_read_const_table(s,join->positions+const_count-1))) + if ((tmp= join_read_const_table(s, join->positions+const_count-1))) { if (tmp > 0) DBUG_RETURN(1); // Fatal error @@ -2001,38 +2503,63 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, } while (keyuse->table == table && keyuse->key == key); if (eq_part.is_prefix(table->key_info[key].key_parts) && - ((table->key_info[key].flags & (HA_NOSAME | HA_END_SPACE_KEY)) == - HA_NOSAME) && - !table->fulltext_searched) + !table->fulltext_searched && + !table->pos_in_table_list->embedding) { - if (const_ref == eq_part) - { // Found everything for ref. - int tmp; - ref_changed = 1; - s->type= JT_CONST; - join->const_table_map|=table->map; - set_position(join,const_count++,s,start_keyuse); - if (create_ref_for_key(join, s, start_keyuse, - found_const_table_map)) - DBUG_RETURN(1); - if ((tmp=join_read_const_table(s, - join->positions+const_count-1))) - { - if (tmp > 0) - DBUG_RETURN(1); // Fatal error + if ((table->key_info[key].flags & (HA_NOSAME | HA_END_SPACE_KEY)) + == HA_NOSAME) + { + if (const_ref == eq_part) + { // Found everything for ref. 
+ int tmp; + ref_changed = 1; + s->type= JT_CONST; + join->const_table_map|=table->map; + set_position(join,const_count++,s,start_keyuse); + if (create_ref_for_key(join, s, start_keyuse, + found_const_table_map)) + DBUG_RETURN(1); + if ((tmp=join_read_const_table(s, + join->positions+const_count-1))) + { + if (tmp > 0) + DBUG_RETURN(1); // Fatal error + } + else + found_const_table_map|= table->map; + break; } else - found_const_table_map|= table->map; - break; + found_ref|= refs; // Table is const if all refs are const } - else - found_ref|= refs; // Table is const if all refs are const - } + else if (const_ref == eq_part) + s->const_keys.set_bit(key); + } } } } } while (join->const_table_map & found_ref && ref_changed); + /* + Update info on indexes that can be used for search lookups as + reading const tables may has added new sargable predicates. + */ + if (const_count && sargables) + { + for( ; sargables->field ; sargables++) + { + Field *field= sargables->field; + JOIN_TAB *join_tab= field->table->reginfo.join_tab; + key_map possible_keys= field->key_start; + possible_keys.intersect(field->table->keys_in_use_for_query); + bool is_const= 1; + for (uint j=0; j < sargables->num_values; j++) + is_const&= sargables->arg_value[j]->const_item(); + if (is_const) + join_tab[0].const_keys.merge(possible_keys); + } + } + /* Calc how many (possible) matched records in each table */ for (s=stat ; s < stat_end ; s++) @@ -2057,14 +2584,23 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, if (s->worst_seeks < 2.0) // Fix for small tables s->worst_seeks=2.0; - if (! s->const_keys.is_clear_all()) + /* + Add to stat->const_keys those indexes for which all group fields or + all select distinct fields participate in one index. 
+ */ + add_group_and_distinct_keys(join, s); + + if (!s->const_keys.is_clear_all() && + !s->table->pos_in_table_list->embedding) { ha_rows records; SQL_SELECT *select; select= make_select(s->table, found_const_table_map, found_const_table_map, - s->on_expr ? s->on_expr : conds, - &error); + *s->on_expr_ref ? *s->on_expr_ref : conds, + 1, &error); + if (!select) + DBUG_RETURN(1); records= get_quick_record_count(join->thd, select, s->table, &s->const_keys, join->row_limit); s->quick=select->quick; @@ -2081,7 +2617,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, join->const_table_map|= s->table->map; set_position(join,const_count++,s,(KEYUSE*) 0); s->type= JT_CONST; - if (s->on_expr) + if (*s->on_expr_ref) { /* Generate empty row */ s->info= "Impossible ON condition"; @@ -2099,17 +2635,17 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, } } - /* Find best combination and return it */ join->join_tab=stat; join->map2table=stat_ref; join->table= join->all_tables=table_vector; join->const_tables=const_count; join->found_const_table_map=found_const_table_map; + /* Find an optimal join order of the non-constant tables. */ if (join->const_tables != join->tables) { optimize_keyuse(join, keyuse_array); - find_best_combination(join,all_table_map & ~join->const_table_map); + choose_plan(join, all_table_map & ~join->const_table_map); } else { @@ -2117,6 +2653,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, sizeof(POSITION)*join->const_tables); join->best_read=1.0; } + /* Generate an execution plan from the found optimal join order. */ DBUG_RETURN(join->thd->killed || get_best_combination(join)); } @@ -2140,6 +2677,7 @@ typedef struct key_field_t { // Used when finding key fields when val IS NULL. 
*/ bool null_rejecting; + bool *cond_guard; /* See KEYUSE::cond_guard */ } KEY_FIELD; /* Values in optimize */ @@ -2182,7 +2720,19 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, { if (old->field == new_fields->field) { - if (new_fields->val->used_tables()) + /* + NOTE: below const_item() call really works as "!used_tables()", i.e. + it can return FALSE where it is feasible to make it return TRUE. + + The cause is as follows: Some of the tables are already known to be + const tables (the detection code is in make_join_statistics(), + above the update_ref_and_keys() call), but we didn't propagate + information about this: TABLE::const_table is not set to TRUE, and + Item::update_used_tables() hasn't been called for each item. + The result of this is that we're missing some 'ref' accesses. + TODO: OptimizerTeam: Fix this + */ + if (!new_fields->val->const_item()) { /* If the value matches, we can use the key reference. @@ -2212,7 +2762,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, new_fields->null_rejecting); } else if (old->eq_func && new_fields->eq_func && - ((!old->val->used_tables() && old->val->is_null()) || + ((old->val->const_item() && old->val->is_null()) || new_fields->val->is_null())) { /* field = expression OR field IS NULL */ @@ -2265,10 +2815,12 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, add_key_field() key_fields Pointer to add key, if usable and_level And level, to be stored in KEY_FIELD + cond Condition predicate field Field used in comparision eq_func True if we used =, <=> or IS NULL value Value used for comparison with field usable_tables Tables which can be used for key optimization + sargables IN/OUT Array of found sargable candidates NOTES If we are doing a NOT NULL comparison on a NOT NULL field in a outer join @@ -2280,8 +2832,8 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, static void add_key_field(KEY_FIELD **key_fields,uint 
and_level, Item_func *cond, - Field *field,bool eq_func,Item **value, uint num_values, - table_map usable_tables) + Field *field, bool eq_func, Item **value, uint num_values, + table_map usable_tables, SARGABLE_PARAM **sargables) { uint exists_optimize= 0; if (!(field->flags & PART_KEY_FLAG)) @@ -2291,6 +2843,7 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, !field->table->maybe_null || field->null_ptr) return; // Not a key. Skip it exists_optimize= KEY_OPTIMIZE_EXISTS; + DBUG_ASSERT(num_values == 1); } else { @@ -2336,13 +2889,45 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, is_const&= value[i]->const_item(); if (is_const) stat[0].const_keys.merge(possible_keys); + else if (!eq_func) + { + /* + Save info to be able check whether this predicate can be + considered as sargable for range analisis after reading const tables. + We do not save info about equalities as update_const_equal_items + will take care of updating info on keys from sargable equalities. + */ + (*sargables)--; + (*sargables)->field= field; + (*sargables)->arg_value= value; + (*sargables)->num_values= num_values; + } /* We can't always use indexes when comparing a string index to a number. cmp_type() is checked to allow compare of dates to numbers. eq_func is NEVER true when num_values > 1 */ if (!eq_func) - return; + { + /* + Additional optimization: if we're processing + "t.key BETWEEN c1 AND c1" then proceed as if we were processing + "t.key = c1". + TODO: This is a very limited fix. A more generic fix is possible. 
+ There are 2 options: + A) Make equality propagation code be able to handle BETWEEN + (including cases like t1.key BETWEEN t2.key AND t3.key) + B) Make range optimizer to infer additional "t.key = c" equalities + and use them in equality propagation process (see details in + OptimizerKBAndTodo) + */ + if ((cond->functype() != Item_func::BETWEEN) || + ((Item_func_between*) cond)->negated || + !value[0]->eq(value[1], field->binary())) + return; + eq_func= TRUE; + } + if (field->result_type() == STRING_RESULT) { if ((*value)->result_type() != STRING_RESULT) @@ -2355,21 +2940,14 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, /* We can't use indexes if the effective collation of the operation differ from the field collation. - - We can also not used index on a text column, as the column may - contain 'x' 'x\t' 'x ' and 'read_next_same' will stop after - 'x' when searching for WHERE col='x ' */ if (field->cmp_type() == STRING_RESULT && - (((Field_str*)field)->charset() != cond->compare_collation() || - ((*value)->type() != Item::NULL_ITEM && - (field->flags & BLOB_FLAG) && !field->binary()))) + ((Field_str*)field)->charset() != cond->compare_collation()) return; } } } } - DBUG_ASSERT(num_values == 1); /* For the moment eq_func is always true. This slot is reserved for future extensions where we want to remembers other things than just eq comparisons @@ -2388,25 +2966,72 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, Item_func *cond, We use null_rejecting in add_not_null_conds() to add 'othertbl.field IS NOT NULL' to tab->select_cond. 
*/ - (*key_fields)->null_rejecting= ((cond->functype() == Item_func::EQ_FUNC) && - ((*value)->type() == Item::FIELD_ITEM) && + (*key_fields)->null_rejecting= ((cond->functype() == Item_func::EQ_FUNC || + cond->functype() == Item_func::MULT_EQUAL_FUNC) && + ((*value)->type() == Item::FIELD_ITEM) && ((Item_field*)*value)->field->maybe_null()); + (*key_fields)->cond_guard= NULL; (*key_fields)++; } /* - SYNOPSIS - add_key_fields() - key_fields Add KEY_FIELD entries to this array (and move the - pointer) - and_level AND-level (a value that is different for every n-way - AND operation) - cond Condition to analyze - usable_tables Value to pass to add_key_field + Add possible keys to array of possible keys originated from a simple predicate + + SYNPOSIS + add_key_equal_fields() + key_fields Pointer to add key, if usable + and_level And level, to be stored in KEY_FIELD + cond Condition predicate + field Field used in comparision + eq_func True if we used =, <=> or IS NULL + value Value used for comparison with field + Is NULL for BETWEEN and IN + usable_tables Tables which can be used for key optimization + sargables IN/OUT Array of found sargable candidates + + NOTES + If field items f1 and f2 belong to the same multiple equality and + a key is added for f1, the the same key is added for f2. 
+ + RETURN + *key_fields is incremented if we stored a key in the array */ + static void -add_key_fields(KEY_FIELD **key_fields,uint *and_level, - COND *cond, table_map usable_tables) +add_key_equal_fields(KEY_FIELD **key_fields, uint and_level, + Item_func *cond, Item_field *field_item, + bool eq_func, Item **val, + uint num_values, table_map usable_tables, + SARGABLE_PARAM **sargables) +{ + Field *field= field_item->field; + add_key_field(key_fields, and_level, cond, field, + eq_func, val, num_values, usable_tables, sargables); + Item_equal *item_equal= field_item->item_equal; + if (item_equal) + { + /* + Add to the set of possible key values every substitution of + the field for an equal field included into item_equal + */ + Item_equal_iterator it(*item_equal); + Item_field *item; + while ((item= it++)) + { + if (!field->eq(item->field)) + { + add_key_field(key_fields, and_level, cond, item->field, + eq_func, val, num_values, usable_tables, + sargables); + } + } + } +} + +static void +add_key_fields(JOIN *join, KEY_FIELD **key_fields, uint *and_level, + COND *cond, table_map usable_tables, + SARGABLE_PARAM **sargables) { if (cond->type() == Item_func::COND_ITEM) { @@ -2417,28 +3042,57 @@ add_key_fields(KEY_FIELD **key_fields,uint *and_level, { Item *item; while ((item=li++)) - add_key_fields(key_fields,and_level,item,usable_tables); + add_key_fields(join, key_fields, and_level, item, usable_tables, + sargables); for (; org_key_fields != *key_fields ; org_key_fields++) org_key_fields->level= *and_level; } else { (*and_level)++; - add_key_fields(key_fields,and_level,li++,usable_tables); + add_key_fields(join, key_fields, and_level, li++, usable_tables, + sargables); Item *item; while ((item=li++)) { KEY_FIELD *start_key_fields= *key_fields; (*and_level)++; - add_key_fields(key_fields,and_level,item,usable_tables); + add_key_fields(join, key_fields, and_level, item, usable_tables, + sargables); *key_fields=merge_key_fields(org_key_fields,start_key_fields, 
*key_fields,++(*and_level)); } } return; } - /* If item is of type 'field op field/constant' add it to key_fields */ + /* + Subquery optimization: Conditions that are pushed down into subqueries + are wrapped into Item_func_trig_cond. We process the wrapped condition + but need to set cond_guard for KEYUSE elements generated from it. + */ + { + if (cond->type() == Item::FUNC_ITEM && + ((Item_func*)cond)->functype() == Item_func::TRIG_COND_FUNC) + { + Item *cond_arg= ((Item_func*)cond)->arguments()[0]; + if (!join->group_list && !join->order && + join->unit->item && + join->unit->item->substype() == Item_subselect::IN_SUBS && + !join->unit->first_select()->next_select()) + { + KEY_FIELD *save= *key_fields; + add_key_fields(join, key_fields, and_level, cond_arg, usable_tables, + sargables); + // Indicate that this ref access candidate is for subquery lookup: + for (; save != *key_fields; save++) + save->cond_guard= ((Item_func_trig_cond*)cond)->get_trig_var(); + } + return; + } + } + + /* If item is of type 'field op field/constant' add it to key_fields */ if (cond->type() != Item::FUNC_ITEM) return; Item_func *cond_func= (Item_func*) cond; @@ -2446,17 +3100,44 @@ add_key_fields(KEY_FIELD **key_fields,uint *and_level, case Item_func::OPTIMIZE_NONE: break; case Item_func::OPTIMIZE_KEY: - // BETWEEN, IN, NOT + { + Item **values; + // BETWEEN, IN, NE if (cond_func->key_item()->real_item()->type() == Item::FIELD_ITEM && !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) - add_key_field(key_fields,*and_level,cond_func, - ((Item_field*)(cond_func->key_item()->real_item()))->field, - cond_func->argument_count() == 2 && - cond_func->functype() == Item_func::IN_FUNC && - !((Item_func_in*)cond_func)->negated, - cond_func->arguments()+1, cond_func->argument_count()-1, - usable_tables); + { + values= cond_func->arguments()+1; + if (cond_func->functype() == Item_func::NE_FUNC && + cond_func->arguments()[1]->real_item()->type() == Item::FIELD_ITEM && + 
!(cond_func->arguments()[0]->used_tables() & OUTER_REF_TABLE_BIT)) + values--; + DBUG_ASSERT(cond_func->functype() != Item_func::IN_FUNC || + cond_func->argument_count() != 2); + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->key_item()->real_item()), + 0, values, + cond_func->argument_count()-1, + usable_tables, sargables); + } + if (cond_func->functype() == Item_func::BETWEEN) + { + values= cond_func->arguments(); + for (uint i= 1 ; i < cond_func->argument_count() ; i++) + { + Item_field *field_item; + if (cond_func->arguments()[i]->real_item()->type() == Item::FIELD_ITEM + && + !(cond_func->arguments()[i]->used_tables() & OUTER_REF_TABLE_BIT)) + { + field_item= (Item_field *) (cond_func->arguments()[i]->real_item()); + add_key_equal_fields(key_fields, *and_level, cond_func, + field_item, 0, values, 1, usable_tables, + sargables); + } + } + } break; + } case Item_func::OPTIMIZE_OP: { bool equal_func=(cond_func->functype() == Item_func::EQ_FUNC || @@ -2465,21 +3146,21 @@ add_key_fields(KEY_FIELD **key_fields,uint *and_level, if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM && !(cond_func->arguments()[0]->used_tables() & OUTER_REF_TABLE_BIT)) { - add_key_field(key_fields,*and_level,cond_func, - ((Item_field*) (cond_func->arguments()[0])->real_item()) - ->field, - equal_func, - cond_func->arguments()+1, 1, usable_tables); + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->arguments()[0])->real_item(), + equal_func, + cond_func->arguments()+1, 1, usable_tables, + sargables); } if (cond_func->arguments()[1]->real_item()->type() == Item::FIELD_ITEM && cond_func->functype() != Item_func::LIKE_FUNC && !(cond_func->arguments()[1]->used_tables() & OUTER_REF_TABLE_BIT)) { - add_key_field(key_fields,*and_level,cond_func, - ((Item_field*) (cond_func->arguments()[1])->real_item()) - ->field, - equal_func, - cond_func->arguments(),1,usable_tables); + add_key_equal_fields(key_fields, 
*and_level, cond_func, + (Item_field*) (cond_func->arguments()[1])->real_item(), + equal_func, + cond_func->arguments(),1,usable_tables, + sargables); } break; } @@ -2491,15 +3172,56 @@ add_key_fields(KEY_FIELD **key_fields,uint *and_level, Item *tmp=new Item_null; if (unlikely(!tmp)) // Should never be true return; - add_key_field(key_fields,*and_level,cond_func, - ((Item_field*) (cond_func->arguments()[0])->real_item()) - ->field, + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->arguments()[0])->real_item(), cond_func->functype() == Item_func::ISNULL_FUNC, - &tmp, 1, usable_tables); + &tmp, 1, usable_tables, sargables); + } + break; + case Item_func::OPTIMIZE_EQUAL: + Item_equal *item_equal= (Item_equal *) cond; + Item *const_item= item_equal->get_const(); + Item_equal_iterator it(*item_equal); + Item_field *item; + if (const_item) + { + /* + For each field field1 from item_equal consider the equality + field1=const_item as a condition allowing an index access of the table + with field1 by the keys value of field1. + */ + while ((item= it++)) + { + add_key_field(key_fields, *and_level, cond_func, item->field, + TRUE, &const_item, 1, usable_tables, sargables); + } + } + else + { + /* + Consider all pairs of different fields included into item_equal. + For each of them (field1, field1) consider the equality + field1=field2 as a condition allowing an index access of the table + with field1 by the keys value of field2. 
+ */ + Item_equal_iterator fi(*item_equal); + while ((item= fi++)) + { + Field *field= item->field; + while ((item= it++)) + { + if (!field->eq(item->field)) + { + add_key_field(key_fields, *and_level, cond_func, field, + TRUE, (Item **) &item, 1, usable_tables, + sargables); + } + } + it.rewind(); + } } break; } - return; } /* @@ -2524,7 +3246,7 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field) if (key_field->eq_func && !(key_field->optimize & KEY_OPTIMIZE_EXISTS)) { - for (uint key=0 ; key < form->keys ; key++) + for (uint key=0 ; key < form->s->keys ; key++) { if (!(form->keys_in_use_for_query.is_set(key))) continue; @@ -2544,6 +3266,7 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array,KEY_FIELD *key_field) keyuse.used_tables=key_field->val->used_tables(); keyuse.optimize= key_field->optimize & KEY_OPTIMIZE_REF_OR_NULL; keyuse.null_rejecting= key_field->null_rejecting; + keyuse.cond_guard= key_field->cond_guard; VOID(insert_dynamic(keyuse_array,(gptr) &keyuse)); } } @@ -2574,14 +3297,14 @@ add_ft_keys(DYNAMIC_ARRAY *keyuse_array, Item_func *arg0=(Item_func *)(func->arguments()[0]), *arg1=(Item_func *)(func->arguments()[1]); if (arg1->const_item() && - ((functype == Item_func::GE_FUNC && arg1->val()> 0) || - (functype == Item_func::GT_FUNC && arg1->val()>=0)) && + ((functype == Item_func::GE_FUNC && arg1->val_real() > 0) || + (functype == Item_func::GT_FUNC && arg1->val_real() >=0)) && arg0->type() == Item::FUNC_ITEM && arg0->functype() == Item_func::FT_FUNC) cond_func=(Item_func_match *) arg0; else if (arg0->const_item() && - ((functype == Item_func::LE_FUNC && arg0->val()> 0) || - (functype == Item_func::LT_FUNC && arg0->val()>=0)) && + ((functype == Item_func::LE_FUNC && arg0->val_real() > 0) || + (functype == Item_func::LT_FUNC && arg0->val_real() >=0)) && arg1->type() == Item::FUNC_ITEM && arg1->functype() == Item_func::FT_FUNC) cond_func=(Item_func_match *) arg1; @@ -2636,6 +3359,59 @@ sort_keyuse(KEYUSE *a,KEYUSE *b) /* + Add to KEY_FIELD array 
all 'ref' access candidates within nested join + + SYNPOSIS + add_key_fields_for_nj() + nested_join_table IN Nested join pseudo-table to process + end INOUT End of the key field array + and_level INOUT And-level + sargables IN/OUT Array of found sargable candidates + + DESCRIPTION + This function populates KEY_FIELD array with entries generated from the + ON condition of the given nested join, and does the same for nested joins + contained within this nested join. + + NOTES + We can add accesses to the tables that are direct children of this nested + join (1), and are not inner tables w.r.t their neighbours (2). + + Example for #1 (outer brackets pair denotes nested join this function is + invoked for): + ... LEFT JOIN (t1 LEFT JOIN (t2 ... ) ) ON cond + Example for #2: + ... LEFT JOIN (t1 LEFT JOIN t2 ) ON cond + In examples 1-2 for condition cond, we can add 'ref' access candidates to + t1 only. + Example #3: + ... LEFT JOIN (t1, t2 LEFT JOIN t3 ON inner_cond) ON cond + Here we can add 'ref' access candidates for t1 and t2, but not for t3. 
+*/ + +static void add_key_fields_for_nj(JOIN *join, TABLE_LIST *nested_join_table, + KEY_FIELD **end, uint *and_level, + SARGABLE_PARAM **sargables) +{ + List_iterator<TABLE_LIST> li(nested_join_table->nested_join->join_list); + table_map tables= 0; + TABLE_LIST *table; + DBUG_ASSERT(nested_join_table->nested_join); + + while ((table= li++)) + { + if (table->nested_join) + add_key_fields_for_nj(join, table, end, and_level, sargables); + else + if (!table->on_expr) + tables |= table->table->map; + } + add_key_fields(join, end, and_level, nested_join_table->on_expr, tables, + sargables); +} + + +/* Update keyuse array with all possible keys we can use to fetch rows SYNOPSIS @@ -2646,9 +3422,10 @@ sort_keyuse(KEYUSE *a,KEYUSE *b) tables Number of tables in join cond WHERE condition (note that the function analyzes join_tab[i]->on_expr too) - normal_tables tables not inner w.r.t some outer join (ones for which + normal_tables Tables not inner w.r.t some outer join (ones for which we can make ref access based the WHERE clause) select_lex current SELECT + sargables OUT Array of found sargable candidates RETURN 0 - OK @@ -2657,23 +3434,56 @@ sort_keyuse(KEYUSE *a,KEYUSE *b) static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, - uint tables, COND *cond, table_map normal_tables, - SELECT_LEX *select_lex) + uint tables, COND *cond, COND_EQUAL *cond_equal, + table_map normal_tables, SELECT_LEX *select_lex, + SARGABLE_PARAM **sargables) { uint and_level,i,found_eq_constant; KEY_FIELD *key_fields, *end, *field; - - if (!(key_fields=(KEY_FIELD*) - thd->alloc(sizeof(key_fields[0])* - (thd->lex->current_select->cond_count+1)*2))) + uint sz; + uint m= 1; + + if (cond_equal && cond_equal->max_members) + m= cond_equal->max_members; + + /* + We use the same piece of memory to store both KEY_FIELD + and SARGABLE_PARAM structure. + KEY_FIELD values are placed at the beginning this memory + while SARGABLE_PARAM values are put at the end. 
+ All predicates that are used to fill arrays of KEY_FIELD + and SARGABLE_PARAM structures have at most 2 arguments + except BETWEEN predicates that have 3 arguments and + IN predicates. + This any predicate if it's not BETWEEN/IN can be used + directly to fill at most 2 array elements, either of KEY_FIELD + or SARGABLE_PARAM type. For a BETWEEN predicate 3 elements + can be filled as this predicate is considered as + saragable with respect to each of its argument. + An IN predicate can require at most 1 element as currently + it is considered as sargable only for its first argument. + Multiple equality can add elements that are filled after + substitution of field arguments by equal fields. There + can be not more than cond_equal->max_members such substitutions. + */ + sz= max(sizeof(KEY_FIELD),sizeof(SARGABLE_PARAM))* + (((thd->lex->current_select->cond_count+1)*2 + + thd->lex->current_select->between_count)*m+1); + if (!(key_fields=(KEY_FIELD*) thd->alloc(sz))) return TRUE; /* purecov: inspected */ and_level= 0; field= end= key_fields; + *sargables= (SARGABLE_PARAM *) key_fields + + (sz - sizeof((*sargables)[0].field))/sizeof(SARGABLE_PARAM); + /* set a barrier for the array of SARGABLE_PARAM */ + (*sargables)[0].field= 0; + if (my_init_dynamic_array(keyuse,sizeof(KEYUSE),20,64)) return TRUE; if (cond) { - add_key_fields(&end,&and_level,cond,normal_tables); + add_key_fields(join_tab->join, &end, &and_level, cond, normal_tables, + sargables); for (; field != end ; field++) { add_key_part(keyuse,field); @@ -2685,12 +3495,33 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, } for (i=0 ; i < tables ; i++) { - if (join_tab[i].on_expr) + /* + Block the creation of keys for inner tables of outer joins. + Here only the outer joins that can not be converted to + inner joins are left and all nests that can be eliminated + are flattened. 
+ In the future when we introduce conditional accesses + for inner tables in outer joins these keys will be taken + into account as well. + */ + if (*join_tab[i].on_expr_ref) + add_key_fields(join_tab->join, &end, &and_level, + *join_tab[i].on_expr_ref, + join_tab[i].table->map, sargables); + } + + /* Process ON conditions for the nested joins */ + { + List_iterator<TABLE_LIST> li(*join_tab->join->join_list); + TABLE_LIST *table; + while ((table= li++)) { - add_key_fields(&end,&and_level,join_tab[i].on_expr, - join_tab[i].table->map); + if (table->nested_join) + add_key_fields_for_nj(join_tab->join, table, &end, &and_level, + sargables); } } + /* fill keyuse with found key parts */ for ( ; field != end ; field++) add_key_part(keyuse,field); @@ -2701,23 +3532,27 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, } /* - Special treatment for ft-keys. - Remove the following things from KEYUSE: + Sort the array of possible keys and remove the following key parts: - ref if there is a keypart which is a ref and a const. - - keyparts without previous keyparts. + (e.g. if there is a key(a,b) and the clause is a=3 and b=7 and b=t2.d, + then we skip the key part corresponding to b=t2.d) + - keyparts without previous keyparts + (e.g. if there is a key(a,b,c) but only b < 5 (or a=2 and c < 3) is + used in the query, we drop the partial key parts from consideration). + Special treatment for ft-keys. 
*/ if (keyuse->elements) { - KEYUSE end,*prev,*save_pos,*use; + KEYUSE key_end,*prev,*save_pos,*use; qsort(keyuse->buffer,keyuse->elements,sizeof(KEYUSE), (qsort_cmp) sort_keyuse); - bzero((char*) &end,sizeof(end)); /* Add for easy testing */ - VOID(insert_dynamic(keyuse,(gptr) &end)); + bzero((char*) &key_end,sizeof(key_end)); /* Add for easy testing */ + VOID(insert_dynamic(keyuse,(gptr) &key_end)); use=save_pos=dynamic_element(keyuse,0,KEYUSE*); - prev=&end; + prev= &key_end; found_eq_constant=0; for (i=0 ; i < keyuse->elements-1 ; i++,use++) { @@ -2745,14 +3580,14 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, save_pos++; } i=(uint) (save_pos-(KEYUSE*) keyuse->buffer); - VOID(set_dynamic(keyuse,(gptr) &end,i)); + VOID(set_dynamic(keyuse,(gptr) &key_end,i)); keyuse->elements=i; } return FALSE; } /* - Update some values in keyuse for faster find_best_combination() loop + Update some values in keyuse for faster choose_plan() loop */ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array) @@ -2793,6 +3628,68 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array) } +/* + Discover the indexes that can be used for GROUP BY or DISTINCT queries. + + SYNOPSIS + add_group_and_distinct_keys() + join + join_tab + + DESCRIPTION + If the query has a GROUP BY clause, find all indexes that contain all + GROUP BY fields, and add those indexes to join->const_keys. + If the query has a DISTINCT clause, find all indexes that contain all + SELECT fields, and add those indexes to join->const_keys. + This allows later on such queries to be processed by a + QUICK_GROUP_MIN_MAX_SELECT. 
+ + RETURN + None +*/ + +static void +add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab) +{ + List<Item_field> indexed_fields; + List_iterator<Item_field> indexed_fields_it(indexed_fields); + ORDER *cur_group; + Item_field *cur_item; + key_map possible_keys(0); + + if (join->group_list) + { /* Collect all query fields referenced in the GROUP clause. */ + for (cur_group= join->group_list; cur_group; cur_group= cur_group->next) + (*cur_group->item)->walk(&Item::collect_item_field_processor, + (byte*) &indexed_fields); + } + else if (join->select_distinct) + { /* Collect all query fields referenced in the SELECT clause. */ + List<Item> &select_items= join->fields_list; + List_iterator<Item> select_items_it(select_items); + Item *item; + while ((item= select_items_it++)) + item->walk(&Item::collect_item_field_processor, (byte*) &indexed_fields); + } + else + return; + + if (indexed_fields.elements == 0) + return; + + /* Intersect the keys of all group fields. */ + cur_item= indexed_fields_it++; + possible_keys.merge(cur_item->field->part_of_key); + while ((cur_item= indexed_fields_it++)) + { + possible_keys.intersect(cur_item->field->part_of_key); + } + + if (!possible_keys.is_clear_all()) + join_tab->const_keys.merge(possible_keys); +} + + /***************************************************************************** Go through all combinations of not marked tables and find the one which uses least records @@ -2820,24 +3717,1136 @@ set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key) } +/* + Find the best access path for an extension of a partial execution plan and + add this path to the plan. 
+ + SYNOPSIS + best_access_path() + join pointer to the structure providing all context info + for the query + s the table to be joined by the function + thd thread for the connection that submitted the query + remaining_tables set of tables not included into the partial plan yet + idx the length of the partial plan + record_count estimate for the number of records returned by the partial + plan + read_time the cost of the partial plan + + DESCRIPTION + The function finds the best access path to table 's' from the passed + partial plan where an access path is the general term for any means to + access the data in 's'. An access path may use either an index or a scan, + whichever is cheaper. The input partial plan is passed via the array + 'join->positions' of length 'idx'. The chosen access method for 's' and its + cost are stored in 'join->positions[idx]'. + + RETURN + None +*/ + +static void +best_access_path(JOIN *join, + JOIN_TAB *s, + THD *thd, + table_map remaining_tables, + uint idx, + double record_count, + double read_time) +{ + KEYUSE *best_key= 0; + uint best_max_key_part= 0; + my_bool found_constraint= 0; + double best= DBL_MAX; + double best_time= DBL_MAX; + double records= DBL_MAX; + double tmp; + ha_rows rec; + DBUG_ENTER("best_access_path"); + + if (s->keyuse) + { /* Use key if possible */ + TABLE *table= s->table; + KEYUSE *keyuse,*start_key=0; + double best_records= DBL_MAX; + uint max_key_part=0; + + /* Test how we can use keys */ + rec= s->records/MATCHING_ROWS_IN_OTHER_TABLE; // Assumed records/key + for (keyuse=s->keyuse ; keyuse->table == table ;) + { + key_part_map found_part= 0; + table_map found_ref= 0; + uint key= keyuse->key; + KEY *keyinfo= table->key_info+key; + bool ft_key= (keyuse->keypart == FT_KEYPART); + /* Bitmap of keyparts where the ref access is over 'keypart=const': */ + key_part_map const_part= 0; + /* The or-null keypart in ref-or-null access: */ + key_part_map ref_or_null_part= 0; + + /* Calculate how many key segments of 
the current key we can use */ + start_key= keyuse; + do + { /* for each keypart */ + uint keypart= keyuse->keypart; + table_map best_part_found_ref= 0; + double best_prev_record_reads= DBL_MAX; + do + { + if (!(remaining_tables & keyuse->used_tables) && + !(ref_or_null_part && (keyuse->optimize & + KEY_OPTIMIZE_REF_OR_NULL))) + { + found_part|= keyuse->keypart_map; + if (!(keyuse->used_tables & ~join->const_table_map)) + const_part|= keyuse->keypart_map; + double tmp2= prev_record_reads(join, (found_ref | + keyuse->used_tables)); + if (tmp2 < best_prev_record_reads) + { + best_part_found_ref= keyuse->used_tables & ~join->const_table_map; + best_prev_record_reads= tmp2; + } + if (rec > keyuse->ref_table_rows) + rec= keyuse->ref_table_rows; + /* + If there is one 'key_column IS NULL' expression, we can + use this ref_or_null optimisation of this field + */ + if (keyuse->optimize & KEY_OPTIMIZE_REF_OR_NULL) + ref_or_null_part |= keyuse->keypart_map; + } + keyuse++; + } while (keyuse->table == table && keyuse->key == key && + keyuse->keypart == keypart); + found_ref|= best_part_found_ref; + } while (keyuse->table == table && keyuse->key == key); + + /* + Assume that that each key matches a proportional part of table. + */ + if (!found_part && !ft_key) + continue; // Nothing usable found + + if (rec < MATCHING_ROWS_IN_OTHER_TABLE) + rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables + + /* + ft-keys require special treatment + */ + if (ft_key) + { + /* + Really, there should be records=0.0 (yes!) 
+ but 1.0 would be probably safer + */ + tmp= prev_record_reads(join, found_ref); + records= 1.0; + } + else + { + found_constraint= 1; + /* + Check if we found full key + */ + if (found_part == PREV_BITS(uint,keyinfo->key_parts) && + !ref_or_null_part) + { /* use eq key */ + max_key_part= (uint) ~0; + if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME) + { + tmp = prev_record_reads(join, found_ref); + records=1.0; + } + else + { + if (!found_ref) + { /* We found a const key */ + /* + ReuseRangeEstimateForRef-1: + We get here if we've found a ref(const) (c_i are constants): + "(keypart1=c1) AND ... AND (keypartN=cN)" [ref_const_cond] + + If range optimizer was able to construct a "range" + access on this index, then its condition "quick_cond" was + eqivalent to ref_const_cond (*), and we can re-use E(#rows) + from the range optimizer. + + Proof of (*): By properties of range and ref optimizers + quick_cond will be equal or tighther than ref_const_cond. + ref_const_cond already covers "smallest" possible interval - + a singlepoint interval over all keyparts. Therefore, + quick_cond is equivalent to ref_const_cond (if it was an + empty interval we wouldn't have got here). + */ + if (table->quick_keys.is_set(key)) + records= (double) table->quick_rows[key]; + else + { + /* quick_range couldn't use key! */ + records= (double) s->records/rec; + } + } + else + { + if (!(records=keyinfo->rec_per_key[keyinfo->key_parts-1])) + { /* Prefer longer keys */ + records= + ((double) s->records / (double) rec * + (1.0 + + ((double) (table->s->max_key_length-keyinfo->key_length) / + (double) table->s->max_key_length))); + if (records < 2.0) + records=2.0; /* Can't be as good as a unique */ + } + /* + ReuseRangeEstimateForRef-2: We get here if we could not reuse + E(#rows) from range optimizer. 
Make another try: + + If range optimizer produced E(#rows) for a prefix of the ref + access we're considering, and that E(#rows) is lower then our + current estimate, make an adjustment. The criteria of when we + can make an adjustment is a special case of the criteria used + in ReuseRangeEstimateForRef-3. + */ + if (table->quick_keys.is_set(key) && + const_part & (1 << table->quick_key_parts[key]) && + table->quick_n_ranges[key] == 1 && + records > (double) table->quick_rows[key]) + { + records= (double) table->quick_rows[key]; + } + } + /* Limit the number of matched rows */ + tmp= records; + set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); + if (table->used_keys.is_set(key)) + { + /* we can use only index tree */ + uint keys_per_block= table->file->block_size/2/ + (keyinfo->key_length+table->file->ref_length)+1; + tmp= record_count*(tmp+keys_per_block-1)/keys_per_block; + } + else + tmp= record_count*min(tmp,s->worst_seeks); + } + } + else + { + /* + Use as much key-parts as possible and a uniq key is better + than a not unique key + Set tmp to (previous record count) * (records / combination) + */ + if ((found_part & 1) && + (!(table->file->index_flags(key, 0, 0) & HA_ONLY_WHOLE_INDEX) || + found_part == PREV_BITS(uint,keyinfo->key_parts))) + { + max_key_part= max_part_bit(found_part); + /* + ReuseRangeEstimateForRef-3: + We're now considering a ref[or_null] access via + (t.keypart1=e1 AND ... AND t.keypartK=eK) [ OR + (same-as-above but with one cond replaced + with "t.keypart_i IS NULL")] (**) + + Try re-using E(#rows) from "range" optimizer: + We can do so if "range" optimizer used the same intervals as + in (**). The intervals used by range optimizer may be not + available at this point (as "range" access might have choosen to + create quick select over another index), so we can't compare + them to (**). We'll make indirect judgements instead. + The sufficient conditions for re-use are: + (C1) All e_i in (**) are constants, i.e. 
found_ref==FALSE. (if + this is not satisfied we have no way to know which ranges + will be actually scanned by 'ref' until we execute the + join) + (C2) max #key parts in 'range' access == K == max_key_part (this + is apparently a necessary requirement) + + We also have a property that "range optimizer produces equal or + tighter set of scan intervals than ref(const) optimizer". Each + of the intervals in (**) are "tightest possible" intervals when + one limits itself to using keyparts 1..K (which we do in #2). + From here it follows that range access used either one, or + both of the (I1) and (I2) intervals: + + (t.keypart1=c1 AND ... AND t.keypartK=eK) (I1) + (same-as-above but with one cond replaced + with "t.keypart_i IS NULL") (I2) + + The remaining part is to exclude the situation where range + optimizer used one interval while we're considering + ref-or-null and looking for estimate for two intervals. This + is done by last limitation: + + (C3) "range optimizer used (have ref_or_null?2:1) intervals" + */ + if (table->quick_keys.is_set(key) && !found_ref && //(C1) + table->quick_key_parts[key] == max_key_part && //(C2) + table->quick_n_ranges[key] == 1+test(ref_or_null_part)) //(C3) + { + tmp= records= (double) table->quick_rows[key]; + } + else + { + /* Check if we have statistic about the distribution */ + if ((records= keyinfo->rec_per_key[max_key_part-1])) + tmp= records; + else + { + /* + Assume that the first key part matches 1% of the file + and that the whole key matches 10 (duplicates) or 1 + (unique) records. + Assume also that more key matches proportionally more + records + This gives the formula: + records = (x * (b-a) + a*c-b)/(c-1) + + b = records matched by whole key + a = records matched by first key part (1% of all records?) 
+ c = number of key parts in key + x = used key parts (1 <= x <= c) + */ + double rec_per_key; + if (!(rec_per_key=(double) + keyinfo->rec_per_key[keyinfo->key_parts-1])) + rec_per_key=(double) s->records/rec+1; + + if (!s->records) + tmp = 0; + else if (rec_per_key/(double) s->records >= 0.01) + tmp = rec_per_key; + else + { + double a=s->records*0.01; + if (keyinfo->key_parts > 1) + tmp= (max_key_part * (rec_per_key - a) + + a*keyinfo->key_parts - rec_per_key)/ + (keyinfo->key_parts-1); + else + tmp= a; + set_if_bigger(tmp,1.0); + } + records = (ulong) tmp; + } + + if (ref_or_null_part) + { + /* We need to do two key searches to find key */ + tmp *= 2.0; + records *= 2.0; + } + + /* + ReuseRangeEstimateForRef-4: We get here if we could not reuse + E(#rows) from range optimizer. Make another try: + + If range optimizer produced E(#rows) for a prefix of the ref + access we're considering, and that E(#rows) is lower then our + current estimate, make the adjustment. + + The decision whether we can re-use the estimate from the range + optimizer is the same as in ReuseRangeEstimateForRef-3, + applied to first table->quick_key_parts[key] key parts. 
+ */ + if (table->quick_keys.is_set(key) && + table->quick_key_parts[key] <= max_key_part && + const_part & (1 << table->quick_key_parts[key]) && + table->quick_n_ranges[key] == 1 + test(ref_or_null_part & + const_part) && + records > (double) table->quick_rows[key]) + { + tmp= records= (double) table->quick_rows[key]; + } + } + + /* Limit the number of matched rows */ + set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); + if (table->used_keys.is_set(key)) + { + /* we can use only index tree */ + uint keys_per_block= table->file->block_size/2/ + (keyinfo->key_length+table->file->ref_length)+1; + tmp= record_count*(tmp+keys_per_block-1)/keys_per_block; + } + else + tmp= record_count*min(tmp,s->worst_seeks); + } + else + tmp= best_time; // Do nothing + } + } /* not ft_key */ + if (tmp < best_time - records/(double) TIME_FOR_COMPARE) + { + best_time= tmp + records/(double) TIME_FOR_COMPARE; + best= tmp; + best_records= records; + best_key= start_key; + best_max_key_part= max_key_part; + } + } + records= best_records; + } + + /* + Don't test table scan if it can't be better. + Prefer key lookup if we would use the same key for scanning. + + Don't do a table scan on InnoDB tables, if we can read the used + parts of the row from any of the used index. + This is because table scans uses index and we would not win + anything by using a table scan. + + A word for word translation of the below if-statement in psergey's + understanding: we check if we should use table scan if: + (1) The found 'ref' access produces more records than a table scan + (or index scan, or quick select), or 'ref' is more expensive than + any of them. + (2) This doesn't hold: the best way to perform table scan is to to perform + 'range' access using index IDX, and the best way to perform 'ref' + access is to use the same index IDX, with the same or more key parts. 
+ (note: it is not clear how this rule is/should be extended to + index_merge quick selects) + (3) See above note about InnoDB. + (4) NOT ("FORCE INDEX(...)" is used for table and there is 'ref' access + path, but there is no quick select) + If the condition in the above brackets holds, then the only possible + "table scan" access method is ALL/index (there is no quick select). + Since we have a 'ref' access path, and FORCE INDEX instructs us to + choose it over ALL/index, there is no need to consider a full table + scan. + */ + if ((records >= s->found_records || best > s->read_time) && // (1) + !(s->quick && best_key && s->quick->index == best_key->key && // (2) + best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2) + !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) && // (3) + ! s->table->used_keys.is_clear_all() && best_key) && // (3) + !(s->table->force_index && best_key && !s->quick)) // (4) + { // Check full join + ha_rows rnd_records= s->found_records; + /* + If there is a restriction on the table, assume that 25% of the + rows can be skipped on next part. + This is to force tables that this table depends on before this + table + */ + if (found_constraint) + rnd_records-= rnd_records/4; + + /* + Range optimizer never proposes a RANGE if it isn't better + than FULL: so if RANGE is present, it's always preferred to FULL. + Here we estimate its cost. + */ + if (s->quick) + { + /* + For each record we: + - read record range through 'quick' + - skip rows which does not satisfy WHERE constraints + */ + tmp= record_count * + (s->quick->read_time + + (s->found_records - rnd_records)/(double) TIME_FOR_COMPARE); + } + else + { + /* Estimate cost of reading table. 
*/ + tmp= s->table->file->scan_time(); + if (s->table->map & join->outer_join) // Can't use join cache + { + /* + For each record we have to: + - read the whole table record + - skip rows which does not satisfy join condition + */ + tmp= record_count * + (tmp + + (s->records - rnd_records)/(double) TIME_FOR_COMPARE); + } + else + { + /* We read the table as many times as join buffer becomes full. */ + tmp*= (1.0 + floor((double) cache_record_length(join,idx) * + record_count / + (double) thd->variables.join_buff_size)); + /* + We don't make full cartesian product between rows in the scanned + table and existing records because we skip all rows from the + scanned table, which does not satisfy join condition when + we read the table (see flush_cached_records for details). Here we + take into account cost to read and skip these records. + */ + tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE; + } + } + + /* + We estimate the cost of evaluating WHERE clause for found records + as record_count * rnd_records / TIME_FOR_COMPARE. This cost plus + tmp give us total cost of using TABLE SCAN + */ + if (best == DBL_MAX || + (tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records < + best + record_count/(double) TIME_FOR_COMPARE*records)) + { + /* + If the table has a range (s->quick is set) make_join_select() + will ensure that this will be used + */ + best= tmp; + records= rows2double(rnd_records); + best_key= 0; + } + } + + /* Update the cost information for the current partial plan */ + join->positions[idx].records_read= records; + join->positions[idx].read_time= best; + join->positions[idx].key= best_key; + join->positions[idx].table= s; + + if (!best_key && + idx == join->const_tables && + s->table == join->sort_by_table && + join->unit->select_limit_cnt >= records) + join->sort_by_table= (TABLE*) 1; // Must use temporary table + + DBUG_VOID_RETURN; +} + + +/* + Selects and invokes a search strategy for an optimal query plan. 
+ + SYNOPSIS + choose_plan() + join pointer to the structure providing all context info for + the query + join_tables set of the tables in the query + + DESCRIPTION + The function checks user-configurable parameters that control the search + strategy for an optimal plan, selects the search method and then invokes + it. Each specific optimization procedure stores the final optimal plan in + the array 'join->best_positions', and the cost of the plan in + 'join->best_read'. + + RETURN + None +*/ + +static void +choose_plan(JOIN *join, table_map join_tables) +{ + uint search_depth= join->thd->variables.optimizer_search_depth; + uint prune_level= join->thd->variables.optimizer_prune_level; + bool straight_join= test(join->select_options & SELECT_STRAIGHT_JOIN); + DBUG_ENTER("choose_plan"); + + join->cur_embedding_map= 0; + reset_nj_counters(join->join_list); + /* + if (SELECT_STRAIGHT_JOIN option is set) + reorder tables so dependent tables come after tables they depend + on, otherwise keep tables in the order they were specified in the query + else + Apply heuristic: pre-sort all access plans with respect to the number of + records accessed. + */ + qsort(join->best_ref + join->const_tables, join->tables - join->const_tables, + sizeof(JOIN_TAB*), straight_join?join_tab_cmp_straight:join_tab_cmp); + + if (straight_join) + { + optimize_straight_join(join, join_tables); + } + else + { + if (search_depth == MAX_TABLES+2) + { /* + TODO: 'MAX_TABLES+2' denotes the old implementation of find_best before + the greedy version. Will be removed when greedy_search is approved. 
+ */ + join->best_read= DBL_MAX; + find_best(join, join_tables, join->const_tables, 1.0, 0.0); + } + else + { + if (search_depth == 0) + /* Automatically determine a reasonable value for 'search_depth' */ + search_depth= determine_search_depth(join); + greedy_search(join, join_tables, search_depth, prune_level); + } + } + + /* + Store the cost of this query into a user variable + Don't update last_query_cost for 'show status' command + */ + if (join->thd->lex->orig_sql_command != SQLCOM_SHOW_STATUS) + join->thd->status_var.last_query_cost= join->best_read; + DBUG_VOID_RETURN; +} + + +/* + Compare two JOIN_TAB objects based on the number of accessed records. + + SYNOPSIS + join_tab_cmp() + ptr1 pointer to first JOIN_TAB object + ptr2 pointer to second JOIN_TAB object + + RETURN + 1 if first is bigger + -1 if second is bigger + 0 if equal +*/ + +static int +join_tab_cmp(const void* ptr1, const void* ptr2) +{ + JOIN_TAB *jt1= *(JOIN_TAB**) ptr1; + JOIN_TAB *jt2= *(JOIN_TAB**) ptr2; + + if (jt1->dependent & jt2->table->map) + return 1; + if (jt2->dependent & jt1->table->map) + return -1; + if (jt1->found_records > jt2->found_records) + return 1; + if (jt1->found_records < jt2->found_records) + return -1; + return jt1 > jt2 ? 1 : (jt1 < jt2 ? -1 : 0); +} + + +/* + Same as join_tab_cmp, but for use with SELECT_STRAIGHT_JOIN. +*/ + +static int +join_tab_cmp_straight(const void* ptr1, const void* ptr2) +{ + JOIN_TAB *jt1= *(JOIN_TAB**) ptr1; + JOIN_TAB *jt2= *(JOIN_TAB**) ptr2; + + if (jt1->dependent & jt2->table->map) + return 1; + if (jt2->dependent & jt1->table->map) + return -1; + return jt1 > jt2 ? 1 : (jt1 < jt2 ? -1 : 0); +} + +/* + Heuristic procedure to automatically guess a reasonable degree of + exhaustiveness for the greedy search procedure. 
+ + SYNOPSIS + determine_search_depth() + join pointer to the structure providing all context info for the query + + DESCRIPTION + The procedure estimates the optimization time and selects a search depth + big enough to result in a near-optimal QEP, that doesn't take too long to + find. If the number of tables in the query exceeds some constant, then + search_depth is set to this constant. + + NOTES + This is an extremely simplistic implementation that serves as a stub for a + more advanced analysis of the join. Ideally the search depth should be + determined by learning from previous query optimizations, because it will + depend on the CPU power (and other factors). + + RETURN + A positive integer that specifies the search depth (and thus the + exhaustiveness) of the depth-first search algorithm used by + 'greedy_search'. +*/ + +static uint +determine_search_depth(JOIN *join) +{ + uint table_count= join->tables - join->const_tables; + uint search_depth; + /* TODO: this value should be determined dynamically, based on statistics: */ + uint max_tables_for_exhaustive_opt= 7; + + if (table_count <= max_tables_for_exhaustive_opt) + search_depth= table_count+1; // use exhaustive for small number of tables + else + /* + TODO: this value could be determined by some mapping of the form: + depth : table_count -> [max_tables_for_exhaustive_opt..MAX_EXHAUSTIVE] + */ + search_depth= max_tables_for_exhaustive_opt; // use greedy search + + return search_depth; +} + + +/* + Select the best ways to access the tables in a query without reordering them. + + SYNOPSIS + optimize_straight_join() + join pointer to the structure providing all context info for + the query + join_tables set of the tables in the query + + DESCRIPTION + Find the best access paths for each query table and compute their costs + according to their order in the array 'join->best_ref' (thus without + reordering the join tables). 
The function calls sequentially + 'best_access_path' for each table in the query to select the best table + access method. The final optimal plan is stored in the array + 'join->best_positions', and the corresponding cost in 'join->best_read'. + + NOTES + This function can be applied to: + - queries with STRAIGHT_JOIN + - internally to compute the cost of an arbitrary QEP + Thus 'optimize_straight_join' can be used at any stage of the query + optimization process to finalize a QEP as it is. + + RETURN + None +*/ + +static void +optimize_straight_join(JOIN *join, table_map join_tables) +{ + JOIN_TAB *s; + uint idx= join->const_tables; + double record_count= 1.0; + double read_time= 0.0; + + for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++) + { + /* Find the best access method from 's' to the current partial plan */ + best_access_path(join, s, join->thd, join_tables, idx, + record_count, read_time); + /* compute the cost of the new plan extended with 's' */ + record_count*= join->positions[idx].records_read; + read_time+= join->positions[idx].read_time; + join_tables&= ~(s->table->map); + ++idx; + } + + read_time+= record_count / (double) TIME_FOR_COMPARE; + if (join->sort_by_table && + join->sort_by_table != join->positions[join->const_tables].table->table) + read_time+= record_count; // We have to make a temp table + memcpy((gptr) join->best_positions, (gptr) join->positions, + sizeof(POSITION)*idx); + join->best_read= read_time; +} + + +/* + Find a good, possibly optimal, query execution plan (QEP) by a greedy search. + + SYNOPSIS + join pointer to the structure providing all context info + for the query + remaining_tables set of tables not included into the partial plan yet + search_depth controlls the exhaustiveness of the search + prune_level the pruning heuristics that should be applied during + search + + DESCRIPTION + The search procedure uses a hybrid greedy/exhaustive search with controlled + exhaustiveness. 
The search is performed in N = card(remaining_tables) + steps. Each step evaluates how promising is each of the unoptimized tables, + selects the most promising table, and extends the current partial QEP with + that table. Currenly the most 'promising' table is the one with least + expensive extension. + There are two extreme cases: + 1. When (card(remaining_tables) < search_depth), the estimate finds the best + complete continuation of the partial QEP. This continuation can be + used directly as a result of the search. + 2. When (search_depth == 1) the 'best_extension_by_limited_search' + consideres the extension of the current QEP with each of the remaining + unoptimized tables. + All other cases are in-between these two extremes. Thus the parameter + 'search_depth' controlls the exhaustiveness of the search. The higher the + value, the longer the optimizaton time and possibly the better the + resulting plan. The lower the value, the fewer alternative plans are + estimated, but the more likely to get a bad QEP. + + All intermediate and final results of the procedure are stored in 'join': + join->positions modified for every partial QEP that is explored + join->best_positions modified for the current best complete QEP + join->best_read modified for the current best complete QEP + join->best_ref might be partially reordered + The final optimal plan is stored in 'join->best_positions', and its + corresponding cost in 'join->best_read'. + + NOTES + The following pseudocode describes the algorithm of 'greedy_search': + + procedure greedy_search + input: remaining_tables + output: pplan; + { + pplan = <>; + do { + (t, a) = best_extension(pplan, remaining_tables); + pplan = concat(pplan, (t, a)); + remaining_tables = remaining_tables - t; + } while (remaining_tables != {}) + return pplan; + } + + where 'best_extension' is a placeholder for a procedure that selects the + most "promising" of all tables in 'remaining_tables'. 
+ Currently this estimate is performed by calling + 'best_extension_by_limited_search' to evaluate all extensions of the + current QEP of size 'search_depth', thus the complexity of 'greedy_search' + mainly depends on that of 'best_extension_by_limited_search'. + + If 'best_extension()' == 'best_extension_by_limited_search()', then the + worst-case complexity of this algorithm is <= + O(N*N^search_depth/search_depth). When serch_depth >= N, then the + complexity of greedy_search is O(N!). + + In the future, 'greedy_search' might be extended to support other + implementations of 'best_extension', e.g. some simpler quadratic procedure. + + RETURN + None +*/ + static void -find_best_combination(JOIN *join, table_map rest_tables) +greedy_search(JOIN *join, + table_map remaining_tables, + uint search_depth, + uint prune_level) { - DBUG_ENTER("find_best_combination"); - join->best_read=DBL_MAX; - find_best(join,rest_tables, join->const_tables,1.0,0.0); + double record_count= 1.0; + double read_time= 0.0; + uint idx= join->const_tables; // index into 'join->best_ref' + uint best_idx; + uint size_remain; // cardinality of remaining_tables + POSITION best_pos; + JOIN_TAB *best_table; // the next plan node to be added to the curr QEP + + DBUG_ENTER("greedy_search"); + + /* number of tables that remain to be optimized */ + size_remain= my_count_bits(remaining_tables); + + do { + /* Find the extension of the current QEP with the lowest cost */ + join->best_read= DBL_MAX; + best_extension_by_limited_search(join, remaining_tables, idx, record_count, + read_time, search_depth, prune_level); + + if (size_remain <= search_depth) + { + /* + 'join->best_positions' contains a complete optimal extension of the + current partial QEP. 
+ */ + DBUG_EXECUTE("opt", print_plan(join, join->tables, + record_count, read_time, read_time, + "optimal");); + DBUG_VOID_RETURN; + } + + /* select the first table in the optimal extension as most promising */ + best_pos= join->best_positions[idx]; + best_table= best_pos.table; + /* + Each subsequent loop of 'best_extension_by_limited_search' uses + 'join->positions' for cost estimates, therefore we have to update its + value. + */ + join->positions[idx]= best_pos; + + /* find the position of 'best_table' in 'join->best_ref' */ + best_idx= idx; + JOIN_TAB *pos= join->best_ref[best_idx]; + while (pos && best_table != pos) + pos= join->best_ref[++best_idx]; + DBUG_ASSERT((pos != NULL)); // should always find 'best_table' + /* move 'best_table' at the first free position in the array of joins */ + swap_variables(JOIN_TAB*, join->best_ref[idx], join->best_ref[best_idx]); + + /* compute the cost of the new plan extended with 'best_table' */ + record_count*= join->positions[idx].records_read; + read_time+= join->positions[idx].read_time; + + remaining_tables&= ~(best_table->table->map); + --size_remain; + ++idx; + + DBUG_EXECUTE("opt", print_plan(join, join->tables, + record_count, read_time, read_time, + "extended");); + } while (TRUE); +} + + +/* + Find a good, possibly optimal, query execution plan (QEP) by a possibly + exhaustive search. 
+ + SYNOPSIS + best_extension_by_limited_search() + join pointer to the structure providing all context info for + the query + remaining_tables set of tables not included into the partial plan yet + idx length of the partial QEP in 'join->positions'; + since a depth-first search is used, also corresponds to + the current depth of the search tree; + also an index in the array 'join->best_ref'; + record_count estimate for the number of records returned by the best + partial plan + read_time the cost of the best partial plan + search_depth maximum depth of the recursion and thus size of the found + optimal plan (0 < search_depth <= join->tables+1). + prune_level pruning heuristics that should be applied during + optimization + (values: 0 = EXHAUSTIVE, 1 = PRUNE_BY_TIME_OR_ROWS) + + DESCRIPTION + The procedure searches for the optimal ordering of the query tables in set + 'remaining_tables' of size N, and the corresponding optimal access paths to + each table. The choice of a table order and an access path for each table + constitutes a query execution plan (QEP) that fully specifies how to + execute the query. + + The maximal size of the found plan is controlled by the parameter + 'search_depth'. When search_depth == N, the resulting plan is complete and + can be used directly as a QEP. If search_depth < N, the found plan consists + of only some of the query tables. Such "partial" optimal plans are useful + only as input to query optimization procedures, and cannot be used directly + to execute a query. + + The algorithm begins with an empty partial plan stored in 'join->positions' + and a set of N tables - 'remaining_tables'. Each step of the algorithm + evaluates the cost of the partial plan extended by all access plans for + each of the relations in 'remaining_tables', expands the current partial + plan with the access plan that results in lowest cost of the expanded + partial plan, and removes the corresponding relation from + 'remaining_tables'. 
The algorithm continues until it either constructs a + complete optimal plan, or constructs an optimal plartial plan with size = + search_depth. + + The final optimal plan is stored in 'join->best_positions'. The + corresponding cost of the optimal plan is in 'join->best_read'. + + NOTES + The procedure uses a recursive depth-first search where the depth of the + recursion (and thus the exhaustiveness of the search) is controlled by the + parameter 'search_depth'. + + The pseudocode below describes the algorithm of + 'best_extension_by_limited_search'. The worst-case complexity of this + algorithm is O(N*N^search_depth/search_depth). When serch_depth >= N, then + the complexity of greedy_search is O(N!). + + procedure best_extension_by_limited_search( + pplan in, // in, partial plan of tables-joined-so-far + pplan_cost, // in, cost of pplan + remaining_tables, // in, set of tables not referenced in pplan + best_plan_so_far, // in/out, best plan found so far + best_plan_so_far_cost,// in/out, cost of best_plan_so_far + search_depth) // in, maximum size of the plans being considered + { + for each table T from remaining_tables + { + // Calculate the cost of using table T as above + cost = complex-series-of-calculations; + + // Add the cost to the cost so far. + pplan_cost+= cost; + + if (pplan_cost >= best_plan_so_far_cost) + // pplan_cost already too great, stop search + continue; + + pplan= expand pplan by best_access_method; + remaining_tables= remaining_tables - table T; + if (remaining_tables is not an empty set + and + search_depth > 1) + { + best_extension_by_limited_search(pplan, pplan_cost, + remaining_tables, + best_plan_so_far, + best_plan_so_far_cost, + search_depth - 1); + } + else + { + best_plan_so_far_cost= pplan_cost; + best_plan_so_far= pplan; + } + } + } + + IMPLEMENTATION + When 'best_extension_by_limited_search' is called for the first time, + 'join->best_read' must be set to the largest possible value (e.g. DBL_MAX). 
+ The actual implementation provides a way to optionally use pruning + heuristic (controlled by the parameter 'prune_level') to reduce the search + space by skipping some partial plans. + The parameter 'search_depth' provides control over the recursion + depth, and thus the size of the resulting optimal plan. + + RETURN + None +*/ + +static void +best_extension_by_limited_search(JOIN *join, + table_map remaining_tables, + uint idx, + double record_count, + double read_time, + uint search_depth, + uint prune_level) +{ + THD *thd= join->thd; + if (thd->killed) // Abort + return; + + DBUG_ENTER("best_extension_by_limited_search"); + + /* + 'join' is a partial plan with lower cost than the best plan so far, + so continue expanding it further with the tables in 'remaining_tables'. + */ + JOIN_TAB *s; + double best_record_count= DBL_MAX; + double best_read_time= DBL_MAX; + + DBUG_EXECUTE("opt", print_plan(join, idx, record_count, read_time, read_time, + "part_plan");); + + for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++) + { + table_map real_table_bit= s->table->map; + if ((remaining_tables & real_table_bit) && + !(remaining_tables & s->dependent) && + (!idx || !check_interleaving_with_nj(join->positions[idx-1].table, s))) + { + double current_record_count, current_read_time; + + /* Find the best access method from 's' to the current partial plan */ + best_access_path(join, s, thd, remaining_tables, idx, + record_count, read_time); + /* Compute the cost of extending the plan with 's' */ + current_record_count= record_count * join->positions[idx].records_read; + current_read_time= read_time + join->positions[idx].read_time; + + /* Expand only partial plans with lower cost than the best QEP so far */ + if ((current_read_time + + current_record_count / (double) TIME_FOR_COMPARE) >= join->best_read) + { + DBUG_EXECUTE("opt", print_plan(join, idx+1, + current_record_count, + read_time, + (current_read_time + + current_record_count / + (double) TIME_FOR_COMPARE), 
+ "prune_by_cost");); + restore_prev_nj_state(s); + continue; + } + + /* + Prune some less promising partial plans. This heuristic may miss + the optimal QEPs, thus it results in a non-exhaustive search. + */ + if (prune_level == 1) + { + if (best_record_count > current_record_count || + best_read_time > current_read_time || + idx == join->const_tables && // 's' is the first table in the QEP + s->table == join->sort_by_table) + { + if (best_record_count >= current_record_count && + best_read_time >= current_read_time && + /* TODO: What is the reasoning behind this condition? */ + (!(s->key_dependent & remaining_tables) || + join->positions[idx].records_read < 2.0)) + { + best_record_count= current_record_count; + best_read_time= current_read_time; + } + } + else + { + DBUG_EXECUTE("opt", print_plan(join, idx+1, + current_record_count, + read_time, + current_read_time, + "pruned_by_heuristic");); + restore_prev_nj_state(s); + continue; + } + } + + if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) ) + { /* Recursively expand the current partial plan */ + swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); + best_extension_by_limited_search(join, + remaining_tables & ~real_table_bit, + idx + 1, + current_record_count, + current_read_time, + search_depth - 1, + prune_level); + if (thd->killed) + DBUG_VOID_RETURN; + swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); + } + else + { /* + 'join' is either the best partial QEP with 'search_depth' relations, + or the best complete QEP so far, whichever is smaller. 
+ */ + current_read_time+= current_record_count / (double) TIME_FOR_COMPARE; + if (join->sort_by_table && + join->sort_by_table != + join->positions[join->const_tables].table->table) + /* We have to make a temp table */ + current_read_time+= current_record_count; + if ((search_depth == 1) || (current_read_time < join->best_read)) + { + memcpy((gptr) join->best_positions, (gptr) join->positions, + sizeof(POSITION) * (idx + 1)); + join->best_read= current_read_time - 0.001; + } + DBUG_EXECUTE("opt", print_plan(join, idx+1, + current_record_count, + read_time, + current_read_time, + "full_plan");); + } + restore_prev_nj_state(s); + } + } DBUG_VOID_RETURN; } +/* + TODO: this function is here only temporarily until 'greedy_search' is + tested and accepted. +*/ static void find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, double read_time) { - ha_rows rec; - double tmp; THD *thd= join->thd; - if (!rest_tables) { DBUG_PRINT("best",("read_time: %g record_count: %g",read_time, @@ -2852,7 +4861,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, { memcpy((gptr) join->best_positions,(gptr) join->positions, sizeof(POSITION)*idx); - join->best_read=read_time; + join->best_read= read_time - 0.001; } return; } @@ -2864,348 +4873,18 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, for (JOIN_TAB **pos=join->best_ref+idx ; (s=*pos) ; pos++) { table_map real_table_bit=s->table->map; - if ((rest_tables & real_table_bit) && !(rest_tables & s->dependent)) - { - double best,best_time,records; - best=best_time=records=DBL_MAX; - KEYUSE *best_key=0; - uint best_max_key_part=0; - my_bool found_constraint= 0; - - if (s->keyuse) - { /* Use key if possible */ - TABLE *table=s->table; - KEYUSE *keyuse,*start_key=0; - double best_records=DBL_MAX; - uint max_key_part=0; - - /* Test how we can use keys */ - rec= s->records/MATCHING_ROWS_IN_OTHER_TABLE; // Assumed records/key - for (keyuse=s->keyuse ; keyuse->table == table 
;) - { - key_part_map found_part=0; - table_map found_ref=0; - uint key=keyuse->key; - KEY *keyinfo=table->key_info+key; - bool ft_key=(keyuse->keypart == FT_KEYPART); - uint found_ref_or_null= 0; - - /* Calculate how many key segments of the current key we can use */ - start_key=keyuse; - do - { - uint keypart=keyuse->keypart; - table_map best_part_found_ref= 0; - double best_prev_record_reads= DBL_MAX; - do - { - if (!(rest_tables & keyuse->used_tables) && - !(found_ref_or_null & keyuse->optimize)) - { - found_part|=keyuse->keypart_map; - double tmp= prev_record_reads(join, - (found_ref | - keyuse->used_tables)); - if (tmp < best_prev_record_reads) - { - best_part_found_ref= keyuse->used_tables; - best_prev_record_reads= tmp; - } - if (rec > keyuse->ref_table_rows) - rec= keyuse->ref_table_rows; - /* - If there is one 'key_column IS NULL' expression, we can - use this ref_or_null optimisation of this field - */ - found_ref_or_null|= (keyuse->optimize & - KEY_OPTIMIZE_REF_OR_NULL); - } - keyuse++; - } while (keyuse->table == table && keyuse->key == key && - keyuse->keypart == keypart); - found_ref|= best_part_found_ref; - } while (keyuse->table == table && keyuse->key == key); - - /* - Assume that that each key matches a proportional part of table. - */ - if (!found_part && !ft_key) - continue; // Nothing usable found - if (rec < MATCHING_ROWS_IN_OTHER_TABLE) - rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables - - /* - ft-keys require special treatment - */ - if (ft_key) - { - /* - Really, there should be records=0.0 (yes!) 
- but 1.0 would be probably safer - */ - tmp=prev_record_reads(join,found_ref); - records=1.0; - } - else - { - found_constraint= 1; - /* - Check if we found full key - */ - if (found_part == PREV_BITS(uint,keyinfo->key_parts) && - !found_ref_or_null) - { /* use eq key */ - max_key_part= (uint) ~0; - if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY | - HA_END_SPACE_KEY)) == HA_NOSAME) - { - tmp=prev_record_reads(join,found_ref); - records=1.0; - } - else - { - if (!found_ref) - { // We found a const key - if (table->quick_keys.is_set(key)) - records= (double) table->quick_rows[key]; - else - { - /* quick_range couldn't use key! */ - records= (double) s->records/rec; - } - } - else - { - if (!(records=keyinfo->rec_per_key[keyinfo->key_parts-1])) - { // Prefere longer keys - records= - ((double) s->records / (double) rec * - (1.0 + - ((double) (table->max_key_length-keyinfo->key_length) / - (double) table->max_key_length))); - if (records < 2.0) - records=2.0; // Can't be as good as a unique - } - } - /* Limit the number of matched rows */ - tmp= records; - set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); - if (table->used_keys.is_set(key)) - { - /* we can use only index tree */ - uint keys_per_block= table->file->block_size/2/ - (keyinfo->key_length+table->file->ref_length)+1; - tmp=record_count*(tmp+keys_per_block-1)/keys_per_block; - } - else - tmp=record_count*min(tmp,s->worst_seeks); - } - } - else - { - /* - Use as much key-parts as possible and a uniq key is better - than a not unique key - Set tmp to (previous record count) * (records / combination) - */ - if ((found_part & 1) && - (!(table->file->index_flags(key,0,0) & HA_ONLY_WHOLE_INDEX) || - found_part == PREV_BITS(uint,keyinfo->key_parts))) - { - max_key_part=max_part_bit(found_part); - /* - Check if quick_range could determinate how many rows we - will match - */ - if (table->quick_keys.is_set(key) && - table->quick_key_parts[key] == max_key_part) - tmp=records= (double) 
table->quick_rows[key]; - else - { - /* Check if we have statistic about the distribution */ - if ((records=keyinfo->rec_per_key[max_key_part-1])) - tmp=records; - else - { - /* - Assume that the first key part matches 1% of the file - and that the hole key matches 10 (duplicates) or 1 - (unique) records. - Assume also that more key matches proportionally more - records - This gives the formula: - records= (x * (b-a) + a*c-b)/(c-1) - - b = records matched by whole key - a = records matched by first key part (10% of all records?) - c = number of key parts in key - x = used key parts (1 <= x <= c) - */ - double rec_per_key; - rec_per_key= keyinfo->rec_per_key[keyinfo->key_parts-1] ? - (double) keyinfo->rec_per_key[keyinfo->key_parts-1] : - (double) s->records/rec+1; - if (!s->records) - tmp=0; - else if (rec_per_key/(double) s->records >= 0.01) - tmp=rec_per_key; - else - { - double a=s->records*0.01; - tmp=(max_key_part * (rec_per_key - a) + - a*keyinfo->key_parts - rec_per_key)/ - (keyinfo->key_parts-1); - set_if_bigger(tmp,1.0); - } - records=(ulong) tmp; - } - /* - If quick_select was used on a part of this key, we know - the maximum number of rows that the key can match. 
- */ - if (table->quick_keys.is_set(key) && - table->quick_key_parts[key] <= max_key_part && - records > (double) table->quick_rows[key]) - tmp= records= (double) table->quick_rows[key]; - else if (found_ref_or_null) - { - /* We need to do two key searches to find key */ - tmp*= 2.0; - records*= 2.0; - } - } - /* Limit the number of matched rows */ - set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); - if (table->used_keys.is_set(key)) - { - /* we can use only index tree */ - uint keys_per_block= table->file->block_size/2/ - (keyinfo->key_length+table->file->ref_length)+1; - tmp=record_count*(tmp+keys_per_block-1)/keys_per_block; - } - else - tmp=record_count*min(tmp,s->worst_seeks); - } - else - tmp=best_time; // Do nothing - } - } /* not ft_key */ - if (tmp < best_time - records/(double) TIME_FOR_COMPARE) - { - best_time=tmp + records/(double) TIME_FOR_COMPARE; - best=tmp; - best_records=records; - best_key=start_key; - best_max_key_part=max_key_part; - } - } - records=best_records; - } - + if ((rest_tables & real_table_bit) && !(rest_tables & s->dependent) && + (!idx|| !check_interleaving_with_nj(join->positions[idx-1].table, s))) + { + double records, best; + best_access_path(join, s, thd, rest_tables, idx, record_count, + read_time); + records= join->positions[idx].records_read; + best= join->positions[idx].read_time; /* - Don't test table scan if it can't be better. - Prefer key lookup if we would use the same key for scanning. - - Don't do a table scan on InnoDB tables, if we can read the used - parts of the row from any of the used index. - This is because table scans uses index and we would not win - anything by using a table scan. - */ - if ((records >= s->found_records || best > s->read_time) && - !(s->quick && best_key && s->quick->index == best_key->key && - best_max_key_part >= s->table->quick_key_parts[best_key->key]) && - !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) && - ! 
s->table->used_keys.is_clear_all() && best_key) && - !(s->table->force_index && best_key)) - { // Check full join - ha_rows rnd_records= s->found_records; - /* - If there is a restriction on the table, assume that 25% of the - rows can be skipped on next part. - This is to force tables that this table depends on before this - table - */ - if (found_constraint) - rnd_records-= rnd_records/4; - - /* - Range optimizer never proposes a RANGE if it isn't better - than FULL: so if RANGE is present, it's always preferred to FULL. - Here we estimate its cost. - */ - if (s->quick) - { - /* - For each record we: - - read record range through 'quick' - - skip rows which does not satisfy WHERE constraints - */ - tmp= record_count * - (s->quick->read_time + - (s->found_records - rnd_records)/(double) TIME_FOR_COMPARE); - } - else - { - /* Estimate cost of reading table. */ - tmp= s->table->file->scan_time(); - if (s->on_expr) // Can't use join cache - { - /* - For each record we have to: - - read the whole table record - - skip rows which does not satisfy join condition - */ - tmp= record_count * - (tmp + - (s->records - rnd_records)/(double) TIME_FOR_COMPARE); - } - else - { - /* We read the table as many times as join buffer becomes full. */ - tmp*= (1.0 + floor((double) cache_record_length(join,idx) * - record_count / - (double) thd->variables.join_buff_size)); - /* - We don't make full cartesian product between rows in the scanned - table and existing records because we skip all rows from the - scanned table, which does not satisfy join condition when - we read the table (see flush_cached_records for details). Here we - take into account cost to read and skip these records. - */ - tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE; - } - } - - /* - We estimate the cost of evaluating WHERE clause for found records - as record_count * rnd_records / TIME_FOR_COMPARE. 
This cost plus - tmp give us total cost of using TABLE SCAN - */ - if (best == DBL_MAX || - (tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records < - best + record_count/(double) TIME_FOR_COMPARE*records)) - { - /* - If the table has a range (s->quick is set) make_join_select() - will ensure that this will be used - */ - best=tmp; - records= rows2double(rnd_records); - best_key=0; - } - } - join->positions[idx].records_read= records; - join->positions[idx].key=best_key; - join->positions[idx].table= s; - if (!best_key && idx == join->const_tables && - s->table == join->sort_by_table && - join->unit->select_limit_cnt >= records) - join->sort_by_table= (TABLE*) 1; // Must use temporary table - - /* Go to the next level only if there hasn't been a better key on this level! This will cut down the search for a lot simple cases! - */ + */ double current_record_count=record_count*records; double current_read_time=read_time+best; if (best_record_count > current_record_count || @@ -3226,6 +4905,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, return; swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); } + restore_prev_nj_state(s); if (join->select_options & SELECT_STRAIGHT_JOIN) break; // Don't test all combinations } @@ -3257,13 +4937,13 @@ static void calc_used_field_length(THD *thd, JOIN_TAB *join_tab) } } if (null_fields) - rec_length+=(join_tab->table->null_fields+7)/8; + rec_length+=(join_tab->table->s->null_fields+7)/8; if (join_tab->table->maybe_null) rec_length+=sizeof(my_bool); if (blobs) { uint blob_length=(uint) (join_tab->table->file->mean_rec_length- - (join_tab->table->reclength- rec_length)); + (join_tab->table->s->reclength- rec_length)); rec_length+=(uint) max(4,blob_length); } join_tab->used_fields=fields; @@ -3322,11 +5002,12 @@ get_best_combination(JOIN *join) KEYUSE *keyuse; uint table_count; THD *thd=join->thd; + DBUG_ENTER("get_best_combination"); table_count=join->tables; if (!(join->join_tab=join_tab= (JOIN_TAB*) 
thd->alloc(sizeof(JOIN_TAB)*table_count))) - return TRUE; + DBUG_RETURN(TRUE); join->full_join=0; @@ -3338,8 +5019,9 @@ get_best_combination(JOIN *join) form=join->table[tablenr]=j->table; used_tables|= form->map; form->reginfo.join_tab=j; - if (!j->on_expr) + if (!*j->on_expr_ref) form->reginfo.not_exists_optimize=0; // Only with LEFT JOIN + DBUG_PRINT("info",("type: %d", j->type)); if (j->type == JT_CONST) continue; // Handled in make_join_stat.. @@ -3355,13 +5037,13 @@ get_best_combination(JOIN *join) join->full_join=1; } else if (create_ref_for_key(join, j, keyuse, used_tables)) - return TRUE; // Something went wrong + DBUG_RETURN(TRUE); // Something went wrong } for (i=0 ; i < table_count ; i++) join->map2table[join->join_tab[i].table->tablenr]=join->join_tab+i; update_depend_map(join); - return 0; + DBUG_RETURN(0); } @@ -3374,6 +5056,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, uint keyparts,length,key; TABLE *table; KEY *keyinfo; + DBUG_ENTER("create_ref_for_key"); /* Use best key from find_best */ table=j->table; @@ -3421,9 +5104,10 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, if (!(j->ref.key_buff= (byte*) thd->calloc(ALIGN_SIZE(length)*2)) || !(j->ref.key_copy= (store_key**) thd->alloc((sizeof(store_key*) * (keyparts+1)))) || - !(j->ref.items= (Item**) thd->alloc(sizeof(Item*)*keyparts))) + !(j->ref.items= (Item**) thd->alloc(sizeof(Item*)*keyparts)) || + !(j->ref.cond_guards= (bool**) thd->alloc(sizeof(uint*)*keyparts))) { - return TRUE; + DBUG_RETURN(TRUE); } j->ref.key_buff2=j->ref.key_buff+ALIGN_SIZE(length); j->ref.key_err=1; @@ -3436,8 +5120,10 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, if (ftkey) { j->ref.items[0]=((Item_func*)(keyuse->val))->key_item(); + /* Predicates pushed down into subquery can't be used FT access */ + j->ref.cond_guards[0]= NULL; if (keyuse->used_tables) - return TRUE; // not supported yet. 
SerG + DBUG_RETURN(TRUE); // not supported yet. SerG j->type=JT_FT; } @@ -3452,6 +5138,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, uint maybe_null= test(keyinfo->key_part[i].null_bit); j->ref.items[i]=keyuse->val; // Save for cond removal + j->ref.cond_guards[i]= keyuse->cond_guard; if (keyuse->null_rejecting) j->ref.null_rejecting |= 1 << i; keyuse_uses_no_tables= keyuse_uses_no_tables && !keyuse->used_tables; @@ -3463,7 +5150,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, maybe_null ? (char*) key_buff : 0, keyinfo->key_part[i].length, keyuse->val); if (thd->is_fatal_error) - return TRUE; + DBUG_RETURN(TRUE); tmp.copy(); } else @@ -3472,7 +5159,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, &keyinfo->key_part[i], (char*) key_buff,maybe_null); /* - Remeber if we are going to use REF_OR_NULL + Remember if we are going to use REF_OR_NULL But only if field _really_ can be null i.e. we force JT_REF instead of JT_REF_OR_NULL in case if field can't be null */ @@ -3483,7 +5170,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, } /* not ftkey */ *ref_key=0; // end_marker if (j->type == JT_FT) - return 0; + DBUG_RETURN(0); if (j->type == JT_CONST) j->table->const_table= 1; else if (((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY | @@ -3507,7 +5194,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, } else j->type=JT_EQ_REF; - return 0; + DBUG_RETURN(0); } @@ -3525,13 +5212,15 @@ get_store_key(THD *thd, KEYUSE *keyuse, table_map used_tables, key_part->length, keyuse->val); } - else if (keyuse->val->type() == Item::FIELD_ITEM) + else if (keyuse->val->type() == Item::FIELD_ITEM || + (keyuse->val->type() == Item::REF_ITEM && + ((Item_ref*)keyuse->val)->ref_type() == Item_ref::OUTER_REF) ) return new store_key_field(thd, key_part->field, key_buff + maybe_null, maybe_null ? 
key_buff : 0, key_part->length, - ((Item_field*) keyuse->val)->field, + ((Item_field*) keyuse->val->real_item())->field, keyuse->val->full_name()); return new store_key_item(thd, key_part->field, @@ -3550,7 +5239,7 @@ bool store_val_in_field(Field *field, Item *item, enum_check_fields check_flag) { bool error; - THD *thd=current_thd; + THD *thd= field->table->in_use; ha_rows cuted_fields=thd->cuted_fields; /* we should restore old value of count_cuted_fields because @@ -3570,10 +5259,30 @@ make_simple_join(JOIN *join,TABLE *tmp_table) { TABLE **tableptr; JOIN_TAB *join_tab; + DBUG_ENTER("make_simple_join"); + + /* + Reuse TABLE * and JOIN_TAB if already allocated by a previous call + to this function through JOIN::exec (may happen for sub-queries). + */ + if (!join->table_reexec) + { + if (!(join->table_reexec= (TABLE**) join->thd->alloc(sizeof(TABLE*)))) + DBUG_RETURN(TRUE); /* purecov: inspected */ + if (join->tmp_join) + join->tmp_join->table_reexec= join->table_reexec; + } + if (!join->join_tab_reexec) + { + if (!(join->join_tab_reexec= + (JOIN_TAB*) join->thd->alloc(sizeof(JOIN_TAB)))) + DBUG_RETURN(TRUE); /* purecov: inspected */ + if (join->tmp_join) + join->tmp_join->join_tab_reexec= join->join_tab_reexec; + } + tableptr= join->table_reexec; + join_tab= join->join_tab_reexec; - if (!(tableptr=(TABLE**) join->thd->alloc(sizeof(TABLE*))) || - !(join_tab=(JOIN_TAB*) join->thd->alloc(sizeof(JOIN_TAB)))) - return TRUE; join->join_tab=join_tab; join->table=tableptr; tableptr[0]=tmp_table; join->tables=1; @@ -3597,15 +5306,18 @@ make_simple_join(JOIN *join,TABLE *tmp_table) join_tab->keys.init(); join_tab->keys.set_all(); /* test everything in quick */ join_tab->info=0; - join_tab->on_expr=0; + join_tab->on_expr_ref=0; + join_tab->last_inner= 0; + join_tab->first_unmatched= 0; join_tab->ref.key = -1; join_tab->not_used_in_distinct=0; join_tab->read_first_record= join_init_read_record; join_tab->join=join; + join_tab->ref.key_parts= 0; bzero((char*) 
&join_tab->read_record,sizeof(join_tab->read_record)); tmp_table->status=0; tmp_table->null_row=0; - return FALSE; + DBUG_RETURN(FALSE); } @@ -3711,10 +5423,10 @@ static void add_not_null_conds(JOIN *join) when it is called from make_join_select after this function is called. */ - if (notnull->fix_fields(join->thd, join->tables_list, ¬null)) + if (notnull->fix_fields(join->thd, ¬null)) DBUG_VOID_RETURN; DBUG_EXECUTE("where",print_where(notnull, - referred_tab->table->table_name);); + referred_tab->table->alias);); add_cond_and_fix(&referred_tab->select_cond, notnull); } } @@ -3723,28 +5435,189 @@ static void add_not_null_conds(JOIN *join) DBUG_VOID_RETURN; } +/* + Build a predicate guarded by match variables for embedding outer joins + + SYNOPSIS + add_found_match_trig_cond() + tab the first inner table for most nested outer join + cond the predicate to be guarded + root_tab the first inner table to stop + + DESCRIPTION + The function recursively adds guards for predicate cond + assending from tab to the first inner table next embedding + nested outer join and so on until it reaches root_tab + (root_tab can be 0). + + RETURN VALUE + pointer to the guarded predicate, if success + 0, otherwise +*/ + +static COND* +add_found_match_trig_cond(JOIN_TAB *tab, COND *cond, JOIN_TAB *root_tab) +{ + COND *tmp; + if (tab == root_tab || !cond) + return cond; + if ((tmp= add_found_match_trig_cond(tab->first_upper, cond, root_tab))) + { + tmp= new Item_func_trig_cond(tmp, &tab->found); + } + if (tmp) + { + tmp->quick_fix_field(); + tmp->update_used_tables(); + } + return tmp; +} + + +/* + Fill in outer join related info for the execution plan structure + + SYNOPSIS + make_outerjoin_info() + join - reference to the info fully describing the query + + DESCRIPTION + For each outer join operation left after simplification of the + original query the function set up the following pointers in the linear + structure join->join_tab representing the selected execution plan. 
+ The first inner table t0 for the operation is set to refer to the last + inner table tk through the field t0->last_inner. + Any inner table ti for the operation are set to refer to the first + inner table ti->first_inner. + The first inner table t0 for the operation is set to refer to the + first inner table of the embedding outer join operation, if there is any, + through the field t0->first_upper. + The on expression for the outer join operation is attached to the + corresponding first inner table through the field t0->on_expr_ref. + Here ti are structures of the JOIN_TAB type. + + EXAMPLE + For the query: + SELECT * FROM t1 + LEFT JOIN + (t2, t3 LEFT JOIN t4 ON t3.a=t4.a) + ON (t1.a=t2.a AND t1.b=t3.b) + WHERE t1.c > 5, + given the execution plan with the table order t1,t2,t3,t4 + is selected, the following references will be set; + t4->last_inner=[t4], t4->first_inner=[t4], t4->first_upper=[t2] + t2->last_inner=[t4], t2->first_inner=t3->first_inner=[t2], + on expression (t1.a=t2.a AND t1.b=t3.b) will be attached to + *t2->on_expr_ref, while t3.a=t4.a will be attached to *t4->on_expr_ref. + + NOTES + The function assumes that the simplification procedure has been + already applied to the join query (see simplify_joins). + This function can be called only after the execution plan + has been chosen. +*/ + +static void +make_outerjoin_info(JOIN *join) +{ + DBUG_ENTER("make_outerjoin_info"); + for (uint i=join->const_tables ; i < join->tables ; i++) + { + JOIN_TAB *tab=join->join_tab+i; + TABLE *table=tab->table; + TABLE_LIST *tbl= table->pos_in_table_list; + TABLE_LIST *embedding= tbl->embedding; + + if (tbl->outer_join) + { + /* + Table tab is the only one inner table for outer join. + (Like table t4 for the table reference t3 LEFT JOIN t4 ON t3.a=t4.a + is in the query above.) 
+ */ + tab->last_inner= tab->first_inner= tab; + tab->on_expr_ref= &tbl->on_expr; + tab->cond_equal= tbl->cond_equal; + if (embedding) + tab->first_upper= embedding->nested_join->first_nested; + } + for ( ; embedding ; embedding= embedding->embedding) + { + NESTED_JOIN *nested_join= embedding->nested_join; + if (!nested_join->counter) + { + /* + Table tab is the first inner table for nested_join. + Save reference to it in the nested join structure. + */ + nested_join->first_nested= tab; + tab->on_expr_ref= &embedding->on_expr; + tab->cond_equal= tbl->cond_equal; + if (embedding->embedding) + tab->first_upper= embedding->embedding->nested_join->first_nested; + } + if (!tab->first_inner) + tab->first_inner= nested_join->first_nested; + if (++nested_join->counter < nested_join->join_list.elements) + break; + /* Table tab is the last inner table for nested join. */ + nested_join->first_nested->last_inner= tab; + } + } + DBUG_VOID_RETURN; +} + + static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) { + THD *thd= join->thd; DBUG_ENTER("make_join_select"); if (select) { add_not_null_conds(join); table_map used_tables; - if (join->tables > 1) - cond->update_used_tables(); // Tablenr may have changed - if (join->const_tables == join->tables && - join->thd->lex->current_select->master_unit() == - &join->thd->lex->unit) // not upper level SELECT - join->const_table_map|=RAND_TABLE_BIT; - { // Check const tables - COND *const_cond= - make_cond_for_table(cond,join->const_table_map,(table_map) 0); - DBUG_EXECUTE("where",print_where(const_cond,"constants");); - if (const_cond && !const_cond->val_int()) - { - DBUG_PRINT("info",("Found impossible WHERE condition")); - DBUG_RETURN(1); // Impossible const condition + if (cond) /* Because of QUICK_GROUP_MIN_MAX_SELECT */ + { /* there may be a select without a cond. 
*/ + if (join->tables > 1) + cond->update_used_tables(); // Tablenr may have changed + if (join->const_tables == join->tables && + thd->lex->current_select->master_unit() == + &thd->lex->unit) // not upper level SELECT + join->const_table_map|=RAND_TABLE_BIT; + { // Check const tables + COND *const_cond= + make_cond_for_table(cond, + join->const_table_map, + (table_map) 0); + DBUG_EXECUTE("where",print_where(const_cond,"constants");); + for (JOIN_TAB *tab= join->join_tab+join->const_tables; + tab < join->join_tab+join->tables ; tab++) + { + if (*tab->on_expr_ref) + { + JOIN_TAB *cond_tab= tab->first_inner; + COND *tmp= make_cond_for_table(*tab->on_expr_ref, + join->const_table_map, + ( table_map) 0); + if (!tmp) + continue; + tmp= new Item_func_trig_cond(tmp, &cond_tab->not_null_compl); + if (!tmp) + DBUG_RETURN(1); + tmp->quick_fix_field(); + cond_tab->select_cond= !cond_tab->select_cond ? tmp : + new Item_cond_and(cond_tab->select_cond,tmp); + if (!cond_tab->select_cond) + DBUG_RETURN(1); + cond_tab->select_cond->quick_fix_field(); + } + } + if (const_cond && !const_cond->val_int()) + { + DBUG_PRINT("info",("Found impossible WHERE condition")); + DBUG_RETURN(1); // Impossible const condition + } } } used_tables=((select->const_tables=join->const_table_map) | @@ -3752,14 +5625,17 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) for (uint i=join->const_tables ; i < join->tables ; i++) { JOIN_TAB *tab=join->join_tab+i; + JOIN_TAB *first_inner_tab= tab->first_inner; table_map current_map= tab->table->map; + bool use_quick_range=0; + COND *tmp; + /* Following force including random expression in last table condition. 
It solve problem with select like SELECT * FROM t1 WHERE rand() > 0.5 */ if (i == join->tables-1) current_map|= OUTER_REF_TABLE_BIT | RAND_TABLE_BIT; - bool use_quick_range=0; used_tables|=current_map; if (tab->type == JT_REF && tab->quick && @@ -3775,32 +5651,83 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) join->best_positions[i].records_read= rows2double(tab->quick->records); } - COND *tmp=make_cond_for_table(cond,used_tables,current_map); - if (!tmp && tab->quick) + tmp= NULL; + if (cond) + tmp= make_cond_for_table(cond,used_tables,current_map); + if (cond && !tmp && tab->quick) { // Outer join - /* - Hack to handle the case where we only refer to a table - in the ON part of an OUTER JOIN. - */ - tmp=new Item_int((longlong) 1,1); // Always true + if (tab->type != JT_ALL) + { + /* + Don't use the quick method + We come here in the case where we have 'key=constant' and + the test is removed by make_cond_for_table() + */ + delete tab->quick; + tab->quick= 0; + } + else + { + /* + Hack to handle the case where we only refer to a table + in the ON part of an OUTER JOIN. In this case we want the code + below to check if we should use 'quick' instead. + */ + DBUG_PRINT("info", ("Item_int")); + tmp= new Item_int((longlong) 1,1); // Always true + DBUG_PRINT("info", ("Item_int 0x%lx", (ulong)tmp)); + } + } - if (tmp) + if (tmp || !cond) { + DBUG_EXECUTE("where",print_where(tmp,tab->table->alias);); SQL_SELECT *sel=tab->select=(SQL_SELECT*) - join->thd->memdup((gptr) select, sizeof(SQL_SELECT)); + thd->memdup((gptr) select, sizeof(SQL_SELECT)); if (!sel) DBUG_RETURN(1); // End of memory - add_cond_and_fix(&tab->select_cond, tmp); - sel->cond= tab->select_cond; + /* + If tab is an inner table of an outer join operation, + add a match guard to the pushed down predicate. + The guard will turn the predicate on only after + the first match for outer tables is encountered. 
+ */ + if (cond) + { + /* + Because of QUICK_GROUP_MIN_MAX_SELECT there may be a select without + a cond, so neutralize the hack above. + */ + if (!(tmp= add_found_match_trig_cond(first_inner_tab, tmp, 0))) + DBUG_RETURN(1); + tab->select_cond=sel->cond=tmp; + /* Push condition to storage engine if this is enabled + and the condition is not guarded */ + tab->table->file->pushed_cond= NULL; + if (thd->variables.engine_condition_pushdown) + { + COND *push_cond= + make_cond_for_table(tmp, current_map, current_map); + if (push_cond) + { + /* Push condition to handler */ + if (!tab->table->file->cond_push(push_cond)) + tab->table->file->pushed_cond= push_cond; + } + } + } + else + tab->select_cond= sel->cond= NULL; + sel->head=tab->table; - DBUG_EXECUTE("where",print_where(tmp,tab->table->table_name);); + DBUG_EXECUTE("where",print_where(tmp,tab->table->alias);); if (tab->quick) { /* Use quick key read if it's a constant and it's not used with key reading */ if (tab->needed_reg.is_clear_all() && tab->type != JT_EQ_REF && tab->type != JT_FT && (tab->type != JT_REF || - (uint) tab->ref.key == tab->quick->index)) + (uint) tab->ref.key == tab->quick->index)) { sel->quick=tab->quick; // Use value from get_quick_... 
sel->quick_keys.clear_all(); @@ -3831,7 +5758,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) the index if we are using limit and this is the first table */ - if ((!tab->keys.is_subset(tab->const_keys) && i > 0) || + if (cond && + (!tab->keys.is_subset(tab->const_keys) && i > 0) || (!tab->const_keys.is_clear_all() && i == join->const_tables && join->unit->select_limit_cnt < join->best_positions[i].records_read && @@ -3839,7 +5767,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) { /* Join with outer join condition */ COND *orig_cond=sel->cond; - sel->cond= and_conds(sel->cond, tab->on_expr); + sel->cond= and_conds(sel->cond, *tab->on_expr_ref); /* We can't call sel->cond->fix_fields, @@ -3851,7 +5779,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) if (sel->cond && !sel->cond->fixed) sel->cond->quick_fix_field(); - if (sel->test_quick_select(join->thd, tab->keys, + if (sel->test_quick_select(thd, tab->keys, used_tables & ~ current_map, (join->select_options & OPTION_FOUND_ROWS ? @@ -3863,8 +5791,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) we have to check isn't it only "impossible ON" instead */ sel->cond=orig_cond; - if (!tab->on_expr || - sel->test_quick_select(join->thd, tab->keys, + if (!*tab->on_expr_ref || + sel->test_quick_select(thd, tab->keys, used_tables & ~ current_map, (join->select_options & OPTION_FOUND_ROWS ? 
@@ -3898,30 +5826,94 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) } if (i != join->const_tables && tab->use_quick != 2) { /* Read with cache */ - if ((tmp=make_cond_for_table(cond, + if (cond && + (tmp=make_cond_for_table(cond, join->const_table_map | current_map, current_map))) { DBUG_EXECUTE("where",print_where(tmp,"cache");); tab->cache.select=(SQL_SELECT*) - join->thd->memdup((gptr) sel, sizeof(SQL_SELECT)); + thd->memdup((gptr) sel, sizeof(SQL_SELECT)); tab->cache.select->cond=tmp; tab->cache.select->read_tables=join->const_table_map; } } } } + + /* + Push down all predicates from on expressions. + Each of these predicated are guarded by a variable + that turns if off just before null complemented row for + outer joins is formed. Thus, the predicates from an + 'on expression' are guaranteed not to be checked for + the null complemented row. + */ + JOIN_TAB *last_tab= tab; + while (first_inner_tab && first_inner_tab->last_inner == last_tab) + { + /* + Table tab is the last inner table of an outer join. + An on expression is always attached to it. + */ + COND *on_expr= *first_inner_tab->on_expr_ref; + + table_map used_tables2= (join->const_table_map | + OUTER_REF_TABLE_BIT | RAND_TABLE_BIT); + for (tab= join->join_tab+join->const_tables; tab <= last_tab ; tab++) + { + current_map= tab->table->map; + used_tables2|= current_map; + COND *tmp_cond= make_cond_for_table(on_expr, used_tables2, + current_map); + if (tmp_cond) + { + JOIN_TAB *cond_tab= tab < first_inner_tab ? first_inner_tab : tab; + /* + First add the guards for match variables of + all embedding outer join operations. + */ + if (!(tmp_cond= add_found_match_trig_cond(cond_tab->first_inner, + tmp_cond, + first_inner_tab))) + DBUG_RETURN(1); + /* + Now add the guard turning the predicate off for + the null complemented row. 
+ */ + DBUG_PRINT("info", ("Item_func_trig_cond")); + tmp_cond= new Item_func_trig_cond(tmp_cond, + &first_inner_tab-> + not_null_compl); + DBUG_PRINT("info", ("Item_func_trig_cond 0x%lx", + (ulong) tmp_cond)); + if (tmp_cond) + tmp_cond->quick_fix_field(); + /* Add the predicate to other pushed down predicates */ + DBUG_PRINT("info", ("Item_cond_and")); + cond_tab->select_cond= !cond_tab->select_cond ? tmp_cond : + new Item_cond_and(cond_tab->select_cond, + tmp_cond); + DBUG_PRINT("info", ("Item_cond_and 0x%lx", + (ulong)cond_tab->select_cond)); + if (!cond_tab->select_cond) + DBUG_RETURN(1); + cond_tab->select_cond->quick_fix_field(); + } + } + first_inner_tab= first_inner_tab->first_upper; + } } } DBUG_RETURN(0); } - static void -make_join_readinfo(JOIN *join, uint options) +make_join_readinfo(JOIN *join, ulonglong options) { uint i; + bool statistics= test(!(join->select_options & SELECT_DESCRIBE)); bool ordered_set= 0; DBUG_ENTER("make_join_readinfo"); @@ -4023,7 +6015,7 @@ make_join_readinfo(JOIN *join, uint options) */ table->status=STATUS_NO_RECORD; if (i != join->const_tables && !(options & SELECT_NO_JOIN_CACHE) && - tab->use_quick != 2 && !tab->on_expr && !ordered_set) + tab->use_quick != 2 && !tab->first_inner && !ordered_set) { if ((options & SELECT_DESCRIBE) || !join_init_cache(join->thd,join->join_tab+join->const_tables, @@ -4038,7 +6030,8 @@ make_join_readinfo(JOIN *join, uint options) join->thd->server_status|=SERVER_QUERY_NO_GOOD_INDEX_USED; tab->read_first_record= join_init_quick_read_record; if (statistics) - statistic_increment(select_range_check_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_range_check_count, + &LOCK_status); } else { @@ -4048,13 +6041,15 @@ make_join_readinfo(JOIN *join, uint options) if (tab->select && tab->select->quick) { if (statistics) - statistic_increment(select_range_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_range_count, + &LOCK_status); } else { 
join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (statistics) - statistic_increment(select_scan_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_scan_count, + &LOCK_status); } } else @@ -4062,18 +6057,21 @@ make_join_readinfo(JOIN *join, uint options) if (tab->select && tab->select->quick) { if (statistics) - statistic_increment(select_full_range_join_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_full_range_join_count, + &LOCK_status); } else { join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (statistics) - statistic_increment(select_full_join_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_full_join_count, + &LOCK_status); } } if (!table->no_keyread) { if (tab->select && tab->select->quick && + tab->select->quick->index != MAX_KEY && //not index_merge table->used_keys.is_set(tab->select->quick->index)) { table->key_read=1; @@ -4126,7 +6124,8 @@ bool error_if_full_join(JOIN *join) { if (tab->type == JT_ALL && (!tab->select || !tab->select->quick)) { - my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0)); + my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, + ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0)); return(1); } } @@ -4168,28 +6167,124 @@ void JOIN_TAB::cleanup() /* + Partially cleanup JOIN after it has executed: close index or rnd read + (table cursors), free quick selects. + + DESCRIPTION + This function is called in the end of execution of a JOIN, before the used + tables are unlocked and closed. + + For a join that is resolved using a temporary table, the first sweep is + performed against actual tables and an intermediate result is inserted + into the temprorary table. + The last sweep is performed against the temporary table. Therefore, + the base tables and associated buffers used to fill the temporary table + are no longer needed, and this function is called to free them. 
+ + For a join that is performed without a temporary table, this function + is called after all rows are sent, but before EOF packet is sent. + + For a simple SELECT with no subqueries this function performs a full + cleanup of the JOIN and calls mysql_unlock_read_tables to free used base + tables. + + If a JOIN is executed for a subquery or if it has a subquery, we can't + do the full cleanup and need to do a partial cleanup only. + o If a JOIN is not the top level join, we must not unlock the tables + because the outer select may not have been evaluated yet, and we + can't unlock only selected tables of a query. + + o Additionally, if this JOIN corresponds to a correlated subquery, we + should not free quick selects and join buffers because they will be + needed for the next execution of the correlated subquery. + + o However, if this is a JOIN for a [sub]select, which is not + a correlated subquery itself, but has subqueries, we can free it + fully and also free JOINs of all its subqueries. The exception + is a subquery in SELECT list, e.g: + SELECT a, (select max(b) from t1) group by c + This subquery will not be evaluated at first sweep and its value will + not be inserted into the temporary table. Instead, it's evaluated + when selecting from the temporary table. Therefore, it can't be freed + here even though it's not correlated. +*/ + +void JOIN::join_free() +{ + SELECT_LEX_UNIT *tmp_unit; + SELECT_LEX *sl; + /* + Optimization: if not EXPLAIN and we are done with the JOIN, + free all tables. 
+ */ + bool full= (!select_lex->uncacheable && !thd->lex->describe); + bool can_unlock= full; + DBUG_ENTER("JOIN::join_free"); + + cleanup(full); + + for (tmp_unit= select_lex->first_inner_unit(); + tmp_unit; + tmp_unit= tmp_unit->next_unit()) + for (sl= tmp_unit->first_select(); sl; sl= sl->next_select()) + { + Item_subselect *subselect= sl->master_unit()->item; + bool full_local= full && (!subselect || subselect->is_evaluated()); + /* + If this join is evaluated, we can fully clean it up and clean up all + its underlying joins even if they are correlated -- they will not be + used any more anyway. + If this join is not yet evaluated, we still must clean it up to + close its table cursors -- it may never get evaluated, as in case of + ... HAVING FALSE OR a IN (SELECT ...)) + but all table cursors must be closed before the unlock. + */ + sl->cleanup_all_joins(full_local); + /* Can't unlock if at least one JOIN is still needed */ + can_unlock= can_unlock && full_local; + } + + /* + We are not using tables anymore + Unlock all tables. We may be in an INSERT .... SELECT statement. + */ + if (can_unlock && lock && thd->lock && + !(select_options & SELECT_NO_UNLOCK) && + !select_lex->subquery_in_having && + (select_lex == (thd->lex->unit.fake_select_lex ? + thd->lex->unit.fake_select_lex : &thd->lex->select_lex))) + { + /* + TODO: unlock tables even if the join isn't top level select in the + tree. + */ + mysql_unlock_read_tables(thd, lock); // Don't free join->lock + lock= 0; + } + + DBUG_VOID_RETURN; +} + + +/* Free resources of given join SYNOPSIS - JOIN::join_free() + JOIN::cleanup() fill - true if we should free all resources, call with full==1 should be last, before it this function can be called with full==0 NOTE: with subquery this function definitely will be called several times, but even for simple query it can be called several times. 
*/ -void -JOIN::join_free(bool full) -{ - JOIN_TAB *tab,*end; - DBUG_ENTER("JOIN::join_free"); - full= full || (!select_lex->uncacheable && - !thd->lex->subqueries && - !thd->lex->describe); // do not cleanup too early on EXPLAIN +void JOIN::cleanup(bool full) +{ + DBUG_ENTER("JOIN::cleanup"); if (table) { + JOIN_TAB *tab,*end; /* Only a sorted table may be cached. This sorted table is always the first non const table in join->table @@ -4197,17 +6292,7 @@ JOIN::join_free(bool full) if (tables > const_tables) // Test for not-const tables { free_io_cache(table[const_tables]); - filesort_free_buffers(table[const_tables]); - } - - for (SELECT_LEX_UNIT *unit= select_lex->first_inner_unit(); unit; - unit= unit->next_unit()) - { - JOIN *join; - for (SELECT_LEX *sl= unit->first_select_in_union(); sl; - sl= sl->next_select()) - if ((join= sl->join)) - join->join_free(full); + filesort_free_buffers(table[const_tables],full); } if (full) @@ -4226,25 +6311,14 @@ JOIN::join_free(bool full) } } } - /* We are not using tables anymore Unlock all tables. We may be in an INSERT .... SELECT statement. */ - if (full && lock && thd->lock && !(select_options & SELECT_NO_UNLOCK) && - !select_lex->subquery_in_having) - { - // TODO: unlock tables even if the join isn't top level select in the tree - if (select_lex == (thd->lex->unit.fake_select_lex ? 
- thd->lex->unit.fake_select_lex : &thd->lex->select_lex)) - { - mysql_unlock_read_tables(thd, lock); // Don't free join->lock - lock=0; - } - } - if (full) { + if (tmp_join) + tmp_table_param.copy_field= 0; group_fields.delete_elements(); /* We can't call delete_elements() on copy_funcs as this will cause @@ -4293,7 +6367,8 @@ eq_ref_table(JOIN *join, ORDER *start_order, JOIN_TAB *tab) if (tab->cached_eq_ref_table) // If cached return tab->eq_ref_table; tab->cached_eq_ref_table=1; - if (tab->type == JT_CONST) // We can skip const tables + /* We can skip const tables only if not an outer table */ + if (tab->type == JT_CONST && !tab->first_inner) return (tab->eq_ref_table=1); /* purecov: inspected */ if (tab->type != JT_EQ_REF || tab->table->maybe_null) return (tab->eq_ref_table=0); // We must use this @@ -4389,7 +6464,8 @@ static void update_depend_map(JOIN *join, ORDER *order) order->item[0]->update_used_tables(); order->depend_map=depend_map=order->item[0]->used_tables(); // Not item_sum(), RAND() and no reference to table outside of sub select - if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT))) + if (!(order->depend_map & (OUTER_REF_TABLE_BIT | RAND_TABLE_BIT)) + && !order->item[0]->with_sum_func) { for (JOIN_TAB **tab=join->map2table; depend_map ; @@ -4438,7 +6514,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, DBUG_ENTER("remove_const"); prev_ptr= &first_order; - *simple_order= join->join_tab[join->const_tables].on_expr ? 0 : 1; + *simple_order= *join->join_tab[join->const_tables].on_expr_ref ? 
0 : 1; /* NOTE: A variable of not_const_tables ^ first_table; breaks gcc 2.7 */ @@ -4494,9 +6570,8 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, static int return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables, - List<Item> &fields, bool send_row, uint select_options, - const char *info, Item *having, Procedure *procedure, - SELECT_LEX_UNIT *unit) + List<Item> &fields, bool send_row, ulonglong select_options, + const char *info, Item *having) { DBUG_ENTER("return_zero_rows"); @@ -4506,16 +6581,17 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables, DBUG_RETURN(0); } - join->join_free(0); + join->join_free(); if (send_row) { - for (TABLE_LIST *table=tables; table ; table=table->next) + for (TABLE_LIST *table= tables; table; table= table->next_leaf) mark_as_null_row(table->table); // All fields are NULL if (having && having->val_int() == 0) send_row=0; } - if (!(result->send_fields(fields,1))) + if (!(result->send_fields(fields, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))) { if (send_row) { @@ -4556,22 +6632,1072 @@ static void clear_tables(JOIN *join) class COND_CMP :public ilink { public: - static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } + static void *operator new(size_t size) + { + return (void*) sql_alloc((uint) size); + } static void operator delete(void *ptr __attribute__((unused)), - size_t size __attribute__((unused))) {} /*lint -e715 */ + size_t size __attribute__((unused))) + { TRASH(ptr, size); } Item *and_level; Item_func *cmp_func; COND_CMP(Item *a,Item_func *b) :and_level(a),cmp_func(b) {} }; -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class I_List<COND_CMP>; template class I_List_iterator<COND_CMP>; template class List<Item_func_match>; template class List_iterator<Item_func_match>; #endif + +/* + Find the multiple equality predicate containing a field + + SYNOPSIS + find_item_equal() + cond_equal multiple equalities to search in + field 
field to look for + inherited_fl :out set up to TRUE if multiple equality is found + on upper levels (not on current level of cond_equal) + + DESCRIPTION + The function retrieves the multiple equalities accessed through + the con_equal structure from current level and up looking for + an equality containing field. It stops retrieval as soon as the equality + is found and set up inherited_fl to TRUE if it's found on upper levels. + + RETURN + Item_equal for the found multiple equality predicate if a success; + NULL - otherwise. +*/ + +Item_equal *find_item_equal(COND_EQUAL *cond_equal, Field *field, + bool *inherited_fl) +{ + Item_equal *item= 0; + bool in_upper_level= FALSE; + while (cond_equal) + { + List_iterator_fast<Item_equal> li(cond_equal->current_level); + while ((item= li++)) + { + if (item->contains(field)) + goto finish; + } + in_upper_level= TRUE; + cond_equal= cond_equal->upper_levels; + } + in_upper_level= FALSE; +finish: + *inherited_fl= in_upper_level; + return item; +} + + +/* + Check whether an equality can be used to build multiple equalities + + SYNOPSIS + check_simple_equality() + left_item left term of the quality to be checked + right_item right term of the equality to be checked + item equality item if the equality originates from a condition + predicate, 0 if the equality is the result of row elimination + cond_equal multiple equalities that must hold together with the equality + + DESCRIPTION + This function first checks whether the equality (left_item=right_item) + is a simple equality i.e. the one that equates a field with another field + or a constant (field=field_item or field=const_item). + If this is the case the function looks for a multiple equality + in the lists referenced directly or indirectly by cond_equal inferring + the given simple equality. If it doesn't find any, it builds a multiple + equality that covers the predicate, i.e. the predicate can be inferred + from this multiple equality. 
+ The built multiple equality could be obtained in such a way: + create a binary multiple equality equivalent to the predicate, then + merge it, if possible, with one of old multiple equalities. + This guarantees that the set of multiple equalities covering equality + predicates will be minimal. + + EXAMPLE + For the where condition + WHERE a=b AND b=c AND + (b=2 OR f=e) + the check_equality will be called for the following equality + predicates a=b, b=c, b=2 and f=e. + For a=b it will be called with *cond_equal=(0,[]) and will transform + *cond_equal into (0,[Item_equal(a,b)]). + For b=c it will be called with *cond_equal=(0,[Item_equal(a,b)]) + and will transform *cond_equal into CE=(0,[Item_equal(a,b,c)]). + For b=2 it will be called with *cond_equal=(ptr(CE),[]) + and will transform *cond_equal into (ptr(CE),[Item_equal(2,a,b,c)]). + For f=e it will be called with *cond_equal=(ptr(CE), []) + and will transform *cond_equal into (ptr(CE),[Item_equal(f,e)]). + + NOTES + Now only fields that have the same type definitions (verified by + the Field::eq_def method) are placed to the same multiple equalities. + Because of this some equality predicates are not eliminated and + can be used in the constant propagation procedure. + We could weeken the equlity test as soon as at least one of the + equal fields is to be equal to a constant. It would require a + more complicated implementation: we would have to store, in + general case, its own constant for each fields from the multiple + equality. But at the same time it would allow us to get rid + of constant propagation completely: it would be done by the call + to build_equal_items_for_cond. + + IMPLEMENTATION + The implementation does not follow exactly the above rules to + build a new multiple equality for the equality predicate. + If it processes the equality of the form field1=field2, it + looks for multiple equalities me1 containig field1 and me2 containing + field2. 
If only one of them is found the fuction expands it with + the lacking field. If multiple equalities for both fields are + found they are merged. If both searches fail a new multiple equality + containing just field1 and field2 is added to the existing + multiple equalities. + If the function processes the predicate of the form field1=const, + it looks for a multiple equality containing field1. If found, the + function checks the constant of the multiple equality. If the value + is unknown, it is setup to const. Otherwise the value is compared with + const and the evaluation of the equality predicate is performed. + When expanding/merging equality predicates from the upper levels + the function first copies them for the current level. It looks + acceptable, as this happens rarely. The implementation without + copying would be much more complicated. + + RETURN + TRUE if the predicate is a simple equality predicate to be used + for building multiple equalities + FALSE otherwise +*/ + +static bool check_simple_equality(Item *left_item, Item *right_item, + Item *item, COND_EQUAL *cond_equal) +{ + if (left_item->type() == Item::REF_ITEM && + ((Item_ref*)left_item)->ref_type() == Item_ref::VIEW_REF) + { + if (((Item_ref*)left_item)->depended_from) + return FALSE; + left_item= left_item->real_item(); + } + if (right_item->type() == Item::REF_ITEM && + ((Item_ref*)right_item)->ref_type() == Item_ref::VIEW_REF) + { + if (((Item_ref*)right_item)->depended_from) + return FALSE; + right_item= right_item->real_item(); + } + if (left_item->type() == Item::FIELD_ITEM && + right_item->type() == Item::FIELD_ITEM && + !((Item_field*)left_item)->depended_from && + !((Item_field*)right_item)->depended_from) + { + /* The predicate the form field1=field2 is processed */ + + Field *left_field= ((Item_field*) left_item)->field; + Field *right_field= ((Item_field*) right_item)->field; + + if (!left_field->eq_def(right_field)) + return FALSE; + + /* Search for multiple equalities containing 
field1 and/or field2 */ + bool left_copyfl, right_copyfl; + Item_equal *left_item_equal= + find_item_equal(cond_equal, left_field, &left_copyfl); + Item_equal *right_item_equal= + find_item_equal(cond_equal, right_field, &right_copyfl); + + /* As (NULL=NULL) != TRUE we can't just remove the predicate f=f */ + if (left_field->eq(right_field)) /* f = f */ + return (!(left_field->maybe_null() && !left_item_equal)); + + if (left_item_equal && left_item_equal == right_item_equal) + { + /* + The equality predicate is inference of one of the existing + multiple equalities, i.e the condition is already covered + by upper level equalities + */ + return TRUE; + } + + /* Copy the found multiple equalities at the current level if needed */ + if (left_copyfl) + { + /* left_item_equal of an upper level contains left_item */ + left_item_equal= new Item_equal(left_item_equal); + cond_equal->current_level.push_back(left_item_equal); + } + if (right_copyfl) + { + /* right_item_equal of an upper level contains right_item */ + right_item_equal= new Item_equal(right_item_equal); + cond_equal->current_level.push_back(right_item_equal); + } + + if (left_item_equal) + { + /* left item was found in the current or one of the upper levels */ + if (! 
right_item_equal) + left_item_equal->add((Item_field *) right_item); + else + { + /* Merge two multiple equalities forming a new one */ + left_item_equal->merge(right_item_equal); + /* Remove the merged multiple equality from the list */ + List_iterator<Item_equal> li(cond_equal->current_level); + while ((li++) != right_item_equal); + li.remove(); + } + } + else + { + /* left item was not found neither the current nor in upper levels */ + if (right_item_equal) + right_item_equal->add((Item_field *) left_item); + else + { + /* None of the fields was found in multiple equalities */ + Item_equal *item_equal= new Item_equal((Item_field *) left_item, + (Item_field *) right_item); + cond_equal->current_level.push_back(item_equal); + } + } + return TRUE; + } + + { + /* The predicate of the form field=const/const=field is processed */ + Item *const_item= 0; + Item_field *field_item= 0; + if (left_item->type() == Item::FIELD_ITEM && + !((Item_field*)left_item)->depended_from && + right_item->const_item()) + { + field_item= (Item_field*) left_item; + const_item= right_item; + } + else if (right_item->type() == Item::FIELD_ITEM && + !((Item_field*)right_item)->depended_from && + left_item->const_item()) + { + field_item= (Item_field*) right_item; + const_item= left_item; + } + + if (const_item && + field_item->result_type() == const_item->result_type()) + { + bool copyfl; + + if (field_item->result_type() == STRING_RESULT) + { + CHARSET_INFO *cs= ((Field_str*) field_item->field)->charset(); + if (!item) + { + Item_func_eq *eq_item; + if ((eq_item= new Item_func_eq(left_item, right_item))) + return FALSE; + eq_item->set_cmp_func(); + eq_item->quick_fix_field(); + item= eq_item; + } + if ((cs != ((Item_func *) item)->compare_collation()) || + !cs->coll->propagate(cs, 0, 0)) + return FALSE; + } + + Item_equal *item_equal = find_item_equal(cond_equal, + field_item->field, ©fl); + if (copyfl) + { + item_equal= new Item_equal(item_equal); + 
cond_equal->current_level.push_back(item_equal); + } + if (item_equal) + { + /* + The flag cond_false will be set to 1 after this, if item_equal + already contains a constant and its value is not equal to + the value of const_item. + */ + item_equal->add(const_item); + } + else + { + item_equal= new Item_equal(const_item, field_item); + cond_equal->current_level.push_back(item_equal); + } + return TRUE; + } + } + return FALSE; +} + + +/* + Convert row equalities into a conjunction of regular equalities + + SYNOPSIS + check_row_equality() + left_row left term of the row equality to be processed + right_row right term of the row equality to be processed + cond_equal multiple equalities that must hold together with the predicate + eq_list results of conversions of row equalities that are not simple + enough to form multiple equalities + + DESCRIPTION + The function converts a row equality of the form (E1,...,En)=(E'1,...,E'n) + into a list of equalities E1=E'1,...,En=E'n. For each of these equalities + Ei=E'i the function checks whether it is a simple equality or a row equality. + If it is a simple equality it is used to expand multiple equalities of + cond_equal. If it is a row equality it converted to a sequence of equalities + between row elements. If Ei=E'i is neither a simple equality nor a row + equality the item for this predicate is added to eq_list. 
+ + RETURN + TRUE if conversion has succeeded (no fatal error) + FALSE otherwise +*/ + +static bool check_row_equality(Item *left_row, Item_row *right_row, + COND_EQUAL *cond_equal, List<Item>* eq_list) +{ + uint n= left_row->cols(); + for (uint i= 0 ; i < n; i++) + { + bool is_converted; + Item *left_item= left_row->element_index(i); + Item *right_item= right_row->element_index(i); + if (left_item->type() == Item::ROW_ITEM && + right_item->type() == Item::ROW_ITEM) + is_converted= check_row_equality((Item_row *) left_item, + (Item_row *) right_item, + cond_equal, eq_list); + else + is_converted= check_simple_equality(left_item, right_item, 0, cond_equal); + + if (!is_converted) + { + Item_func_eq *eq_item; + if (!(eq_item= new Item_func_eq(left_item, right_item))) + return FALSE; + eq_item->set_cmp_func(); + eq_item->quick_fix_field(); + eq_list->push_back(eq_item); + } + } + return TRUE; +} + + +/* + Eliminate row equalities and form multiple equalities predicates + + SYNOPSIS + check_equality() + item predicate to process + cond_equal multiple equalities that must hold together with the predicate + eq_list results of conversions of row equalities that are not simple + enough to form multiple equalities + + DESCRIPTION + This function checks whether the item is a simple equality + i.e. the one that equates a field with another field or a constant + (field=field_item or field=constant_item), or, a row equality. + For a simple equality the function looks for a multiple equality + in the lists referenced directly or indirectly by cond_equal inferring + the given simple equality. If it doesn't find any, it builds/expands + multiple equality that covers the predicate. + Row equalities are eliminated substituted for conjunctive regular equalities + which are treated in the same way as original equality predicates. + + RETURN + TRUE if re-writing rules have been applied + FALSE otherwise, i.e. 
+ if the predicate is not an equality, + or, if the equality is neither a simple one nor a row equality, + or, if the procedure fails by a fatal error. +*/ + +static bool check_equality(Item *item, COND_EQUAL *cond_equal, + List<Item> *eq_list) +{ + if (item->type() == Item::FUNC_ITEM && + ((Item_func*) item)->functype() == Item_func::EQ_FUNC) + { + Item *left_item= ((Item_func*) item)->arguments()[0]; + Item *right_item= ((Item_func*) item)->arguments()[1]; + + if (left_item->type() == Item::ROW_ITEM && + right_item->type() == Item::ROW_ITEM) + return check_row_equality((Item_row *) left_item, + (Item_row *) right_item, + cond_equal, eq_list); + else + return check_simple_equality(left_item, right_item, item, cond_equal); + } + return FALSE; +} + + +/* + Replace all equality predicates in a condition by multiple equality items + + SYNOPSIS + build_equal_items_for_cond() + cond condition(expression) where to make replacement + inherited path to all inherited multiple equality items + + DESCRIPTION + At each 'and' level the function detects items for equality predicates + and replaced them by a set of multiple equality items of class Item_equal, + taking into account inherited equalities from upper levels. + If an equality predicate is used not in a conjunction it's just + replaced by a multiple equality predicate. + For each 'and' level the function set a pointer to the inherited + multiple equalities in the cond_equal field of the associated + object of the type Item_cond_and. + The function also traverses the cond tree and and for each field reference + sets a pointer to the multiple equality item containing the field, if there + is any. If this multiple equality equates fields to a constant the + function replaces the field reference by the constant in the cases + when the field is not of a string type or when the field reference is + just an argument of a comparison predicate. 
+ The function also determines the maximum number of members in + equality lists of each Item_cond_and object assigning it to + cond_equal->max_members of this object and updating accordingly + the upper levels COND_EQUAL structures. + + NOTES + Multiple equality predicate =(f1,..fn) is equivalent to the conjuction of + f1=f2, .., fn-1=fn. It substitutes any inference from these + equality predicates that is equivalent to the conjunction. + Thus, =(a1,a2,a3) can substitute for ((a1=a3) AND (a2=a3) AND (a2=a1)) as + it is equivalent to ((a1=a2) AND (a2=a3)). + The function always makes a substitution of all equality predicates occured + in a conjuction for a minimal set of multiple equality predicates. + This set can be considered as a canonical representation of the + sub-conjunction of the equality predicates. + E.g. (t1.a=t2.b AND t2.b>5 AND t1.a=t3.c) is replaced by + (=(t1.a,t2.b,t3.c) AND t2.b>5), not by + (=(t1.a,t2.b) AND =(t1.a,t3.c) AND t2.b>5); + while (t1.a=t2.b AND t2.b>5 AND t3.c=t4.d) is replaced by + (=(t1.a,t2.b) AND =(t3.c=t4.d) AND t2.b>5), + but if additionally =(t4.d,t2.b) is inherited, it + will be replaced by (=(t1.a,t2.b,t3.c,t4.d) AND t2.b>5) + + IMPLEMENTATION + The function performs the substitution in a recursive descent by + the condtion tree, passing to the next AND level a chain of multiple + equality predicates which have been built at the upper levels. + The Item_equal items built at the level are attached to other + non-equality conjucts as a sublist. The pointer to the inherited + multiple equalities is saved in the and condition object (Item_cond_and). + This chain allows us for any field reference occurence easyly to find a + multiple equality that must be held for this occurence. + For each AND level we do the following: + - scan it for all equality predicate (=) items + - join them into disjoint Item_equal() groups + - process the included OR conditions recursively to do the same for + lower AND levels. 
+ We need to do things in this order as lower AND levels need to know about + all possible Item_equal objects in upper levels. + + RETURN + pointer to the transformed condition +*/ + +static COND *build_equal_items_for_cond(COND *cond, + COND_EQUAL *inherited) +{ + Item_equal *item_equal; + uint members; + COND_EQUAL cond_equal; + cond_equal.upper_levels= inherited; + + if (cond->type() == Item::COND_ITEM) + { + List<Item> eq_list; + bool and_level= ((Item_cond*) cond)->functype() == + Item_func::COND_AND_FUNC; + List<Item> *args= ((Item_cond*) cond)->argument_list(); + + List_iterator<Item> li(*args); + Item *item; + + if (and_level) + { + /* + Retrieve all conjucts of this level detecting the equality + that are subject to substitution by multiple equality items and + removing each such predicate from the conjunction after having + found/created a multiple equality whose inference the predicate is. + */ + while ((item= li++)) + { + /* + PS/SP note: we can safely remove a node from AND-OR + structure here because it's restored before each + re-execution of any prepared statement/stored procedure. + */ + if (check_equality(item, &cond_equal, &eq_list)) + li.remove(); + } + + List_iterator_fast<Item_equal> it(cond_equal.current_level); + while ((item_equal= it++)) + { + item_equal->fix_length_and_dec(); + item_equal->update_used_tables(); + members= item_equal->members(); + if (cond_equal.max_members < members) + cond_equal.max_members= members; + } + members= cond_equal.max_members; + if (inherited && inherited->max_members < members) + { + do + { + inherited->max_members= members; + inherited= inherited->upper_levels; + } + while (inherited); + } + + ((Item_cond_and*)cond)->cond_equal= cond_equal; + inherited= &(((Item_cond_and*)cond)->cond_equal); + } + /* + Make replacement of equality predicates for lower levels + of the condition expression. 
+ */ + li.rewind(); + while ((item= li++)) + { + Item *new_item; + if ((new_item = build_equal_items_for_cond(item, inherited))!= item) + { + /* This replacement happens only for standalone equalities */ + /* + This is ok with PS/SP as the replacement is done for + arguments of an AND/OR item, which are restored for each + execution of PS/SP. + */ + li.replace(new_item); + } + } + if (and_level) + { + args->concat(&eq_list); + args->concat((List<Item> *)&cond_equal.current_level); + } + } + else if (cond->type() == Item::FUNC_ITEM) + { + List<Item> eq_list; + /* + If an equality predicate forms the whole and level, + we call it standalone equality and it's processed here. + E.g. in the following where condition + WHERE a=5 AND (b=5 or a=c) + (b=5) and (a=c) are standalone equalities. + In general we can't leave alone standalone eqalities: + for WHERE a=b AND c=d AND (b=c OR d=5) + b=c is replaced by =(a,b,c,d). + */ + if (check_equality(cond, &cond_equal, &eq_list)) + { + int n= cond_equal.current_level.elements + eq_list.elements; + if (n == 0) + return new Item_int((longlong) 1,1); + else if (n == 1) + { + if ((item_equal= cond_equal.current_level.pop())) + { + item_equal->fix_length_and_dec(); + item_equal->update_used_tables(); + return item_equal; + } + else + return eq_list.pop(); + } + else + { + /* + Here a new AND level must be created. It can happen only + when a row equality is processed as a standalone predicate. 
+ */ + Item_cond_and *and_cond= new Item_cond_and(eq_list); + and_cond->quick_fix_field(); + List<Item> *args= and_cond->argument_list(); + List_iterator_fast<Item_equal> it(cond_equal.current_level); + while ((item_equal= it++)) + { + item_equal->fix_length_and_dec(); + item_equal->update_used_tables(); + members= item_equal->members(); + if (cond_equal.max_members < members) + cond_equal.max_members= members; + } + and_cond->cond_equal= cond_equal; + args->concat((List<Item> *)&cond_equal.current_level); + + return and_cond; + } + } + /* + For each field reference in cond, not from equal item predicates, + set a pointer to the multiple equality it belongs to (if there is any) + as soon the field is not of a string type or the field reference is + an argument of a comparison predicate. + */ + byte *is_subst_valid= (byte *) 1; + cond= cond->compile(&Item::subst_argument_checker, + &is_subst_valid, + &Item::equal_fields_propagator, + (byte *) inherited); + cond->update_used_tables(); + } + return cond; +} + + +/* + Build multiple equalities for a condition and all on expressions that + inherit these multiple equalities + + SYNOPSIS + build_equal_items() + thd Thread handler + cond condition to build the multiple equalities for + inherited path to all inherited multiple equality items + join_list list of join tables to which the condition refers to + cond_equal_ref :out pointer to the structure to place built equalities in + + DESCRIPTION + The function first applies the build_equal_items_for_cond function + to build all multiple equalities for condition cond utilizing equalities + referred through the parameter inherited. The extended set of + equalities is returned in the structure referred by the cond_equal_ref + parameter. After this the function calls itself recursively for + all on expressions whose direct references can be found in join_list + and who inherit directly the multiple equalities just having built. 
+ + NOTES + The on expression used in an outer join operation inherits all equalities + from the on expression of the embedding join, if there is any, or + otherwise - from the where condition. + This fact is not obvious, but presumably can be proved. + Consider the following query: + SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t1.a=t3.a AND t2.a=t4.a + WHERE t1.a=t2.a; + If the on expression in the query inherits =(t1.a,t2.a), then we + can build the multiple equality =(t1.a,t2.a,t3.a,t4.a) that infers + the equality t3.a=t4.a. Although the on expression + t1.a=t3.a AND t2.a=t4.a AND t3.a=t4.a is not equivalent to the one + in the query the latter can be replaced by the former: the new query + will return the same result set as the original one. + + Interesting that multiple equality =(t1.a,t2.a,t3.a,t4.a) allows us + to use t1.a=t3.a AND t3.a=t4.a under the on condition: + SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t1.a=t3.a AND t3.a=t4.a + WHERE t1.a=t2.a + This query equivalent to: + SELECT * FROM (t1 LEFT JOIN (t3,t4) ON t1.a=t3.a AND t3.a=t4.a),t2 + WHERE t1.a=t2.a + Similarly the original query can be rewritten to the query: + SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t2.a=t4.a AND t3.a=t4.a + WHERE t1.a=t2.a + that is equivalent to: + SELECT * FROM (t2 LEFT JOIN (t3,t4)ON t2.a=t4.a AND t3.a=t4.a), t1 + WHERE t1.a=t2.a + Thus, applying equalities from the where condition we basically + can get more freedom in performing join operations. + Althogh we don't use this property now, it probably makes sense to use + it in the future. 
+ + RETURN + pointer to the transformed condition containing multiple equalities +*/ + +static COND *build_equal_items(THD *thd, COND *cond, + COND_EQUAL *inherited, + List<TABLE_LIST> *join_list, + COND_EQUAL **cond_equal_ref) +{ + COND_EQUAL *cond_equal= 0; + + if (cond) + { + cond= build_equal_items_for_cond(cond, inherited); + cond->update_used_tables(); + if (cond->type() == Item::COND_ITEM && + ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) + cond_equal= &((Item_cond_and*) cond)->cond_equal; + else if (cond->type() == Item::FUNC_ITEM && + ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC) + { + cond_equal= new COND_EQUAL; + cond_equal->current_level.push_back((Item_equal *) cond); + } + } + if (cond_equal) + { + cond_equal->upper_levels= inherited; + inherited= cond_equal; + } + *cond_equal_ref= cond_equal; + + if (join_list) + { + TABLE_LIST *table; + List_iterator<TABLE_LIST> li(*join_list); + + while ((table= li++)) + { + if (table->on_expr) + { + List<TABLE_LIST> *nested_join_list= table->nested_join ? + &table->nested_join->join_list : NULL; + /* + We can modify table->on_expr because its old value will + be restored before re-execution of PS/SP. + */ + table->on_expr= build_equal_items(thd, table->on_expr, inherited, + nested_join_list, + &table->cond_equal); + } + } + } + + return cond; +} + + +/* + Compare field items by table order in the execution plan + + SYNOPSIS + compare_fields_by_table_order() + field1 first field item to compare + field2 second field item to compare + table_join_idx index to tables determining table order + + DESCRIPTION + field1 considered as better than field2 if the table containing + field1 is accessed earlier than the table containing field2. + The function finds out what of two fields is better according + this criteria. 
+ + RETURN + 1, if field1 is better than field2 + -1, if field2 is better than field1 + 0, otherwise +*/ + +static int compare_fields_by_table_order(Item_field *field1, + Item_field *field2, + void *table_join_idx) +{ + int cmp= 0; + bool outer_ref= 0; + if (field2->used_tables() & OUTER_REF_TABLE_BIT) + { + outer_ref= 1; + cmp= -1; + } + if (field2->used_tables() & OUTER_REF_TABLE_BIT) + { + outer_ref= 1; + cmp++; + } + if (outer_ref) + return cmp; + JOIN_TAB **idx= (JOIN_TAB **) table_join_idx; + cmp= idx[field2->field->table->tablenr]-idx[field1->field->table->tablenr]; + return cmp < 0 ? -1 : (cmp ? 1 : 0); +} + + +/* + Generate minimal set of simple equalities equivalent to a multiple equality + + SYNOPSIS + eliminate_item_equal() + cond condition to add the generated equality to + upper_levels structure to access multiple equality of upper levels + item_equal multiple equality to generate simple equality from + + DESCRIPTION + The function retrieves the fields of the multiple equality item + item_equal and for each field f: + - if item_equal contains const it generates the equality f=const_item; + - otherwise, if f is not the first field, generates the equality + f=item_equal->get_first(). + All generated equality are added to the cond conjunction. + + NOTES + Before generating an equality function checks that it has not + been generated for multiple equalities of the upper levels. + E.g. for the following where condition + WHERE a=5 AND ((a=b AND b=c) OR c>4) + the upper level AND condition will contain =(5,a), + while the lower level AND condition will contain =(5,a,b,c). + When splitting =(5,a,b,c) into a separate equality predicates + we should omit 5=a, as we have it already in the upper level. + The following where condition gives us a more complicated case: + WHERE t1.a=t2.b AND t3.c=t4.d AND (t2.b=t3.c OR t4.e>5 ...) AND ... 
+ Given the tables are accessed in the order t1->t2->t3->t4 for + the selected query execution plan the lower level multiple + equality =(t1.a,t2.b,t3.c,t4.d) formally should be converted to + t1.a=t2.b AND t1.a=t3.c AND t1.a=t4.d. But t1.a=t2.a will be + generated for the upper level. Also t3.c=t4.d will be generated there. + So only t1.a=t3.c should be left in the lower level. + If cond is equal to 0, then not more then one equality is generated + and a pointer to it is returned as the result of the function. + + RETURN + The condition with generated simple equalities or + a pointer to the simple generated equality, if success. + 0, otherwise. +*/ + +static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels, + Item_equal *item_equal) +{ + List<Item> eq_list; + Item_func_eq *eq_item= 0; + if (((Item *) item_equal)->const_item() && !item_equal->val_int()) + return new Item_int((longlong) 0,1); + Item *item_const= item_equal->get_const(); + Item_equal_iterator it(*item_equal); + Item *head; + if (item_const) + head= item_const; + else + { + head= item_equal->get_first(); + it++; + } + Item_field *item_field; + while ((item_field= it++)) + { + Item_equal *upper= item_field->find_item_equal(upper_levels); + Item_field *item= item_field; + if (upper) + { + if (item_const && upper->get_const()) + item= 0; + else + { + Item_equal_iterator li(*item_equal); + while ((item= li++) != item_field) + { + if (item->find_item_equal(upper_levels) == upper) + break; + } + } + } + if (item == item_field) + { + if (eq_item) + eq_list.push_back(eq_item); + eq_item= new Item_func_eq(item_field, head); + if (!eq_item) + return 0; + eq_item->set_cmp_func(); + eq_item->quick_fix_field(); + } + } + + if (!cond && !eq_list.head()) + { + if (!eq_item) + return new Item_int((longlong) 1,1); + return eq_item; + } + + if (eq_item) + eq_list.push_back(eq_item); + if (!cond) + cond= new Item_cond_and(eq_list); + else + { + DBUG_ASSERT(cond->type() == Item::COND_ITEM); + ((Item_cond 
*) cond)->add_at_head(&eq_list); + } + + cond->quick_fix_field(); + cond->update_used_tables(); + + return cond; +} + + +/* + Substitute every field reference in a condition by the best equal field + and eliminate all multiple equality predicates + + SYNOPSIS + substitute_for_best_equal_field() + cond condition to process + cond_equal multiple equalities to take into consideration + table_join_idx index to tables determining field preference + + DESCRIPTION + The function retrieves the cond condition and for each encountered + multiple equality predicate it sorts the field references in it + according to the order of tables specified by the table_join_idx + parameter. Then it eliminates the multiple equality predicate it + replacing it by the conjunction of simple equality predicates + equating every field from the multiple equality to the first + field in it, or to the constant, if there is any. + After this the function retrieves all other conjuncted + predicates substitute every field reference by the field reference + to the first equal field or equal constant if there are any. + + NOTES + At the first glance full sort of fields in multiple equality + seems to be an overkill. Yet it's not the case due to possible + new fields in multiple equality item of lower levels. We want + the order in them to comply with the order of upper levels. 
+ + RETURN + The transformed condition +*/ + +static COND* substitute_for_best_equal_field(COND *cond, + COND_EQUAL *cond_equal, + void *table_join_idx) +{ + Item_equal *item_equal; + + if (cond->type() == Item::COND_ITEM) + { + List<Item> *cond_list= ((Item_cond*) cond)->argument_list(); + + bool and_level= ((Item_cond*) cond)->functype() == + Item_func::COND_AND_FUNC; + if (and_level) + { + cond_equal= &((Item_cond_and *) cond)->cond_equal; + cond_list->disjoin((List<Item> *) &cond_equal->current_level); + + List_iterator_fast<Item_equal> it(cond_equal->current_level); + while ((item_equal= it++)) + { + item_equal->sort(&compare_fields_by_table_order, table_join_idx); + } + } + + List_iterator<Item> li(*cond_list); + Item *item; + while ((item= li++)) + { + Item *new_item =substitute_for_best_equal_field(item, cond_equal, + table_join_idx); + /* + This works OK with PS/SP re-execution as changes are made to + the arguments of AND/OR items only + */ + if (new_item != item) + li.replace(new_item); + } + + if (and_level) + { + List_iterator_fast<Item_equal> it(cond_equal->current_level); + while ((item_equal= it++)) + { + cond= eliminate_item_equal(cond, cond_equal->upper_levels, item_equal); + // This occurs when eliminate_item_equal() founds that cond is + // always false and substitutes it with Item_int 0. + // Due to this, value of item_equal will be 0, so just return it. 
+ if (cond->type() != Item::COND_ITEM) + break; + } + } + if (cond->type() == Item::COND_ITEM && + !((Item_cond*)cond)->argument_list()->elements) + cond= new Item_int((int32)cond->val_bool()); + + } + else if (cond->type() == Item::FUNC_ITEM && + ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC) + { + item_equal= (Item_equal *) cond; + item_equal->sort(&compare_fields_by_table_order, table_join_idx); + if (cond_equal && cond_equal->current_level.head() == item_equal) + cond_equal= 0; + return eliminate_item_equal(0, cond_equal, item_equal); + } + else + cond->transform(&Item::replace_equal_field, 0); + return cond; +} + + +/* + Check appearance of new constant items in multiple equalities + of a condition after reading a constant table + + SYNOPSIS + update_const_equal_items() + cond condition whose multiple equalities are to be checked + table constant table that has been read + + DESCRIPTION + The function retrieves the cond condition and for each encountered + multiple equality checks whether new constants have appeared after + reading the constant (single row) table tab. If so it adjusts + the multiple equality appropriately. 
+*/ + +static void update_const_equal_items(COND *cond, JOIN_TAB *tab) +{ + if (!(cond->used_tables() & tab->table->map)) + return; + + if (cond->type() == Item::COND_ITEM) + { + List<Item> *cond_list= ((Item_cond*) cond)->argument_list(); + List_iterator_fast<Item> li(*cond_list); + Item *item; + while ((item= li++)) + update_const_equal_items(item, tab); + } + else if (cond->type() == Item::FUNC_ITEM && + ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC) + { + Item_equal *item_equal= (Item_equal *) cond; + bool contained_const= item_equal->get_const() != NULL; + item_equal->update_const(); + if (!contained_const && item_equal->get_const()) + { + /* Update keys for range analysis */ + Item_equal_iterator it(*item_equal); + Item_field *item_field; + while ((item_field= it++)) + { + Field *field= item_field->field; + JOIN_TAB *stat= field->table->reginfo.join_tab; + key_map possible_keys= field->key_start; + possible_keys.intersect(field->table->keys_in_use_for_query); + stat[0].const_keys.merge(possible_keys); + } + } + } +} + + /* change field = field to field = const for each found field = const in the and_level @@ -4603,11 +7729,12 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, Item_func::Functype functype= func->functype(); if (right_item->eq(field,0) && left_item != value && + right_item->cmp_context == field->cmp_context && (left_item->result_type() != STRING_RESULT || value->result_type() != STRING_RESULT || left_item->collation.collation == value->collation.collation)) { - Item *tmp=value->new_item(); + Item *tmp=value->clone_item(); tmp->collation.set(right_item->collation); if (tmp) @@ -4626,11 +7753,12 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, } } else if (left_item->eq(field,0) && right_item != value && + left_item->cmp_context == field->cmp_context && (right_item->result_type() != STRING_RESULT || value->result_type() != STRING_RESULT || right_item->collation.collation == 
value->collation.collation)) { - Item *tmp=value->new_item(); + Item *tmp= value->clone_item(); tmp->collation.set(left_item->collation); if (tmp) @@ -4658,7 +7786,7 @@ change_cond_ref_to_const(THD *thd, I_List<COND_CMP> *save_list, SYNOPSIS remove_additional_cond() - conds - condition for processing + conds Condition for processing RETURN VALUES new conditions @@ -4748,27 +7876,543 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list, } +/* + Simplify joins replacing outer joins by inner joins whenever it's possible + + SYNOPSIS + simplify_joins() + join reference to the query info + join_list list representation of the join to be converted + conds conditions to add on expressions for converted joins + top true <=> conds is the where condition + + DESCRIPTION + The function, during a retrieval of join_list, eliminates those + outer joins that can be converted into inner join, possibly nested. + It also moves the on expressions for the converted outer joins + and from inner joins to conds. + The function also calculates some attributes for nested joins: + - used_tables + - not_null_tables + - dep_tables. + - on_expr_dep_tables + The first two attributes are used to test whether an outer join can + be substituted for an inner join. The third attribute represents the + relation 'to be dependent on' for tables. If table t2 is dependent + on table t1, then in any evaluated execution plan table access to + table t2 must precede access to table t2. This relation is used also + to check whether the query contains invalid cross-references. + The forth attribute is an auxiliary one and is used to calculate + dep_tables. + As the attribute dep_tables qualifies possibles orders of tables in the + execution plan, the dependencies required by the straight join + modifiers are reflected in this attribute as well. + The function also removes all braces that can be removed from the join + expression without changing its meaning. 
+ + NOTES + An outer join can be replaced by an inner join if the where condition + or the on expression for an embedding nested join contains a conjunctive + predicate rejecting null values for some attribute of the inner tables. + + E.g. in the query: + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a WHERE t2.b < 5 + the predicate t2.b < 5 rejects nulls. + The query is converted first to: + SELECT * FROM t1 INNER JOIN t2 ON t2.a=t1.a WHERE t2.b < 5 + then to the equivalent form: + SELECT * FROM t1, t2 ON t2.a=t1.a WHERE t2.b < 5 AND t2.a=t1.a. + + Similarly the following query: + SELECT * from t1 LEFT JOIN (t2, t3) ON t2.a=t1.a t3.b=t1.b + WHERE t2.c < 5 + is converted to: + SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a t3.b=t1.b + + One conversion might trigger another: + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a + LEFT JOIN t3 ON t3.b=t2.b + WHERE t3 IS NOT NULL => + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a, t3 + WHERE t3 IS NOT NULL AND t3.b=t2.b => + SELECT * FROM t1, t2, t3 + WHERE t3 IS NOT NULL AND t3.b=t2.b AND t2.a=t1.a + + The function removes all unnecessary braces from the expression + produced by the conversions. + E.g. SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b + finally is converted to: + SELECT * FROM t1, t2, t3 WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b + + It also will remove braces from the following queries: + SELECT * from (t1 LEFT JOIN t2 ON t2.a=t1.a) LEFT JOIN t3 ON t3.b=t2.b + SELECT * from (t1, (t2,t3)) WHERE t1.a=t2.a AND t2.b=t3.b. + + The benefit of this simplification procedure is that it might return + a query for which the optimizer can evaluate execution plan with more + join orders. With a left join operation the optimizer does not + consider any plan where one of the inner tables is before some of outer + tables. + + IMPLEMENTATION. + The function is implemented by a recursive procedure. 
On the recursive + ascent all attributes are calculated, all outer joins that can be + converted are replaced and then all unnecessary braces are removed. + As join list contains join tables in the reverse order sequential + elimination of outer joins does not require extra recursive calls. + + EXAMPLES + Here is an example of a join query with invalid cross references: + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t3.a LEFT JOIN t3 ON t3.b=t1.b + + RETURN VALUE + The new condition, if success + 0, otherwise +*/ + static COND * -optimize_cond(THD *thd, COND *conds, Item::cond_result *cond_value) +simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top) { - SELECT_LEX *select= thd->lex->current_select; + TABLE_LIST *table; + NESTED_JOIN *nested_join; + TABLE_LIST *prev_table= 0; + List_iterator<TABLE_LIST> li(*join_list); + DBUG_ENTER("simplify_joins"); + + /* + Try to simplify join operations from join_list. + The most outer join operation is checked for conversion first. + */ + while ((table= li++)) + { + table_map used_tables; + table_map not_null_tables= (table_map) 0; + + if ((nested_join= table->nested_join)) + { + /* + If the element of join_list is a nested join apply + the procedure to its nested join list first. + */ + if (table->on_expr) + { + Item *expr= table->on_expr; + /* + If an on expression E is attached to the table, + check all null rejected predicates in this expression. + If such a predicate over an attribute belonging to + an inner table of an embedded outer join is found, + the outer join is converted to an inner join and + the corresponding on expression is added to E. 
+ */ + expr= simplify_joins(join, &nested_join->join_list, + expr, FALSE); + + if (!table->prep_on_expr || expr != table->on_expr) + { + DBUG_ASSERT(expr); + + table->on_expr= expr; + table->prep_on_expr= expr->copy_andor_structure(join->thd); + } + } + nested_join->used_tables= (table_map) 0; + nested_join->not_null_tables=(table_map) 0; + conds= simplify_joins(join, &nested_join->join_list, conds, top); + used_tables= nested_join->used_tables; + not_null_tables= nested_join->not_null_tables; + } + else + { + if (!table->prep_on_expr) + table->prep_on_expr= table->on_expr; + used_tables= table->table->map; + if (conds) + not_null_tables= conds->not_null_tables(); + } + + if (table->embedding) + { + table->embedding->nested_join->used_tables|= used_tables; + table->embedding->nested_join->not_null_tables|= not_null_tables; + } + + if (!table->outer_join || (used_tables & not_null_tables)) + { + /* + For some of the inner tables there are conjunctive predicates + that reject nulls => the outer join can be replaced by an inner join. + */ + table->outer_join= 0; + if (table->on_expr) + { + /* Add on expression to the where condition. */ + if (conds) + { + conds= and_conds(conds, table->on_expr); + conds->top_level_item(); + /* conds is always a new item as both cond and on_expr existed */ + DBUG_ASSERT(!conds->fixed); + conds->fix_fields(join->thd, &conds); + } + else + conds= table->on_expr; + table->prep_on_expr= table->on_expr= 0; + } + } + + if (!top) + continue; + + /* + Only inner tables of non-convertible outer joins + remain with on_expr. + */ + if (table->on_expr) + { + table->dep_tables|= table->on_expr->used_tables(); + if (table->embedding) + { + table->dep_tables&= ~table->embedding->nested_join->used_tables; + /* + Embedding table depends on tables used + in embedded on expressions. 
+ */ + table->embedding->on_expr_dep_tables|= table->on_expr->used_tables(); + } + else + table->dep_tables&= ~table->table->map; + } + + if (prev_table) + { + /* The order of tables is reverse: prev_table follows table */ + if (prev_table->straight) + prev_table->dep_tables|= used_tables; + if (prev_table->on_expr) + { + prev_table->dep_tables|= table->on_expr_dep_tables; + table_map prev_used_tables= prev_table->nested_join ? + prev_table->nested_join->used_tables : + prev_table->table->map; + /* + If on expression contains only references to inner tables + we still make the inner tables dependent on the outer tables. + It would be enough to set dependency only on one outer table + for them. Yet this is really a rare case. + */ + if (!(prev_table->on_expr->used_tables() & ~prev_used_tables)) + prev_table->dep_tables|= used_tables; + } + } + prev_table= table; + } + + /* Flatten nested joins that can be flattened. */ + li.rewind(); + while ((table= li++)) + { + nested_join= table->nested_join; + if (nested_join && !table->on_expr) + { + TABLE_LIST *tbl; + List_iterator<TABLE_LIST> it(nested_join->join_list); + while ((tbl= it++)) + { + tbl->embedding= table->embedding; + tbl->join_list= table->join_list; + } + li.replace(nested_join->join_list); + } + } + DBUG_RETURN(conds); +} + + +/* + Assign each nested join structure a bit in nested_join_map + + SYNOPSIS + build_bitmap_for_nested_joins() + join Join being processed + join_list List of tables + first_unused Number of first unused bit in nested_join_map before the + call + + DESCRIPTION + Assign each nested join structure (except "confluent" ones - those that + embed only one element) a bit in nested_join_map. + + NOTE + This function is called after simplify_joins(), when there are no + redundant nested joins, #non_confluent_nested_joins <= #tables_in_join so + we will not run out of bits in nested_join_map. + + RETURN + First unused bit in nested_join_map after the call. 
+*/ + +static uint build_bitmap_for_nested_joins(List<TABLE_LIST> *join_list, + uint first_unused) +{ + List_iterator<TABLE_LIST> li(*join_list); + TABLE_LIST *table; + DBUG_ENTER("build_bitmap_for_nested_joins"); + while ((table= li++)) + { + NESTED_JOIN *nested_join; + if ((nested_join= table->nested_join)) + { + /* + It is guaranteed by simplify_joins() function that a nested join + that has only one child represents a single table VIEW (and the child + is an underlying table). We don't assign bits to such nested join + structures because + 1. it is redundant (a "sequence" of one table cannot be interleaved + with anything) + 2. we could run out bits in nested_join_map otherwise. + */ + if (nested_join->join_list.elements != 1) + { + nested_join->nj_map= (nested_join_map) 1 << first_unused++; + first_unused= build_bitmap_for_nested_joins(&nested_join->join_list, + first_unused); + } + } + } + DBUG_RETURN(first_unused); +} + + +/* + Set NESTED_JOIN::counter=0 in all nested joins in passed list + + SYNOPSIS + reset_nj_counters() + join_list List of nested joins to process. It may also contain base + tables which will be ignored. + + DESCRIPTION + Recursively set NESTED_JOIN::counter=0 for all nested joins contained in + the passed join_list. 
+*/ + +static void reset_nj_counters(List<TABLE_LIST> *join_list) +{ + List_iterator<TABLE_LIST> li(*join_list); + TABLE_LIST *table; + DBUG_ENTER("reset_nj_counters"); + while ((table= li++)) + { + NESTED_JOIN *nested_join; + if ((nested_join= table->nested_join)) + { + nested_join->counter= 0; + reset_nj_counters(&nested_join->join_list); + } + } + DBUG_VOID_RETURN; +} + + +/* + Check interleaving with an inner tables of an outer join for extension table + + SYNOPSIS + check_interleaving_with_nj() + join Join being processed + last_tab Last table in current partial join order (this function is + not called for empty partial join orders) + next_tab Table we're going to extend the current partial join with + + DESCRIPTION + Check if table next_tab can be added to current partial join order, and + if yes, record that it has been added. + + The function assumes that both current partial join order and its + extension with next_tab are valid wrt table dependencies. + + IMPLEMENTATION + LIMITATIONS ON JOIN ORDER + The nested [outer] joins executioner algorithm imposes these limitations + on join order: + 1. "Outer tables first" - any "outer" table must be before any + corresponding "inner" table. + 2. "No interleaving" - tables inside a nested join must form a continuous + sequence in join order (i.e. the sequence must not be interrupted by + tables that are outside of this nested join). + + #1 is checked elsewhere, this function checks #2 provided that #1 has + been already checked. + + WHY NEED NON-INTERLEAVING + Consider an example: + + select * from t0 join t1 left join (t2 join t3) on cond1 + + The join order "t1 t2 t0 t3" is invalid: + + table t0 is outside of the nested join, so WHERE condition for t0 is + attached directly to t0 (without triggers, and it may be used to access + t0). 
Applying WHERE(t0) to (t2,t0,t3) record is invalid as we may miss + combinations of (t1, t2, t3) that satisfy condition cond1, and produce a + null-complemented (t1, t2.NULLs, t3.NULLs) row, which should not have + been produced. + + If table t0 is not between t2 and t3, the problem doesn't exist: + * If t0 is located after (t2,t3), WHERE(t0) is applied after nested join + processing has finished. + * If t0 is located before (t2,t3), predicates like WHERE_cond(t0, t2) are + wrapped into condition triggers, which takes care of correct nested + join processing. + + HOW IT IS IMPLEMENTED + The limitations on join order can be rephrased as follows: for valid + join order one must be able to: + 1. write down the used tables in the join order on one line. + 2. for each nested join, put one '(' and one ')' on the said line + 3. write "LEFT JOIN" and "ON (...)" where appropriate + 4. get a query equivalent to the query we're trying to execute. + + Calls to check_interleaving_with_nj() are equivalent to writing the + above described line from left to right. + A single check_interleaving_with_nj(A,B) call is equivalent to writing + table B and appropriate brackets on condition that table A and + appropriate brackets is the last what was written. Graphically the + transition is as follows: + + +---- current position + | + ... last_tab ))) | ( next_tab ) )..) | ... + X Y Z | + +- need to move to this + position. + + Notes about the position: + The caller guarantees that there is no more then one X-bracket by + checking "!(remaining_tables & s->dependent)" before calling this + function. X-bracket may have a pair in Y-bracket. + + When "writing" we store/update this auxilary info about the current + position: + 1. join->cur_embedding_map - bitmap of pairs of brackets (aka nested + joins) we've opened but didn't close. + 2. 
{each NESTED_JOIN structure not simplified away}->counter - number + of this nested join's children that have already been added to to + the partial join order. + + RETURN + FALSE Join order extended, nested joins info about current join order + (see NOTE section) updated. + TRUE Requested join order extension not allowed. +*/ + +static bool check_interleaving_with_nj(JOIN_TAB *last_tab, JOIN_TAB *next_tab) +{ + TABLE_LIST *next_emb= next_tab->table->pos_in_table_list->embedding; + JOIN *join= last_tab->join; + + if (join->cur_embedding_map & ~next_tab->embedding_map) + { + /* + next_tab is outside of the "pair of brackets" we're currently in. + Cannot add it. + */ + return TRUE; + } + + /* + Do update counters for "pairs of brackets" that we've left (marked as + X,Y,Z in the above picture) + */ + for (;next_emb; next_emb= next_emb->embedding) + { + next_emb->nested_join->counter++; + if (next_emb->nested_join->counter == 1) + { + /* + next_emb is the first table inside a nested join we've "entered". In + the picture above, we're looking at the 'X' bracket. Don't exit yet as + X bracket might have Y pair bracket. + */ + join->cur_embedding_map |= next_emb->nested_join->nj_map; + } + + if (next_emb->nested_join->join_list.elements != + next_emb->nested_join->counter) + break; + + /* + We're currently at Y or Z-bracket as depicted in the above picture. + Mark that we've left it and continue walking up the brackets hierarchy. + */ + join->cur_embedding_map &= ~next_emb->nested_join->nj_map; + } + return FALSE; +} + + +/* + Nested joins perspective: Remove the last table from the join order + + SYNOPSIS + restore_prev_nj_state() + last join table to remove, it is assumed to be the last in current + partial join order. + + DESCRIPTION + Remove the last table from the partial join order and update the nested + joins counters and join->cur_embedding_map. 
It is ok to call this + function for the first table in join order (for which + check_interleaving_with_nj has not been called) +*/ + +static void restore_prev_nj_state(JOIN_TAB *last) +{ + TABLE_LIST *last_emb= last->table->pos_in_table_list->embedding; + JOIN *join= last->join; + while (last_emb && !(--last_emb->nested_join->counter)) + { + join->cur_embedding_map &= last_emb->nested_join->nj_map; + last_emb= last_emb->embedding; + } +} + + +static COND * +optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list, + Item::cond_result *cond_value) +{ + THD *thd= join->thd; DBUG_ENTER("optimize_cond"); - if (conds) + + if (!conds) + *cond_value= Item::COND_TRUE; + else { + /* + Build all multiple equality predicates and eliminate equality + predicates that can be inferred from these multiple equalities. + For each reference of a field included into a multiple equality + that occurs in a function set a pointer to the multiple equality + predicate. Substitute a constant instead of this field if the + multiple equality contains a constant. 
+ */ DBUG_EXECUTE("where", print_where(conds, "original");); + conds= build_equal_items(join->thd, conds, NULL, join_list, + &join->cond_equal); + DBUG_EXECUTE("where",print_where(conds,"after equal_items");); + /* change field = field to field = const for each found field = const */ propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds); /* Remove all instances of item == item Remove all and-levels where CONST item != CONST item */ - DBUG_EXECUTE("where", print_where(conds, "after const change");); - conds= remove_eq_conds(thd, conds, cond_value); - DBUG_EXECUTE("info", print_where(conds, "after remove");); + DBUG_EXECUTE("where",print_where(conds,"after const change");); + conds= remove_eq_conds(thd, conds, cond_value) ; + DBUG_EXECUTE("info",print_where(conds,"after remove");); } - else - *cond_value= Item::COND_TRUE; - DBUG_RETURN(conds); } @@ -4862,7 +8506,7 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) Field *field=((Item_field*) args[0])->field; if (field->flags & AUTO_INCREMENT_FLAG && !field->table->maybe_null && (thd->options & OPTION_AUTO_IS_NULL) && - thd->current_insert_id && thd->substitute_null_with_insert_id) + thd->current_insert_id && thd->substitute_null_with_insert_id) { #ifdef HAVE_QUERY_CACHE query_cache_abort(&thd->net); @@ -4871,17 +8515,22 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) if ((new_cond= new Item_func_eq(args[0], new Item_int("last_insert_id()", thd->current_insert_id, - 21)))) + MY_INT64_NUM_DECIMAL_DIGITS)))) { /* - Set THD::last_insert_id_used manually, as this statement - uses LAST_INSERT_ID() in a sense, and should issue - LAST_INSERT_ID_EVENT. + Set THD::last_insert_id_used_bin_log manually, as this + statement uses LAST_INSERT_ID() in a sense, and should + issue LAST_INSERT_ID_EVENT. 
*/ - thd->last_insert_id_used= TRUE; + thd->last_insert_id_used_bin_log= TRUE; cond=new_cond; - cond->fix_fields(thd, 0, &cond); + /* + Item_func_eq can't be fixed after creation so we do not check + cond->fixed, also it do not need tables so we use 0 as second + argument. + */ + cond->fix_fields(thd, &cond); } thd->substitute_null_with_insert_id= FALSE; // Clear for next request } @@ -4895,10 +8544,20 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) if ((new_cond= new Item_func_eq(args[0],new Item_int("0", 0, 2)))) { cond=new_cond; - cond->fix_fields(thd, 0, &cond); + /* + Item_func_eq can't be fixed after creation so we do not check + cond->fixed, also it do not need tables so we use 0 as second + argument. + */ + cond->fix_fields(thd, &cond); } } } + if (cond->const_item()) + { + *cond_value= eval_const_cond(cond) ? Item::COND_TRUE : Item::COND_FALSE; + return (COND*) 0; + } } else if (cond->const_item()) { @@ -4920,6 +8579,46 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) return cond; // Point at next and level } +/* + Check if equality can be used in removing components of GROUP BY/DISTINCT + + SYNOPSIS + test_if_equality_guarantees_uniqueness() + l the left comparison argument (a field if any) + r the right comparison argument (a const of any) + + DESCRIPTION + Checks if an equality predicate can be used to take away + DISTINCT/GROUP BY because it is known to be true for exactly one + distinct value (e.g. <expr> == <const>). + Arguments must be of the same type because e.g. + <string_field> = <int_const> may match more than 1 distinct value from + the column. + We must take into consideration and the optimization done for various + string constants when compared to dates etc (see Item_int_with_ref) as + well as the collation of the arguments. 
+ + RETURN VALUE + TRUE can be used + FALSE cannot be used +*/ +static bool +test_if_equality_guarantees_uniqueness(Item *l, Item *r) +{ + return r->const_item() && + /* elements must be of the same result type */ + (r->result_type() == l->result_type() || + /* or dates compared to longs */ + (((l->type() == Item::FIELD_ITEM && + ((Item_field *)l)->field->can_be_compared_as_longlong()) || + (l->type() == Item::FUNC_ITEM && + ((Item_func *)l)->result_as_longlong())) && + r->result_type() == INT_RESULT)) + /* and must have the same collation if compared as strings */ + && (l->result_type() != STRING_RESULT || + l->collation.collation == r->collation.collation); +} + /* Return 1 if the item is a const value in all the WHERE clause */ @@ -4956,7 +8655,7 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item) Item *right_item= ((Item_func*) cond)->arguments()[1]; if (left_item->eq(comp_item,1)) { - if (right_item->const_item()) + if (test_if_equality_guarantees_uniqueness (left_item, right_item)) { if (*const_item) return right_item->eq(*const_item, 1); @@ -4966,7 +8665,7 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item) } else if (right_item->eq(comp_item,1)) { - if (left_item->const_item()) + if (test_if_equality_guarantees_uniqueness (right_item, left_item)) { if (*const_item) return left_item->eq(*const_item, 1); @@ -4978,7 +8677,6 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item) return 0; } - /**************************************************************************** Create internal temporary table ****************************************************************************/ @@ -4991,7 +8689,6 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item) thd Thread handler org_field field from which new field will be created name New field name - item Item to create a field for table Temporary table item !=NULL if item->result_field should point to new field. 
This is relevant for how fill_record() is going to work: @@ -5007,29 +8704,37 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item) new_created field */ -static Field* create_tmp_field_from_field(THD *thd, Field* org_field, - const char *name, TABLE *table, - Item_field *item, - uint convert_blob_length) +Field* create_tmp_field_from_field(THD *thd, Field* org_field, + const char *name, TABLE *table, + Item_field *item, uint convert_blob_length) { Field *new_field; - if (convert_blob_length && org_field->flags & BLOB_FLAG) - new_field= new Field_varstring(convert_blob_length, org_field->maybe_null(), + /* + Make sure that the blob fits into a Field_varstring which has + 2-byte lenght. + */ + if (convert_blob_length && convert_blob_length < UINT_MAX16 && + (org_field->flags & BLOB_FLAG)) + new_field= new Field_varstring(convert_blob_length, + org_field->maybe_null(), org_field->field_name, table, org_field->charset()); else - new_field= org_field->new_field(thd->mem_root, table); + new_field= org_field->new_field(thd->mem_root, table, + table == org_field->table); if (new_field) { if (item) item->result_field= new_field; else new_field->field_name= name; + new_field->flags|= (org_field->flags & NO_DEFAULT_VALUE_FLAG); if (org_field->maybe_null() || (item && item->maybe_null)) new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join - if (org_field->type() == FIELD_TYPE_VAR_STRING) - table->db_create_options|= HA_OPTION_PACK_RECORD; + if (org_field->type() == MYSQL_TYPE_VAR_STRING || + org_field->type() == MYSQL_TYPE_VARCHAR) + table->s->db_create_options|= HA_OPTION_PACK_RECORD; else if (org_field->type() == FIELD_TYPE_DOUBLE) ((Field_double *) new_field)->not_fixed= TRUE; } @@ -5060,7 +8765,8 @@ static Field* create_tmp_field_from_field(THD *thd, Field* org_field, 0 on error new_created field */ -static Field* create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, + +static Field *create_tmp_field_from_item(THD *thd, Item *item, 
TABLE *table, Item ***copy_func, bool modify_item, uint convert_blob_length) { @@ -5074,37 +8780,45 @@ static Field* create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, item->name, table, item->decimals, TRUE); break; case INT_RESULT: - new_field=new Field_longlong(item->max_length, maybe_null, - item->name, table, item->unsigned_flag); + /* Select an integer type with the minimal fit precision */ + if (item->max_length > MY_INT32_NUM_DECIMAL_DIGITS) + new_field=new Field_longlong(item->max_length, maybe_null, + item->name, table, item->unsigned_flag); + else + new_field=new Field_long(item->max_length, maybe_null, + item->name, table, item->unsigned_flag); break; case STRING_RESULT: DBUG_ASSERT(item->collation.collation); enum enum_field_types type; /* - DATE/TIME fields have STRING_RESULT result type. To preserve - type they needed to be handled separately. + DATE/TIME and GEOMETRY fields have STRING_RESULT result type. + To preserve type they needed to be handled separately. */ if ((type= item->field_type()) == MYSQL_TYPE_DATETIME || - type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE) + type == MYSQL_TYPE_TIME || type == MYSQL_TYPE_DATE || + type == MYSQL_TYPE_TIMESTAMP || type == MYSQL_TYPE_GEOMETRY) new_field= item->tmp_table_field_from_field_type(table); - else if (item->max_length/item->collation.collation->mbmaxlen > - CONVERT_IF_BIGGER_TO_BLOB) - { - if (convert_blob_length) - new_field= new Field_varstring(convert_blob_length, maybe_null, - item->name, table, - item->collation.collation); - else - new_field= new Field_blob(item->max_length, maybe_null, item->name, - table, item->collation.collation); - } + /* + Make sure that the blob fits into a Field_varstring which has + 2-byte lenght. 
+ */ + else if (item->max_length/item->collation.collation->mbmaxlen > 255 && + convert_blob_length < UINT_MAX16 && convert_blob_length) + new_field= new Field_varstring(convert_blob_length, maybe_null, + item->name, table, + item->collation.collation); else - new_field= new Field_string(item->max_length, maybe_null, item->name, - table, item->collation.collation); + new_field= item->make_string_field(table); + new_field->set_derivation(item->collation.derivation); + break; + case DECIMAL_RESULT: + new_field= new Field_new_decimal(item->max_length, maybe_null, item->name, + table, item->decimals, item->unsigned_flag); break; - case ROW_RESULT: - default: + case ROW_RESULT: + default: // This case should never be choosen DBUG_ASSERT(0); new_field= 0; // to satisfy compiler (uninitialized variable) @@ -5117,6 +8831,36 @@ static Field* create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, return new_field; } + +/* + Create field for information schema table + + SYNOPSIS + create_tmp_field_for_schema() + thd Thread handler + table Temporary table + item Item to create a field for + + RETURN + 0 on error + new_created field +*/ + +Field *create_tmp_field_for_schema(THD *thd, Item *item, TABLE *table) +{ + if (item->field_type() == MYSQL_TYPE_VARCHAR) + { + if (item->max_length > MAX_FIELD_VARCHARLENGTH / + item->collation.collation->mbmaxlen) + return new Field_blob(item->max_length, item->maybe_null, + item->name, table, item->collation.collation); + return new Field_varstring(item->max_length, item->maybe_null, item->name, + table, item->collation.collation); + } + return item->tmp_table_field_from_field_type(table); +} + + /* Create field for temporary table @@ -5130,6 +8874,7 @@ static Field* create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, in this array from_field if field will be created using other field as example, pointer example field will be written here + default_field If field has a default value field, store it here group 1 if we are 
going to do a relative group by on result modify_item 1 if item->result_field should point to new item. This is relevent for how fill_record() is going to @@ -5148,82 +8893,75 @@ static Field* create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, Item ***copy_func, Field **from_field, - bool group, bool modify_item, uint convert_blob_length, - bool make_copy_field) + Field **default_field, + bool group, bool modify_item, + bool table_cant_handle_bit_fields, + bool make_copy_field, + uint convert_blob_length) { + Field *result; + Item::Type orig_type= type; + Item *orig_item= 0; + + if (type != Item::FIELD_ITEM && + item->real_item()->type() == Item::FIELD_ITEM) + { + orig_item= item; + item= item->real_item(); + type= Item::FIELD_ITEM; + } switch (type) { case Item::SUM_FUNC_ITEM: { Item_sum *item_sum=(Item_sum*) item; - bool maybe_null=item_sum->maybe_null; - switch (item_sum->sum_func()) { - case Item_sum::AVG_FUNC: /* Place for sum & count */ - if (group) - return new Field_string(sizeof(double)+sizeof(longlong), - 0, item->name,table,&my_charset_bin); - else - return new Field_double(item_sum->max_length, maybe_null, - item->name, table, item_sum->decimals, TRUE); - case Item_sum::VARIANCE_FUNC: /* Place for sum & count */ - case Item_sum::STD_FUNC: - if (group) - return new Field_string(sizeof(double)*2+sizeof(longlong), - 0, item->name,table,&my_charset_bin); - else - return new Field_double(item_sum->max_length, maybe_null, - item->name, table, item_sum->decimals, TRUE); - case Item_sum::UNIQUE_USERS_FUNC: - return new Field_long(9,maybe_null,item->name,table,1); - case Item_sum::MIN_FUNC: - case Item_sum::MAX_FUNC: - if (item_sum->args[0]->type() == Item::FIELD_ITEM) - { - *from_field= ((Item_field*) item_sum->args[0])->field; - return create_tmp_field_from_field(thd, *from_field, item->name, table, - NULL, convert_blob_length); - } - /* fall through */ - default: - switch 
(item_sum->result_type()) { - case REAL_RESULT: - return new Field_double(item_sum->max_length, maybe_null, - item->name, table, item_sum->decimals, TRUE); - case INT_RESULT: - return new Field_longlong(item_sum->max_length,maybe_null, - item->name,table,item->unsigned_flag); - case STRING_RESULT: - if (item_sum->max_length/item_sum->collation.collation->mbmaxlen > - CONVERT_IF_BIGGER_TO_BLOB) - { - if (convert_blob_length) - return new Field_varstring(convert_blob_length, maybe_null, - item->name, table, - item->collation.collation); - else - return new Field_blob(item_sum->max_length, maybe_null, item->name, - table, item->collation.collation); - } - return new Field_string(item_sum->max_length,maybe_null, - item->name,table,item->collation.collation); - case ROW_RESULT: - default: - // This case should never be choosen - DBUG_ASSERT(0); - thd->fatal_error(); - return 0; - } - } - /* We never come here */ + result= item_sum->create_tmp_field(group, table, convert_blob_length); + if (!result) + thd->fatal_error(); + return result; } case Item::FIELD_ITEM: case Item::DEFAULT_VALUE_ITEM: { Item_field *field= (Item_field*) item; - return create_tmp_field_from_field(thd, (*from_field= field->field), - item->name, table, - modify_item ? (Item_field*) item : NULL, - convert_blob_length); - } + bool orig_modify= modify_item; + if (orig_type == Item::REF_ITEM) + modify_item= 0; + /* + If item have to be able to store NULLs but underlaid field can't do it, + create_tmp_field_from_field() can't be used for tmp field creation. 
+ */ + if (field->maybe_null && !field->field->maybe_null()) + { + result= create_tmp_field_from_item(thd, item, table, NULL, + modify_item, convert_blob_length); + *from_field= field->field; + if (result && modify_item) + field->result_field= result; + } + else if (table_cant_handle_bit_fields && field->field->type() == + FIELD_TYPE_BIT) + { + *from_field= field->field; + result= create_tmp_field_from_item(thd, item, table, copy_func, + modify_item, convert_blob_length); + if (result && modify_item) + field->result_field= result; + } + else + result= create_tmp_field_from_field(thd, (*from_field= field->field), + orig_item ? orig_item->name : + item->name, + table, + modify_item ? field : + NULL, + convert_blob_length); + if (orig_type == Item::REF_ITEM && orig_modify) + ((Item_ref*)orig_item)->set_result_field(result); + if (field->field->eq_def(result)) + *default_field= field->field; + return result; + } + /* Fall through */ case Item::FUNC_ITEM: case Item::COND_ITEM: case Item::FIELD_AVG_ITEM: @@ -5233,6 +8971,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, case Item::PROC_ITEM: case Item::INT_ITEM: case Item::REAL_ITEM: + case Item::DECIMAL_ITEM: case Item::STRING_ITEM: case Item::REF_ITEM: case Item::NULL_ITEM: @@ -5242,11 +8981,13 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, DBUG_ASSERT(((Item_result_field*)item)->result_field); *from_field= ((Item_result_field*)item)->result_field; } - return create_tmp_field_from_item(thd, item, table, (make_copy_field ? 0 : - copy_func), modify_item, - convert_blob_length); - case Item::TYPE_HOLDER: - return ((Item_type_holder *)item)->make_field_by_type(table); + return create_tmp_field_from_item(thd, item, table, + (make_copy_field ? 
0 : copy_func), + modify_item, convert_blob_length); + case Item::TYPE_HOLDER: + result= ((Item_type_holder *)item)->make_field_by_type(table); + result->set_derivation(item->collation.derivation); + return result; default: // Dosen't have to be stored return 0; } @@ -5255,56 +8996,93 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, /* Create a temp table according to a field list. - Set distinct if duplicates could be removed - Given fields field pointers are changed to point at tmp_table - for send_fields + + SYNOPSIS + create_tmp_table() + thd thread handle + param a description used as input to create the table + fields list of items that will be used to define + column types of the table (also see NOTES) + group TODO document + distinct should table rows be distinct + save_sum_fields see NOTES + select_options + rows_limit + table_alias possible name of the temporary table that can be used + for name resolving; can be "". + + DESCRIPTION + Given field pointers are changed to point at tmp_table for + send_fields. The table object is self contained: it's + allocated in its own memory root, as well as Field objects + created for table columns. + This function will replace Item_sum items in 'fields' list with + corresponding Item_field items, pointing at the fields in the + temporary table, unless this was prohibited by TRUE + value of argument save_sum_fields. The Item_field objects + are created in THD memory root. 
*/ +#define STRING_TOTAL_LENGTH_TO_PACK_ROWS 128 +#define AVG_STRING_LENGTH_TO_PACK_ROWS 64 +#define RATIO_TO_PACK_ROWS 2 +#define MIN_STRING_LENGTH_TO_PACK_ROWS 10 + TABLE * create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, ORDER *group, bool distinct, bool save_sum_fields, - ulong select_options, ha_rows rows_limit, + ulonglong select_options, ha_rows rows_limit, char *table_alias) { + MEM_ROOT *mem_root_save, own_root; TABLE *table; - uint i,field_count,reclength,null_count,null_pack_length, - hidden_null_count, hidden_null_pack_length, hidden_field_count, - blob_count,group_null_items; - bool using_unique_constraint=0; + uint i,field_count,null_count,null_pack_length; + uint copy_func_count= param->func_count; + uint hidden_null_count, hidden_null_pack_length, hidden_field_count; + uint blob_count,group_null_items, string_count; + uint temp_pool_slot=MY_BIT_NONE; + ulong reclength, string_total_length; + bool using_unique_constraint= 0; + bool use_packed_rows= 0; bool not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS); char *tmpname,path[FN_REFLEN]; byte *pos,*group_buff; uchar *null_flags; - Field **reg_field, **from_field, **blob_field; + Field **reg_field, **from_field, **default_field; + uint *blob_field; Copy_field *copy=0; KEY *keyinfo; KEY_PART_INFO *key_part_info; Item **copy_func; MI_COLUMNDEF *recinfo; - uint temp_pool_slot=MY_BIT_NONE; + uint total_uneven_bit_length= 0; bool force_copy_fields= param->force_copy_fields; DBUG_ENTER("create_tmp_table"); DBUG_PRINT("enter",("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d", (int) distinct, (int) save_sum_fields, (ulong) rows_limit,test(group))); - statistic_increment(created_tmp_tables, &LOCK_status); + statistic_increment(thd->status_var.created_tmp_tables, &LOCK_status); - if (use_temp_pool) + if (use_temp_pool && !(test_flags & TEST_KEEP_TMP_TABLES)) temp_pool_slot = bitmap_set_next(&temp_pool); if (temp_pool_slot != MY_BIT_NONE) // we got a slot sprintf(path, 
"%s_%lx_%i", tmp_file_prefix, current_pid, temp_pool_slot); - else // if we run out of slots or we are not using tempool + else + { + /* if we run out of slots or we are not using tempool */ sprintf(path,"%s%lx_%lx_%x", tmp_file_prefix,current_pid, thd->thread_id, thd->tmp_table++); + } + /* + No need to change table name to lower case as we are only creating + MyISAM or HEAP tables here + */ fn_format(path, path, mysql_tmpdir, "", MY_REPLACE_EXT|MY_UNPACK_FILENAME); - if (lower_case_table_names) - my_casedn_str(files_charset_info, path); - if (group) { if (!param->quick_group) @@ -5312,7 +9090,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, else for (ORDER *tmp=group ; tmp ; tmp=tmp->next) { (*tmp->item)->marker=4; // Store null in key - if ((*tmp->item)->max_length >= MAX_CHAR_WIDTH) + if ((*tmp->item)->max_length >= CONVERT_IF_BIGGER_TO_BLOB) using_unique_constraint=1; } if (param->group_length >= MAX_BLOB_WIDTH) @@ -5323,31 +9101,46 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, field_count=param->field_count+param->func_count+param->sum_func_count; hidden_field_count=param->hidden_field_count; - if (!my_multi_malloc(MYF(MY_WME), - &table,sizeof(*table), - ®_field, sizeof(Field*)*(field_count+1), - &blob_field, sizeof(Field*)*(field_count+1), - &from_field, sizeof(Field*)*field_count, - ©_func,sizeof(*copy_func)*(param->func_count+1), - ¶m->keyinfo,sizeof(*param->keyinfo), - &key_part_info, - sizeof(*key_part_info)*(param->group_parts+1), - ¶m->start_recinfo, - sizeof(*param->recinfo)*(field_count*2+4), - &tmpname,(uint) strlen(path)+1, - &group_buff,group && ! using_unique_constraint ? - param->group_length : 0, - NullS)) + + /* + When loose index scan is employed as access method, it already + computes all groups and the result of all aggregate functions. 
We + make space for the items of the aggregate function in the list of + functions TMP_TABLE_PARAM::items_to_copy, so that the values of + these items are stored in the temporary table. + */ + if (param->precomputed_group_by) + copy_func_count+= param->sum_func_count; + + init_sql_alloc(&own_root, TABLE_ALLOC_BLOCK_SIZE, 0); + + if (!multi_alloc_root(&own_root, + &table, sizeof(*table), + ®_field, sizeof(Field*) * (field_count+1), + &default_field, sizeof(Field*) * (field_count), + &blob_field, sizeof(uint)*(field_count+1), + &from_field, sizeof(Field*)*field_count, + ©_func, sizeof(*copy_func)*(copy_func_count+1), + ¶m->keyinfo, sizeof(*param->keyinfo), + &key_part_info, + sizeof(*key_part_info)*(param->group_parts+1), + ¶m->start_recinfo, + sizeof(*param->recinfo)*(field_count*2+4), + &tmpname, (uint) strlen(path)+1, + &group_buff, group && ! using_unique_constraint ? + param->group_length : 0, + NullS)) { if (temp_pool_slot != MY_BIT_NONE) bitmap_clear_bit(&temp_pool, temp_pool_slot); DBUG_RETURN(NULL); /* purecov: inspected */ } - if (!(param->copy_field=copy=new Copy_field[field_count])) + /* Copy_field belongs to TMP_TABLE_PARAM, allocate it in THD mem_root */ + if (!(param->copy_field= copy= new (thd->mem_root) Copy_field[field_count])) { if (temp_pool_slot != MY_BIT_NONE) bitmap_clear_bit(&temp_pool, temp_pool_slot); - my_free((gptr) table,MYF(0)); /* purecov: inspected */ + free_root(&own_root, MYF(0)); /* purecov: inspected */ DBUG_RETURN(NULL); /* purecov: inspected */ } param->items_to_copy= copy_func; @@ -5356,30 +9149,42 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, bzero((char*) table,sizeof(*table)); bzero((char*) reg_field,sizeof(Field*)*(field_count+1)); + bzero((char*) default_field, sizeof(Field*) * (field_count)); bzero((char*) from_field,sizeof(Field*)*field_count); + + table->mem_root= own_root; + mem_root_save= thd->mem_root; + thd->mem_root= &table->mem_root; + table->field=reg_field; - table->blob_field= 
(Field_blob**) blob_field; - table->real_name=table->path=tmpname; - table->table_name= table_alias; + table->alias= table_alias; table->reginfo.lock_type=TL_WRITE; /* Will be updated */ table->db_stat=HA_OPEN_KEYFILE+HA_OPEN_RNDFILE; - table->blob_ptr_size=mi_portable_sizeof_char_ptr; table->map=1; - table->tmp_table= TMP_TABLE; - table->db_low_byte_first=1; // True for HEAP and MyISAM table->temp_pool_slot = temp_pool_slot; table->copy_blobs= 1; table->in_use= thd; - table->keys_for_keyread.init(); - table->keys_in_use.init(); - table->read_only_keys.init(); table->quick_keys.init(); table->used_keys.init(); table->keys_in_use_for_query.init(); + table->s= &table->share_not_to_be_used; + table->s->blob_field= blob_field; + table->s->table_name= table->s->path= tmpname; + table->s->db= ""; + table->s->blob_ptr_size= mi_portable_sizeof_char_ptr; + table->s->tmp_table= TMP_TABLE; + table->s->db_low_byte_first=1; // True for HEAP and MyISAM + table->s->table_charset= param->table_charset; + table->s->keys_for_keyread.init(); + table->s->keys_in_use.init(); + /* For easier error reporting */ + table->s->table_cache_key= (char*) (table->s->db= ""); + /* Calculate which type of fields we will store in the temporary table */ - reclength=blob_count=null_count=hidden_null_count=group_null_items=0; + reclength= string_total_length= 0; + blob_count= string_count= null_count= hidden_null_count= group_null_items= 0; param->using_indirect_summary_function=0; List_iterator_fast<Item> li(fields); @@ -5392,13 +9197,19 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, { if (item->with_sum_func && type != Item::SUM_FUNC_ITEM) { - /* - Mark that the we have ignored an item that refers to a summary - function. We need to know this if someone is going to use - DISTINCT on the result. 
- */ - param->using_indirect_summary_function=1; - continue; + if (item->used_tables() & OUTER_REF_TABLE_BIT) + item->update_used_tables(); + if (type == Item::SUBSELECT_ITEM || + (item->used_tables() & ~OUTER_REF_TABLE_BIT)) + { + /* + Mark that the we have ignored an item that refers to a summary + function. We need to know this if someone is going to use + DISTINCT on the result. + */ + param->using_indirect_summary_function=1; + continue; + } } if (item->const_item() && (int) hidden_field_count <= 0) continue; // We don't have to store this @@ -5412,21 +9223,35 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, Item *arg= *argp; if (!arg->const_item()) { + uint field_index= (uint) (reg_field - table->field); Field *new_field= create_tmp_field(thd, table, arg, arg->type(), ©_func, - tmp_from_field, group != 0,not_all_columns, - param->convert_blob_length, 0); + tmp_from_field, &default_field[field_index], + group != 0,not_all_columns, + distinct, 0, + param->convert_blob_length); if (!new_field) goto err; // Should be OOM tmp_from_field++; - *(reg_field++)= new_field; reclength+=new_field->pack_length(); if (new_field->flags & BLOB_FLAG) { - *blob_field++= new_field; + *blob_field++= field_index; blob_count++; } + if (new_field->type() == FIELD_TYPE_BIT) + total_uneven_bit_length+= new_field->field_length & 7; + new_field->field_index= field_index; + *(reg_field++)= new_field; + if (new_field->real_type() == MYSQL_TYPE_STRING || + new_field->real_type() == MYSQL_TYPE_VARCHAR) + { + string_count++; + string_total_length+= new_field->pack_length(); + } + thd->mem_root= mem_root_save; thd->change_item_tree(argp, new Item_field(new_field)); + thd->mem_root= &table->mem_root; if (!(new_field->flags & NOT_NULL_FLAG)) { null_count++; @@ -5436,11 +9261,13 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, */ (*argp)->maybe_null=1; } + new_field->query_id= thd->query_id; } } } else { + uint field_index= (uint) (reg_field - 
table->field); /* The last parameter to create_tmp_field() is a bit tricky: @@ -5450,13 +9277,20 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, write rows to the temporary table. We here distinguish between UNION and multi-table-updates by the fact that in the later case group is set to the row pointer. + + The test for item->marker == 4 is ensure we don't create a group-by + key over a bit field as heap tables can't handle that. */ - Field *new_field= create_tmp_field(thd, table, item, type, ©_func, - tmp_from_field, group != 0, - !force_copy_fields && - (not_all_columns || group !=0), - param->convert_blob_length, - force_copy_fields); + Field *new_field= (param->schema_table) ? + create_tmp_field_for_schema(thd, item, table) : + create_tmp_field(thd, table, item, type, ©_func, + tmp_from_field, &default_field[field_index], + group != 0, + !force_copy_fields && + (not_all_columns || group !=0), + item->marker == 4, force_copy_fields, + param->convert_blob_length); + if (!new_field) { if (thd->is_fatal_error) @@ -5469,9 +9303,11 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, reclength+=new_field->pack_length(); if (!(new_field->flags & NOT_NULL_FLAG)) null_count++; + if (new_field->type() == FIELD_TYPE_BIT) + total_uneven_bit_length+= new_field->field_length & 7; if (new_field->flags & BLOB_FLAG) { - *blob_field++= new_field; + *blob_field++= field_index; blob_count++; } if (item->marker == 4 && item->maybe_null) @@ -5479,28 +9315,37 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, group_null_items++; new_field->flags|= GROUP_FLAG; } + new_field->query_id= thd->query_id; + new_field->field_index= field_index; *(reg_field++) =new_field; } if (!--hidden_field_count) { + /* + This was the last hidden field; Remember how many hidden fields could + have null + */ hidden_null_count=null_count; /* - We need to update hidden_field_count as we may have stored group - functions with constant arguments 
+ We need to update hidden_field_count as we may have stored group + functions with constant arguments */ param->hidden_field_count= (uint) (reg_field - table->field); + null_count= 0; } } DBUG_ASSERT(field_count >= (uint) (reg_field - table->field)); field_count= (uint) (reg_field - table->field); + *reg_field= 0; *blob_field= 0; // End marker /* If result table is small; use a heap */ if (blob_count || using_unique_constraint || (select_options & (OPTION_BIG_TABLES | SELECT_SMALL_RESULT)) == - OPTION_BIG_TABLES ||(select_options & TMP_TABLE_FORCE_MYISAM)) + OPTION_BIG_TABLES || (select_options & TMP_TABLE_FORCE_MYISAM)) { - table->file=get_new_handler(table,table->db_type=DB_TYPE_MYISAM); + table->file= get_new_handler(table, &table->mem_root, + table->s->db_type= DB_TYPE_MYISAM); if (group && (param->group_parts > table->file->max_key_parts() || param->group_length > table->file->max_key_length())) @@ -5508,13 +9353,14 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, } else { - table->file=get_new_handler(table,table->db_type=DB_TYPE_HEAP); + table->file= get_new_handler(table, &table->mem_root, + table->s->db_type= DB_TYPE_HEAP); } if (!using_unique_constraint) reclength+= group_null_items; // null flag is stored separately - table->blob_fields=blob_count; + table->s->blob_fields= blob_count; if (blob_count == 0) { /* We need to ensure that first byte is not 0 for the delete link */ @@ -5524,22 +9370,31 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, null_count++; } hidden_null_pack_length=(hidden_null_count+7)/8; - null_pack_length=hidden_null_count+(null_count+7)/8; + null_pack_length= (hidden_null_pack_length + + (null_count + total_uneven_bit_length + 7) / 8); reclength+=null_pack_length; if (!reclength) reclength=1; // Dummy select + /* Use packed rows if there is blobs or a lot of space to gain */ + if (blob_count || + string_total_length >= STRING_TOTAL_LENGTH_TO_PACK_ROWS && + (reclength / 
string_total_length <= RATIO_TO_PACK_ROWS || + string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS)) + use_packed_rows= 1; - table->fields=field_count; - table->reclength=reclength; + table->s->fields= field_count; + table->s->reclength= reclength; { uint alloc_length=ALIGN_SIZE(reclength+MI_UNIQUE_HASH_LENGTH+1); - table->rec_buff_length=alloc_length; - if (!(table->record[0]= (byte *) my_malloc(alloc_length*3, MYF(MY_WME)))) + table->s->rec_buff_length= alloc_length; + if (!(table->record[0]= (byte*) + alloc_root(&table->mem_root, alloc_length*3))) goto err; table->record[1]= table->record[0]+alloc_length; - table->default_values= table->record[1]+alloc_length; + table->s->default_values= table->record[1]+alloc_length; } copy_func[0]=0; // End marker + param->func_count= copy_func - param->items_to_copy; recinfo=param->start_recinfo; null_flags=(uchar*) table->record[0]; @@ -5553,8 +9408,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, bfill(null_flags,null_pack_length,255); // Set null fields table->null_flags= (uchar*) table->record[0]; - table->null_fields= null_count+ hidden_null_count; - table->null_bytes= null_pack_length; + table->s->null_fields= null_count+ hidden_null_count; + table->s->null_bytes= null_pack_length; } null_count= (blob_count == 0) ? 1 : 0; hidden_field_count=param->hidden_field_count; @@ -5589,7 +9444,43 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, } else field->move_field((char*) pos,(uchar*) 0,0); + if (field->type() == FIELD_TYPE_BIT) + { + /* We have to reserve place for extra bits among null bits */ + ((Field_bit*) field)->set_bit_ptr(null_flags + null_count / 8, + null_count & 7); + null_count+= (field->field_length & 7); + } field->reset(); + + /* + Test if there is a default field value. 
The test for ->ptr is to skip + 'offset' fields generated by initalize_tables + */ + if (default_field[i] && default_field[i]->ptr) + { + /* + default_field[i] is set only in the cases when 'field' can + inherit the default value that is defined for the field referred + by the Item_field object from which 'field' has been created. + */ + my_ptrdiff_t diff; + Field *orig_field= default_field[i]; + + /* Get the value from default_values */ + diff= (my_ptrdiff_t) (orig_field->table->s->default_values- + orig_field->table->record[0]); + orig_field->move_field(diff); // Points now at default_values + if (orig_field->is_real_null()) + field->set_null(); + else + { + field->set_notnull(); + memcpy(field->ptr, orig_field->ptr, field->pack_length()); + } + orig_field->move_field(-diff); // Back to record[0] + } + if (from_field[i]) { /* Not a table Item */ copy->set(field,from_field[i],save_sum_fields); @@ -5602,10 +9493,9 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, recinfo->length=length; if (field->flags & BLOB_FLAG) recinfo->type= (int) FIELD_BLOB; - else if (!field->zero_pack() && - (field->type() == FIELD_TYPE_STRING || - field->type() == FIELD_TYPE_VAR_STRING) && - length >= 10 && blob_count) + else if (use_packed_rows && + field->real_type() == MYSQL_TYPE_STRING && + length >= MIN_STRING_LENGTH_TO_PACK_ROWS) recinfo->type=FIELD_SKIP_ENDSPACE; else recinfo->type=FIELD_NORMAL; @@ -5613,37 +9503,38 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, null_count=(null_count+7) & ~7; // move to next byte // fix table name in field entry - field->table_name= table->table_name; + field->table_name= &table->alias; } param->copy_field_end=copy; param->recinfo=recinfo; - store_record(table,default_values); // Make empty default record + store_record(table,s->default_values); // Make empty default record - if (thd->variables.tmp_table_size == ~(ulong) 0) // No limit - table->max_rows= ~(ha_rows) 0; + if 
(thd->variables.tmp_table_size == ~ (ulonglong) 0) // No limit + table->s->max_rows= ~(ha_rows) 0; else - table->max_rows=(((table->db_type == DB_TYPE_HEAP) ? - min(thd->variables.tmp_table_size, - thd->variables.max_heap_table_size) : - thd->variables.tmp_table_size)/ table->reclength); - set_if_bigger(table->max_rows,1); // For dummy start options + table->s->max_rows= (ha_rows) (((table->s->db_type == DB_TYPE_HEAP) ? + min(thd->variables.tmp_table_size, + thd->variables.max_heap_table_size) : + thd->variables.tmp_table_size)/ + table->s->reclength); + set_if_bigger(table->s->max_rows,1); // For dummy start options /* Push the LIMIT clause to the temporary table creation, so that we materialize only up to 'rows_limit' records instead of all result records. */ - set_if_smaller(table->max_rows, rows_limit); + set_if_smaller(table->s->max_rows, rows_limit); param->end_write_records= rows_limit; - keyinfo=param->keyinfo; + keyinfo= param->keyinfo; if (group) { DBUG_PRINT("info",("Creating group key in temporary table")); table->group=group; /* Table is grouped by key */ param->group_buff=group_buff; - table->keys=1; - table->uniques= test(using_unique_constraint); + table->s->keys=1; + table->s->uniques= test(using_unique_constraint); table->key_info=keyinfo; keyinfo->key_part=key_part_info; keyinfo->flags=HA_NOSAME; @@ -5651,6 +9542,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, keyinfo->key_length=0; keyinfo->rec_per_key=0; keyinfo->algorithm= HA_KEY_ALG_UNDEF; + keyinfo->name= (char*) "group_key"; ORDER *cur_group= group; for (; cur_group ; cur_group= cur_group->next, key_part_info++) { @@ -5659,35 +9551,40 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, key_part_info->null_bit=0; key_part_info->field= field; key_part_info->offset= field->offset(); - key_part_info->length= (uint16) field->pack_length(); + key_part_info->length= (uint16) field->key_length(); key_part_info->type= (uint8) field->key_type(); 
key_part_info->key_type = ((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT || - (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT) ? + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 || + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ? 0 : FIELDFLAG_BINARY; if (!using_unique_constraint) { cur_group->buff=(char*) group_buff; - if (!(cur_group->field=field->new_field(thd->mem_root,table))) + if (!(cur_group->field= field->new_key_field(thd->mem_root,table, + (char*) group_buff + + test(maybe_null), + field->null_ptr, + field->null_bit))) goto err; /* purecov: inspected */ if (maybe_null) { /* - To be able to group on NULL, we reserve place in group_buff - for the NULL flag just before the column. + To be able to group on NULL, we reserved place in group_buff + for the NULL flag just before the column. (see above). The field data is after this flag. - The NULL flag is updated by 'end_update()' and 'end_write()' + The NULL flag is updated in 'end_update()' and 'end_write()' */ keyinfo->flags|= HA_NULL_ARE_EQUAL; // def. 
that NULL == NULL key_part_info->null_bit=field->null_bit; key_part_info->null_offset= (uint) (field->null_ptr - (uchar*) table->record[0]); - cur_group->field->move_field((char*) ++cur_group->buff); - group_buff++; + cur_group->buff++; // Pointer to field data + group_buff++; // Skipp null flag } - else - cur_group->field->move_field((char*) group_buff); - group_buff+= key_part_info->length; + /* In GROUP BY 'a' and 'a ' are equal for VARCHAR fields */ + key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL; + group_buff+= cur_group->field->pack_length(); } keyinfo->key_length+= key_part_info->length; } @@ -5706,21 +9603,23 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, null_pack_length-=hidden_null_pack_length; keyinfo->key_parts= ((field_count-param->hidden_field_count)+ test(null_pack_length)); - table->distinct=1; - table->keys=1; + table->distinct= 1; + table->s->keys= 1; if (blob_count) { using_unique_constraint=1; - table->uniques=1; + table->s->uniques= 1; } if (!(key_part_info= (KEY_PART_INFO*) - sql_calloc((keyinfo->key_parts)*sizeof(KEY_PART_INFO)))) + alloc_root(&table->mem_root, + keyinfo->key_parts * sizeof(KEY_PART_INFO)))) goto err; + bzero((void*) key_part_info, keyinfo->key_parts * sizeof(KEY_PART_INFO)); table->key_info=keyinfo; keyinfo->key_part=key_part_info; keyinfo->flags=HA_NOSAME | HA_NULL_ARE_EQUAL; keyinfo->key_length=(uint16) reclength; - keyinfo->name=(char*) "tmp"; + keyinfo->name= (char*) "distinct_key"; keyinfo->algorithm= HA_KEY_ALG_UNDEF; keyinfo->rec_per_key=0; if (null_pack_length) @@ -5750,28 +9649,29 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, key_part_info->type= (uint8) (*reg_field)->key_type(); key_part_info->key_type = ((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT || - (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT) ? 
+ (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 || + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ? 0 : FIELDFLAG_BINARY; } } if (thd->is_fatal_error) // If end of memory goto err; /* purecov: inspected */ - table->db_record_offset=1; - if (table->db_type == DB_TYPE_MYISAM) + table->s->db_record_offset= 1; + if (table->s->db_type == DB_TYPE_MYISAM) { if (create_myisam_tmp_table(table,param,select_options)) goto err; } - if (!open_tmp_table(table)) - DBUG_RETURN(table); + if (open_tmp_table(table)) + goto err; - err: - /* - Hack to ensure that free_blobs() doesn't fail if blob_field is not yet - complete - */ - *table->blob_field= 0; + thd->mem_root= mem_root_save; + + DBUG_RETURN(table); + +err: + thd->mem_root= mem_root_save; free_tmp_table(thd,table); /* purecov: inspected */ if (temp_pool_slot != MY_BIT_NONE) bitmap_clear_bit(&temp_pool, temp_pool_slot); @@ -5779,10 +9679,135 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, } +/****************************************************************************/ + +/* + Create a reduced TABLE object with properly set up Field list from a + list of field definitions. + + SYNOPSIS + create_virtual_tmp_table() + thd connection handle + field_list list of column definitions + + DESCRIPTION + The created table doesn't have a table handler assotiated with + it, has no keys, no group/distinct, no copy_funcs array. + The sole purpose of this TABLE object is to use the power of Field + class to read/write data to/from table->record[0]. Then one can store + the record in any container (RB tree, hash, etc). + The table is created in THD mem_root, so are the table's fields. + Consequently, if you don't BLOB fields, you don't need to free it. 
+ + RETURN + 0 if out of memory, TABLE object in case of success +*/ + +TABLE *create_virtual_tmp_table(THD *thd, List<create_field> &field_list) +{ + uint field_count= field_list.elements; + uint blob_count= 0; + Field **field; + create_field *cdef; /* column definition */ + uint record_length= 0; + uint null_count= 0; /* number of columns which may be null */ + uint null_pack_length; /* NULL representation array length */ + TABLE_SHARE *s; + /* Create the table and list of all fields */ + TABLE *table= (TABLE*) thd->calloc(sizeof(*table)); + field= (Field**) thd->alloc((field_count + 1) * sizeof(Field*)); + if (!table || !field) + return 0; + + table->field= field; + table->s= s= &table->share_not_to_be_used; + s->fields= field_count; + + if (!(s->blob_field= (uint*)thd->alloc((field_list.elements + 1) * + sizeof(uint)))) + return 0; + + s->blob_ptr_size= mi_portable_sizeof_char_ptr; + + /* Create all fields and calculate the total length of record */ + List_iterator_fast<create_field> it(field_list); + while ((cdef= it++)) + { + *field= make_field(0, cdef->length, + (uchar*) (f_maybe_null(cdef->pack_flag) ? "" : 0), + f_maybe_null(cdef->pack_flag) ? 1 : 0, + cdef->pack_flag, cdef->sql_type, cdef->charset, + cdef->geom_type, cdef->unireg_check, + cdef->interval, cdef->field_name, table); + if (!*field) + goto error; + record_length+= (**field).pack_length(); + if (! 
((**field).flags & NOT_NULL_FLAG)) + ++null_count; + + if ((*field)->flags & BLOB_FLAG) + s->blob_field[blob_count++]= (uint) (field - table->field); + + ++field; + } + *field= NULL; /* mark the end of the list */ + s->blob_field[blob_count]= 0; /* mark the end of the list */ + s->blob_fields= blob_count; + + null_pack_length= (null_count + 7)/8; + s->reclength= record_length + null_pack_length; + s->rec_buff_length= ALIGN_SIZE(s->reclength + 1); + table->record[0]= (byte*) thd->alloc(s->rec_buff_length); + if (!table->record[0]) + goto error; + + if (null_pack_length) + { + table->null_flags= (uchar*) table->record[0]; + s->null_fields= null_count; + s->null_bytes= null_pack_length; + } + + table->in_use= thd; /* field->reset() may access table->in_use */ + { + /* Set up field pointers */ + byte *null_pos= table->record[0]; + byte *field_pos= null_pos + s->null_bytes; + uint null_bit= 1; + + for (field= table->field; *field; ++field) + { + Field *cur_field= *field; + if ((cur_field->flags & NOT_NULL_FLAG)) + cur_field->move_field((char*) field_pos); + else + { + cur_field->move_field((char*) field_pos, (uchar*) null_pos, null_bit); + null_bit<<= 1; + if (null_bit == (1 << 8)) + { + ++null_pos; + null_bit= 1; + } + } + cur_field->reset(); + + field_pos+= cur_field->pack_length(); + } + } + return table; +error: + for (field= table->field; *field; ++field) + delete *field; /* just invokes field destructor */ + return 0; +} + + static bool open_tmp_table(TABLE *table) { int error; - if ((error=table->file->ha_open(table->real_name,O_RDWR,HA_OPEN_TMP_TABLE))) + if ((error=table->file->ha_open(table->s->table_name,O_RDWR, + HA_OPEN_TMP_TABLE))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ table->db_stat=0; @@ -5794,29 +9819,30 @@ static bool open_tmp_table(TABLE *table) static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, - ulong options) + ulonglong options) { int error; MI_KEYDEF keydef; MI_UNIQUEDEF uniquedef; KEY 
*keyinfo=param->keyinfo; - DBUG_ENTER("create_myisam_tmp_table"); - if (table->keys) + + if (table->s->keys) { // Get keys for ni_create bool using_unique_constraint=0; - HA_KEYSEG *seg= (HA_KEYSEG*) sql_calloc(sizeof(*seg) * - keyinfo->key_parts); + HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root, + sizeof(*seg) * keyinfo->key_parts); if (!seg) goto err; + bzero(seg, sizeof(*seg) * keyinfo->key_parts); if (keyinfo->key_length >= table->file->max_key_length() || keyinfo->key_parts > table->file->max_key_parts() || - table->uniques) + table->s->uniques) { /* Can't create a key; Make a unique constraint instead of a key */ - table->keys=0; - table->uniques=1; + table->s->keys= 0; + table->s->uniques= 1; using_unique_constraint=1; bzero((char*) &uniquedef,sizeof(uniquedef)); uniquedef.keysegs=keyinfo->key_parts; @@ -5828,7 +9854,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, param->recinfo->type= FIELD_CHECK; param->recinfo->length=MI_UNIQUE_HASH_LENGTH; param->recinfo++; - table->reclength+=MI_UNIQUE_HASH_LENGTH; + table->s->reclength+=MI_UNIQUE_HASH_LENGTH; } else { @@ -5849,21 +9875,18 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, { seg->type= ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? - HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT); - seg->bit_start=seg->length - table->blob_ptr_size; + HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2); + seg->bit_start= (uint8)(field->pack_length() - table->s->blob_ptr_size); seg->flag= HA_BLOB_PART; seg->length=0; // Whole blob in unique constraint } else { - seg->type= - ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? 
- HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT); - if (!(field->flags & ZEROFILL_FLAG) && - (field->type() == FIELD_TYPE_STRING || - field->type() == FIELD_TYPE_VAR_STRING) && + seg->type= keyinfo->key_part[i].type; + /* Tell handler if it can do suffic space compression */ + if (field->real_type() == MYSQL_TYPE_STRING && keyinfo->key_part[i].length > 4) - seg->flag|=HA_SPACE_PACK; + seg->flag|= HA_SPACE_PACK; } if (!(field->flags & NOT_NULL_FLAG)) { @@ -5872,7 +9895,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, /* We are using a GROUP BY on something that contains NULL In this case we have to tell MyISAM that two NULL should - on INSERT be compared as equal + on INSERT be regarded at the same value */ if (!using_unique_constraint) keydef.flag|= HA_NULL_ARE_EQUAL; @@ -5886,10 +9909,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, OPTION_BIG_TABLES) create_info.data_file_length= ~(ulonglong) 0; - if ((error=mi_create(table->real_name,table->keys,&keydef, + if ((error=mi_create(table->s->table_name,table->s->keys,&keydef, (uint) (param->recinfo-param->start_recinfo), param->start_recinfo, - table->uniques, &uniquedef, + table->s->uniques, &uniquedef, &create_info, HA_CREATE_TMP_TABLE))) { @@ -5897,8 +9920,9 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, table->db_stat=0; goto err; } - statistic_increment(created_tmp_disk_tables, &LOCK_status); - table->db_record_offset=1; + statistic_increment(table->in_use->status_var.created_tmp_disk_tables, + &LOCK_status); + table->s->db_record_offset= 1; DBUG_RETURN(0); err: DBUG_RETURN(1); @@ -5908,13 +9932,14 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, void free_tmp_table(THD *thd, TABLE *entry) { + MEM_ROOT own_root= entry->mem_root; const char *save_proc_info; DBUG_ENTER("free_tmp_table"); - DBUG_PRINT("enter",("table: %s",entry->table_name)); + DBUG_PRINT("enter",("table: %s",entry->alias)); 
save_proc_info=thd->proc_info; thd->proc_info="removing tmp table"; - free_blobs(entry); + if (entry->file) { if (entry->db_stat) @@ -5926,21 +9951,21 @@ free_tmp_table(THD *thd, TABLE *entry) here and we have to ensure that delete_table gets the table name in the original case. */ - if (!(test_flags & TEST_KEEP_TMP_TABLES) || entry->db_type == DB_TYPE_HEAP) - entry->file->delete_table(entry->real_name); + if (!(test_flags & TEST_KEEP_TMP_TABLES) || + entry->s->db_type == DB_TYPE_HEAP) + entry->file->delete_table(entry->s->table_name); delete entry->file; } /* free blobs */ for (Field **ptr=entry->field ; *ptr ; ptr++) (*ptr)->free(); - my_free((gptr) entry->record[0],MYF(0)); free_io_cache(entry); if (entry->temp_pool_slot != MY_BIT_NONE) bitmap_clear_bit(&temp_pool, entry->temp_pool_slot); - my_free((gptr) entry,MYF(0)); + free_root(&own_root, MYF(0)); /* the table is allocated in its own root */ thd->proc_info=save_proc_info; DBUG_VOID_RETURN; @@ -5958,14 +9983,16 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, int write_err; DBUG_ENTER("create_myisam_from_heap"); - if (table->db_type != DB_TYPE_HEAP || error != HA_ERR_RECORD_FILE_FULL) + if (table->s->db_type != DB_TYPE_HEAP || error != HA_ERR_RECORD_FILE_FULL) { table->file->print_error(error,MYF(0)); DBUG_RETURN(1); } new_table= *table; - new_table.db_type=DB_TYPE_MYISAM; - if (!(new_table.file=get_new_handler(&new_table,DB_TYPE_MYISAM))) + new_table.s= &new_table.share_not_to_be_used; + new_table.s->db_type= DB_TYPE_MYISAM; + if (!(new_table.file= get_new_handler(&new_table, &new_table.mem_root, + DB_TYPE_MYISAM))) DBUG_RETURN(1); // End of memory save_proc_info=thd->proc_info; @@ -6015,13 +10042,15 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, /* remove heap table and change to use myisam table */ (void) table->file->ha_rnd_end(); (void) table->file->close(); - (void) table->file->delete_table(table->real_name); + (void) 
table->file->delete_table(table->s->table_name); delete table->file; table->file=0; - *table =new_table; + *table= new_table; + table->s= &table->share_not_to_be_used; table->file->change_table_ptr(table); - thd->proc_info= (!strcmp(save_proc_info,"Copying to tmp table") ? - "Copying to tmp table on disk" : save_proc_info); + if (save_proc_info) + thd->proc_info= (!strcmp(save_proc_info,"Copying to tmp table") ? + "Copying to tmp table on disk" : save_proc_info); DBUG_RETURN(0); err: @@ -6030,7 +10059,7 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, (void) table->file->ha_rnd_end(); (void) new_table.file->close(); err1: - new_table.file->delete_table(new_table.real_name); + new_table.file->delete_table(new_table.s->table_name); delete new_table.file; err2: thd->proc_info=save_proc_info; @@ -6038,46 +10067,35 @@ bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, } -/**************************************************************************** - Make a join of all tables and write it on socket or to table - Return: 0 if ok - 1 if error is sent - -1 if error should be sent -****************************************************************************/ +/* + SYNOPSIS + setup_end_select_func() + join join to setup the function for. -static int -do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) + DESCRIPTION + Rows produced by a join sweep may end up in a temporary table or be sent + to a client. Setup the function of the nested loop join algorithm which + handles final fully constructed and matched records. + + RETURN + end_select function to use. This function can't fail. +*/ + +Next_select_func setup_end_select_func(JOIN *join) { - int error= 0; - JOIN_TAB *join_tab; - int (*end_select)(JOIN *, struct st_join_table *,bool); - DBUG_ENTER("do_select"); - List<Item> *columns_list= procedure ? 
&join->procedure_fields_list : fields; - join->procedure=procedure; - /* - Tell the client how many fields there are in a row - */ - if (!table) - join->result->send_fields(*columns_list, 1); - else - { - VOID(table->file->extra(HA_EXTRA_WRITE_CACHE)); - empty_record(table); - } - join->tmp_table= table; /* Save for easy recursion */ - join->fields= fields; + TABLE *table= join->tmp_table; + TMP_TABLE_PARAM *tmp_tbl= &join->tmp_table_param; + Next_select_func end_select; /* Set up select_end */ if (table) { - if (table->group && join->tmp_table_param.sum_func_count) + if (table->group && tmp_tbl->sum_func_count) { - if (table->keys) + if (table->s->keys) { DBUG_PRINT("info",("Using end_update")); end_select=end_update; - if (!table->file->inited) - table->file->ha_index_init(0); } else { @@ -6085,7 +10103,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) end_select=end_unique_update; } } - else if (join->sort_and_group) + else if (join->sort_and_group && !tmp_tbl->precomputed_group_by) { DBUG_PRINT("info",("Using end_write_group")); end_select=end_write_group; @@ -6094,19 +10112,68 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) { DBUG_PRINT("info",("Using end_write")); end_select=end_write; + if (tmp_tbl->precomputed_group_by) + { + /* + A preceding call to create_tmp_table in the case when loose + index scan is used guarantees that + TMP_TABLE_PARAM::items_to_copy has enough space for the group + by functions. It is OK here to use memcpy since we copy + Item_sum pointers into an array of Item pointers. 
+ */ + memcpy(tmp_tbl->items_to_copy + tmp_tbl->func_count, + join->sum_funcs, + sizeof(Item*)*tmp_tbl->sum_func_count); + tmp_tbl->items_to_copy[tmp_tbl->func_count+tmp_tbl->sum_func_count]= 0; + } } } else { - if (join->sort_and_group || (join->procedure && - join->procedure->flags & PROC_GROUP)) - end_select=end_send_group; + if ((join->sort_and_group || + (join->procedure && join->procedure->flags & PROC_GROUP)) && + !tmp_tbl->precomputed_group_by) + end_select= end_send_group; else - end_select=end_send; + end_select= end_send; } + return end_select; +} + + +/**************************************************************************** + Make a join of all tables and write it on socket or to table + Return: 0 if ok + 1 if error is sent + -1 if error should be sent +****************************************************************************/ + +static int +do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) +{ + int rc= 0; + enum_nested_loop_state error= NESTED_LOOP_OK; + JOIN_TAB *join_tab; + DBUG_ENTER("do_select"); + LINT_INIT(join_tab); + + join->procedure=procedure; + join->tmp_table= table; /* Save for easy recursion */ + join->fields= fields; + + if (table) + { + VOID(table->file->extra(HA_EXTRA_WRITE_CACHE)); + empty_record(table); + if (table->group && join->tmp_table_param.sum_func_count && + table->s->keys && !table->file->inited) + table->file->ha_index_init(0); + } + /* Set up select_end */ + Next_select_func end_select= setup_end_select_func(join); if (join->tables) { - join->join_tab[join->tables-1].next_select=end_select; + join->join_tab[join->tables-1].next_select= end_select; join_tab=join->join_tab+join->const_tables; } @@ -6114,42 +10181,54 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) if (join->tables == join->const_tables) { /* - HAVING will be chcked after processing aggregate functions, + HAVING will be checked after processing aggregate functions, But WHERE should checkd here (we 
alredy have read tables) */ if (!join->conds || join->conds->val_int()) { - if (!(error=(*end_select)(join,join_tab,0)) || error == -3) - error=(*end_select)(join,join_tab,1); + error= (*end_select)(join,join_tab,0); + if (error == NESTED_LOOP_OK || error == NESTED_LOOP_QUERY_LIMIT) + error= (*end_select)(join,join_tab,1); } else if (join->send_row_on_empty_set()) - error= join->result->send_data(*columns_list); + { + List<Item> *columns_list= (procedure ? &join->procedure_fields_list : + fields); + rc= join->result->send_data(*columns_list); + } } else { - DBUG_ASSERT(join_tab); + DBUG_ASSERT(join->tables); error= sub_select(join,join_tab,0); - if (error >= 0) + if (error == NESTED_LOOP_OK || error == NESTED_LOOP_NO_MORE_ROWS) error= sub_select(join,join_tab,1); - if (error == -3) - error= 0; /* select_limit used */ + if (error == NESTED_LOOP_QUERY_LIMIT) + error= NESTED_LOOP_OK; /* select_limit used */ } + if (error == NESTED_LOOP_NO_MORE_ROWS) + error= NESTED_LOOP_OK; - if (error >= 0) + if (error == NESTED_LOOP_OK) { - error=0; + /* + Sic: this branch works even if rc != 0, e.g. when + send_data above returns an error. + */ if (!table) // If sending data to client { /* The following will unlock all cursors if the command wasn't an update command */ - join->join_free(0); // Unlock all cursors + join->join_free(); // Unlock all cursors if (join->result->send_eof()) - error= 1; // Don't send error + rc= 1; // Don't send error } - DBUG_PRINT("info",("%ld records output",join->send_records)); + DBUG_PRINT("info",("%ld records output", (long) join->send_records)); } + else + rc= -1; if (table) { int tmp, new_errno= 0; @@ -6167,131 +10246,420 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) table->file->print_error(new_errno,MYF(0)); } #ifndef DBUG_OFF - if (error) + if (rc) { DBUG_PRINT("error",("Error: do_select() failed")); } #endif - DBUG_RETURN(join->thd->net.report_error ? -1 : error); + DBUG_RETURN(join->thd->net.report_error ? 
-1 : rc); } -static int +enum_nested_loop_state sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) { - int error; + enum_nested_loop_state rc; if (end_of_records) { - if ((error=flush_cached_records(join,join_tab,FALSE)) < 0) - return error; /* purecov: inspected */ - return sub_select(join,join_tab,end_of_records); + rc= flush_cached_records(join,join_tab,FALSE); + if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS) + rc= sub_select(join,join_tab,end_of_records); + return rc; } if (join->thd->killed) // If aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ - return -2; /* purecov: inspected */ + join->thd->send_kill_message(); + return NESTED_LOOP_KILLED; /* purecov: inspected */ } if (join_tab->use_quick != 2 || test_if_quick_select(join_tab) <= 0) { if (!store_record_in_cache(&join_tab->cache)) - return 0; // There is more room in cache + return NESTED_LOOP_OK; // There is more room in cache return flush_cached_records(join,join_tab,FALSE); } - if ((error=flush_cached_records(join,join_tab,TRUE)) < 0) - return error; /* purecov: inspected */ - return sub_select(join,join_tab,end_of_records); /* Use ordinary select */ + rc= flush_cached_records(join, join_tab, TRUE); + if (rc == NESTED_LOOP_OK || rc == NESTED_LOOP_NO_MORE_ROWS) + rc= sub_select(join, join_tab, end_of_records); + return rc; } +/* + Retrieve records ends with a given beginning from the result of a join -static int + SYNPOSIS + sub_select() + join pointer to the structure providing all context info for the query + join_tab the first next table of the execution plan to be retrieved + end_records true when we need to perform final steps of retrival + + DESCRIPTION + For a given partial join record consisting of records from the tables + preceding the table join_tab in the execution plan, the function + retrieves all matching full records from the result set and + send them to the result set stream. 
+ + NOTES + The function effectively implements the final (n-k) nested loops + of nested loops join algorithm, where k is the ordinal number of + the join_tab table and n is the total number of tables in the join query. + It performs nested loops joins with all conjunctive predicates from + the where condition pushed as low to the tables as possible. + E.g. for the query + SELECT * FROM t1,t2,t3 + WHERE t1.a=t2.a AND t2.b=t3.b AND t1.a BETWEEN 5 AND 9 + the predicate (t1.a BETWEEN 5 AND 9) will be pushed to table t1, + given the selected plan prescribes to nest retrievals of the + joined tables in the following order: t1,t2,t3. + A pushed down predicate are attached to the table which it pushed to, + at the field select_cond. + When executing a nested loop of level k the function runs through + the rows of 'join_tab' and for each row checks the pushed condition + attached to the table. + If it is false the function moves to the next row of the + table. If the condition is true the function recursively executes (n-k-1) + remaining embedded nested loops. + The situation becomes more complicated if outer joins are involved in + the execution plan. In this case the pushed down predicates can be + checked only at certain conditions. + Suppose for the query + SELECT * FROM t1 LEFT JOIN (t2,t3) ON t3.a=t1.a + WHERE t1>2 AND (t2.b>5 OR t2.b IS NULL) + the optimizer has chosen a plan with the table order t1,t2,t3. + The predicate P1=t1>2 will be pushed down to the table t1, while the + predicate P2=(t2.b>5 OR t2.b IS NULL) will be attached to the table + t2. But the second predicate can not be unconditionally tested right + after a row from t2 has been read. This can be done only after the + first row with t3.a=t1.a has been encountered. + Thus, the second predicate P2 is supplied with a guarded value that are + stored in the field 'found' of the first inner table for the outer join + (table t2). 
When the first row with t3.a=t1.a for the current row + of table t1 appears, the value becomes true. For now on the predicate + is evaluated immediately after the row of table t2 has been read. + When the first row with t3.a=t1.a has been encountered all + conditions attached to the inner tables t2,t3 must be evaluated. + Only when all of them are true the row is sent to the output stream. + If not, the function returns to the lowest nest level that has a false + attached condition. + The predicates from on expressions are also pushed down. If in the + the above example the on expression were (t3.a=t1.a AND t2.a=t1.a), + then t1.a=t2.a would be pushed down to table t2, and without any + guard. + If after the run through all rows of table t2, the first inner table + for the outer join operation, it turns out that no matches are + found for the current row of t1, then current row from table t1 + is complemented by nulls for t2 and t3. Then the pushed down predicates + are checked for the composed row almost in the same way as it had + been done for the first row with a match. The only difference is + the predicates from on expressions are not checked. + + IMPLEMENTATION + The function forms output rows for a current partial join of k + tables tables recursively. + For each partial join record ending with a certain row from + join_tab it calls sub_select that builds all possible matching + tails from the result set. + To be able check predicates conditionally items of the class + Item_func_trig_cond are employed. + An object of this class is constructed from an item of class COND + and a pointer to a guarding boolean variable. + When the value of the guard variable is true the value of the object + is the same as the value of the predicate, otherwise it's just returns + true. + To carry out a return to a nested loop level of join table t the pointer + to t is remembered in the field 'return_tab' of the join structure. 
+ Consider the following query: + SELECT * FROM t1, + LEFT JOIN + (t2, t3 LEFT JOIN (t4,t5) ON t5.a=t3.a) + ON t4.a=t2.a + WHERE (t2.b=5 OR t2.b IS NULL) AND (t4.b=2 OR t4.b IS NULL) + Suppose the chosen execution plan dictates the order t1,t2,t3,t4,t5 + and suppose for a given joined rows from tables t1,t2,t3 there are + no rows in the result set yet. + When first row from t5 that satisfies the on condition + t5.a=t3.a is found, the pushed down predicate t4.b=2 OR t4.b IS NULL + becomes 'activated', as well the predicate t4.a=t2.a. But + the predicate (t2.b=5 OR t2.b IS NULL) can not be checked until + t4.a=t2.a becomes true. + In order not to re-evaluate the predicates that were already evaluated + as attached pushed down predicates, a pointer to the the first + most inner unmatched table is maintained in join_tab->first_unmatched. + Thus, when the first row from t5 with t5.a=t3.a is found + this pointer for t5 is changed from t4 to t2. + + STRUCTURE NOTES + join_tab->first_unmatched points always backwards to the first inner + table of the embedding nested join, if any. + + RETURN + return one of enum_nested_loop_state, except NESTED_LOOP_NO_MORE_ROWS. 
+*/ + +enum_nested_loop_state sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) { - join_tab->table->null_row=0; if (end_of_records) return (*join_tab->next_select)(join,join_tab+1,end_of_records); - /* Cache variables for faster loop */ int error; - bool found=0; - COND *on_expr=join_tab->on_expr, *select_cond=join_tab->select_cond; + enum_nested_loop_state rc; my_bool *report_error= &(join->thd->net.report_error); + READ_RECORD *info= &join_tab->read_record; - if (!(error=(*join_tab->read_first_record)(join_tab))) + if (join->resume_nested_loop) + { + /* If not the last table, plunge down the nested loop */ + if (join_tab < join->join_tab + join->tables - 1) + rc= (*join_tab->next_select)(join, join_tab + 1, 0); + else + { + join->resume_nested_loop= FALSE; + rc= NESTED_LOOP_OK; + } + } + else { - bool not_exists_optimize= join_tab->table->reginfo.not_exists_optimize; - bool not_used_in_distinct=join_tab->not_used_in_distinct; - ha_rows found_records=join->found_records; - READ_RECORD *info= &join_tab->read_record; + join->return_tab= join_tab; + if (join_tab->last_inner) + { + /* join_tab is the first inner table for an outer join operation. */ + + /* Set initial state of guard variables for this table.*/ + join_tab->found=0; + join_tab->not_null_compl= 1; + + /* Set first_unmatched for the last inner table of this group */ + join_tab->last_inner->first_unmatched= join_tab; + } join->thd->row_count= 0; - do + + error= (*join_tab->read_first_record)(join_tab); + rc= evaluate_join_record(join, join_tab, error, report_error); + } + + while (rc == NESTED_LOOP_OK) + { + error= info->read_record(info); + rc= evaluate_join_record(join, join_tab, error, report_error); + } + + if (rc == NESTED_LOOP_NO_MORE_ROWS && + join_tab->last_inner && !join_tab->found) + rc= evaluate_null_complemented_join_record(join, join_tab); + + if (rc == NESTED_LOOP_NO_MORE_ROWS) + rc= NESTED_LOOP_OK; + return rc; +} + + +/* + Process one record of the nested loop join. 
+ + DESCRIPTION + This function will evaluate parts of WHERE/ON clauses that are + applicable to the partial record on hand and in case of success + submit this record to the next level of the nested loop. +*/ + +static enum_nested_loop_state +evaluate_join_record(JOIN *join, JOIN_TAB *join_tab, + int error, my_bool *report_error) +{ + bool not_exists_optimize= join_tab->table->reginfo.not_exists_optimize; + bool not_used_in_distinct=join_tab->not_used_in_distinct; + ha_rows found_records=join->found_records; + COND *select_cond= join_tab->select_cond; + + if (error > 0 || (*report_error)) // Fatal error + return NESTED_LOOP_ERROR; + if (error < 0) + return NESTED_LOOP_NO_MORE_ROWS; + if (join->thd->killed) // Aborted by user + { + join->thd->send_kill_message(); + return NESTED_LOOP_KILLED; /* purecov: inspected */ + } + DBUG_PRINT("info", ("select cond 0x%lx", (ulong)select_cond)); + if (!select_cond || select_cond->val_int()) + { + /* + There is no select condition or the attached pushed down + condition is true => a match is found. + */ + bool found= 1; + while (join_tab->first_unmatched && found) { - if (join->thd->killed) // Aborted by user - { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ - return -2; /* purecov: inspected */ - } - join->examined_rows++; - DBUG_PRINT("counts", ("join->examined_rows++: %lu", - (ulong) join->examined_rows)); - join->thd->row_count++; - if (!on_expr || on_expr->val_int()) + /* + The while condition is always false if join_tab is not + the last inner join table of an outer join operation. + */ + JOIN_TAB *first_unmatched= join_tab->first_unmatched; + /* + Mark that a match for current outer table is found. + This activates push down conditional predicates attached + to the all inner tables of the outer join. 
+ */ + first_unmatched->found= 1; + for (JOIN_TAB *tab= first_unmatched; tab <= join_tab; tab++) { - found=1; - if (not_exists_optimize) - break; // Searching after not null columns - if (!select_cond || select_cond->val_int()) - { - if ((error=(*join_tab->next_select)(join,join_tab+1,0)) < 0) - return error; - /* - Test if this was a SELECT DISTINCT query on a table that - was not in the field list; In this case we can abort if - we found a row, as no new rows can be added to the result. - */ - if (not_used_in_distinct && found_records != join->found_records) - return 0; - } - else + /* Check all predicates that has just been activated. */ + /* + Actually all predicates non-guarded by first_unmatched->found + will be re-evaluated again. It could be fixed, but, probably, + it's not worth doing now. + */ + if (tab->select_cond && !tab->select_cond->val_int()) { - /* - This row failed selection, release lock on it. - XXX: There is no table handler in MySQL which makes use of this - call. It's kept from Gemini times. A lot of new code was added - recently (i. e. subselects) without having it in mind. - */ - info->file->unlock_row(); + /* The condition attached to table tab is false */ + if (tab == join_tab) + found= 0; + else + { + /* + Set a return point if rejected predicate is attached + not to the last table of the current nest level. + */ + join->return_tab= tab; + return NESTED_LOOP_OK; + } } } - } while (!(error=info->read_record(info)) && !(*report_error)); + /* + Check whether join_tab is not the last inner table + for another embedding outer join. + */ + if ((first_unmatched= first_unmatched->first_upper) && + first_unmatched->last_inner != join_tab) + first_unmatched= 0; + join_tab->first_unmatched= first_unmatched; + } + + /* + It was not just a return to lower loop level when one + of the newly activated predicates is evaluated as false + (See above join->return_tab= tab). 
+ */ + join->examined_rows++; + join->thd->row_count++; + DBUG_PRINT("counts", ("join->examined_rows++: %lu", + (ulong) join->examined_rows)); + + if (found) + { + enum enum_nested_loop_state rc; + if (not_exists_optimize) + return NESTED_LOOP_NO_MORE_ROWS; + /* A match from join_tab is found for the current partial join. */ + rc= (*join_tab->next_select)(join, join_tab+1, 0); + if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS) + return rc; + if (join->return_tab < join_tab) + return NESTED_LOOP_OK; + /* + Test if this was a SELECT DISTINCT query on a table that + was not in the field list; In this case we can abort if + we found a row, as no new rows can be added to the result. + */ + if (not_used_in_distinct && found_records != join->found_records) + return NESTED_LOOP_OK; + } + else + join_tab->read_record.file->unlock_row(); } - if (error > 0 || (*report_error)) // Fatal error - return -1; + else + { + /* + The condition pushed down to the table join_tab rejects all rows + with the beginning coinciding with the current partial join. + */ + join->examined_rows++; + join->thd->row_count++; + join_tab->read_record.file->unlock_row(); + } + return NESTED_LOOP_OK; +} + + +/* + DESCRIPTION + Construct a NULL complimented partial join record and feed it to the next + level of the nested loop. This function is used in case we have + an OUTER join and no matching record was found. +*/ - if (!found && on_expr) - { // OUTER JOIN - restore_record(join_tab->table,default_values); // Make empty record - mark_as_null_row(join_tab->table); // For group by without error - if (!select_cond || select_cond->val_int()) +static enum_nested_loop_state +evaluate_null_complemented_join_record(JOIN *join, JOIN_TAB *join_tab) +{ + /* + The table join_tab is the first inner table of a outer join operation + and no matches has been found for the current outer row. 
+ */ + JOIN_TAB *last_inner_tab= join_tab->last_inner; + /* Cache variables for faster loop */ + COND *select_cond; + for ( ; join_tab <= last_inner_tab ; join_tab++) + { + /* Change the the values of guard predicate variables. */ + join_tab->found= 1; + join_tab->not_null_compl= 0; + /* The outer row is complemented by nulls for each inner tables */ + restore_record(join_tab->table,s->default_values); // Make empty record + mark_as_null_row(join_tab->table); // For group by without error + select_cond= join_tab->select_cond; + /* Check all attached conditions for inner table rows. */ + if (select_cond && !select_cond->val_int()) + return NESTED_LOOP_OK; + } + join_tab--; + /* + The row complemented by nulls might be the first row + of embedding outer joins. + If so, perform the same actions as in the code + for the first regular outer join row above. + */ + for ( ; ; ) + { + JOIN_TAB *first_unmatched= join_tab->first_unmatched; + if ((first_unmatched= first_unmatched->first_upper) && + first_unmatched->last_inner != join_tab) + first_unmatched= 0; + join_tab->first_unmatched= first_unmatched; + if (!first_unmatched) + break; + first_unmatched->found= 1; + for (JOIN_TAB *tab= first_unmatched; tab <= join_tab; tab++) { - if ((error=(*join_tab->next_select)(join,join_tab+1,0)) < 0) - return error; /* purecov: inspected */ + if (tab->select_cond && !tab->select_cond->val_int()) + { + join->return_tab= tab; + return NESTED_LOOP_OK; + } } } - return 0; + /* + The row complemented by nulls satisfies all conditions + attached to inner tables. + Send the row complemented by nulls to be joined with the + remaining tables. 
+ */ + return (*join_tab->next_select)(join, join_tab+1, 0); } -static int +static enum_nested_loop_state flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last) { + enum_nested_loop_state rc= NESTED_LOOP_OK; int error; READ_RECORD *info; + join_tab->table->null_row= 0; if (!join_tab->cache.records) - return 0; /* Nothing to do */ + return NESTED_LOOP_OK; /* Nothing to do */ if (skip_last) (void) store_record_in_cache(&join_tab->cache); // Must save this for later if (join_tab->use_quick == 2) @@ -6306,7 +10674,7 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last) if ((error=join_init_read_record(join_tab))) { reset_cache_write(&join_tab->cache); - return -error; /* No records or error */ + return error < 0 ? NESTED_LOOP_NO_MORE_ROWS: NESTED_LOOP_ERROR; } for (JOIN_TAB *tmp=join->join_tab; tmp != join_tab ; tmp++) @@ -6320,12 +10688,12 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last) { if (join->thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ - return -2; // Aborted by user /* purecov: inspected */ + join->thd->send_kill_message(); + return NESTED_LOOP_KILLED; // Aborted by user /* purecov: inspected */ } SQL_SELECT *select=join_tab->select; - if (!error && (!join_tab->cache.select || - !join_tab->cache.select->skip_record())) + if (rc == NESTED_LOOP_OK && + (!join_tab->cache.select || !join_tab->cache.select->skip_record())) { uint i; reset_cache_read(&join_tab->cache); @@ -6333,11 +10701,14 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last) { read_cached_record(join_tab); if (!select || !select->skip_record()) - if ((error=(join_tab->next_select)(join,join_tab+1,0)) < 0) + { + rc= (join_tab->next_select)(join,join_tab+1,0); + if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS) { reset_cache_write(&join_tab->cache); - return error; /* purecov: inspected */ + return rc; } + } } } } while (!(error=info->read_record(info))); @@ -6346,10 +10717,10 @@ 
flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last) read_cached_record(join_tab); // Restore current record reset_cache_write(&join_tab->cache); if (error > 0) // Fatal error - return -1; /* purecov: inspected */ + return NESTED_LOOP_ERROR; /* purecov: inspected */ for (JOIN_TAB *tmp2=join->join_tab; tmp2 != join_tab ; tmp2++) tmp2->table->status=tmp2->status; - return 0; + return NESTED_LOOP_OK; } @@ -6373,7 +10744,7 @@ int report_error(TABLE *table, int error) */ if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT) sql_print_error("Got error %d when reading table '%s'", - error, table->path); + error, table->s->path); table->file->print_error(error,MYF(0)); return 1; } @@ -6408,7 +10779,7 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) tab->info="const row not found"; /* Mark for EXPLAIN that the row was not found */ pos->records_read=0.0; - if (!table->outer_join || error > 0) + if (!table->maybe_null || error > 0) DBUG_RETURN(error); } } @@ -6422,27 +10793,49 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) table->file->extra(HA_EXTRA_KEYREAD); tab->index= tab->ref.key; } - if ((error=join_read_const(tab))) + error=join_read_const(tab); + if (table->key_read) + { + table->key_read=0; + table->file->extra(HA_EXTRA_NO_KEYREAD); + } + if (error) { tab->info="unique row not found"; /* Mark for EXPLAIN that the row was not found */ pos->records_read=0.0; - if (!table->outer_join || error > 0) + if (!table->maybe_null || error > 0) DBUG_RETURN(error); } - if (table->key_read) - { - table->key_read=0; - table->file->extra(HA_EXTRA_NO_KEYREAD); - } } - if (tab->on_expr && !table->null_row) + if (*tab->on_expr_ref && !table->null_row) { - if ((table->null_row= test(tab->on_expr->val_int() == 0))) + if ((table->null_row= test((*tab->on_expr_ref)->val_int() == 0))) mark_as_null_row(table); } if (!table->null_row) table->maybe_null=0; + + /* Check appearance of new constant items in Item_equal objects */ + JOIN *join= 
tab->join; + if (join->conds) + update_const_equal_items(join->conds, tab); + TABLE_LIST *tbl; + for (tbl= join->select_lex->leaf_tables; tbl; tbl= tbl->next_leaf) + { + TABLE_LIST *embedded; + TABLE_LIST *embedding= tbl; + do + { + embedded= embedding; + if (embedded->on_expr) + update_const_equal_items(embedded->on_expr, tab); + embedding= embedded->embedding; + } + while (embedding && + embedding->nested_join->join_list.head() == embedded); + } + DBUG_RETURN(0); } @@ -6455,7 +10848,7 @@ join_read_system(JOIN_TAB *tab) if (table->status & STATUS_GARBAGE) // If first read { if ((error=table->file->read_first_row(table->record[0], - table->primary_key))) + table->s->primary_key))) { if (error != HA_ERR_END_OF_FILE) return report_error(table, error); @@ -6472,6 +10865,19 @@ join_read_system(JOIN_TAB *tab) } +/* + Read a table when there is at most one matching row + + SYNOPSIS + join_read_const() + tab Table to read + + RETURN + 0 Row was found + -1 Row was not found + 1 Got an error (other than row not found) during read +*/ + static int join_read_const(JOIN_TAB *tab) { @@ -6479,6 +10885,7 @@ join_read_const(JOIN_TAB *tab) TABLE *table= tab->table; if (table->status & STATUS_GARBAGE) // If first read { + table->status= 0; if (cp_buffer_from_ref(tab->join->thd, &tab->ref)) error=HA_ERR_KEY_NOT_FOUND; else @@ -6489,6 +10896,7 @@ join_read_const(JOIN_TAB *tab) } if (error) { + table->status= STATUS_NOT_FOUND; mark_as_null_row(tab->table); empty_record(table); if (error != HA_ERR_KEY_NOT_FOUND) @@ -6644,6 +11052,15 @@ join_init_quick_read_record(JOIN_TAB *tab) } +int rr_sequential(READ_RECORD *info); +int init_read_record_seq(JOIN_TAB *tab) +{ + tab->read_record.read_record= rr_sequential; + if (tab->read_record.file->ha_rnd_init(1)) + return 1; + return (*tab->read_record.read_record)(&tab->read_record); +} + static int test_if_quick_select(JOIN_TAB *tab) { @@ -6657,8 +11074,8 @@ test_if_quick_select(JOIN_TAB *tab) static int join_init_read_record(JOIN_TAB *tab) { - 
if (tab->select && tab->select->quick) - tab->select->quick->reset(); + if (tab->select && tab->select->quick && tab->select->quick->reset()) + return 1; init_read_record(&tab->read_record, tab->join->thd, tab->table, tab->select,1,1); return (*tab->read_record.read_record)(&tab->read_record); @@ -6772,7 +11189,7 @@ join_ft_read_next(READ_RECORD *info) Reading of key with key reference and one part that may be NULL */ -static int +int join_read_always_key_or_null(JOIN_TAB *tab) { int res; @@ -6788,7 +11205,7 @@ join_read_always_key_or_null(JOIN_TAB *tab) } -static int +int join_read_next_same_or_null(READ_RECORD *info) { int error; @@ -6805,13 +11222,32 @@ join_read_next_same_or_null(READ_RECORD *info) /***************************************************************************** - The different end of select functions - These functions returns < 0 when end is reached, 0 on ok and > 0 if a - fatal error (like table corruption) was detected + DESCRIPTION + Functions that end one nested loop iteration. Different functions + are used to support GROUP BY clause and to redirect records + to a table (e.g. in case of SELECT into a temporary table) or to the + network client. + + RETURN VALUES + NESTED_LOOP_OK - the record has been successfully handled + NESTED_LOOP_ERROR - a fatal error (like table corruption) + was detected + NESTED_LOOP_KILLED - thread shutdown was requested while processing + the record + NESTED_LOOP_QUERY_LIMIT - the record has been successfully handled; + additionally, the nested loop produced the + number of rows specified in the LIMIT clause + for the query + NESTED_LOOP_CURSOR_LIMIT - the record has been successfully handled; + additionally, there is a cursor and the nested + loop algorithm produced the number of rows + that is specified for current cursor fetch + operation. + All return values except NESTED_LOOP_OK abort the nested loop. 
*****************************************************************************/ /* ARGSUSED */ -static int +static enum_nested_loop_state end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { @@ -6820,14 +11256,14 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { int error; if (join->having && join->having->val_int() == 0) - DBUG_RETURN(0); // Didn't match having + DBUG_RETURN(NESTED_LOOP_OK); // Didn't match having error=0; if (join->procedure) error=join->procedure->send_row(join->procedure_fields_list); else if (join->do_send_rows) error=join->result->send_data(*join->fields); if (error) - DBUG_RETURN(-1); /* purecov: inspected */ + DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if (++join->send_records >= join->unit->select_limit_cnt && join->do_send_rows) { @@ -6860,28 +11296,37 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { join->do_send_rows= 0; if (join->unit->fake_select_lex) - join->unit->fake_select_lex->select_limit= HA_POS_ERROR; - DBUG_RETURN(0); + join->unit->fake_select_lex->select_limit= 0; + DBUG_RETURN(NESTED_LOOP_OK); } } - DBUG_RETURN(-3); // Abort nicely + DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely + } + else if (join->send_records >= join->fetch_limit) + { + /* + There is a server side cursor and all rows for + this fetch request are sent. 
+ */ + DBUG_RETURN(NESTED_LOOP_CURSOR_LIMIT); } } else { if (join->procedure && join->procedure->end_of_records()) - DBUG_RETURN(-1); + DBUG_RETURN(NESTED_LOOP_ERROR); } - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); } /* ARGSUSED */ -static int +static enum_nested_loop_state end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { int idx= -1; + enum_nested_loop_state ok_code= NESTED_LOOP_OK; DBUG_ENTER("end_send_group"); if (!join->first_record || end_of_records || @@ -6934,63 +11379,77 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } } if (error > 0) - DBUG_RETURN(-1); /* purecov: inspected */ + DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if (end_of_records) - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); if (join->send_records >= join->unit->select_limit_cnt && join->do_send_rows) { if (!(join->select_options & OPTION_FOUND_ROWS)) - DBUG_RETURN(-3); // Abort nicely + DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); // Abort nicely join->do_send_rows=0; join->unit->select_limit_cnt = HA_POS_ERROR; } + else if (join->send_records >= join->fetch_limit) + { + /* + There is a server side cursor and all rows + for this fetch request are sent. + */ + /* + Preventing code duplication. When finished with the group reset + the group functions and copy_fields. We fall through. bug #11904 + */ + ok_code= NESTED_LOOP_CURSOR_LIMIT; + } } } else { if (end_of_records) - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); join->first_record=1; VOID(test_if_group_changed(join->group_fields)); } if (idx < (int) join->send_group_parts) { + /* + This branch is executed also for cursors which have finished their + fetch limit - the reason for ok_code. 
+ */ copy_fields(&join->tmp_table_param); if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1])) - DBUG_RETURN(-1); + DBUG_RETURN(NESTED_LOOP_ERROR); if (join->procedure) join->procedure->add(); - DBUG_RETURN(0); + DBUG_RETURN(ok_code); } } if (update_sum_func(join->sum_funcs)) - DBUG_RETURN(-1); + DBUG_RETURN(NESTED_LOOP_ERROR); if (join->procedure) join->procedure->add(); - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); } /* ARGSUSED */ -static int +static enum_nested_loop_state end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { TABLE *table=join->tmp_table; - int error; DBUG_ENTER("end_write"); if (join->thd->killed) // Aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ - DBUG_RETURN(-2); /* purecov: inspected */ + join->thd->send_kill_message(); + DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ } if (!end_of_records) { copy_fields(&join->tmp_table_param); copy_funcs(join->tmp_table_param.items_to_copy); - #ifdef TO_BE_DELETED if (!table->uniques) // If not unique handling { @@ -7009,6 +11468,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), #endif if (!join->having || join->having->val_int()) { + int error; join->found_records++; if ((error=table->file->write_row(table->record[0]))) { @@ -7017,28 +11477,28 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), goto end; if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param, error,1)) - DBUG_RETURN(-1); // Not a table_is_full error - table->uniques=0; // To ensure rows are the same + DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error + table->s->uniques=0; // To ensure rows are the same } if (++join->send_records >= join->tmp_table_param.end_write_records && join->do_send_rows) { if (!(join->select_options & OPTION_FOUND_ROWS)) - DBUG_RETURN(-3); + DBUG_RETURN(NESTED_LOOP_QUERY_LIMIT); join->do_send_rows=0; join->unit->select_limit_cnt = HA_POS_ERROR; - 
DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); } } } end: - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); } /* Group by searching after group record and updating it if possible */ /* ARGSUSED */ -static int +static enum_nested_loop_state end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { @@ -7048,11 +11508,11 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_ENTER("end_update"); if (end_of_records) - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); if (join->thd->killed) // Aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ - DBUG_RETURN(-2); /* purecov: inspected */ + join->thd->send_kill_message(); + DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ } join->found_records++; @@ -7062,12 +11522,9 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { Item *item= *group->item; item->save_org_in_field(group->field); -#ifdef EMBEDDED_LIBRARY - join->thd->net.last_errno= 0; -#endif /* Store in the used key if the field was 0 */ if (item->maybe_null) - group->buff[-1]=item->null_value ? 
1 : 0; + group->buff[-1]= (char) group->field->is_null(); } if (!table->file->index_read(table->record[1], join->tmp_table_param.group_buff,0, @@ -7079,36 +11536,43 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), table->record[0]))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ - DBUG_RETURN(-1); /* purecov: inspected */ + DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ } - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); } - /* The null bits are already set */ + /* + Copy null bits from group key to table + We can't copy all data as the key may have different format + as the row data (for example as with VARCHAR keys) + */ KEY_PART_INFO *key_part; for (group=table->group,key_part=table->key_info[0].key_part; group ; group=group->next,key_part++) - memcpy(table->record[0]+key_part->offset, group->buff, key_part->length); - + { + if (key_part->null_bit) + memcpy(table->record[0]+key_part->offset, group->buff, 1); + } init_tmptable_sum_functions(join->sum_funcs); copy_funcs(join->tmp_table_param.items_to_copy); if ((error=table->file->write_row(table->record[0]))) { if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param, error, 0)) - DBUG_RETURN(-1); // Not a table_is_full error + DBUG_RETURN(NESTED_LOOP_ERROR); // Not a table_is_full error /* Change method to update rows */ table->file->ha_index_init(0); join->join_tab[join->tables-1].next_select=end_unique_update; } join->send_records++; - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); } + /* Like end_update, but this is done with unique constraints instead of keys */ -static int +static enum_nested_loop_state end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { @@ -7117,11 +11581,11 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_ENTER("end_unique_update"); if (end_of_records) - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); if (join->thd->killed) // Aborted by user { - 
my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ - DBUG_RETURN(-2); /* purecov: inspected */ + join->thd->send_kill_message(); + DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ } init_tmptable_sum_functions(join->sum_funcs); @@ -7135,12 +11599,12 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if ((int) table->file->get_dup_key(error) < 0) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ - DBUG_RETURN(-1); /* purecov: inspected */ + DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ } if (table->file->rnd_pos(table->record[1],table->file->dupp_ref)) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ - DBUG_RETURN(-1); /* purecov: inspected */ + DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ } restore_record(table,record[1]); update_tmptable_sum_func(join->sum_funcs,table); @@ -7148,27 +11612,26 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), table->record[0]))) { table->file->print_error(error,MYF(0)); /* purecov: inspected */ - DBUG_RETURN(-1); /* purecov: inspected */ + DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ } } - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); } /* ARGSUSED */ -static int +static enum_nested_loop_state end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), bool end_of_records) { TABLE *table=join->tmp_table; - int error; int idx= -1; DBUG_ENTER("end_write_group"); if (join->thd->killed) { // Aborted by user - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ - DBUG_RETURN(-2); /* purecov: inspected */ + join->thd->send_kill_message(); + DBUG_RETURN(NESTED_LOOP_KILLED); /* purecov: inspected */ } if (!join->first_record || end_of_records || (idx=test_if_group_changed(join->group_fields)) >= 0) @@ -7187,35 +11650,27 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } copy_sum_funcs(join->sum_funcs, join->sum_funcs_end[send_group_parts]); - if 
(join->having && join->having->val_int() == 0) - error= -1; - else if ((error=table->file->write_row(table->record[0]))) + if (!join->having || join->having->val_int()) { - if (create_myisam_from_heap(join->thd, table, - &join->tmp_table_param, - error, 0)) - DBUG_RETURN(-1); - /* - If table->file->write_row() was failed because of 'out of memory' - and tmp table succesfully created, reset error. - */ - error=0; + int error= table->file->write_row(table->record[0]); + if (error && create_myisam_from_heap(join->thd, table, + &join->tmp_table_param, + error, 0)) + DBUG_RETURN(NESTED_LOOP_ERROR); } - if (join->rollup.state != ROLLUP::STATE_NONE && error <= 0) + if (join->rollup.state != ROLLUP::STATE_NONE) { if (join->rollup_write_data((uint) (idx+1), table)) - error= 1; + DBUG_RETURN(NESTED_LOOP_ERROR); } - if (error > 0) - DBUG_RETURN(-1); if (end_of_records) - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); } } else { if (end_of_records) - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); join->first_record=1; VOID(test_if_group_changed(join->group_fields)); } @@ -7224,17 +11679,17 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), copy_fields(&join->tmp_table_param); copy_funcs(join->tmp_table_param.items_to_copy); if (init_sum_functions(join->sum_funcs, join->sum_funcs_end[idx+1])) - DBUG_RETURN(-1); + DBUG_RETURN(NESTED_LOOP_ERROR); if (join->procedure) join->procedure->add(); - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); } } if (update_sum_func(join->sum_funcs)) - DBUG_RETURN(-1); + DBUG_RETURN(NESTED_LOOP_ERROR); if (join->procedure) join->procedure->add(); - DBUG_RETURN(0); + DBUG_RETURN(NESTED_LOOP_OK); } @@ -7264,11 +11719,11 @@ static bool test_if_ref(Item_field *left_item,Item *right_item) /* We can remove binary fields and numerical fields except float, as float comparison isn't 100 % secure - We have to keep binary strings to be able to check for end spaces + We have to keep normal strings to be able to check for end spaces */ if 
(field->binary() && - field->real_type() != FIELD_TYPE_STRING && - field->real_type() != FIELD_TYPE_VAR_STRING && + field->real_type() != MYSQL_TYPE_STRING && + field->real_type() != MYSQL_TYPE_VARCHAR && (field->type() != FIELD_TYPE_FLOAT || field->decimals() == 0)) { return !store_val_in_field(field, right_item, CHECK_FIELD_WARN); @@ -7429,7 +11884,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, for (; order ; order=order->next, const_key_parts>>=1) { - Field *field=((Item_field*) (*order->item))->field; + Field *field=((Item_field*) (*order->item)->real_item())->field; int flag; /* @@ -7458,13 +11913,13 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, } -static uint find_shortest_key(TABLE *table, const key_map *usable_keys) +uint find_shortest_key(TABLE *table, const key_map *usable_keys) { uint min_length= (uint) ~0; uint best= MAX_KEY; if (!usable_keys->is_clear_all()) { - for (uint nr=0; nr < table->keys ; nr++) + for (uint nr=0; nr < table->s->keys ; nr++) { if (usable_keys->is_set(nr)) { @@ -7530,7 +11985,7 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts, KEY_PART_INFO *ref_key_part= table->key_info[ref].key_part; KEY_PART_INFO *ref_key_part_end= ref_key_part + ref_key_parts; - for (nr= 0 ; nr < table->keys ; nr++) + for (nr= 0 ; nr < table->s->keys ; nr++) { if (usable_keys->is_set(nr) && table->key_info[nr].key_length < min_length && @@ -7548,7 +12003,7 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts, /* - Check if GROUP BY/DISTINCT can be optimized away because the set is + Check if GROUP BY/DISTINCT can be optimized away because the set is already known to be distinct. SYNOPSIS @@ -7556,7 +12011,7 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts, table The table to operate on. 
find_func function to iterate over the list and search for a field - + DESCRIPTION Used in removing the GROUP BY/DISTINCT of the following types of statements: @@ -7567,12 +12022,13 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts, then <any combination of a,b,c>,{whatever} is also distinct This function checks if all the key parts of any of the unique keys - of the table are referenced by a list : either the select list + of the table are referenced by a list : either the select list through find_field_in_item_list or GROUP BY list through find_field_in_order_list. - If the above holds then we can safely remove the GROUP BY/DISTINCT, + If the above holds and the key parts cannot contain NULLs then we + can safely remove the GROUP BY/DISTINCT, as no result set can be more distinct than an unique key. - + RETURN VALUE 1 found 0 not found. @@ -7582,9 +12038,9 @@ static bool list_contains_unique_index(TABLE *table, bool (*find_func) (Field *, void *), void *data) { - for (uint keynr= 0; keynr < table->keys; keynr++) + for (uint keynr= 0; keynr < table->s->keys; keynr++) { - if (keynr == table->primary_key || + if (keynr == table->s->primary_key || (table->key_info[keynr].flags & HA_NOSAME)) { KEY *keyinfo= table->key_info + keynr; @@ -7595,7 +12051,8 @@ list_contains_unique_index(TABLE *table, key_part < key_part_end; key_part++) { - if (!find_func(key_part->field, data)) + if (key_part->field->maybe_null() || + !find_func(key_part->field, data)) break; } if (key_part == key_part_end) @@ -7707,17 +12164,17 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, Check which keys can be used to resolve ORDER BY. We must not try to use disabled keys. 
*/ - usable_keys= table->keys_in_use; + usable_keys= table->s->keys_in_use; for (ORDER *tmp_order=order; tmp_order ; tmp_order=tmp_order->next) { - if ((*tmp_order->item)->type() != Item::FIELD_ITEM) + Item *item= (*tmp_order->item)->real_item(); + if (item->type() != Item::FIELD_ITEM) { usable_keys.clear_all(); DBUG_RETURN(0); } - usable_keys.intersect(((Item_field*) (*tmp_order->item))-> - field->part_of_sortkey); + usable_keys.intersect(((Item_field*) item)->field->part_of_sortkey); if (usable_keys.is_clear_all()) DBUG_RETURN(0); // No usable keys } @@ -7733,6 +12190,17 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, } else if (select && select->quick) // Range found by opt_range { + int quick_type= select->quick->get_type(); + /* + assume results are not ordered when index merge is used + TODO: sergeyp: Results of all index merge selects actually are ordered + by clustered PK values. + */ + + if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) + DBUG_RETURN(0); ref_key= select->quick->index; ref_key_parts= select->quick->used_key_parts; } @@ -7817,8 +12285,15 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, */ if (!select->quick->reverse_sorted()) { - // ORDER BY range_key DESC - QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick, + int quick_type= select->quick->get_type(); + if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) + DBUG_RETURN(0); // Use filesort + + /* ORDER BY range_key DESC */ + QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick), used_key_parts); if (!tmp || tmp->error) { @@ -7876,7 +12351,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, else 
keys= usable_keys; - for (nr=0; nr < table->keys ; nr++) + for (nr=0; nr < table->s->keys ; nr++) { uint not_used; if (keys.is_set(nr)) @@ -7936,8 +12411,7 @@ static int create_sort_index(THD *thd, JOIN *join, ORDER *order, ha_rows filesort_limit, ha_rows select_limit) { - SORT_FIELD *sortorder; - uint length; + uint length= 0; ha_rows examined_rows; TABLE *table; SQL_SELECT *select; @@ -7950,11 +12424,20 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, table= tab->table; select= tab->select; - if (test_if_skip_sort_order(tab,order,select_limit,0)) + /* + When there is SQL_BIG_RESULT do not sort using index for GROUP BY, + and thus force sorting on disk. + */ + if ((order != join->group_list || + !(join->select_options & SELECT_BIG_RESULT)) && + test_if_skip_sort_order(tab,order,select_limit,0)) DBUG_RETURN(0); - if (!(sortorder=make_unireg_sortorder(order,&length))) + for (ORDER *ord= join->order; ord; ord= ord->next) + length++; + if (!(join->sortorder= + make_unireg_sortorder(order, &length, join->sortorder))) goto err; /* purecov: inspected */ - /* It's not fatal if the following alloc fails */ + table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), MYF(MY_WME | MY_ZEROFILL)); table->status=0; // May be wrong if quick_select @@ -7966,8 +12449,11 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, { select->quick=tab->quick; tab->quick=0; - /* We can only use 'Only index' if quick key is same as ref_key */ - if (table->key_read && (uint) tab->ref.key != select->quick->index) + /* + We can only use 'Only index' if quick key is same as ref_key + and in index_merge 'Only index' cannot be used + */ + if (table->key_read && ((uint) tab->ref.key != select->quick->index)) { table->key_read=0; table->file->extra(HA_EXTRA_NO_KEYREAD); @@ -7983,13 +12469,21 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, */ if (!(select->quick= (tab->type == JT_FT ? 
new FT_SELECT(thd, table, tab->ref.key) : - get_quick_select_for_ref(thd, table, &tab->ref)))) + get_quick_select_for_ref(thd, table, &tab->ref, + tab->found_records)))) goto err; } } - if (table->tmp_table) + + /* Fill schema tables with data before filesort if it's necessary */ + if ((join->select_lex->options & OPTION_SCHEMA_TABLE) && + !thd->lex->describe && + get_schema_tables_result(join, PROCESSED_BY_CREATE_SORT_INDEX)) + goto err; + + if (table->s->tmp_table) table->file->info(HA_STATUS_VARIABLE); // Get record count - table->sort.found_records=filesort(thd, table,sortorder, length, + table->sort.found_records=filesort(thd, table,join->sortorder, length, select, filesort_limit, &examined_rows); tab->records= table->sort.found_records; // For SQL_CALC_ROWS if (select) @@ -7998,6 +12492,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, tab->select= 0; } tab->select_cond=0; + tab->last_inner= 0; + tab->first_unmatched= 0; tab->type=JT_ALL; // Read with normal read_record tab->read_first_record= join_init_read_record; tab->join->examined_rows+=examined_rows; @@ -8034,8 +12530,7 @@ static bool fix_having(JOIN *join, Item **having) else // This should never happen if (!(table->select->cond= new Item_cond_and(table->select->cond, sort_table_cond)) || - table->select->cond->fix_fields(join->thd, join->tables_list, - &table->select->cond)) + table->select->cond->fix_fields(join->thd, &table->select->cond)) return 1; table->select_cond=table->select->cond; table->select_cond->top_level_item(); @@ -8061,7 +12556,7 @@ static bool compare_record(TABLE *table, Field **ptr) { for (; *ptr ; ptr++) { - if ((*ptr)->cmp_offset(table->rec_buff_length)) + if ((*ptr)->cmp_offset(table->s->rec_buff_length)) return 1; } return 0; @@ -8114,15 +12609,15 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having) join->unit->select_limit_cnt= 1; // Only send first row DBUG_RETURN(0); } - Field **first_field=entry->field+entry->fields - field_count; + 
Field **first_field=entry->field+entry->s->fields - field_count; offset= field_count ? - entry->field[entry->fields - field_count]->offset() : 0; - reclength=entry->reclength-offset; + entry->field[entry->s->fields - field_count]->offset() : 0; + reclength=entry->s->reclength-offset; free_io_cache(entry); // Safety entry->file->info(HA_STATUS_VARIABLE); - if (entry->db_type == DB_TYPE_HEAP || - (!entry->blob_fields && + if (entry->s->db_type == DB_TYPE_HEAP || + (!entry->s->blob_fields && ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->records < thd->variables.sortbuff_size))) error=remove_dup_with_hash_index(join->thd, entry, @@ -8144,7 +12639,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, char *org_record,*new_record; byte *record; int error; - ulong reclength=table->reclength-offset; + ulong reclength= table->s->reclength-offset; DBUG_ENTER("remove_dup_with_compare"); org_record=(char*) (record=table->record[0])+offset; @@ -8156,7 +12651,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); error=0; goto err; } @@ -8177,7 +12672,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, } if (copy_blobs(first_field)) { - my_error(ER_OUTOFMEMORY,MYF(0)); + my_message(ER_OUTOFMEMORY, ER(ER_OUTOFMEMORY), MYF(0)); error=0; goto err; } @@ -8255,7 +12750,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, ulong total_length= 0; for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++) { - uint length= (*ptr)->pack_length(); + uint length= (*ptr)->sort_length(); (*field_length++)= length; total_length+= length; } @@ -8280,7 +12775,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, byte *org_key_pos; if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); error=0; goto err; } @@ -8335,7 +12830,8 @@ err: } 
-SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length) +SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length, + SORT_FIELD *sortorder) { uint count; SORT_FIELD *sort,*pos; @@ -8344,21 +12840,25 @@ SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length) count=0; for (ORDER *tmp = order; tmp; tmp=tmp->next) count++; - pos=sort=(SORT_FIELD*) sql_alloc(sizeof(SORT_FIELD)*(count+1)); + if (!sortorder) + sortorder= (SORT_FIELD*) sql_alloc(sizeof(SORT_FIELD) * + (max(count, *length) + 1)); + pos= sort= sortorder; + if (!pos) return 0; for (;order;order=order->next,pos++) { - pos->field=0; pos->item=0; - if (order->item[0]->type() == Item::FIELD_ITEM) - pos->field= ((Item_field*) (*order->item))->field; - else if (order->item[0]->type() == Item::SUM_FUNC_ITEM && - !order->item[0]->const_item()) - pos->field= ((Item_sum*) order->item[0])->get_tmp_table_field(); - else if (order->item[0]->type() == Item::COPY_STR_ITEM) + Item *item= order->item[0]->real_item(); + pos->field= 0; pos->item= 0; + if (item->type() == Item::FIELD_ITEM) + pos->field= ((Item_field*) item)->field; + else if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item()) + pos->field= ((Item_sum*) item)->get_tmp_table_field(); + else if (item->type() == Item::COPY_STR_ITEM) { // Blob patch - pos->item= ((Item_copy_string*) (*order->item))->item; + pos->item= ((Item_copy_string*) item)->item; } else pos->item= *order->item; @@ -8399,6 +12899,7 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count) } if (!(cache->field=(CACHE_FIELD*) sql_alloc(sizeof(CACHE_FIELD)*(cache->fields+table_count*2)+(blobs+1)* + sizeof(CACHE_FIELD*)))) { my_free((gptr) cache->buff,MYF(0)); /* purecov: inspected */ @@ -8432,10 +12933,10 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count) } } /* Copy null bits from table */ - if (null_fields && tables[i].table->null_fields) + if (null_fields && tables[i].table->s->null_fields) { /* must copy null bits */ copy->str=(char*) 
tables[i].table->null_flags; - copy->length=tables[i].table->null_bytes; + copy->length= tables[i].table->s->null_bytes; copy->strip=0; copy->blob_field=0; length+=copy->length; @@ -8650,72 +13151,169 @@ cp_buffer_from_ref(THD *thd, TABLE_REF *ref) *****************************************************************************/ /* - Find order/group item in requested columns and change the item to point at - it. If item doesn't exists, add it first in the field list - Return 0 if ok. + Resolve an ORDER BY or GROUP BY column reference. + + SYNOPSIS + find_order_in_list() + thd [in] Pointer to current thread structure + ref_pointer_array [in/out] All select, group and order by fields + tables [in] List of tables to search in (usually FROM clause) + order [in] Column reference to be resolved + fields [in] List of fields to search in (usually SELECT list) + all_fields [in/out] All select, group and order by fields + is_group_field [in] True if order is a GROUP field, false if + ORDER by field + + DESCRIPTION + Given a column reference (represented by 'order') from a GROUP BY or ORDER + BY clause, find the actual column it represents. If the column being + resolved is from the GROUP BY clause, the procedure searches the SELECT + list 'fields' and the columns in the FROM list 'tables'. If 'order' is from + the ORDER BY clause, only the SELECT list is being searched. + + If 'order' is resolved to an Item, then order->item is set to the found + Item. If there is no item for the found column (that is, it was resolved + into a table field), order->item is 'fixed' and is added to all_fields and + ref_pointer_array. 
+ + RETURN + FALSE if OK + TRUE if error occurred + + ref_pointer_array and all_fields are updated */ -static int -find_order_in_list(THD *thd, Item **ref_pointer_array, - TABLE_LIST *tables,ORDER *order, List<Item> &fields, - List<Item> &all_fields) -{ - Item *it= *order->item; - if (it->type() == Item::INT_ITEM) +static bool +find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + ORDER *order, List<Item> &fields, List<Item> &all_fields, + bool is_group_field) +{ + Item *order_item= *order->item; /* The item from the GROUP/ORDER caluse. */ + Item::Type order_item_type; + Item **select_item; /* The corresponding item from the SELECT clause. */ + Field *from_field; /* The corresponding field from the FROM clause. */ + uint counter; + enum_resolution_type resolution; + + /* + Local SP variables may be int but are expressions, not positions. + (And they can't be used before fix_fields is called for them). + */ + if (order_item->type() == Item::INT_ITEM && order_item->basic_const_item()) { /* Order by position */ - uint count= (uint) it->val_int(); + uint count= (uint) order_item->val_int(); if (!count || count > fields.elements) { - my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR), - MYF(0), it->full_name(), thd->where); - return 1; + my_error(ER_BAD_FIELD_ERROR, MYF(0), + order_item->full_name(), thd->where); + return TRUE; } - order->item= ref_pointer_array + count-1; + order->item= ref_pointer_array + count - 1; order->in_field_list= 1; - return 0; + order->counter= count; + order->counter_used= 1; + return FALSE; } - uint counter; - bool unaliased; - Item **item= find_item_in_list(it, fields, &counter, - REPORT_EXCEPT_NOT_FOUND, &unaliased); - if (!item) - return 1; + /* Lookup the current GROUP/ORDER field in the SELECT clause. */ + select_item= find_item_in_list(order_item, fields, &counter, + REPORT_EXCEPT_NOT_FOUND, &resolution); + if (!select_item) + return TRUE; /* The item is not unique, or some other error occured. 
*/ + - if (item != (Item **)not_found_item) + /* Check whether the resolved field is not ambiguos. */ + if (select_item != not_found_item) { + Item *view_ref= NULL; /* If we have found field not by its alias in select list but by its original field name, we should additionaly check if we have conflict for this name (in case if we would perform lookup in all tables). */ - if (unaliased && !it->fixed && it->fix_fields(thd, tables, order->item)) - return 1; - - order->item= ref_pointer_array + counter; - order->in_field_list=1; - return 0; + if (resolution == RESOLVED_BEHIND_ALIAS && !order_item->fixed && + order_item->fix_fields(thd, order->item)) + return TRUE; + + /* Lookup the current GROUP field in the FROM clause. */ + order_item_type= order_item->type(); + from_field= (Field*) not_found_field; + if (is_group_field && + order_item_type == Item::FIELD_ITEM || + order_item_type == Item::REF_ITEM) + { + from_field= find_field_in_tables(thd, (Item_ident*) order_item, tables, + NULL, &view_ref, IGNORE_ERRORS, TRUE, + FALSE); + if (!from_field) + from_field= (Field*) not_found_field; + } + + if (from_field == not_found_field || + (from_field != view_ref_found ? + /* it is field of base table => check that fields are same */ + ((*select_item)->type() == Item::FIELD_ITEM && + ((Item_field*) (*select_item))->field->eq(from_field)) : + /* + in is field of view table => check that references on translation + table are same + */ + ((*select_item)->type() == Item::REF_ITEM && + view_ref->type() == Item::REF_ITEM && + ((Item_ref *) (*select_item))->ref == + ((Item_ref *) view_ref)->ref))) + { + /* + If there is no such field in the FROM clause, or it is the same field + as the one found in the SELECT clause, then use the Item created for + the SELECT field. As a result if there was a derived field that + 'shadowed' a table field with the same name, the table field will be + chosen over the derived field. 
+ */ + order->item= ref_pointer_array + counter; + order->in_field_list=1; + return FALSE; + } + else + { + /* + There is a field with the same name in the FROM clause. This + is the field that will be chosen. In this case we issue a + warning so the user knows that the field from the FROM clause + overshadows the column reference from the SELECT list. + */ + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR, + ER(ER_NON_UNIQ_ERROR), + ((Item_ident*) order_item)->field_name, + current_thd->where); + } } order->in_field_list=0; /* - We check it->fixed because Item_func_group_concat can put + The call to order_item->fix_fields() means that here we resolve + 'order_item' to a column from a table in the list 'tables', or to + a column in some outer query. Exactly because of the second case + we come to this point even if (select_item == not_found_item), + inspite of that fix_fields() calls find_item_in_list() one more + time. + + We check order_item->fixed because Item_func_group_concat can put arguments for which fix_fields already was called. - - 'it' reassigned in if condition because fix_field can change it. */ - if (!it->fixed && - (it->fix_fields(thd, tables, order->item) || - (it= *order->item)->check_cols(1) || + if (!order_item->fixed && + (order_item->fix_fields(thd, order->item) || + (order_item= *order->item)->check_cols(1) || thd->is_fatal_error)) - return 1; // Wrong field + return TRUE; /* Wrong field. */ uint el= all_fields.elements; - all_fields.push_front(it); // Add new field to field list - ref_pointer_array[el]= it; + all_fields.push_front(order_item); /* Add new field to field list. */ + ref_pointer_array[el]= order_item; order->item= ref_pointer_array + el; - return 0; + return FALSE; } + /* Change order to point at item in select list. If item isn't a number and doesn't exits in the select list, add it the the field list. 
@@ -8728,7 +13326,7 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, for (; order; order=order->next) { if (find_order_in_list(thd, ref_pointer_array, tables, order, fields, - all_fields)) + all_fields, FALSE)) return 1; } return 0; @@ -8742,7 +13340,7 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, setup_group() thd Thread handler ref_pointer_array We store references to all fields that was not in - 'fields' here. + 'fields' here. fields All fields in the select part. Any item in 'order' that is part of these list is replaced by a pointer to this fields. @@ -8764,48 +13362,83 @@ setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, bool *hidden_group_fields) { *hidden_group_fields=0; + ORDER *ord; + if (!order) return 0; /* Everything is ok */ - if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) - { - Item *item; - List_iterator<Item> li(fields); - while ((item=li++)) - item->marker=0; /* Marker that field is not used */ - } uint org_fields=all_fields.elements; thd->where="group statement"; - for (; order; order=order->next) + for (ord= order; ord; ord= ord->next) { - if (find_order_in_list(thd, ref_pointer_array, tables, order, fields, - all_fields)) + if (find_order_in_list(thd, ref_pointer_array, tables, ord, fields, + all_fields, TRUE)) return 1; - (*order->item)->marker=1; /* Mark found */ - if ((*order->item)->with_sum_func) + (*ord->item)->marker= UNDEF_POS; /* Mark found */ + if ((*ord->item)->with_sum_func) { - my_printf_error(ER_WRONG_GROUP_FIELD, ER(ER_WRONG_GROUP_FIELD),MYF(0), - (*order->item)->full_name()); + my_error(ER_WRONG_GROUP_FIELD, MYF(0), (*ord->item)->full_name()); return 1; } } if (thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) { - /* Don't allow one to use fields that is not used in GROUP BY */ + /* + Don't allow one to use fields that is not used in GROUP BY + For each select a list of field references that aren't under an + aggregate function is created. 
Each field in this list keeps the + position of the select list expression which it belongs to. + + First we check an expression from the select list against the GROUP BY + list. If it's found there then it's ok. It's also ok if this expression + is a constant or an aggregate function. Otherwise we scan the list + of non-aggregated fields and if we'll find at least one field reference + that belongs to this expression and doesn't occur in the GROUP BY list + we throw an error. If there are no fields in the created list for a + select list expression this means that all fields in it are used under + aggregate functions. + */ Item *item; + Item_field *field; + int cur_pos_in_select_list= 0; List_iterator<Item> li(fields); + List_iterator<Item_field> naf_it(thd->lex->current_select->non_agg_fields); - while ((item=li++)) + field= naf_it++; + while (field && (item=li++)) { - if (item->type() != Item::SUM_FUNC_ITEM && !item->marker && - !item->const_item()) + if (item->type() != Item::SUM_FUNC_ITEM && item->marker >= 0 && + !item->const_item() && + !(item->real_item()->type() == Item::FIELD_ITEM && + item->used_tables() & OUTER_REF_TABLE_BIT)) { - my_printf_error(ER_WRONG_FIELD_WITH_GROUP, - ER(ER_WRONG_FIELD_WITH_GROUP), - MYF(0),item->full_name()); - return 1; + while (field) + { + /* Skip fields from previous expressions. */ + if (field->marker < cur_pos_in_select_list) + goto next_field; + /* Found a field from the next expression. */ + if (field->marker > cur_pos_in_select_list) + break; + /* + Check whether the field occur in the GROUP BY list. + Throw the error later if the field isn't found. 
+ */ + for (ord= order; ord; ord= ord->next) + if ((*ord->item)->eq((Item*)field, 0)) + goto next_field; + /* + TODO: change ER_WRONG_FIELD_WITH_GROUP to more detailed + ER_NON_GROUPING_FIELD_USED + */ + my_error(ER_WRONG_FIELD_WITH_GROUP, MYF(0), field->full_name()); + return 1; +next_field: + field= naf_it++; + } } + cur_pos_in_select_list++; } } if (org_fields != all_fields.elements) @@ -8818,7 +13451,7 @@ setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, */ static bool -setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields, +setup_new_fields(THD *thd, List<Item> &fields, List<Item> &all_fields, ORDER *new_field) { Item **item; @@ -8826,7 +13459,7 @@ setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields, thd->set_query_id=1; // Not really needed, but... uint counter; - bool not_used; + enum_resolution_type not_used; for (; new_field ; new_field= new_field->next) { if ((item= find_item_in_list(*new_field->item, fields, &counter, @@ -8835,7 +13468,7 @@ setup_new_fields(THD *thd,TABLE_LIST *tables,List<Item> &fields, else { thd->where="procedure list"; - if ((*new_field->item)->fix_fields(thd, tables, new_field->item)) + if ((*new_field->item)->fix_fields(thd, new_field->item)) DBUG_RETURN(1); /* purecov: inspected */ all_fields.push_front(*new_field->item); new_field->item=all_fields.head_ref(); @@ -8858,7 +13491,6 @@ create_distinct_group(THD *thd, Item **ref_pointer_array, List_iterator<Item> li(fields); Item *item; ORDER *order,*group,**prev; - uint index= 0; *all_order_by_fields_used= 1; while ((item=li++)) @@ -8885,6 +13517,17 @@ create_distinct_group(THD *thd, Item **ref_pointer_array, { if (!item->const_item() && !item->with_sum_func && !item->marker) { + /* + Don't put duplicate columns from the SELECT list into the + GROUP BY list. 
+ */ + ORDER *ord_iter; + for (ord_iter= group; ord_iter; ord_iter= ord_iter->next) + if ((*ord_iter->item)->eq(item, 1)) + break; + if (ord_iter) + continue; + ORDER *ord=(ORDER*) thd->calloc(sizeof(ORDER)); if (!ord) return 0; @@ -8893,12 +13536,12 @@ create_distinct_group(THD *thd, Item **ref_pointer_array, simple indexing of ref_pointer_array (order in the array and in the list are same) */ - ord->item= ref_pointer_array + index; + ord->item= ref_pointer_array; ord->asc=1; *prev=ord; prev= &ord->next; } - index++; + ref_pointer_array++; } *prev=0; return group; @@ -8921,21 +13564,22 @@ count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields, param->quick_group=1; while ((field=li++)) { - Item::Type type=field->type(); - if (type == Item::FIELD_ITEM) + Item::Type real_type= field->real_item()->type(); + if (real_type == Item::FIELD_ITEM) param->field_count++; - else if (type == Item::SUM_FUNC_ITEM) + else if (real_type == Item::SUM_FUNC_ITEM) { if (! field->const_item()) { - Item_sum *sum_item=(Item_sum*) field; + Item_sum *sum_item=(Item_sum*) field->real_item(); if (!sum_item->quick_group) param->quick_group=0; // UDF SUM function param->sum_func_count++; + param->func_count++; for (uint i=0 ; i < sum_item->arg_count ; i++) { - if (sum_item->args[0]->type() == Item::FIELD_ITEM) + if (sum_item->args[0]->real_item()->type() == Item::FIELD_ITEM) param->field_count++; else param->func_count++; @@ -8997,7 +13641,7 @@ get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables) if (!map || (map & (RAND_TABLE_BIT | OUTER_REF_TABLE_BIT))) DBUG_RETURN(0); - for (; !(map & tables->table->map) ; tables=tables->next) ; + for (; !(map & tables->table->map); tables= tables->next_leaf); if (map != tables->table->map) DBUG_RETURN(0); // More than one table DBUG_PRINT("exit",("sort by table: %d",tables->table->tablenr)); @@ -9016,22 +13660,53 @@ calc_group_buffer(JOIN *join,ORDER *group) join->group= 1; for (; group ; group=group->next) { - Field 
*field=(*group->item)->get_tmp_table_field(); + Item *group_item= *group->item; + Field *field= group_item->get_tmp_table_field(); if (field) { - if (field->type() == FIELD_TYPE_BLOB) + enum_field_types type; + if ((type= field->type()) == FIELD_TYPE_BLOB) key_length+=MAX_BLOB_WIDTH; // Can't be used as a key + else if (type == MYSQL_TYPE_VARCHAR || type == MYSQL_TYPE_VAR_STRING) + key_length+= field->field_length + HA_KEY_BLOB_LENGTH; + else if (type == FIELD_TYPE_BIT) + { + /* Bit is usually stored as a longlong key for group fields */ + key_length+= 8; // Big enough + } else - key_length+=field->pack_length(); + key_length+= field->pack_length(); } - else if ((*group->item)->result_type() == REAL_RESULT) - key_length+=sizeof(double); - else if ((*group->item)->result_type() == INT_RESULT) - key_length+=sizeof(longlong); else - key_length+=(*group->item)->max_length; + { + switch (group_item->result_type()) { + case REAL_RESULT: + key_length+= sizeof(double); + break; + case INT_RESULT: + key_length+= sizeof(longlong); + break; + case DECIMAL_RESULT: + key_length+= my_decimal_get_binary_size(group_item->max_length - + (group_item->decimals ? 1 : 0), + group_item->decimals); + break; + case STRING_RESULT: + /* + Group strings are taken as varstrings and require an length field. + A field is not yet created by create_tmp_field() + and the sizes should match up. 
+ */ + key_length+= group_item->max_length + HA_KEY_BLOB_LENGTH; + break; + default: + /* This case should never be choosen */ + DBUG_ASSERT(0); + join->thd->fatal_error(); + } + } parts++; - if ((*group->item)->maybe_null) + if (group_item->maybe_null) null_parts++; } join->tmp_table_param.group_length=key_length+null_parts; @@ -9083,7 +13758,7 @@ alloc_group_fields(JOIN *join,ORDER *group) { for (; group ; group=group->next) { - Item_buff *tmp=new_Item_buff(join->thd, *group->item); + Cached_item *tmp=new_Cached_item(join->thd, *group->item); if (!tmp || join->group_fields.push_front(tmp)) return TRUE; } @@ -9094,12 +13769,12 @@ alloc_group_fields(JOIN *join,ORDER *group) static int -test_if_group_changed(List<Item_buff> &list) +test_if_group_changed(List<Cached_item> &list) { DBUG_ENTER("test_if_group_changed"); - List_iterator<Item_buff> li(list); + List_iterator<Cached_item> li(list); int idx= -1,i; - Item_buff *buff; + Cached_item *buff; for (i=(int) list.elements-1 ; (buff=li++) ; i--) { @@ -9159,11 +13834,20 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, { Field *field; char *tmp; - if (pos->type() == Item::FIELD_ITEM) + Item *real_pos= pos->real_item(); + if (real_pos->type() == Item::FIELD_ITEM) { Item_field *item; - if (!(item= new Item_field(thd, ((Item_field*) pos)))) + if (!(item= new Item_field(thd, ((Item_field*) real_pos)))) goto err; + if (pos->type() == Item::REF_ITEM) + { + /* preserve the names of the ref when dereferncing */ + Item_ref *ref= (Item_ref *) pos; + item->db_name= ref->db_name; + item->table_name= ref->table_name; + item->name= ref->name; + } pos= item; if (item->field->flags & BLOB_FLAG) { @@ -9188,7 +13872,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, saved value */ field= item->field; - item->result_field=field->new_field(thd->mem_root,field->table); + item->result_field=field->new_field(thd->mem_root,field->table, 1); /* We need to allocate one extra byte for null handling and another extra byte to not get 
warnings from purify in @@ -9197,20 +13881,24 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, tmp= (char*) sql_alloc(field->pack_length()+2); if (!tmp) goto err; - copy->set(tmp, item->result_field); - item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1); + if (copy) + { + copy->set(tmp, item->result_field); + item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1); #ifdef HAVE_purify - copy->to_ptr[copy->from_length]= 0; + copy->to_ptr[copy->from_length]= 0; #endif - copy++; + copy++; + } } } - else if ((pos->type() == Item::FUNC_ITEM || - pos->type() == Item::SUBSELECT_ITEM || - pos->type() == Item::CACHE_ITEM || - pos->type() == Item::COND_ITEM) && - !pos->with_sum_func) + else if ((real_pos->type() == Item::FUNC_ITEM || + real_pos->type() == Item::SUBSELECT_ITEM || + real_pos->type() == Item::CACHE_ITEM || + real_pos->type() == Item::COND_ITEM) && + !real_pos->with_sum_func) { // Save for send fields + pos= real_pos; /* TODO: In most cases this result will be sent to the user. 
This should be changed to use copy_int or copy_real depending @@ -9246,7 +13934,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, err: if (copy) - delete [] param->copy_field; + delete [] param->copy_field; // This is never 0 param->copy_field=0; err2: DBUG_RETURN(TRUE); @@ -9306,7 +13994,19 @@ bool JOIN::alloc_func_list() disctinct->group_by optimization */ if (select_distinct) + { group_parts+= fields_list.elements; + /* + If the ORDER clause is specified then it's possible that + it also will be optimized, so reserve space for it too + */ + if (order) + { + ORDER *ord; + for (ord= order; ord; ord= ord->next) + group_parts++; + } + } /* This must use calloc() as rollup_make_fields depends on this */ sum_funcs= (Item_sum**) thd->calloc(sizeof(Item_sum**) * (func_count+1) + @@ -9324,9 +14024,7 @@ bool JOIN::alloc_func_list() field_list All items send_fields Items in select list before_group_by Set to 1 if this is called before GROUP BY handling - - NOTES - Calls ::setup() for all item_sum objects in field_list + recompute Set to TRUE if sum_funcs must be recomputed RETURN 0 ok @@ -9334,23 +14032,21 @@ bool JOIN::alloc_func_list() */ bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields, - bool before_group_by) + bool before_group_by, bool recompute) { List_iterator_fast<Item> it(field_list); Item_sum **func; Item *item; DBUG_ENTER("make_sum_func_list"); + if (*sum_funcs && !recompute) + DBUG_RETURN(FALSE); /* We have already initialized sum_funcs. 
*/ + func= sum_funcs; while ((item=it++)) { if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item()) - { *func++= (Item_sum*) item; - /* let COUNT(DISTINCT) create the temporary table */ - if (((Item_sum*) item)->setup(thd)) - DBUG_RETURN(TRUE); - } } if (before_group_by && rollup.state == ROLLUP::STATE_INITED) { @@ -9395,6 +14091,8 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, { List_iterator_fast<Item> it(all_fields); Item *item_field,*item; + DBUG_ENTER("change_to_use_tmp_fields"); + res_selected_fields.empty(); res_all_fields.empty(); @@ -9403,7 +14101,9 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, { Field *field; - if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) + if ((item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM) || + (item->type() == Item::FUNC_ITEM && + ((Item_func*)item)->functype() == Item_func::SUSERVAR_FUNC)) item_field= item; else { @@ -9418,8 +14118,8 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, else item_field= (Item*) new Item_field(field); if (!item_field) - return TRUE; // Fatal error - item_field->name= item->name; /*lint -e613 */ + DBUG_RETURN(TRUE); // Fatal error + item_field->name= item->name; #ifndef DBUG_OFF if (_db_on_ && !item_field->name) { @@ -9443,7 +14143,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, for (i= 0; i < border; i++) itr++; itr.sublist(res_selected_fields, elements); - return FALSE; + DBUG_RETURN(FALSE); } @@ -9497,6 +14197,33 @@ change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array, Code for calculating functions ******************************************************************************/ + +/* + Call ::setup for all sum functions + + SYNOPSIS + setup_sum_funcs() + thd thread handler + func_ptr sum function list + + RETURN + FALSE ok + TRUE error +*/ + +static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr) +{ + Item_sum *func; + DBUG_ENTER("setup_sum_funcs"); + while ((func= *(func_ptr++))) + { + 
if (func->setup(thd)) + DBUG_RETURN(TRUE); + } + DBUG_RETURN(FALSE); +} + + static void init_tmptable_sum_functions(Item_sum **func_ptr) { @@ -9596,13 +14323,14 @@ static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab) DBUG_RETURN(TRUE); if (!cond->fixed) - cond->fix_fields(thd,(TABLE_LIST *) 0, (Item**)&cond); + cond->fix_fields(thd, (Item**)&cond); if (join_tab->select) { error=(int) cond->add(join_tab->select->cond); join_tab->select_cond=join_tab->select->cond=cond; } - else if ((join_tab->select=make_select(join_tab->table, 0, 0, cond,&error))) + else if ((join_tab->select= make_select(join_tab->table, 0, 0, cond, 0, + &error))) join_tab->select_cond=cond; DBUG_RETURN(error ? TRUE : FALSE); @@ -9637,8 +14365,8 @@ void free_underlaid_joins(THD *thd, SELECT_LEX *select) thd reference to the context expr expression to make replacement group_list list of references to group by items - changed out: returns 1 if item contains a replaced field item - + changed out: returns 1 if item contains a replaced field item + DESCRIPTION The function replaces occurrences of group by fields in expr by ref objects for these fields unless they are under aggregate @@ -9672,6 +14400,7 @@ static bool change_group_ref(THD *thd, Item_func *expr, ORDER *group_list, { if (expr->arg_count) { + Name_resolution_context *context= &thd->lex->current_select->context; Item **arg,**arg_end; bool arg_changed= FALSE; for (arg= expr->arguments(), @@ -9686,8 +14415,9 @@ static bool change_group_ref(THD *thd, Item_func *expr, ORDER *group_list, { if (item->eq(*group_tmp->item,0)) { - Item *new_item; - if(!(new_item= new Item_ref(group_tmp->item, 0, item->name))) + Item *new_item; + if (!(new_item= new Item_ref(context, group_tmp->item, 0, + item->name))) return 1; // fatal_error is set thd->change_item_tree(arg, new_item); arg_changed= TRUE; @@ -9876,8 +14606,6 @@ bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields, */ item= item->copy_or_same(thd); ((Item_sum*) 
item)->make_unique(); - if (((Item_sum*) item)->setup(thd)) - return 1; *(*func)= (Item_sum*) item; (*func)++; } @@ -9894,9 +14622,10 @@ bool JOIN::rollup_make_fields(List<Item> &fields_arg, List<Item> &sel_fields, This is an element that is used by the GROUP BY and should be set to NULL in this level */ - Item_null_result *null_item; + Item_null_result *null_item= new (thd->mem_root) Item_null_result(); + if (!null_item) + return 1; item->maybe_null= 1; // Value will be null sometimes - null_item= rollup.null_items[i]; null_item->result_field= item->get_tmp_table_field(); item= null_item; break; @@ -9977,7 +14706,7 @@ int JOIN::rollup_send_data(uint idx) 1 if write_data_failed() */ -int JOIN::rollup_write_data(uint idx, TABLE *table) +int JOIN::rollup_write_data(uint idx, TABLE *table_arg) { uint i; for (i= send_group_parts ; i-- > idx ; ) @@ -9988,7 +14717,7 @@ int JOIN::rollup_write_data(uint idx, TABLE *table) ref_pointer_array_size); if ((!having || having->val_int())) { - int error; + int write_error; Item *item; List_iterator_fast<Item> it(rollup.fields[i]); while ((item= it++)) @@ -9997,10 +14726,10 @@ int JOIN::rollup_write_data(uint idx, TABLE *table) item->save_in_result_field(1); } copy_sum_funcs(sum_funcs_end[i+1], sum_funcs_end[i]); - if ((error= table->file->write_row(table->record[0]))) + if ((write_error= table_arg->file->write_row(table_arg->record[0]))) { - if (create_myisam_from_heap(thd, table, &tmp_table_param, - error, 0)) + if (create_myisam_from_heap(thd, table_arg, &tmp_table_param, + write_error, 0)) return 1; } } @@ -10046,6 +14775,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, select_result *result=join->result; Item *item_null= new Item_null(); CHARSET_INFO *cs= system_charset_info; + int quick_type; DBUG_ENTER("select_describe"); DBUG_PRINT("info", ("Select 0x%lx, type %s, message %s", (ulong)join->select_lex, join->select_lex->type, @@ -10087,7 +14817,7 @@ static void select_describe(JOIN *join, 
bool need_tmp_table, bool need_order, { SELECT_LEX *sl= join->unit->first_select(); uint len= 6, lastop= 0; - memcpy(table_name_buffer, "<union", 6); + memcpy(table_name_buffer, STRING_WITH_LEN("<union")); for (; sl && len + lastop + 5 < NAME_LEN; sl= sl->next_select()) { len+= lastop; @@ -10096,7 +14826,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, } if (sl || len + lastop >= NAME_LEN) { - memcpy(table_name_buffer + len, "...>", 5); + memcpy(table_name_buffer + len, STRING_WITH_LEN("...>") + 1); len+= 4; } else @@ -10137,14 +14867,20 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, { JOIN_TAB *tab=join->join_tab+i; TABLE *table=tab->table; - char buff[512],*buff_ptr=buff; - char buff1[512], buff2[512]; + char buff[512]; + char buff1[512], buff2[512], buff3[512]; + char keylen_str_buf[64]; + String extra(buff, sizeof(buff),cs); char table_name_buffer[NAME_LEN]; String tmp1(buff1,sizeof(buff1),cs); String tmp2(buff2,sizeof(buff2),cs); + String tmp3(buff3,sizeof(buff3),cs); + extra.length(0); tmp1.length(0); tmp2.length(0); + tmp3.length(0); + quick_type= -1; item_list.empty(); /* id */ item_list.push_back(new Item_uint((uint32) @@ -10154,7 +14890,15 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, strlen(join->select_lex->type), cs)); if (tab->type == JT_ALL && tab->select && tab->select->quick) - tab->type= JT_RANGE; + { + quick_type= tab->select->quick->get_type(); + if ((quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) || + (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) || + (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION)) + tab->type = JT_INDEX_MERGE; + else + tab->type = JT_RANGE; + } /* table */ if (table->derived_select_number) { @@ -10165,18 +14909,21 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(new Item_string(table_name_buffer, len, cs)); } else - item_list.push_back(new 
Item_string(table->table_name, - strlen(table->table_name), + { + TABLE_LIST *real_table= table->pos_in_table_list; + item_list.push_back(new Item_string(real_table->alias, + strlen(real_table->alias), cs)); + } /* type */ item_list.push_back(new Item_string(join_type_str[tab->type], strlen(join_type_str[tab->type]), cs)); - uint j; - /* possible_keys */ + /* Build "possible_keys" value and add it to item_list */ if (!tab->keys.is_clear_all()) { - for (j=0 ; j < table->keys ; j++) + uint j; + for (j=0 ; j < table->s->keys ; j++) { if (tab->keys.is_set(j)) { @@ -10192,14 +14939,19 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(new Item_string(tmp1.ptr(),tmp1.length(),cs)); else item_list.push_back(item_null); - /* key key_len ref */ + + /* Build "key", "key_len", and "ref" values and add them to item_list */ if (tab->ref.key_parts) { KEY *key_info=table->key_info+ tab->ref.key; + register uint length; item_list.push_back(new Item_string(key_info->name, strlen(key_info->name), system_charset_info)); - item_list.push_back(new Item_int((int32) tab->ref.key_length)); + length= longlong2str(tab->ref.key_length, keylen_str_buf, 10) - + keylen_str_buf; + item_list.push_back(new Item_string(keylen_str_buf, length, + system_charset_info)); for (store_key **ref=tab->ref.key_copy ; *ref ; ref++) { if (tmp2.length()) @@ -10212,18 +14964,21 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, else if (tab->type == JT_NEXT) { KEY *key_info=table->key_info+ tab->index; + register uint length; item_list.push_back(new Item_string(key_info->name, strlen(key_info->name),cs)); - item_list.push_back(new Item_int((int32) key_info->key_length)); + length= longlong2str(key_info->key_length, keylen_str_buf, 10) - + keylen_str_buf; + item_list.push_back(new Item_string(keylen_str_buf, + length, + system_charset_info)); item_list.push_back(item_null); } else if (tab->select && tab->select->quick) { - KEY 
*key_info=table->key_info+ tab->select->quick->index; - item_list.push_back(new Item_string(key_info->name, - strlen(key_info->name),cs)); - item_list.push_back(new Item_int((int32) tab->select->quick-> - max_used_key_length)); + tab->select->quick->add_keys_and_lengths(&tmp2, &tmp3); + item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs)); + item_list.push_back(new Item_string(tmp3.ptr(),tmp3.length(),cs)); item_list.push_back(item_null); } else @@ -10232,52 +14987,116 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(item_null); item_list.push_back(item_null); } - /* rows */ + /* Add "rows" field to item_list. */ item_list.push_back(new Item_int((longlong) (ulonglong) join->best_positions[i]. records_read, - 21)); - /* extra */ + MY_INT64_NUM_DECIMAL_DIGITS)); + /* Build "Extra" field and add it to item_list. */ my_bool key_read=table->key_read; if ((tab->type == JT_NEXT || tab->type == JT_CONST) && table->used_keys.is_set(tab->index)) key_read=1; - + if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT && + !((QUICK_ROR_INTERSECT_SELECT*)tab->select->quick)->need_to_fetch_row) + key_read=1; + if (tab->info) item_list.push_back(new Item_string(tab->info,strlen(tab->info),cs)); + else if (tab->packed_info & TAB_INFO_HAVE_VALUE) + { + if (tab->packed_info & TAB_INFO_USING_INDEX) + extra.append(STRING_WITH_LEN("; Using index")); + if (tab->packed_info & TAB_INFO_USING_WHERE) + extra.append(STRING_WITH_LEN("; Using where")); + if (tab->packed_info & TAB_INFO_FULL_SCAN_ON_NULL) + extra.append(STRING_WITH_LEN("; Full scan on NULL key")); + /* Skip initial "; "*/ + const char *str= extra.ptr(); + uint32 len= extra.length(); + if (len) + { + str += 2; + len -= 2; + } + item_list.push_back(new Item_string(str, len, cs)); + } else { + if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) + { + 
extra.append(STRING_WITH_LEN("; Using ")); + tab->select->quick->add_info_string(&extra); + } if (tab->select) { if (tab->use_quick == 2) { char buf[MAX_KEY/8+1]; - sprintf(buff_ptr,"; Range checked for each record (index map: 0x%s)", - tab->keys.print(buf)); - buff_ptr=strend(buff_ptr); + extra.append(STRING_WITH_LEN("; Range checked for each " + "record (index map: 0x")); + extra.append(tab->keys.print(buf)); + extra.append(')'); } - else - buff_ptr=strmov(buff_ptr,"; Using where"); + else if (tab->select->cond) + { + const COND *pushed_cond= tab->table->file->pushed_cond; + + if (thd->variables.engine_condition_pushdown && pushed_cond) + { + extra.append(STRING_WITH_LEN("; Using where with pushed " + "condition")); + if (thd->lex->describe & DESCRIBE_EXTENDED) + { + extra.append(STRING_WITH_LEN(": ")); + ((COND *)pushed_cond)->print(&extra); + } + } + else + extra.append(STRING_WITH_LEN("; Using where")); + } } if (key_read) - buff_ptr= strmov(buff_ptr,"; Using index"); + { + if (quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) + extra.append(STRING_WITH_LEN("; Using index for group-by")); + else + extra.append(STRING_WITH_LEN("; Using index")); + } if (table->reginfo.not_exists_optimize) - buff_ptr= strmov(buff_ptr,"; Not exists"); + extra.append(STRING_WITH_LEN("; Not exists")); if (need_tmp_table) { need_tmp_table=0; - buff_ptr= strmov(buff_ptr,"; Using temporary"); + extra.append(STRING_WITH_LEN("; Using temporary")); } if (need_order) { need_order=0; - buff_ptr= strmov(buff_ptr,"; Using filesort"); + extra.append(STRING_WITH_LEN("; Using filesort")); } if (distinct & test_all_bits(used_tables,thd->used_tables)) - buff_ptr= strmov(buff_ptr,"; Distinct"); - if (buff_ptr == buff) - buff_ptr+= 2; // Skip inital "; " - item_list.push_back(new Item_string(buff+2,(uint) (buff_ptr - buff)-2, - cs)); + extra.append(STRING_WITH_LEN("; Distinct")); + + for (uint part= 0; part < tab->ref.key_parts; part++) + { + if (tab->ref.cond_guards[part]) + { + 
extra.append(STRING_WITH_LEN("; Full scan on NULL key")); + break; + } + } + + /* Skip initial "; "*/ + const char *str= extra.ptr(); + uint32 len= extra.length(); + if (len) + { + str += 2; + len -= 2; + } + item_list.push_back(new Item_string(str, len, cs)); } // For next iteration used_tables|=table->map; @@ -10296,10 +15115,10 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, } -int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) +bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) { DBUG_ENTER("mysql_explain_union"); - int res= 0; + bool res= 0; SELECT_LEX *first= unit->first_select(); for (SELECT_LEX *sl= first; @@ -10309,8 +15128,8 @@ int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) // drop UNCACHEABLE_EXPLAIN, because it is for internal usage only uint8 uncacheable= (sl->uncacheable & ~UNCACHEABLE_EXPLAIN); sl->type= (((&thd->lex->select_lex)==sl)? - ((thd->lex->all_selects_list != sl) ? - primary_key_name : "SIMPLE"): + (sl->first_inner_unit() || sl->next_select() ? + "PRIMARY" : "SIMPLE"): ((sl == first)? ((sl->linkage == DERIVED_TABLE_TYPE) ? 
"DERIVED": @@ -10329,14 +15148,14 @@ int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) unit->fake_select_lex->select_number= UINT_MAX; // jost for initialization unit->fake_select_lex->type= "UNION RESULT"; unit->fake_select_lex->options|= SELECT_DESCRIBE; - if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK | SELECT_DESCRIBE, - ""))) + if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK | SELECT_DESCRIBE))) res= unit->exec(); res|= unit->cleanup(); } else { thd->lex->current_select= first; + unit->set_limit(unit->global_parameters); res= mysql_select(thd, &first->ref_pointer_array, (TABLE_LIST*) first->table_list.first, first->with_wild, first->item_list, @@ -10350,39 +15169,167 @@ int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) first->options | thd->options | SELECT_DESCRIBE, result, unit, first); } - if (res > 0 || thd->net.report_error) - res= -1; // mysql_explain_select do not report error - DBUG_RETURN(res); + DBUG_RETURN(res || thd->net.report_error); +} + + +/* + Print joins from the FROM clause + + SYNOPSIS + print_join() + thd thread handler + str string where table should be printed + tables list of tables in join +*/ + +static void print_join(THD *thd, String *str, List<TABLE_LIST> *tables) +{ + /* List is reversed => we should reverse it before using */ + List_iterator_fast<TABLE_LIST> ti(*tables); + TABLE_LIST **table= (TABLE_LIST **)thd->alloc(sizeof(TABLE_LIST*) * + tables->elements); + if (table == 0) + return; // out of memory + + for (TABLE_LIST **t= table + (tables->elements - 1); t >= table; t--) + *t= ti++; + + DBUG_ASSERT(tables->elements >= 1); + (*table)->print(thd, str); + + TABLE_LIST **end= table + tables->elements; + for (TABLE_LIST **tbl= table + 1; tbl < end; tbl++) + { + TABLE_LIST *curr= *tbl; + if (curr->outer_join) + { + /* MySQL converts right to left joins */ + str->append(STRING_WITH_LEN(" left join ")); + } + else if (curr->straight) + 
str->append(STRING_WITH_LEN(" straight_join ")); + else + str->append(STRING_WITH_LEN(" join ")); + curr->print(thd, str); + if (curr->on_expr) + { + str->append(STRING_WITH_LEN(" on(")); + curr->on_expr->print(str); + str->append(')'); + } + } +} + + +/* + Print table as it should be in join list + + SYNOPSIS + st_table_list::print(); + str string where table should bbe printed +*/ + +void st_table_list::print(THD *thd, String *str) +{ + if (nested_join) + { + str->append('('); + print_join(thd, str, &nested_join->join_list); + str->append(')'); + } + else + { + const char *cmp_name; // Name to compare with alias + if (view_name.str) + { + // A view + + if (!(belong_to_view && + belong_to_view->compact_view_format)) + { + append_identifier(thd, str, view_db.str, view_db.length); + str->append('.'); + } + append_identifier(thd, str, view_name.str, view_name.length); + cmp_name= view_name.str; + } + else if (derived) + { + // A derived table + str->append('('); + derived->print(str); + str->append(')'); + cmp_name= ""; // Force printing of alias + } + else + { + // A normal table + + if (!(belong_to_view && + belong_to_view->compact_view_format)) + { + append_identifier(thd, str, db, db_length); + str->append('.'); + } + if (schema_table) + { + append_identifier(thd, str, schema_table_name, + strlen(schema_table_name)); + cmp_name= schema_table_name; + } + else + { + append_identifier(thd, str, table_name, table_name_length); + cmp_name= table_name; + } + } + if (my_strcasecmp(table_alias_charset, cmp_name, alias)) + { + str->append(' '); + append_identifier(thd, str, alias, strlen(alias)); + } + } } void st_select_lex::print(THD *thd, String *str) { + /* QQ: thd may not be set for sub queries, but this should be fixed */ if (!thd) thd= current_thd; - str->append("select ", 7); - - //options + str->append(STRING_WITH_LEN("select ")); + + /* First add options */ if (options & SELECT_STRAIGHT_JOIN) - str->append("straight_join ", 14); + 
str->append(STRING_WITH_LEN("straight_join ")); if ((thd->lex->lock_option == TL_READ_HIGH_PRIORITY) && (this == &thd->lex->select_lex)) - str->append("high_priority ", 14); + str->append(STRING_WITH_LEN("high_priority ")); if (options & SELECT_DISTINCT) - str->append("distinct ", 9); + str->append(STRING_WITH_LEN("distinct ")); if (options & SELECT_SMALL_RESULT) - str->append("sql_small_result ", 17); + str->append(STRING_WITH_LEN("sql_small_result ")); if (options & SELECT_BIG_RESULT) - str->append("sql_big_result ", 15); + str->append(STRING_WITH_LEN("sql_big_result ")); if (options & OPTION_BUFFER_RESULT) - str->append("sql_buffer_result ", 18); + str->append(STRING_WITH_LEN("sql_buffer_result ")); if (options & OPTION_FOUND_ROWS) - str->append("sql_calc_found_rows ", 20); - if (!thd->lex->safe_to_cache_query) - str->append("sql_no_cache ", 13); - if (options & OPTION_TO_QUERY_CACHE) - str->append("sql_cache ", 10); + str->append(STRING_WITH_LEN("sql_calc_found_rows ")); + switch (sql_cache) + { + case SQL_NO_CACHE: + str->append(STRING_WITH_LEN("sql_no_cache ")); + break; + case SQL_CACHE: + str->append(STRING_WITH_LEN("sql_cache ")); + break; + case SQL_CACHE_UNSPECIFIED: + break; + default: + DBUG_ASSERT(0); + } //Item List bool first= 1; @@ -10403,85 +15350,36 @@ void st_select_lex::print(THD *thd, String *str) */ if (table_list.elements) { - str->append(" from ", 6); - Item *next_on= 0; - for (TABLE_LIST *table= (TABLE_LIST *) table_list.first; - table; - table= table->next) - { - if (table->derived) - { - str->append('('); - table->derived->print(str); - str->append(") "); - str->append(table->alias); - } - else - { - str->append(table->db); - str->append('.'); - str->append(table->real_name); - if (my_strcasecmp(table_alias_charset, table->real_name, table->alias)) - { - str->append(' '); - str->append(table->alias); - } - } - - if (table->on_expr && ((table->outer_join & JOIN_TYPE_LEFT) || - !(table->outer_join & JOIN_TYPE_RIGHT))) - next_on= 
table->on_expr; - - if (next_on) - { - str->append(" on(", 4); - next_on->print(str); - str->append(')'); - next_on= 0; - } - - TABLE_LIST *next_table; - if ((next_table= table->next)) - { - if (table->outer_join & JOIN_TYPE_RIGHT) - { - str->append(" right join ", 12); - if (!(table->outer_join & JOIN_TYPE_LEFT) && - table->on_expr) - next_on= table->on_expr; - } - else if (next_table->straight) - str->append(" straight_join ", 15); - else if (next_table->outer_join & JOIN_TYPE_LEFT) - str->append(" left join ", 11); - else - str->append(" join ", 6); - } - } + str->append(STRING_WITH_LEN(" from ")); + /* go through join tree */ + print_join(thd, str, &top_join_list); } // Where Item *cur_where= where; if (join) cur_where= join->conds; - if (cur_where) + if (cur_where || cond_value != Item::COND_UNDEF) { - str->append(" where ", 7); - cur_where->print(str); + str->append(STRING_WITH_LEN(" where ")); + if (cur_where) + cur_where->print(str); + else + str->append(cond_value != Item::COND_FALSE ? "1" : "0"); } // group by & olap if (group_list.elements) { - str->append(" group by ", 10); + str->append(STRING_WITH_LEN(" group by ")); print_order(str, (ORDER *) group_list.first); switch (olap) { case CUBE_TYPE: - str->append(" with cube", 10); + str->append(STRING_WITH_LEN(" with cube")); break; case ROLLUP_TYPE: - str->append(" with rollup", 12); + str->append(STRING_WITH_LEN(" with rollup")); break; default: ; //satisfy compiler @@ -10493,15 +15391,18 @@ void st_select_lex::print(THD *thd, String *str) if (join) cur_having= join->having; - if (cur_having) + if (cur_having || having_value != Item::COND_UNDEF) { - str->append(" having ", 8); - cur_having->print(str); + str->append(STRING_WITH_LEN(" having ")); + if (cur_having) + cur_having->print(str); + else + str->append(having_value != Item::COND_FALSE ? 
"1" : "0"); } if (order_list.elements) { - str->append(" order by ", 10); + str->append(STRING_WITH_LEN(" order by ")); print_order(str, (ORDER *) order_list.first); } @@ -10520,17 +15421,18 @@ void st_select_lex::print(THD *thd, String *str) res new select_result object RETURN - 0 - OK - -1 - error + FALSE - OK + TRUE - error */ -int JOIN::change_result(select_result *res) +bool JOIN::change_result(select_result *res) { DBUG_ENTER("JOIN::change_result"); result= res; - if (!procedure && result->prepare(fields_list, select_lex->master_unit())) + if (!procedure && (result->prepare(fields_list, select_lex->master_unit()) || + result->prepare2())) { - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } diff --git a/sql/sql_select.h b/sql/sql_select.h index c61ef4fb92b..9aa6fc1cfcd 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -36,6 +35,17 @@ typedef struct keyuse_t { satisfied if val has NULL 'value'. */ bool null_rejecting; + /* + !NULL - This KEYUSE was created from an equality that was wrapped into + an Item_func_trig_cond. This means the equality (and validity of + this KEYUSE element) can be turned on and off. 
The on/off state + is indicted by the pointed value: + *cond_guard == TRUE <=> equality condition is on + *cond_guard == FALSE <=> equality condition is off + + NULL - Otherwise (the source equality can't be turned off) + */ + bool *cond_guard; } KEYUSE; class store_key; @@ -50,6 +60,18 @@ typedef struct st_table_ref byte *key_buff2; // key_buff+key_length store_key **key_copy; // Item **items; // val()'s for each keypart + /* + Array of pointers to trigger variables. Some/all of the pointers may be + NULL. The ref access can be used iff + + for each used key part i, (!cond_guards[i] || *cond_guards[i]) + + This array is used by subquery code. The subquery code may inject + triggered conditions, i.e. conditions that can be 'switched off'. A ref + access created from such condition is not valid when at least one of the + underlying conditions is switched off (see subquery code for more details) + */ + bool **cond_guards; /* (null_rejecting & (1<<i)) means the condition is '=' and no matching rows will be produced if items[i] IS NULL (see add_not_null_conds()) @@ -85,23 +107,57 @@ typedef struct st_join_cache { /* ** The structs which holds the join connections and join states */ - enum join_type { JT_UNKNOWN,JT_SYSTEM,JT_CONST,JT_EQ_REF,JT_REF,JT_MAYBE_REF, JT_ALL, JT_RANGE, JT_NEXT, JT_FT, JT_REF_OR_NULL, - JT_UNIQUE_SUBQUERY, JT_INDEX_SUBQUERY}; + JT_UNIQUE_SUBQUERY, JT_INDEX_SUBQUERY, JT_INDEX_MERGE}; class JOIN; +enum enum_nested_loop_state +{ + NESTED_LOOP_KILLED= -2, NESTED_LOOP_ERROR= -1, + NESTED_LOOP_OK= 0, NESTED_LOOP_NO_MORE_ROWS= 1, + NESTED_LOOP_QUERY_LIMIT= 3, NESTED_LOOP_CURSOR_LIMIT= 4 +}; + + +/* Values for JOIN_TAB::packed_info */ +#define TAB_INFO_HAVE_VALUE 1 +#define TAB_INFO_USING_INDEX 2 +#define TAB_INFO_USING_WHERE 4 +#define TAB_INFO_FULL_SCAN_ON_NULL 8 + +typedef enum_nested_loop_state +(*Next_select_func)(JOIN *, struct st_join_table *, bool); +typedef int (*Read_record_func)(struct st_join_table *tab); +Next_select_func 
setup_end_select_func(JOIN *join); + typedef struct st_join_table { + st_join_table() {} /* Remove gcc warning */ TABLE *table; KEYUSE *keyuse; /* pointer to first used key */ SQL_SELECT *select; COND *select_cond; - QUICK_SELECT *quick; - Item *on_expr; + QUICK_SELECT_I *quick; + Item **on_expr_ref; /* pointer to the associated on expression */ + COND_EQUAL *cond_equal; /* multiple equalities for the on expression */ + st_join_table *first_inner; /* first inner table for including outerjoin */ + bool found; /* true after all matches or null complement */ + bool not_null_compl;/* true before null complement is added */ + st_join_table *last_inner; /* last table table for embedding outer join */ + st_join_table *first_upper; /* first inner table for embedding outer join */ + st_join_table *first_unmatched; /* used for optimization purposes only */ + + /* Special content for EXPLAIN 'Extra' column or NULL if none */ const char *info; - int (*read_first_record)(struct st_join_table *tab); - int (*next_select)(JOIN *,struct st_join_table *,bool); + /* + Bitmap of TAB_INFO_* bits that encodes special line for EXPLAIN 'Extra' + column, or 0 if there is no info. 
+ */ + uint packed_info; + + Read_record_func read_first_record; + Next_select_func next_select; READ_RECORD read_record; double worst_seeks; key_map const_keys; /* Keys with constant part */ @@ -118,14 +174,28 @@ typedef struct st_join_table { TABLE_REF ref; JOIN_CACHE cache; JOIN *join; - + /* Bitmap of nested joins this table is part of */ + nested_join_map embedding_map; + void cleanup(); + inline bool is_using_loose_index_scan() + { + return (select && select->quick && + (select->quick->get_type() == + QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)); + } } JOIN_TAB; +enum_nested_loop_state sub_select_cache(JOIN *join, JOIN_TAB *join_tab, bool + end_of_records); +enum_nested_loop_state sub_select(JOIN *join,JOIN_TAB *join_tab, bool + end_of_records); + typedef struct st_position /* Used in find_best */ { double records_read; + double read_time; JOIN_TAB *table; KEYUSE *key; } POSITION; @@ -142,20 +212,45 @@ typedef struct st_rollup class JOIN :public Sql_alloc { - public: - JOIN_TAB *join_tab,**best_ref,**map2table; - JOIN_TAB *join_tab_save; //saved join_tab for subquery reexecution + JOIN(const JOIN &rhs); /* not implemented */ + JOIN& operator=(const JOIN &rhs); /* not implemented */ +public: + JOIN_TAB *join_tab,**best_ref; + JOIN_TAB **map2table; // mapping between table indexes and JOIN_TABs + JOIN_TAB *join_tab_save; // saved join_tab for subquery reexecution TABLE **table,**all_tables,*sort_by_table; uint tables,const_tables; uint send_group_parts; bool sort_and_group,first_record,full_join,group, no_field_update; bool do_send_rows; + /* + TRUE when we want to resume nested loop iterations when + fetching data from a cursor + */ + bool resume_nested_loop; table_map const_table_map,found_const_table_map,outer_join; ha_rows send_records,found_records,examined_rows,row_limit, select_limit; + /* + Used to fetch no more than given amount of rows per one + fetch operation of server side cursor. 
+ The value is checked in end_send and end_send_group in fashion, similar + to offset_limit_cnt: + - fetch_limit= HA_POS_ERROR if there is no cursor. + - when we open a cursor, we set fetch_limit to 0, + - on each fetch iteration we add num_rows to fetch to fetch_limit + */ + ha_rows fetch_limit; POSITION positions[MAX_TABLES+1],best_positions[MAX_TABLES+1]; + + /* + Bitmap of nested joins embedding the position at the end of the current + partial join (valid only during join optimizer run). + */ + nested_join_map cur_embedding_map; + double best_read; List<Item> *fields; - List<Item_buff> group_fields, group_fields_cache; + List<Cached_item> group_fields, group_fields_cache; TABLE *tmp_table; // used to store 2 possible tmp table of SELECT TABLE *exec_tmp_table1, *exec_tmp_table2; @@ -167,7 +262,7 @@ class JOIN :public Sql_alloc Item *having; Item *tmp_having; // To store having when processed temporary table Item *having_history; // Store having for explain - uint select_options; + ulonglong select_options; select_result *result; TMP_TABLE_PARAM tmp_table_param; MYSQL_LOCK *lock; @@ -195,9 +290,9 @@ class JOIN :public Sql_alloc /* Is set if we have a GROUP BY and we have ORDER BY on a constant. 
*/ bool skip_sort_order; - bool need_tmp, hidden_group_fields, buffer_result; + bool need_tmp, hidden_group_fields; DYNAMIC_ARRAY keyuse; - Item::cond_result cond_value; + Item::cond_result cond_value, having_value; List<Item> all_fields; // to store all fields that used in query //Above list changed to use temporary table List<Item> tmp_all_fields1, tmp_all_fields2, tmp_all_fields3; @@ -210,8 +305,11 @@ class JOIN :public Sql_alloc ORDER *order, *group_list, *proc_param; //hold parameters of mysql_select COND *conds; // ---"--- Item *conds_history; // store WHERE for explain - TABLE_LIST *tables_list; //hold 'tables' parameter of mysql_selec + TABLE_LIST *tables_list; //hold 'tables' parameter of mysql_select + List<TABLE_LIST> *join_list; // list of joined tables in reverse order + COND_EQUAL *cond_equal; SQL_SELECT *select; //created in optimisation phase + JOIN_TAB *return_tab; //used only for outer joins Item **ref_pointer_array; //used pointer reference for this select // Copy of above to be used with different lists Item **items0, **items1, **items2, **items3, **current_ref_pointer_array; @@ -221,35 +319,46 @@ class JOIN :public Sql_alloc bool union_part; // this subselect is part of union bool optimized; // flag to avoid double optimization in EXPLAIN - JOIN(THD *thd_arg, List<Item> &fields_arg, ulong select_options_arg, + /* + storage for caching buffers allocated during query execution. + These buffers allocations need to be cached as the thread memory pool is + cleared only at the end of the execution of the whole query and not caching + allocations that occur in repetition at execution time will result in + excessive memory usage. 
+ */ + SORT_FIELD *sortorder; // make_unireg_sortorder() + TABLE **table_reexec; // make_simple_join() + JOIN_TAB *join_tab_reexec; // make_simple_join() + /* end of allocation caching storage */ + + JOIN(THD *thd_arg, List<Item> &fields_arg, ulonglong select_options_arg, select_result *result_arg) :fields_list(fields_arg) { init(thd_arg, fields_arg, select_options_arg, result_arg); } - JOIN(JOIN &join) - :Sql_alloc(), fields_list(join.fields_list) - { - init(join.thd, join.fields_list, join.select_options, - join.result); - } - - void init(THD *thd_arg, List<Item> &fields_arg, ulong select_options_arg, + void init(THD *thd_arg, List<Item> &fields_arg, ulonglong select_options_arg, select_result *result_arg) { join_tab= join_tab_save= 0; table= 0; tables= 0; const_tables= 0; + join_list= 0; sort_and_group= 0; first_record= 0; do_send_rows= 1; + resume_nested_loop= FALSE; send_records= 0; found_records= 0; + fetch_limit= HA_POS_ERROR; examined_rows= 0; exec_tmp_table1= 0; exec_tmp_table2= 0; + sortorder= 0; + table_reexec= 0; + join_tab_reexec= 0; thd= thd_arg; sum_funcs= sum_funcs2= 0; procedure= 0; @@ -266,17 +375,16 @@ class JOIN :public Sql_alloc skip_sort_order= 0; need_tmp= 0; hidden_group_fields= 0; /*safety*/ - buffer_result= test(select_options & OPTION_BUFFER_RESULT) && - !test(select_options & OPTION_FOUND_ROWS); - all_fields= fields_arg; - fields_list= fields_arg; error= 0; select= 0; + return_tab= 0; ref_pointer_array= items0= items1= items2= items3= 0; ref_pointer_array_size= 0; zero_result_cause= 0; optimized= 0; + cond_equal= 0; + all_fields= fields_arg; fields_list= fields_arg; bzero((char*) &keyuse,sizeof(keyuse)); tmp_table_param.init(); @@ -291,11 +399,11 @@ class JOIN :public Sql_alloc int optimize(); int reinit(); void exec(); - int cleanup(); + int destroy(); void restore_tmp(); bool alloc_func_list(); bool make_sum_func_list(List<Item> &all_fields, List<Item> &send_fields, - bool before_group_by); + bool before_group_by, bool recompute= 
FALSE); inline void set_items_ref_array(Item **ptr) { @@ -314,8 +422,16 @@ class JOIN :public Sql_alloc Item_sum ***func); int rollup_send_data(uint idx); int rollup_write_data(uint idx, TABLE *table); - bool test_in_subselect(Item **where); - void join_free(bool full); + void remove_subq_pushed_predicates(Item **where); + /* + Release memory and, if possible, the open tables held by this execution + plan (and nested plans). It's used to release some tables before + the end of execution in order to increase concurrency and reduce + memory consumption. + */ + void join_free(); + /* Cleanup this JOIN, possibly for reuse */ + void cleanup(bool full); void clear(); bool save_join_tab(); bool send_row_on_empty_set() @@ -323,7 +439,12 @@ class JOIN :public Sql_alloc return (do_send_rows && tmp_table_param.sum_func_count != 0 && !group_list); } - int change_result(select_result *result); + bool change_result(select_result *result); + bool is_top_level_join() const + { + return (unit == &thd->lex->unit && (unit->fake_select_lex == 0 || + select_lex == unit->fake_select_lex)); + } }; @@ -338,7 +459,7 @@ void TEST_join(JOIN *join); bool store_val_in_field(Field *field, Item *val, enum_check_fields check_flag); TABLE *create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, ORDER *group, bool distinct, bool save_sum_fields, - ulong select_options, ha_rows rows_limit, + ulonglong select_options, ha_rows rows_limit, char* alias); void free_tmp_table(THD *thd, TABLE *entry); void count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields, @@ -351,37 +472,69 @@ void copy_fields(TMP_TABLE_PARAM *param); void copy_funcs(Item **func_ptr); bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, int error, bool ignore_last_dupp_error); - +uint find_shortest_key(TABLE *table, const key_map *usable_keys); +Field* create_tmp_field_from_field(THD *thd, Field* org_field, + const char *name, TABLE *table, + Item_field *item, uint convert_blob_length); + /* 
functions from opt_sum.cc */ +bool simple_pred(Item_func *func_item, Item **args, bool *inv_order); int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds); +/* from sql_delete.cc, used by opt_range.cc */ +extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); + /* class to copying an field/item to a key struct */ class store_key :public Sql_alloc { - protected: - Field *to_field; // Store data here - char *null_ptr; - char err; - public: +public: + bool null_key; /* TRUE <=> the value of the key has a null part */ enum store_key_result { STORE_KEY_OK, STORE_KEY_FATAL, STORE_KEY_CONV }; store_key(THD *thd, Field *field_arg, char *ptr, char *null, uint length) - :null_ptr(null),err(0) + :null_key(0), null_ptr(null), err(0) { if (field_arg->type() == FIELD_TYPE_BLOB) - to_field=new Field_varstring(ptr, length, (uchar*) null, 1, + { + /* Key segments are always packed with a 2 byte length prefix */ + to_field=new Field_varstring(ptr, length, 2, (uchar*) null, 1, Field::NONE, field_arg->field_name, field_arg->table, field_arg->charset()); - else - { - to_field=field_arg->new_field(thd->mem_root,field_arg->table); - if (to_field) - to_field->move_field(ptr, (uchar*) null, 1); } + else + to_field=field_arg->new_key_field(thd->mem_root, field_arg->table, + ptr, (uchar*) null, 1); } virtual ~store_key() {} /* Not actually needed */ - virtual enum store_key_result copy()=0; virtual const char *name() const=0; + + /** + @brief sets ignore truncation warnings mode and calls the real copy method + + @details this function makes sure truncation warnings when preparing the + key buffers don't end up as errors (because of an enclosing INSERT/UPDATE). 
+ */ + enum store_key_result copy() + { + enum store_key_result result; + enum_check_fields saved_count_cuted_fields= + to_field->table->in_use->count_cuted_fields; + + to_field->table->in_use->count_cuted_fields= CHECK_FIELD_IGNORE; + + result= copy_inner(); + + to_field->table->in_use->count_cuted_fields= saved_count_cuted_fields; + + return result; + } + + protected: + Field *to_field; // Store data here + char *null_ptr; + char err; + + virtual enum store_key_result copy_inner()=0; }; @@ -401,12 +554,15 @@ class store_key_field: public store_key copy_field.set(to_field,from_field,0); } } - enum store_key_result copy() + const char *name() const { return field_name; } + + protected: + enum store_key_result copy_inner() { copy_field.do_copy(©_field); + null_key= to_field->is_null(); return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK; } - const char *name() const { return field_name; } }; @@ -421,13 +577,15 @@ public: null_ptr_arg ? null_ptr_arg : item_arg->maybe_null ? &err : NullS, length), item(item_arg) {} - enum store_key_result copy() + const char *name() const { return "func"; } + + protected: + enum store_key_result copy_inner() { int res= item->save_in_field(to_field, 1); + null_key= to_field->is_null() || item->null_value; return (err != 0 || res > 2 ? STORE_KEY_FATAL : (store_key_result) res); - } - const char *name() const { return "func"; } }; @@ -443,7 +601,10 @@ public: &err : NullS, length, item_arg), inited(0) { } - enum store_key_result copy() + const char *name() const { return "const"; } + +protected: + enum store_key_result copy_inner() { int res; if (!inited) @@ -455,9 +616,9 @@ public: err= res; } } + null_key= to_field->is_null() || item->null_value; return (err > 2 ? 
STORE_KEY_FATAL : (store_key_result) err); } - const char *name() const { return "const"; } }; bool cp_buffer_from_ref(THD *thd, TABLE_REF *ref); diff --git a/sql/sql_show.cc b/sql/sql_show.cc index bf0e254d3e4..286799a44f6 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -20,158 +19,39 @@ #include "mysql_priv.h" #include "sql_select.h" // For select_describe #include "repl_failsafe.h" +#include "sp.h" +#include "sp_head.h" +#include "sql_trigger.h" #include <my_dir.h> #ifdef HAVE_BERKELEY_DB #include "ha_berkeley.h" // For berkeley_show_logs #endif +#ifndef NO_EMBEDDED_ACCESS_CHECKS static const char *grant_names[]={ "select","insert","update","delete","create","drop","reload","shutdown", "process","file","grant","references","index","alter"}; -#ifndef NO_EMBEDDED_ACCESS_CHECKS static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **), "grant_types", grant_names, NULL}; #endif static int -store_create_info(THD *thd, TABLE *table, String *packet); - - -/* - Report list of databases - A database is a directory in the mysql_data_home directory -*/ - -int -mysqld_show_dbs(THD *thd,const char *wild) -{ - Item_string *field=new Item_string("",0,thd->charset()); - List<Item> field_list; - char *end; - List<char> files; - char *file_name; - Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_dbs"); - - field->name=(char*) thd->alloc(20+ (wild ? 
(uint) strlen(wild)+4: 0)); - field->max_length=NAME_LEN; - end=strmov(field->name,"Database"); - if (wild && wild[0]) - strxmov(end," (",wild,")",NullS); - field_list.push_back(field); - - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); - if (mysql_find_files(thd,&files,NullS,mysql_data_home,wild,1)) - DBUG_RETURN(1); - List_iterator_fast<char> it(files); - - while ((file_name=it++)) - { -#ifndef NO_EMBEDDED_ACCESS_CHECKS - if (thd->master_access & (DB_ACLS | SHOW_DB_ACL) || - acl_get(thd->host, thd->ip, thd->priv_user, file_name,0) || - (grant_option && !check_grant_db(thd, file_name))) -#endif - { - protocol->prepare_for_resend(); - protocol->store(file_name, system_charset_info); - if (protocol->write()) - DBUG_RETURN(-1); - } - } - send_eof(thd); - DBUG_RETURN(0); -} - - -/*************************************************************************** - List all open tables in a database -***************************************************************************/ - -int mysqld_show_open_tables(THD *thd,const char *wild) -{ - List<Item> field_list; - OPEN_TABLE_LIST *open_list; - Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_open_tables"); - - field_list.push_back(new Item_empty_string("Database",NAME_LEN)); - field_list.push_back(new Item_empty_string("Table",NAME_LEN)); - field_list.push_back(new Item_return_int("In_use", 1, MYSQL_TYPE_TINY)); - field_list.push_back(new Item_return_int("Name_locked", 4, MYSQL_TYPE_TINY)); - - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); - - if (!(open_list=list_open_tables(thd,wild)) && thd->is_fatal_error) - DBUG_RETURN(-1); - - for (; open_list ; open_list=open_list->next) - { - protocol->prepare_for_resend(); - protocol->store(open_list->db, system_charset_info); - protocol->store(open_list->table, system_charset_info); - protocol->store_tiny((longlong) open_list->in_use); - protocol->store_tiny((longlong) open_list->locked); - if (protocol->write()) - { - DBUG_RETURN(-1); - } - } - 
send_eof(thd); - DBUG_RETURN(0); -} +store_create_info(THD *thd, TABLE_LIST *table_list, String *packet); +static void +append_algorithm(TABLE_LIST *table, String *buff); +static int +view_store_create_info(THD *thd, TABLE_LIST *table, String *buff); +static bool schema_table_store_record(THD *thd, TABLE *table); /*************************************************************************** -** List all tables in a database (fast version) -** A table is a .frm file in the current databasedir -***************************************************************************/ - -int mysqld_show_tables(THD *thd,const char *db,const char *wild) -{ - Item_string *field=new Item_string("",0,thd->charset()); - List<Item> field_list; - char path[FN_LEN],*end; - List<char> files; - char *file_name; - Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_tables"); - - field->name=(char*) thd->alloc(20+(uint) strlen(db)+ - (wild ? (uint) strlen(wild)+4:0)); - end=strxmov(field->name,"Tables_in_",db,NullS); - if (wild && wild[0]) - strxmov(end," (",wild,")",NullS); - field->max_length=NAME_LEN; - (void) sprintf(path,"%s/%s",mysql_data_home,db); - (void) unpack_dirname(path,path); - field_list.push_back(field); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); - if (mysql_find_files(thd,&files,db,path,wild,0)) - DBUG_RETURN(-1); - List_iterator_fast<char> it(files); - while ((file_name=it++)) - { - protocol->prepare_for_resend(); - protocol->store(file_name, system_charset_info); - if (protocol->write()) - DBUG_RETURN(-1); - } - send_eof(thd); - DBUG_RETURN(0); -} - -/*************************************************************************** ** List all table types supported ***************************************************************************/ -int mysqld_show_storage_engines(THD *thd) +bool mysqld_show_storage_engines(THD *thd) { List<Item> field_list; Protocol *protocol= thd->protocol; @@ -181,29 +61,33 @@ int mysqld_show_storage_engines(THD *thd) 
field_list.push_back(new Item_empty_string("Support",10)); field_list.push_back(new Item_empty_string("Comment",80)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); const char *default_type_name= ha_get_storage_engine((enum db_type)thd->variables.table_type); - show_table_type_st *types; - for (types= sys_table_types; types->type; types++) + handlerton **types; + for (types= sys_table_types; *types; types++) { - protocol->prepare_for_resend(); - protocol->store(types->type, system_charset_info); - const char *option_name= show_comp_option_name[(int) *types->value]; - - if (*types->value == SHOW_OPTION_YES && - !my_strcasecmp(system_charset_info, default_type_name, types->type)) - option_name= "DEFAULT"; - protocol->store(option_name, system_charset_info); - protocol->store(types->comment, system_charset_info); - if (protocol->write()) - DBUG_RETURN(-1); + if (!((*types)->flags & HTON_HIDDEN)) + { + protocol->prepare_for_resend(); + protocol->store((*types)->name, system_charset_info); + const char *option_name= show_comp_option_name[(int) (*types)->state]; + + if ((*types)->state == SHOW_OPTION_YES && + !my_strcasecmp(system_charset_info, default_type_name, (*types)->name)) + option_name= "DEFAULT"; + protocol->store(option_name, system_charset_info); + protocol->store((*types)->comment, system_charset_info); + if (protocol->write()) + DBUG_RETURN(TRUE); + } } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -220,12 +104,17 @@ struct show_privileges_st { static struct show_privileges_st sys_privileges[]= { {"Alter", "Tables", "To alter the table"}, - {"Create temporary tables","Databases","To use CREATE TEMPORARY TABLE"}, + {"Alter routine", "Functions,Procedures", "To alter or drop stored functions/procedures"}, {"Create", "Databases,Tables,Indexes", "To create new databases and tables"}, + {"Create 
routine","Functions,Procedures","To use CREATE FUNCTION/PROCEDURE"}, + {"Create temporary tables","Databases","To use CREATE TEMPORARY TABLE"}, + {"Create view", "Tables", "To create new views"}, + {"Create user", "Server Admin", "To create new users"}, {"Delete", "Tables", "To delete existing rows"}, - {"Drop", "Databases,Tables", "To drop databases and tables"}, + {"Drop", "Databases,Tables", "To drop databases, tables, and views"}, + {"Execute", "Functions,Procedures", "To execute stored routines"}, {"File", "File access on server", "To read and write files on the server"}, - {"Grant option", "Databases,Tables", "To give to other users those privileges you possess"}, + {"Grant option", "Databases,Tables,Functions,Procedures", "To give to other users those privileges you possess"}, {"Index", "Tables", "To create or drop indexes"}, {"Insert", "Tables", "To insert data into tables"}, {"Lock tables","Databases","To use LOCK TABLES (together with SELECT privilege)"}, @@ -236,14 +125,15 @@ static struct show_privileges_st sys_privileges[]= {"Replication slave","Server Admin","To read binary log events from the master"}, {"Select", "Tables", "To retrieve rows from table"}, {"Show databases","Server Admin","To see all databases with SHOW DATABASES"}, - {"Shutdown","Server Admin", "To shutdown the server"}, + {"Show view","Tables","To see views with SHOW CREATE VIEW"}, + {"Shutdown","Server Admin", "To shut down the server"}, {"Super","Server Admin","To use KILL thread, SET GLOBAL, CHANGE MASTER, etc."}, {"Update", "Tables", "To update existing rows"}, {"Usage","Server Admin","No privileges - allow connect only"}, {NullS, NullS, NullS} }; -int mysqld_show_privileges(THD *thd) +bool mysqld_show_privileges(THD *thd) { List<Item> field_list; Protocol *protocol= thd->protocol; @@ -253,8 +143,9 @@ int mysqld_show_privileges(THD *thd) field_list.push_back(new Item_empty_string("Context",15)); field_list.push_back(new Item_empty_string("Comment",NAME_LEN)); - if 
(protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); show_privileges_st *privilege= sys_privileges; for (privilege= sys_privileges; privilege->privilege ; privilege++) @@ -264,10 +155,10 @@ int mysqld_show_privileges(THD *thd) protocol->store(privilege->context, system_charset_info); protocol->store(privilege->comment, system_charset_info); if (protocol->write()) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -307,14 +198,15 @@ static struct show_column_type_st sys_column_types[]= "A very small integer"}, }; -int mysqld_show_column_types(THD *thd) +bool mysqld_show_column_types(THD *thd) { List<Item> field_list; Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_show_column_types"); field_list.push_back(new Item_empty_string("Type",30)); - field_list.push_back(new Item_int("Size",(longlong) 1,21)); + field_list.push_back(new Item_int("Size",(longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); field_list.push_back(new Item_empty_string("Min_Value",20)); field_list.push_back(new Item_empty_string("Max_Value",20)); field_list.push_back(new Item_return_int("Prec", 4, MYSQL_TYPE_SHORT)); @@ -328,8 +220,9 @@ int mysqld_show_column_types(THD *thd) field_list.push_back(new Item_empty_string("Default",NAME_LEN)); field_list.push_back(new Item_empty_string("Comment",NAME_LEN)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); /* TODO: Change the loop to not use 'i' */ for (uint i=0; i < sizeof(sys_column_types)/sizeof(sys_column_types[0]); i++) @@ -350,16 +243,42 @@ int mysqld_show_column_types(THD *thd) protocol->store(sys_column_types[i].default_value, system_charset_info); protocol->store(sys_column_types[i].comment, system_charset_info); if (protocol->write()) - DBUG_RETURN(-1); + 
DBUG_RETURN(TRUE); } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } -int -mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path, - const char *wild, bool dir) +/* + find_files() - find files in a given directory. + + SYNOPSIS + find_files() + thd thread handler + files put found files in this list + db database name to set in TABLE_LIST structure + path path to database + wild filter for found files + dir read databases in path if TRUE, read .frm files in + database otherwise + + RETURN + FIND_FILES_OK success + FIND_FILES_OOM out of memory error + FIND_FILES_DIR no such directory, or directory can't be read +*/ + +enum find_files_result { + FIND_FILES_OK, + FIND_FILES_OOM, + FIND_FILES_DIR +}; + +static +find_files_result +find_files(THD *thd, List<char> *files, const char *db, + const char *path, const char *wild, bool dir) { uint i; char *ext; @@ -369,15 +288,21 @@ mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path, uint col_access=thd->col_access; #endif TABLE_LIST table_list; - DBUG_ENTER("mysql_find_files"); + DBUG_ENTER("find_files"); if (wild && !wild[0]) wild=0; bzero((char*) &table_list,sizeof(table_list)); - if (!(dirp = my_dir(path,MYF(MY_WME | (dir ? MY_WANT_STAT : 0))))) - DBUG_RETURN(-1); + if (!(dirp = my_dir(path,MYF(dir ? 
MY_WANT_STAT : 0)))) + { + if (my_errno == ENOENT) + my_error(ER_BAD_DB_ERROR, MYF(ME_BELL+ME_WAITTANG), db); + else + my_error(ER_CANT_READ_DIR, MYF(ME_BELL+ME_WAITTANG), path, my_errno); + DBUG_RETURN(FIND_FILES_DIR); + } for (i=0 ; i < (uint) dirp->number_off_files ; i++) { @@ -427,16 +352,18 @@ mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path, if (db && !(col_access & TABLE_ACLS)) { table_list.db= (char*) db; - table_list.real_name=file->name; + table_list.db_length= strlen(db); + table_list.table_name= file->name; + table_list.table_name_length= strlen(file->name); table_list.grant.privilege=col_access; - if (check_grant(thd, TABLE_ACLS, &table_list, 1, UINT_MAX, 1)) + if (check_grant(thd, TABLE_ACLS, &table_list, 1, 1, 1)) continue; } #endif if (files->push_back(thd->strdup(file->name))) { my_dirend(dirp); - DBUG_RETURN(-1); + DBUG_RETURN(FIND_FILES_OOM); } } DBUG_PRINT("info",("found: %d files", files->elements)); @@ -444,408 +371,104 @@ mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path, VOID(ha_find_files(thd,db,path,wild,dir,files)); - DBUG_RETURN(0); + DBUG_RETURN(FIND_FILES_OK); } -/*************************************************************************** - Extended version of mysqld_show_tables -***************************************************************************/ - -int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) -{ - Item *item; - List<char> files; - List<Item> field_list; - char path[FN_LEN]; - char *file_name; - TABLE *table; - Protocol *protocol= thd->protocol; - TIME time; - int res= 0; - DBUG_ENTER("mysqld_extend_show_tables"); - - (void) sprintf(path,"%s/%s",mysql_data_home,db); - (void) unpack_dirname(path,path); - field_list.push_back(item=new Item_empty_string("Name",NAME_LEN)); - field_list.push_back(item=new Item_empty_string("Engine",10)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Version", (longlong) 0, 21)); - item->maybe_null=1; 
- field_list.push_back(item=new Item_empty_string("Row_format",10)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Rows",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Avg_row_length",(int32) 0,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Data_length",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Max_data_length",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Index_length",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Data_free",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Auto_increment",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_datetime("Create_time")); - item->maybe_null=1; - field_list.push_back(item=new Item_datetime("Update_time")); - item->maybe_null=1; - field_list.push_back(item=new Item_datetime("Check_time")); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("Collation",32)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Checksum",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("Create_options",255)); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("Comment",80)); - item->maybe_null=1; - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); - - if (mysql_find_files(thd,&files,db,path,wild,0)) - DBUG_RETURN(-1); - List_iterator_fast<char> it(files); - while ((file_name=it++)) - { - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - protocol->prepare_for_resend(); - protocol->store(file_name, system_charset_info); - table_list.db=(char*) db; - table_list.real_name= table_list.alias= file_name; - if (lower_case_table_names) - my_casedn_str(files_charset_info, file_name); - if (!(table = open_ltable(thd, &table_list, TL_READ))) - { - for (uint i=2 ; i < 
field_list.elements ; i++) - protocol->store_null(); - // Send error to Comment field - protocol->store(thd->net.last_error, system_charset_info); - thd->clear_error(); - } - else - { - const char *str; - handler *file=table->file; - file->info(HA_STATUS_VARIABLE | HA_STATUS_TIME | HA_STATUS_NO_LOCK); - protocol->store(file->table_type(), system_charset_info); - protocol->store((ulonglong) table->frm_version); - str= ((table->db_options_in_use & HA_OPTION_COMPRESS_RECORD) ? - "Compressed" : - (table->db_options_in_use & HA_OPTION_PACK_RECORD) ? - "Dynamic" : "Fixed"); - protocol->store(str, system_charset_info); - protocol->store((ulonglong) file->records); - protocol->store((ulonglong) file->mean_rec_length); - protocol->store((ulonglong) file->data_file_length); - if (file->max_data_file_length) - protocol->store((ulonglong) file->max_data_file_length); - else - protocol->store_null(); - protocol->store((ulonglong) file->index_file_length); - protocol->store((ulonglong) file->delete_length); - if (table->found_next_number_field) - { - table->next_number_field=table->found_next_number_field; - table->next_number_field->reset(); - file->update_auto_increment(); - protocol->store(table->next_number_field->val_int()); - table->next_number_field=0; - } - else - protocol->store_null(); - if (!file->create_time) - protocol->store_null(); - else - { - thd->variables.time_zone->gmt_sec_to_TIME(&time, file->create_time); - protocol->store(&time); - } - if (!file->update_time) - protocol->store_null(); - else - { - thd->variables.time_zone->gmt_sec_to_TIME(&time, file->update_time); - protocol->store(&time); - } - if (!file->check_time) - protocol->store_null(); - else - { - thd->variables.time_zone->gmt_sec_to_TIME(&time, file->check_time); - protocol->store(&time); - } - str= (table->table_charset ? 
table->table_charset->name : "default"); - protocol->store(str, system_charset_info); - if (file->table_flags() & HA_HAS_CHECKSUM) - protocol->store((ulonglong)file->checksum()); - else - protocol->store_null(); // Checksum - { - char option_buff[350],*ptr; - ptr=option_buff; - if (table->min_rows) - { - ptr=strmov(ptr," min_rows="); - ptr=longlong10_to_str(table->min_rows,ptr,10); - } - if (table->max_rows) - { - ptr=strmov(ptr," max_rows="); - ptr=longlong10_to_str(table->max_rows,ptr,10); - } - if (table->avg_row_length) - { - ptr=strmov(ptr," avg_row_length="); - ptr=longlong10_to_str(table->avg_row_length,ptr,10); - } - if (table->db_create_options & HA_OPTION_PACK_KEYS) - ptr=strmov(ptr," pack_keys=1"); - if (table->db_create_options & HA_OPTION_NO_PACK_KEYS) - ptr=strmov(ptr," pack_keys=0"); - if (table->db_create_options & HA_OPTION_CHECKSUM) - ptr=strmov(ptr," checksum=1"); - if (table->db_create_options & HA_OPTION_DELAY_KEY_WRITE) - ptr=strmov(ptr," delay_key_write=1"); - if (table->row_type != ROW_TYPE_DEFAULT) - ptr=strxmov(ptr, " row_format=", ha_row_type[(uint) table->row_type], - NullS); - if (file->raid_type) - { - char buff[100]; - sprintf(buff," raid_type=%s raid_chunks=%d raid_chunksize=%ld", - my_raid_type(file->raid_type), file->raid_chunks, file->raid_chunksize/RAID_BLOCK_SIZE); - ptr=strmov(ptr,buff); - } - protocol->store(option_buff+1, - (ptr == option_buff ? 
0 : (uint) (ptr-option_buff)-1) - , system_charset_info); - } - { - char *comment=table->file->update_table_comment(table->comment); - protocol->store(comment, system_charset_info); - if (comment != table->comment) - my_free(comment,MYF(0)); - } - close_thread_tables(thd,0); - } - if (protocol->write()) - { - res= -1; - break; - } - } - thd->insert_id(0); - if (!res) - send_eof(thd); - DBUG_RETURN(res); -} - - -/*************************************************************************** -** List all columns in a table_list->real_name -***************************************************************************/ - -int -mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild, - bool verbose) +bool +mysqld_show_create(THD *thd, TABLE_LIST *table_list) { - TABLE *table; - handler *file; - char tmp[MAX_FIELD_WIDTH]; - char tmp1[MAX_FIELD_WIDTH]; - Item *item; Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_fields"); + char buff[2048]; + String buffer(buff, sizeof(buff), system_charset_info); + DBUG_ENTER("mysqld_show_create"); DBUG_PRINT("enter",("db: %s table: %s",table_list->db, - table_list->real_name)); + table_list->table_name)); - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) - { - send_error(thd); - DBUG_RETURN(1); - } - file=table->file; - file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); -#ifndef NO_EMBEDDED_ACCESS_CHECKS - (void) get_table_grant(thd, table_list); -#endif - List<Item> field_list; - field_list.push_back(new Item_empty_string("Field",NAME_LEN)); - field_list.push_back(new Item_empty_string("Type", 40)); - if (verbose) - field_list.push_back(new Item_empty_string("Collation",40)); - field_list.push_back(new Item_empty_string("Null",1)); - field_list.push_back(new Item_empty_string("Key",3)); - field_list.push_back(item=new Item_empty_string("Default",NAME_LEN)); - item->maybe_null=1; - field_list.push_back(new Item_empty_string("Extra",20)); - if (verbose) - { - field_list.push_back(new 
Item_empty_string("Privileges",80)); - field_list.push_back(new Item_empty_string("Comment",255)); - } - // Send first number of fields and records - if (protocol->send_records_num(&field_list, (ulonglong)file->records) || - protocol->send_fields(&field_list,0)) - DBUG_RETURN(1); - restore_record(table,default_values); // Get empty record + /* We want to preserve the tree for views. */ + thd->lex->view_prepare_mode= TRUE; - Field **ptr,*field; - for (ptr=table->field; (field= *ptr) ; ptr++) + /* Only one table for now, but VIEW can involve several tables */ + if (open_normal_and_derived_tables(thd, table_list, 0)) { - if (!wild || !wild[0] || - !wild_case_compare(system_charset_info, field->field_name,wild)) - { - { - byte *pos; - uint flags=field->flags; - String type(tmp,sizeof(tmp), system_charset_info); -#ifndef NO_EMBEDDED_ACCESS_CHECKS - uint col_access; -#endif - protocol->prepare_for_resend(); - protocol->store(field->field_name, system_charset_info); - field->sql_type(type); - protocol->store(type.ptr(), type.length(), system_charset_info); - if (verbose) - protocol->store(field->has_charset() ? field->charset()->name : "NULL", - system_charset_info); - pos= (byte*) ((flags & NOT_NULL_FLAG) ? "" : "YES"); - protocol->store((const char*) pos, system_charset_info); - pos=(byte*) ((field->flags & PRI_KEY_FLAG) ? "PRI" : - (field->flags & UNIQUE_KEY_FLAG) ? "UNI" : - (field->flags & MULTIPLE_KEY_FLAG) ? "MUL":""); - protocol->store((char*) pos, system_charset_info); - - if (table->timestamp_field == field && - field->unireg_check != Field::TIMESTAMP_UN_FIELD) - { - /* - We have NOW() as default value but we use CURRENT_TIMESTAMP form - because it is more SQL standard comatible - */ - protocol->store("CURRENT_TIMESTAMP", system_charset_info); - } - else if (field->unireg_check != Field::NEXT_NUMBER && - !field->is_null()) - { // Not null by default - /* - Note: we have to convert the default value into - system_charset_info before sending. 
- This is necessary for "SET NAMES binary": - If the client character set is binary, we want to - send metadata in UTF8 rather than in the column's - character set. - This conversion also makes "SHOW COLUMNS" and - "SHOW CREATE TABLE" output consistent. Without - this conversion the default values were displayed - differently. - */ - String def(tmp1,sizeof(tmp1), system_charset_info); - type.set(tmp, sizeof(tmp), field->charset()); - field->val_str(&type); - uint dummy_errors; - def.copy(type.ptr(), type.length(), type.charset(), - system_charset_info, &dummy_errors); - protocol->store(def.ptr(), def.length(), def.charset()); - } - else if (field->unireg_check == Field::NEXT_NUMBER || - field->maybe_null()) - protocol->store_null(); // Null as default - else - protocol->store("",0, system_charset_info); // empty string + if (!table_list->view || thd->net.last_errno != ER_VIEW_INVALID) + DBUG_RETURN(TRUE); - char *end=tmp; - if (field->unireg_check == Field::NEXT_NUMBER) - end=strmov(tmp,"auto_increment"); - protocol->store(tmp,(uint) (end-tmp), system_charset_info); - - if (verbose) - { - /* Add grant options & comments */ - end=tmp; -#ifndef NO_EMBEDDED_ACCESS_CHECKS - col_access= get_column_grant(thd,table_list,field) & COL_ACLS; - for (uint bitnr=0; col_access ; col_access>>=1,bitnr++) - { - if (col_access & 1) - { - *end++=','; - end=strmov(end,grant_types.type_names[bitnr]); - } - } -#else - end=strmov(end,""); -#endif - protocol->store(tmp+1,end == tmp ? 
0 : (uint) (end-tmp-1), - system_charset_info); - protocol->store(field->comment.str, field->comment.length, - system_charset_info); - } - if (protocol->write()) - DBUG_RETURN(1); - } - } + /* + Clear all messages with 'error' level status and + issue a warning with 'warning' level status in + case of invalid view and last error is ER_VIEW_INVALID + */ + mysql_reset_errors(thd, true); + thd->clear_error(); + + push_warning_printf(thd,MYSQL_ERROR::WARN_LEVEL_WARN, + ER_VIEW_INVALID, + ER(ER_VIEW_INVALID), + table_list->view_db.str, + table_list->view_name.str); } - send_eof(thd); - DBUG_RETURN(0); -} - -int -mysqld_show_create(THD *thd, TABLE_LIST *table_list) -{ - TABLE *table; - Protocol *protocol= thd->protocol; - char buff[2048]; - String buffer(buff, sizeof(buff), system_charset_info); - DBUG_ENTER("mysqld_show_create"); - DBUG_PRINT("enter",("db: %s table: %s",table_list->db, - table_list->real_name)); - - /* Only one table for now */ - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) + /* TODO: add environment variables show when it become possible */ + if (thd->lex->only_view && !table_list->view) { - send_error(thd); - DBUG_RETURN(1); + my_error(ER_WRONG_OBJECT, MYF(0), + table_list->db, table_list->table_name, "VIEW"); + DBUG_RETURN(TRUE); } buffer.length(0); - if (store_create_info(thd, table, &buffer)) - DBUG_RETURN(-1); + if ((table_list->view ? 
+ view_store_create_info(thd, table_list, &buffer) : + store_create_info(thd, table_list, &buffer))) + DBUG_RETURN(TRUE); List<Item> field_list; - field_list.push_back(new Item_empty_string("Table",NAME_LEN)); - // 1024 is for not to confuse old clients - field_list.push_back(new Item_empty_string("Create Table", - max(buffer.length(),1024))); + if (table_list->view) + { + field_list.push_back(new Item_empty_string("View",NAME_LEN)); + field_list.push_back(new Item_empty_string("Create View", + max(buffer.length(),1024))); + } + else + { + field_list.push_back(new Item_empty_string("Table",NAME_LEN)); + // 1024 is for not to confuse old clients + field_list.push_back(new Item_empty_string("Create Table", + max(buffer.length(),1024))); + } - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); protocol->prepare_for_resend(); - protocol->store(table->table_name, system_charset_info); + if (table_list->view) + protocol->store(table_list->view_name.str, system_charset_info); + else + { + if (table_list->schema_table) + protocol->store(table_list->schema_table->table_name, + system_charset_info); + else + protocol->store(table_list->table->alias, system_charset_info); + } protocol->store(buffer.ptr(), buffer.length(), buffer.charset()); + if (protocol->write()) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } -int mysqld_show_create_db(THD *thd, char *dbname, - const HA_CREATE_INFO *create_info) +bool mysqld_show_create_db(THD *thd, char *dbname, + const HA_CREATE_INFO *create_info) { - int length; - char path[FN_REFLEN]; char buff[2048]; String buffer(buff, sizeof(buff), system_charset_info); #ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *sctx= thd->security_ctx; uint db_access; #endif - bool found_libchar; HA_CREATE_INFO create; uint create_options = create_info ? 
create_info->options : 0; Protocol *protocol=thd->protocol; @@ -853,80 +476,78 @@ int mysqld_show_create_db(THD *thd, char *dbname, if (check_db_name(dbname)) { - net_printf(thd,ER_WRONG_DB_NAME, dbname); - DBUG_RETURN(1); + my_error(ER_WRONG_DB_NAME, MYF(0), dbname); + DBUG_RETURN(TRUE); } #ifndef NO_EMBEDDED_ACCESS_CHECKS - if (test_all_bits(thd->master_access,DB_ACLS)) + if (test_all_bits(sctx->master_access, DB_ACLS)) db_access=DB_ACLS; else - db_access= (acl_get(thd->host,thd->ip, thd->priv_user,dbname,0) | - thd->master_access); + db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, dbname, 0) | + sctx->master_access); if (!(db_access & DB_ACLS) && (!grant_option || check_grant_db(thd,dbname))) { - net_printf(thd,ER_DBACCESS_DENIED_ERROR, - thd->priv_user, thd->host_or_ip, dbname); + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + sctx->priv_user, sctx->host_or_ip, dbname); mysql_log.write(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR), - thd->priv_user, thd->host_or_ip, dbname); - DBUG_RETURN(1); + sctx->priv_user, sctx->host_or_ip, dbname); + DBUG_RETURN(TRUE); } #endif - - (void) sprintf(path,"%s/%s",mysql_data_home, dbname); - length=unpack_dirname(path,path); // Convert if not unix - found_libchar= 0; - if (length && path[length-1] == FN_LIBCHAR) + if (!my_strcasecmp(system_charset_info, dbname, + information_schema_name.str)) { - found_libchar= 1; - path[length-1]=0; // remove ending '\' + dbname= information_schema_name.str; + create.default_table_charset= system_charset_info; } - if (access(path,F_OK)) + else { - net_printf(thd,ER_BAD_DB_ERROR,dbname); - DBUG_RETURN(1); - } - if (found_libchar) - path[length-1]= FN_LIBCHAR; - strmov(path+length, MY_DB_OPT_FILE); - load_db_opt(thd, path, &create); + if (check_db_dir_existence(dbname)) + { + my_error(ER_BAD_DB_ERROR, MYF(0), dbname); + DBUG_RETURN(TRUE); + } + load_db_opt_by_name(thd, dbname, &create); + } List<Item> field_list; field_list.push_back(new Item_empty_string("Database",NAME_LEN)); 
field_list.push_back(new Item_empty_string("Create Database",1024)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); protocol->prepare_for_resend(); protocol->store(dbname, strlen(dbname), system_charset_info); buffer.length(0); - buffer.append("CREATE DATABASE ", 16); + buffer.append(STRING_WITH_LEN("CREATE DATABASE ")); if (create_options & HA_LEX_CREATE_IF_NOT_EXISTS) - buffer.append("/*!32312 IF NOT EXISTS*/ ", 25); + buffer.append(STRING_WITH_LEN("/*!32312 IF NOT EXISTS*/ ")); append_identifier(thd, &buffer, dbname, strlen(dbname)); if (create.default_table_charset) { - buffer.append(" /*!40100", 9); - buffer.append(" DEFAULT CHARACTER SET ", 23); + buffer.append(STRING_WITH_LEN(" /*!40100")); + buffer.append(STRING_WITH_LEN(" DEFAULT CHARACTER SET ")); buffer.append(create.default_table_charset->csname); if (!(create.default_table_charset->state & MY_CS_PRIMARY)) { - buffer.append(" COLLATE ", 9); + buffer.append(STRING_WITH_LEN(" COLLATE ")); buffer.append(create.default_table_charset->name); } - buffer.append(" */", 3); + buffer.append(STRING_WITH_LEN(" */")); } protocol->store(buffer.ptr(), buffer.length(), buffer.charset()); if (protocol->write()) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } -int +bool mysqld_show_logs(THD *thd) { List<Item> field_list; @@ -937,113 +558,17 @@ mysqld_show_logs(THD *thd) field_list.push_back(new Item_empty_string("Type",10)); field_list.push_back(new Item_empty_string("Status",10)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); #ifdef HAVE_BERKELEY_DB if ((have_berkeley_db == SHOW_OPTION_YES) && berkeley_show_logs(protocol)) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); #endif send_eof(thd); - DBUG_RETURN(0); -} - - -int 
-mysqld_show_keys(THD *thd, TABLE_LIST *table_list) -{ - TABLE *table; - Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_keys"); - DBUG_PRINT("enter",("db: %s table: %s",table_list->db, - table_list->real_name)); - - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) - { - send_error(thd); - DBUG_RETURN(1); - } - - List<Item> field_list; - Item *item; - field_list.push_back(new Item_empty_string("Table",NAME_LEN)); - field_list.push_back(new Item_return_int("Non_unique",1, MYSQL_TYPE_TINY)); - field_list.push_back(new Item_empty_string("Key_name",NAME_LEN)); - field_list.push_back(new Item_return_int("Seq_in_index",2, MYSQL_TYPE_TINY)); - field_list.push_back(new Item_empty_string("Column_name",NAME_LEN)); - field_list.push_back(item=new Item_empty_string("Collation",1)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Cardinality",0,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_return_int("Sub_part",3, - MYSQL_TYPE_SHORT)); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("Packed",10)); - item->maybe_null=1; - field_list.push_back(new Item_empty_string("Null",3)); - field_list.push_back(new Item_empty_string("Index_type",16)); - field_list.push_back(new Item_empty_string("Comment",255)); - item->maybe_null=1; - - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); - - KEY *key_info=table->key_info; - table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); - for (uint i=0 ; i < table->keys ; i++,key_info++) - { - KEY_PART_INFO *key_part= key_info->key_part; - const char *str; - for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) - { - protocol->prepare_for_resend(); - protocol->store(table->table_name, system_charset_info); - protocol->store_tiny((longlong) ((key_info->flags & HA_NOSAME) ? 0 :1)); - protocol->store(key_info->name, system_charset_info); - protocol->store_tiny((longlong) (j+1)); - str=(key_part->field ? 
key_part->field->field_name : - "?unknown field?"); - protocol->store(str, system_charset_info); - if (table->file->index_flags(i, j, 0) & HA_READ_ORDER) - protocol->store(((key_part->key_part_flag & HA_REVERSE_SORT) ? - "D" : "A"), 1, system_charset_info); - else - protocol->store_null(); /* purecov: inspected */ - KEY *key=table->key_info+i; - if (key->rec_per_key[j]) - { - ha_rows records=(table->file->records / key->rec_per_key[j]); - protocol->store((ulonglong) records); - } - else - protocol->store_null(); - - /* Check if we have a key part that only uses part of the field */ - if (!(key_info->flags & HA_FULLTEXT) && (!key_part->field || - key_part->length != table->field[key_part->fieldnr-1]->key_length())) - protocol->store_short((longlong) key_part->length / - key_part->field->charset()->mbmaxlen); - else - protocol->store_null(); - protocol->store_null(); // No pack_information yet - - /* Null flag */ - uint flags= key_part->field ? key_part->field->flags : 0; - char *pos=(char*) ((flags & NOT_NULL_FLAG) ? 
"" : "YES"); - protocol->store((const char*) pos, system_charset_info); - protocol->store(table->file->index_type(i), system_charset_info); - /* Comment */ - if (!table->keys_in_use.is_set(i)) - protocol->store("disabled",8, system_charset_info); - else - protocol->store("", 0, system_charset_info); - if (protocol->write()) - DBUG_RETURN(1); /* purecov: inspected */ - } - } - send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -1057,13 +582,12 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) { TABLE *table; DBUG_ENTER("mysqld_list_fields"); - DBUG_PRINT("enter",("table: %s",table_list->real_name)); + DBUG_PRINT("enter",("table: %s",table_list->table_name)); - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) - { - send_error(thd); + if (open_normal_and_derived_tables(thd, table_list, 0)) DBUG_VOID_RETURN; - } + table= table_list->table; + List<Item> field_list; Field **ptr,*field; @@ -1071,10 +595,18 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) { if (!wild || !wild[0] || !wild_case_compare(system_charset_info, field->field_name,wild)) - field_list.push_back(new Item_field(field)); + { + if (table_list->view) + field_list.push_back(new Item_ident_for_show(field, + table_list->view_db.str, + table_list->view_name.str)); + else + field_list.push_back(new Item_field(field)); + } } - restore_record(table,default_values); // Get empty record - if (thd->protocol->send_fields(&field_list,2)) + restore_record(table, s->default_values); // Get empty record + if (thd->protocol->send_fields(&field_list, Protocol::SEND_DEFAULTS | + Protocol::SEND_EOF)) DBUG_VOID_RETURN; thd->protocol->flush(); DBUG_VOID_RETURN; @@ -1082,15 +614,15 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) int -mysqld_dump_create_info(THD *thd, TABLE *table, int fd) +mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd) { Protocol *protocol= thd->protocol; String *packet= protocol->storage_packet(); 
DBUG_ENTER("mysqld_dump_create_info"); - DBUG_PRINT("enter",("table: %s",table->real_name)); + DBUG_PRINT("enter",("table: %s",table_list->table->s->table_name)); protocol->prepare_for_resend(); - if (store_create_info(thd, table, packet)) + if (store_create_info(thd, table_list, packet)) DBUG_RETURN(-1); if (fd < 0) @@ -1110,7 +642,7 @@ mysqld_dump_create_info(THD *thd, TABLE *table, int fd) /* Go through all character combinations and ensure that sql_lex.cc can - parse it as an identifer. + parse it as an identifier. SYNOPSIS require_quotes() @@ -1127,7 +659,7 @@ static const char *require_quotes(const char *name, uint name_length) uint length; const char *end= name + name_length; - for ( ; name < end ; name++) + for (; name < end ; name++) { uchar chr= (uchar) *name; length= my_mbcharlen(system_charset_info, chr); @@ -1138,6 +670,18 @@ static const char *require_quotes(const char *name, uint name_length) } +/* + Quote the given identifier if needed and append it to the target string. + If the given identifier is empty, it will be quoted. + + SYNOPSIS + append_identifier() + thd thread handler + packet target string + name the identifier to be appended + name_length length of the appending identifier +*/ + void append_identifier(THD *thd, String *packet, const char *name, uint length) { @@ -1156,7 +700,7 @@ append_identifier(THD *thd, String *packet, const char *name, uint length) it's a keyword */ - packet->reserve(length*2 + 2); + VOID(packet->reserve(length*2 + 2)); quote_char= (char) q; packet->append("e_char, 1, system_charset_info); @@ -1191,8 +735,11 @@ append_identifier(THD *thd, String *packet, const char *name, uint length) length length of name IMPLEMENTATION - If name is a keyword or includes a special character, then force - quoting. 
+ Force quoting in the following cases: + - name is empty (for one, it is possible when we use this function for + quoting user and host names for DEFINER clause); + - name is a keyword; + - name includes a special character; Otherwise identifier is quoted only if the option OPTION_QUOTE_SHOW_CREATE is set. @@ -1203,7 +750,8 @@ append_identifier(THD *thd, String *packet, const char *name, uint length) int get_quote_char_for_identifier(THD *thd, const char *name, uint length) { - if (!is_keyword(name,length) && + if (length && + !is_keyword(name,length) && !require_quotes(name, length) && !(thd->options & OPTION_QUOTE_SHOW_CREATE)) return EOF; @@ -1223,16 +771,19 @@ static void append_directory(THD *thd, String *packet, const char *dir_type, uint length= dirname_length(filename); packet->append(' '); packet->append(dir_type); - packet->append(" DIRECTORY='", 12); + packet->append(STRING_WITH_LEN(" DIRECTORY='")); #ifdef __WIN__ - char *winfilename = thd->memdup(filename, length); - for (uint i=0; i < length; i++) - if (winfilename[i] == '\\') - winfilename[i] = '/'; - packet->append(winfilename, length); -#else - packet->append(filename, length); + /* Convert \ to / to be able to create table on unix */ + char *winfilename= (char*) thd->memdup(filename, length); + char *pos, *end; + for (pos= winfilename, end= pos+length ; pos < end ; pos++) + { + if (*pos == '\\') + *pos = '/'; + } + filename= winfilename; #endif + packet->append(filename, length); packet->append('\''); } } @@ -1241,15 +792,18 @@ static void append_directory(THD *thd, String *packet, const char *dir_type, #define LIST_PROCESS_HOST_LEN 64 static int -store_create_info(THD *thd, TABLE *table, String *packet) +store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) { List<Item> field_list; - char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end, *alias; + char tmp[MAX_FIELD_WIDTH], *for_str, buff[128]; + const char *alias; String type(tmp, sizeof(tmp), system_charset_info); Field 
**ptr,*field; uint primary_key; KEY *key_info; + TABLE *table= table_list->table; handler *file= table->file; + TABLE_SHARE *share= table->s; HA_CREATE_INFO create_info; my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | MODE_ORACLE | @@ -1260,20 +814,22 @@ store_create_info(THD *thd, TABLE *table, String *packet) my_bool limited_mysql_mode= (thd->variables.sql_mode & (MODE_NO_FIELD_OPTIONS | MODE_MYSQL323 | MODE_MYSQL40)) != 0; - DBUG_ENTER("store_create_info"); - DBUG_PRINT("enter",("table: %s",table->real_name)); + DBUG_PRINT("enter",("table: %s", table->s->table_name)); - restore_record(table,default_values); // Get empty record + restore_record(table, s->default_values); // Get empty record - if (table->tmp_table) - packet->append("CREATE TEMPORARY TABLE ", 23); + if (share->tmp_table) + packet->append(STRING_WITH_LEN("CREATE TEMPORARY TABLE ")); + else + packet->append(STRING_WITH_LEN("CREATE TABLE ")); + if (table_list->schema_table) + alias= table_list->schema_table->table_name; else - packet->append("CREATE TABLE ", 13); - alias= (lower_case_table_names == 2 ? table->table_name : - table->real_name); + alias= (lower_case_table_names == 2 ? 
table->alias : + share->table_name); append_identifier(thd, packet, alias, strlen(alias)); - packet->append(" (\n", 3); + packet->append(STRING_WITH_LEN(" (\n")); for (ptr=table->field ; (field= *ptr); ptr++) { @@ -1282,9 +838,9 @@ store_create_info(THD *thd, TABLE *table, String *packet) uint flags = field->flags; if (ptr != table->field) - packet->append(",\n", 2); + packet->append(STRING_WITH_LEN(",\n")); - packet->append(" ", 2); + packet->append(STRING_WITH_LEN(" ")); append_identifier(thd,packet,field->field_name, strlen(field->field_name)); packet->append(' '); // check for surprises from the previous call to Field::sql_type() @@ -1299,9 +855,9 @@ store_create_info(THD *thd, TABLE *table, String *packet) if (field->has_charset() && !(thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40))) { - if (field->charset() != table->table_charset) + if (field->charset() != share->table_charset) { - packet->append(" character set ", 15); + packet->append(STRING_WITH_LEN(" character set ")); packet->append(field->charset()->csname); } /* @@ -1310,20 +866,20 @@ store_create_info(THD *thd, TABLE *table, String *packet) */ if (!(field->charset()->state & MY_CS_PRIMARY)) { - packet->append(" collate ", 9); + packet->append(STRING_WITH_LEN(" collate ")); packet->append(field->charset()->name); } } if (flags & NOT_NULL_FLAG) - packet->append(" NOT NULL", 9); + packet->append(STRING_WITH_LEN(" NOT NULL")); else if (field->type() == FIELD_TYPE_TIMESTAMP) { /* TIMESTAMP field require explicit NULL flag, because unlike all other fields they are treated as NOT NULL by default. 
*/ - packet->append(" NULL", 5); + packet->append(STRING_WITH_LEN(" NULL")); } /* @@ -1334,15 +890,16 @@ store_create_info(THD *thd, TABLE *table, String *packet) field->unireg_check != Field::TIMESTAMP_UN_FIELD; has_default= (field->type() != FIELD_TYPE_BLOB && + !(field->flags & NO_DEFAULT_VALUE_FLAG) && field->unireg_check != Field::NEXT_NUMBER && !((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) && has_now_default)); if (has_default) { - packet->append(" default ", 9); + packet->append(STRING_WITH_LEN(" default ")); if (has_now_default) - packet->append("CURRENT_TIMESTAMP",17); + packet->append(STRING_WITH_LEN("CURRENT_TIMESTAMP")); else if (!field->is_null()) { // Not null by default type.set(tmp, sizeof(tmp), field->charset()); @@ -1357,53 +914,52 @@ store_create_info(THD *thd, TABLE *table, String *packet) append_unescaped(packet, def_val.ptr(), def_val.length()); } else - packet->append("''",2); + packet->append(STRING_WITH_LEN("''")); } else if (field->maybe_null()) - packet->append("NULL", 4); // Null as default + packet->append(STRING_WITH_LEN("NULL")); // Null as default else packet->append(tmp); } if (!limited_mysql_mode && table->timestamp_field == field && field->unireg_check != Field::TIMESTAMP_DN_FIELD) - packet->append(" on update CURRENT_TIMESTAMP",28); + packet->append(STRING_WITH_LEN(" on update CURRENT_TIMESTAMP")); if (field->unireg_check == Field::NEXT_NUMBER && !(thd->variables.sql_mode & MODE_NO_FIELD_OPTIONS)) - packet->append(" auto_increment", 15 ); + packet->append(STRING_WITH_LEN(" auto_increment")); if (field->comment.length) { - packet->append(" COMMENT ",9); + packet->append(STRING_WITH_LEN(" COMMENT ")); append_unescaped(packet, field->comment.str, field->comment.length); } } key_info= table->key_info; - file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); bzero((char*) &create_info, sizeof(create_info)); file->update_create_info(&create_info); - primary_key= table->primary_key; + primary_key= 
share->primary_key; - for (uint i=0 ; i < table->keys ; i++,key_info++) + for (uint i=0 ; i < share->keys ; i++,key_info++) { KEY_PART_INFO *key_part= key_info->key_part; bool found_primary=0; - packet->append(",\n ", 4); + packet->append(STRING_WITH_LEN(",\n ")); if (i == primary_key && !strcmp(key_info->name, primary_key_name)) { found_primary=1; - packet->append("PRIMARY ", 8); + packet->append(STRING_WITH_LEN("PRIMARY ")); } else if (key_info->flags & HA_NOSAME) - packet->append("UNIQUE ", 7); + packet->append(STRING_WITH_LEN("UNIQUE ")); else if (key_info->flags & HA_FULLTEXT) - packet->append("FULLTEXT ", 9); + packet->append(STRING_WITH_LEN("FULLTEXT ")); else if (key_info->flags & HA_SPATIAL) - packet->append("SPATIAL ", 8); - packet->append("KEY ", 4); + packet->append(STRING_WITH_LEN("SPATIAL ")); + packet->append(STRING_WITH_LEN("KEY ")); if (!found_primary) append_identifier(thd, packet, key_info->name, strlen(key_info->name)); @@ -1412,19 +968,19 @@ store_create_info(THD *thd, TABLE *table, String *packet) !limited_mysql_mode && !foreign_db_mode) { if (key_info->algorithm == HA_KEY_ALG_BTREE) - packet->append(" USING BTREE", 12); - + packet->append(STRING_WITH_LEN(" USING BTREE")); + if (key_info->algorithm == HA_KEY_ALG_HASH) - packet->append(" USING HASH", 11); - + packet->append(STRING_WITH_LEN(" USING HASH")); + // +BAR: send USING only in non-default case: non-spatial rtree if ((key_info->algorithm == HA_KEY_ALG_RTREE) && !(key_info->flags & HA_SPATIAL)) - packet->append(" USING RTREE", 12); + packet->append(STRING_WITH_LEN(" USING RTREE")); - // No need to send TYPE FULLTEXT, it is sent as FULLTEXT KEY + // No need to send USING FULLTEXT, it is sent as FULLTEXT KEY } - packet->append(" (", 2); + packet->append(STRING_WITH_LEN(" (")); for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) { @@ -1434,13 +990,13 @@ store_create_info(THD *thd, TABLE *table, String *packet) if (key_part->field) 
append_identifier(thd,packet,key_part->field->field_name, strlen(key_part->field->field_name)); - if (!key_part->field || + if (key_part->field && (key_part->length != table->field[key_part->fieldnr-1]->key_length() && - !(key_info->flags & HA_FULLTEXT))) + !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL)))) { buff[0] = '('; - char* end=int10_to_str((long) key_part->length / + char* end=int10_to_str((long) key_part->length / key_part->field->charset()->mbmaxlen, buff + 1,10); *end++ = ')'; @@ -1461,15 +1017,15 @@ store_create_info(THD *thd, TABLE *table, String *packet) file->free_foreign_key_create_info(for_str); } - packet->append("\n)", 2); + packet->append(STRING_WITH_LEN("\n)")); if (!(thd->variables.sql_mode & MODE_NO_TABLE_OPTIONS) && !foreign_db_mode) { if (thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) - packet->append(" TYPE=", 6); + packet->append(STRING_WITH_LEN(" TYPE=")); else - packet->append(" ENGINE=", 8); + packet->append(STRING_WITH_LEN(" ENGINE=")); packet->append(file->table_type()); - + /* Add AUTO_INCREMENT=... if there is an AUTO_INCREMENT column, and NEXT_ID > 1 (the default). We must not print the clause @@ -1481,65 +1037,75 @@ store_create_info(THD *thd, TABLE *table, String *packet) but may extrapolate its existence from that of an AUTO_INCREMENT column. 
*/ - if(create_info.auto_increment_value > 1) + if (create_info.auto_increment_value > 1) { + char *end; packet->append(" AUTO_INCREMENT=", 16); end= longlong10_to_str(create_info.auto_increment_value, buff,10); packet->append(buff, (uint) (end - buff)); } + - if (table->table_charset && + if (share->table_charset && !(thd->variables.sql_mode & MODE_MYSQL323) && !(thd->variables.sql_mode & MODE_MYSQL40)) { - packet->append(" DEFAULT CHARSET=", 17); - packet->append(table->table_charset->csname); - if (!(table->table_charset->state & MY_CS_PRIMARY)) + packet->append(STRING_WITH_LEN(" DEFAULT CHARSET=")); + packet->append(share->table_charset->csname); + if (!(share->table_charset->state & MY_CS_PRIMARY)) { - packet->append(" COLLATE=", 9); - packet->append(table->table_charset->name); + packet->append(STRING_WITH_LEN(" COLLATE=")); + packet->append(table->s->table_charset->name); } } - if (table->min_rows) + if (share->min_rows) { - packet->append(" MIN_ROWS=", 10); - end= longlong10_to_str(table->min_rows, buff, 10); + char *end; + packet->append(STRING_WITH_LEN(" MIN_ROWS=")); + end= longlong10_to_str(share->min_rows, buff, 10); packet->append(buff, (uint) (end- buff)); } - if (table->max_rows) + if (share->max_rows && !table_list->schema_table) { - packet->append(" MAX_ROWS=", 10); - end= longlong10_to_str(table->max_rows, buff, 10); + char *end; + packet->append(STRING_WITH_LEN(" MAX_ROWS=")); + end= longlong10_to_str(share->max_rows, buff, 10); packet->append(buff, (uint) (end - buff)); } - if (table->avg_row_length) + if (share->avg_row_length) { - packet->append(" AVG_ROW_LENGTH=", 16); - end= longlong10_to_str(table->avg_row_length, buff,10); + char *end; + packet->append(STRING_WITH_LEN(" AVG_ROW_LENGTH=")); + end= longlong10_to_str(share->avg_row_length, buff,10); packet->append(buff, (uint) (end - buff)); } - if (table->db_create_options & HA_OPTION_PACK_KEYS) - packet->append(" PACK_KEYS=1", 12); - if (table->db_create_options & HA_OPTION_NO_PACK_KEYS) - 
packet->append(" PACK_KEYS=0", 12); - if (table->db_create_options & HA_OPTION_CHECKSUM) - packet->append(" CHECKSUM=1", 11); - if (table->db_create_options & HA_OPTION_DELAY_KEY_WRITE) - packet->append(" DELAY_KEY_WRITE=1",18); - if (table->row_type != ROW_TYPE_DEFAULT) + if (share->db_create_options & HA_OPTION_PACK_KEYS) + packet->append(STRING_WITH_LEN(" PACK_KEYS=1")); + if (share->db_create_options & HA_OPTION_NO_PACK_KEYS) + packet->append(STRING_WITH_LEN(" PACK_KEYS=0")); + if (share->db_create_options & HA_OPTION_CHECKSUM) + packet->append(STRING_WITH_LEN(" CHECKSUM=1")); + if (share->db_create_options & HA_OPTION_DELAY_KEY_WRITE) + packet->append(STRING_WITH_LEN(" DELAY_KEY_WRITE=1")); + if (share->row_type != ROW_TYPE_DEFAULT) { - packet->append(" ROW_FORMAT=",12); - packet->append(ha_row_type[(uint) table->row_type]); + packet->append(STRING_WITH_LEN(" ROW_FORMAT=")); + packet->append(ha_row_type[(uint) share->row_type]); } table->file->append_create_info(packet); - if (table->comment && table->comment[0]) + if (share->comment.length) { - packet->append(" COMMENT=", 9); - append_unescaped(packet, table->comment, strlen(table->comment)); + packet->append(STRING_WITH_LEN(" COMMENT=")); + append_unescaped(packet, share->comment.str, share->comment.length); + } + if (share->connect_string.length) + { + packet->append(STRING_WITH_LEN(" CONNECTION=")); + append_unescaped(packet, share->connect_string.str, share->connect_string.length); } if (file->raid_type) { @@ -1556,6 +1122,132 @@ store_create_info(THD *thd, TABLE *table, String *packet) DBUG_RETURN(0); } +void +view_store_options(THD *thd, TABLE_LIST *table, String *buff) +{ + append_algorithm(table, buff); + append_definer(thd, buff, &table->definer.user, &table->definer.host); + if (table->view_suid) + buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER ")); + else + buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER ")); +} + + +/* + Append DEFINER clause to the given buffer. 
+ + SYNOPSIS + append_definer() + thd [in] thread handle + buffer [inout] buffer to hold DEFINER clause + definer_user [in] user name part of definer + definer_host [in] host name part of definer +*/ + +static void append_algorithm(TABLE_LIST *table, String *buff) +{ + buff->append(STRING_WITH_LEN("ALGORITHM=")); + switch ((int8)table->algorithm) { + case VIEW_ALGORITHM_UNDEFINED: + buff->append(STRING_WITH_LEN("UNDEFINED ")); + break; + case VIEW_ALGORITHM_TMPTABLE: + buff->append(STRING_WITH_LEN("TEMPTABLE ")); + break; + case VIEW_ALGORITHM_MERGE: + buff->append(STRING_WITH_LEN("MERGE ")); + break; + default: + DBUG_ASSERT(0); // never should happen + } +} + + +/* + Append DEFINER clause to the given buffer. + + SYNOPSIS + append_definer() + thd [in] thread handle + buffer [inout] buffer to hold DEFINER clause + definer_user [in] user name part of definer + definer_host [in] host name part of definer +*/ + +void append_definer(THD *thd, String *buffer, const LEX_STRING *definer_user, + const LEX_STRING *definer_host) +{ + buffer->append(STRING_WITH_LEN("DEFINER=")); + append_identifier(thd, buffer, definer_user->str, definer_user->length); + buffer->append('@'); + append_identifier(thd, buffer, definer_host->str, definer_host->length); + buffer->append(' '); +} + + +static int +view_store_create_info(THD *thd, TABLE_LIST *table, String *buff) +{ + my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | + MODE_ORACLE | + MODE_MSSQL | + MODE_DB2 | + MODE_MAXDB | + MODE_ANSI)) != 0; + /* + Compact output format for view can be used + - if user has db of this view as current db + - if this view only references table inside it's own db + */ + if (!thd->db || strcmp(thd->db, table->view_db.str)) + table->compact_view_format= FALSE; + else + { + TABLE_LIST *tbl; + table->compact_view_format= TRUE; + for (tbl= thd->lex->query_tables; + tbl; + tbl= tbl->next_global) + { + if (strcmp(table->view_db.str, tbl->view ? 
tbl->view_db.str :tbl->db)!= 0) + { + table->compact_view_format= FALSE; + break; + } + } + } + + buff->append(STRING_WITH_LEN("CREATE ")); + if (!foreign_db_mode) + { + view_store_options(thd, table, buff); + } + buff->append(STRING_WITH_LEN("VIEW ")); + if (!table->compact_view_format) + { + append_identifier(thd, buff, table->view_db.str, table->view_db.length); + buff->append('.'); + } + append_identifier(thd, buff, table->view_name.str, table->view_name.length); + buff->append(STRING_WITH_LEN(" AS ")); + + /* + We can't just use table->query, because our SQL_MODE may trigger + a different syntax, like when ANSI_QUOTES is defined. + */ + table->view->unit.print(buff); + + if (table->with_check != VIEW_CHECK_NONE) + { + if (table->with_check == VIEW_CHECK_LOCAL) + buff->append(STRING_WITH_LEN(" WITH LOCAL CHECK OPTION")); + else + buff->append(STRING_WITH_LEN(" WITH CASCADED CHECK OPTION")); + } + return 0; +} + /**************************************************************************** Return info about all processes @@ -1564,9 +1256,13 @@ store_create_info(THD *thd, TABLE *table, String *packet) class thread_info :public ilink { public: - static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } + static void *operator new(size_t size) + { + return (void*) sql_alloc((uint) size); + } static void operator delete(void *ptr __attribute__((unused)), - size_t size __attribute__((unused))) {} /*lint -e715 */ + size_t size __attribute__((unused))) + { TRASH(ptr, size); } ulong thread_id; time_t start_time; @@ -1575,7 +1271,7 @@ public: char *query; }; -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class I_List<thread_info>; #endif @@ -1589,7 +1285,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) Protocol *protocol= thd->protocol; DBUG_ENTER("mysqld_list_processes"); - field_list.push_back(new Item_int("Id",0,11)); + field_list.push_back(new Item_int("Id", 0, MY_INT32_NUM_DECIMAL_DIGITS)); 
field_list.push_back(new Item_empty_string("User",16)); field_list.push_back(new Item_empty_string("Host",LIST_PROCESS_HOST_LEN)); field_list.push_back(field=new Item_empty_string("db",NAME_LEN)); @@ -1600,7 +1296,8 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) field->maybe_null=1; field_list.push_back(field=new Item_empty_string("Info",max_query_length)); field->maybe_null=1; - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_VOID_RETURN; VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list @@ -1610,30 +1307,34 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) THD *tmp; while ((tmp=it++)) { + Security_context *tmp_sctx= tmp->security_ctx; struct st_my_thread_var *mysys_var; if ((tmp->vio_ok() || tmp->system_thread) && - (!user || (tmp->user && !strcmp(tmp->user,user)))) + (!user || (tmp_sctx->user && !strcmp(tmp_sctx->user, user)))) { - thread_info *thd_info=new thread_info; + thread_info *thd_info= new thread_info; thd_info->thread_id=tmp->thread_id; - thd_info->user=thd->strdup(tmp->user ? tmp->user : - (tmp->system_thread ? - "system user" : "unauthenticated user")); - if (tmp->peer_port && (tmp->host || tmp->ip) && thd->host_or_ip[0]) + thd_info->user= thd->strdup(tmp_sctx->user ? tmp_sctx->user : + (tmp->system_thread ? + "system user" : "unauthenticated user")); + if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) && + thd->security_ctx->host_or_ip[0]) { if ((thd_info->host= thd->alloc(LIST_PROCESS_HOST_LEN+1))) my_snprintf((char *) thd_info->host, LIST_PROCESS_HOST_LEN, - "%s:%u", tmp->host_or_ip, tmp->peer_port); + "%s:%u", tmp_sctx->host_or_ip, tmp->peer_port); } else - thd_info->host= thd->strdup(tmp->host_or_ip); + thd_info->host= thd->strdup(tmp_sctx->host_or_ip[0] ? + tmp_sctx->host_or_ip : + tmp_sctx->host ? 
tmp_sctx->host : ""); if ((thd_info->db=tmp->db)) // Safe test thd_info->db=thd->strdup(thd_info->db); thd_info->command=(int) tmp->command; if ((mysys_var= tmp->mysys_var)) pthread_mutex_lock(&mysys_var->mutex); - thd_info->proc_info= (char*) (tmp->killed ? "Killed" : 0); + thd_info->proc_info= (char*) (tmp->killed == THD::KILL_CONNECTION? "Killed" : 0); #ifndef EMBEDDED_LIBRARY thd_info->state_info= (char*) (tmp->locked ? "Locked" : tmp->net.reading_or_writing ? @@ -1707,434 +1408,2959 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) Status functions *****************************************************************************/ -static bool write_collation(Protocol *protocol, CHARSET_INFO *cs) + +static bool show_status_array(THD *thd, const char *wild, + show_var_st *variables, + enum enum_var_type value_type, + struct system_status_var *status_var, + const char *prefix, TABLE *table) { - protocol->prepare_for_resend(); - protocol->store(cs->name, system_charset_info); - protocol->store(cs->csname, system_charset_info); - protocol->store_short((longlong) cs->number); - protocol->store((cs->state & MY_CS_PRIMARY) ? "Yes" : "",system_charset_info); - protocol->store((cs->state & MY_CS_COMPILED)? 
"Yes" : "",system_charset_info); - protocol->store_short((longlong) cs->strxfrm_multiply); - return protocol->write(); + char buff[1024], *prefix_end; + /* the variable name should not be longer then 80 characters */ + char name_buffer[80]; + int len; + LEX_STRING null_lex_str; + DBUG_ENTER("show_status_array"); + + null_lex_str.str= 0; // For sys_var->value_ptr() + null_lex_str.length= 0; + + prefix_end=strnmov(name_buffer, prefix, sizeof(name_buffer)-1); + len=name_buffer + sizeof(name_buffer) - prefix_end; + + for (; variables->name; variables++) + { + strnmov(prefix_end, variables->name, len); + name_buffer[sizeof(name_buffer)-1]=0; /* Safety */ + SHOW_TYPE show_type=variables->type; + if (show_type == SHOW_VARS) + { + show_status_array(thd, wild, (show_var_st *) variables->value, + value_type, status_var, variables->name, table); + } + else + { + if (!(wild && wild[0] && wild_case_compare(system_charset_info, + name_buffer, wild))) + { + char *value=variables->value; + const char *pos, *end; // We assign a lot of const's + long nr; + if (show_type == SHOW_SYS) + { + show_type= ((sys_var*) value)->show_type(); + value= (char*) ((sys_var*) value)->value_ptr(thd, value_type, + &null_lex_str); + } + + pos= end= buff; + switch (show_type) { + case SHOW_LONG_STATUS: + case SHOW_LONG_CONST_STATUS: + value= ((char *) status_var + (ulong) value); + /* fall through */ + case SHOW_LONG: + case SHOW_LONG_CONST: + end= int10_to_str(*(long*) value, buff, 10); + break; + case SHOW_LONGLONG: + end= longlong10_to_str(*(longlong*) value, buff, 10); + break; + case SHOW_HA_ROWS: + end= longlong10_to_str((longlong) *(ha_rows*) value, buff, 10); + break; + case SHOW_BOOL: + end= strmov(buff, *(bool*) value ? "ON" : "OFF"); + break; + case SHOW_MY_BOOL: + end= strmov(buff, *(my_bool*) value ? 
"ON" : "OFF"); + break; + case SHOW_INT_CONST: + case SHOW_INT: + end= int10_to_str((long) *(uint32*) value, buff, 10); + break; + case SHOW_HAVE: + { + SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) value; + pos= show_comp_option_name[(int) tmp]; + end= strend(pos); + break; + } + case SHOW_CHAR: + { + if (!(pos= value)) + pos= ""; + end= strend(pos); + break; + } + case SHOW_STARTTIME: + nr= (long) (thd->query_start() - server_start_time); + end= int10_to_str(nr, buff, 10); + break; + case SHOW_QUESTION: + end= int10_to_str((long) thd->query_id, buff, 10); + break; +#ifdef HAVE_REPLICATION + case SHOW_RPL_STATUS: + end= strmov(buff, rpl_status_type[(int)rpl_status]); + break; + case SHOW_SLAVE_RUNNING: + { + pthread_mutex_lock(&LOCK_active_mi); + end= strmov(buff, (active_mi && active_mi->slave_running && + active_mi->rli.slave_running) ? "ON" : "OFF"); + pthread_mutex_unlock(&LOCK_active_mi); + break; + } + case SHOW_SLAVE_RETRIED_TRANS: + { + /* + TODO: in 5.1 with multimaster, have one such counter per line in + SHOW SLAVE STATUS, and have the sum over all lines here. 
+ */ + pthread_mutex_lock(&LOCK_active_mi); + if (active_mi) + { + pthread_mutex_lock(&active_mi->rli.data_lock); + end= int10_to_str(active_mi->rli.retried_trans, buff, 10); + pthread_mutex_unlock(&active_mi->rli.data_lock); + } + pthread_mutex_unlock(&LOCK_active_mi); + break; + } + case SHOW_SLAVE_SKIP_ERRORS: + { + MY_BITMAP *bitmap= (MY_BITMAP *)value; + if (!use_slave_mask || bitmap_is_clear_all(bitmap)) + { + end= strmov(buff, "OFF"); + } + else if (bitmap_is_set_all(bitmap)) + { + end= strmov(buff, "ALL"); + } + else + { + /* 10 is enough assuming errors are max 4 digits */ + int i; + for (i= 1; + i < MAX_SLAVE_ERROR && (uint) (end-buff) < sizeof(buff)-10; + i++) + { + if (bitmap_is_set(bitmap, i)) + { + end= int10_to_str(i, (char*) end, 10); + *(char*) end++= ','; + } + } + if (end != buff) + end--; // Remove last ',' + if (i < MAX_SLAVE_ERROR) + end= strmov((char*) end, "..."); // Couldn't show all errors + } + break; + } +#endif /* HAVE_REPLICATION */ + case SHOW_OPENTABLES: + end= int10_to_str((long) cached_tables(), buff, 10); + break; + case SHOW_CHAR_PTR: + { + if (!(pos= *(char**) value)) + pos= ""; + end= strend(pos); + break; + } + case SHOW_DOUBLE_STATUS: + { + value= ((char *) status_var + (ulong) value); + end= buff + sprintf(buff, "%f", *(double*) value); + break; + } +#ifdef HAVE_OPENSSL + /* First group - functions relying on CTX */ + case SHOW_SSL_CTX_SESS_ACCEPT: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_accept(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_ACCEPT_GOOD: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_accept_good(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_CONNECT_GOOD: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_connect_good(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE: + end= int10_to_str((long) (!ssl_acceptor_fd ? 
0 : + SSL_CTX_sess_accept_renegotiate(ssl_acceptor_fd->ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_CONNECT_RENEGOTIATE: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_connect_renegotiate(ssl_acceptor_fd-> ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_CB_HITS: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_cb_hits(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_HITS: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_hits(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_CACHE_FULL: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_cache_full(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_MISSES: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_misses(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_TIMEOUTS: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_timeouts(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_SESS_NUMBER: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_number(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_SESS_CONNECT: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_connect(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_SESS_GET_CACHE_SIZE: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_get_cache_size(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_GET_VERIFY_MODE: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_get_verify_mode(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_GET_VERIFY_DEPTH: + end= int10_to_str((long) (!ssl_acceptor_fd ? 
0 : + SSL_CTX_get_verify_depth(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_GET_SESSION_CACHE_MODE: + if (!ssl_acceptor_fd) + { + pos= "NONE"; + end= pos+4; + break; + } + switch (SSL_CTX_get_session_cache_mode(ssl_acceptor_fd->ssl_context)) + { + case SSL_SESS_CACHE_OFF: + pos= "OFF"; + break; + case SSL_SESS_CACHE_CLIENT: + pos= "CLIENT"; + break; + case SSL_SESS_CACHE_SERVER: + pos= "SERVER"; + break; + case SSL_SESS_CACHE_BOTH: + pos= "BOTH"; + break; + case SSL_SESS_CACHE_NO_AUTO_CLEAR: + pos= "NO_AUTO_CLEAR"; + break; + case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP: + pos= "NO_INTERNAL_LOOKUP"; + break; + default: + pos= "Unknown"; + break; + } + end= strend(pos); + break; + /* First group - functions relying on SSL */ + case SHOW_SSL_GET_VERSION: + pos= (thd->net.vio->ssl_arg ? + SSL_get_version((SSL*) thd->net.vio->ssl_arg) : ""); + end= strend(pos); + break; + case SHOW_SSL_SESSION_REUSED: + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_session_reused((SSL*) thd->net.vio-> + ssl_arg) : + 0), + buff, 10); + break; + case SHOW_SSL_GET_DEFAULT_TIMEOUT: + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_get_default_timeout((SSL*) thd->net.vio-> + ssl_arg) : + 0), + buff, 10); + break; + case SHOW_SSL_GET_VERIFY_MODE: + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_get_verify_mode((SSL*) thd->net.vio-> + ssl_arg): + 0), + buff, 10); + break; + case SHOW_SSL_GET_VERIFY_DEPTH: + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_get_verify_depth((SSL*) thd->net.vio-> + ssl_arg): + 0), + buff, 10); + break; + case SHOW_SSL_GET_CIPHER: + pos= (thd->net.vio->ssl_arg ? 
+ SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : "" ); + end= strend(pos); + break; + case SHOW_SSL_GET_CIPHER_LIST: + if (thd->net.vio->ssl_arg) + { + char *to= buff; + for (int i=0 ; i++ ;) + { + const char *p= SSL_get_cipher_list((SSL*) thd->net.vio->ssl_arg,i); + if (p == NULL) + break; + to= strmov(to, p); + *to++= ':'; + } + if (to != buff) + to--; // Remove last ':' + end= to; + } + break; + +#endif /* HAVE_OPENSSL */ + case SHOW_KEY_CACHE_LONG: + case SHOW_KEY_CACHE_CONST_LONG: + value= (value-(char*) &dflt_key_cache_var)+ (char*) dflt_key_cache; + end= int10_to_str(*(long*) value, buff, 10); + break; + case SHOW_KEY_CACHE_LONGLONG: + value= (value-(char*) &dflt_key_cache_var)+ (char*) dflt_key_cache; + end= longlong10_to_str(*(longlong*) value, buff, 10); + break; + case SHOW_NET_COMPRESSION: + end= strmov(buff, thd->net.compress ? "ON" : "OFF"); + break; + case SHOW_UNDEF: // Show never happen + case SHOW_SYS: + break; // Return empty string + default: + break; + } + restore_record(table, s->default_values); + table->field[0]->store(name_buffer, strlen(name_buffer), + system_charset_info); + table->field[1]->store(pos, (uint32) (end - pos), system_charset_info); + if (schema_table_store_record(thd, table)) + DBUG_RETURN(TRUE); + } + } + } + + DBUG_RETURN(FALSE); } -int mysqld_show_collations(THD *thd, const char *wild) + +/* collect status for all running threads */ + +void calc_sum_of_all_status(STATUS_VAR *to) { - char buff[8192]; - String packet2(buff,sizeof(buff),thd->charset()); - List<Item> field_list; - CHARSET_INFO **cs; - Protocol *protocol= thd->protocol; + DBUG_ENTER("calc_sum_of_all_status"); + + /* Ensure that thread id not killed during loop */ + VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list + + I_List_iterator<THD> it(threads); + THD *tmp; + + /* Get global values as base */ + *to= global_status_var; + + /* Add to this status from existing threads */ + while ((tmp= it++)) + add_to_status(to, &tmp->status_var); + + 
VOID(pthread_mutex_unlock(&LOCK_thread_count)); + DBUG_VOID_RETURN; +} + + +LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str, + const char* str, uint length, + bool allocate_lex_string) +{ + MEM_ROOT *mem= thd->mem_root; + if (allocate_lex_string) + if (!(lex_str= (LEX_STRING *)thd->alloc(sizeof(LEX_STRING)))) + return 0; + lex_str->str= strmake_root(mem, str, length); + lex_str->length= length; + return lex_str; +} - DBUG_ENTER("mysqld_show_charsets"); - field_list.push_back(new Item_empty_string("Collation",30)); - field_list.push_back(new Item_empty_string("Charset",30)); - field_list.push_back(new Item_return_int("Id",11, FIELD_TYPE_SHORT)); - field_list.push_back(new Item_empty_string("Default",30)); - field_list.push_back(new Item_empty_string("Compiled",30)); - field_list.push_back(new Item_return_int("Sortlen",3, FIELD_TYPE_SHORT)); +/* INFORMATION_SCHEMA name */ +LEX_STRING information_schema_name= {(char*)"information_schema", 18}; - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(1); +/* This is only used internally, but we need it here as a forward reference */ +extern ST_SCHEMA_TABLE schema_tables[]; + +typedef struct st_index_field_values +{ + const char *db_value, *table_value; +} INDEX_FIELD_VALUES; - for ( cs= all_charsets ; cs < all_charsets+255 ; cs++ ) + +/* + Store record to I_S table, convert HEAP table + to MyISAM if necessary + + SYNOPSIS + schema_table_store_record() + thd thread handler + table Information schema table to be updated + + RETURN + 0 success + 1 error +*/ + +static bool schema_table_store_record(THD *thd, TABLE *table) +{ + int error; + if ((error= table->file->write_row(table->record[0]))) { - CHARSET_INFO **cl; - if (!cs[0] || !(cs[0]->state & MY_CS_AVAILABLE) || - !(cs[0]->state & MY_CS_PRIMARY)) + if (create_myisam_from_heap(thd, table, + table->pos_in_table_list->schema_table_param, + error, 0)) + return 1; + } + return 0; +} + + +void get_index_field_values(LEX *lex, INDEX_FIELD_VALUES 
*index_field_values) +{ + const char *wild= lex->wild ? lex->wild->ptr() : NullS; + switch (lex->orig_sql_command) { + case SQLCOM_SHOW_DATABASES: + index_field_values->db_value= wild; + break; + case SQLCOM_SHOW_TABLES: + case SQLCOM_SHOW_TABLE_STATUS: + case SQLCOM_SHOW_TRIGGERS: + index_field_values->db_value= lex->select_lex.db; + index_field_values->table_value= wild; + break; + default: + index_field_values->db_value= NullS; + index_field_values->table_value= NullS; + break; + } +} + + +int make_table_list(THD *thd, SELECT_LEX *sel, + char *db, char *table) +{ + Table_ident *table_ident; + LEX_STRING ident_db, ident_table; + ident_db.str= db; + ident_db.length= strlen(db); + ident_table.str= table; + ident_table.length= strlen(table); + table_ident= new Table_ident(thd, ident_db, ident_table, 1); + sel->init_query(); + if (!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ, + (List<String> *) 0, (List<String> *) 0)) + return 1; + return 0; +} + + +bool uses_only_table_name_fields(Item *item, TABLE_LIST *table) +{ + if (item->type() == Item::FUNC_ITEM) + { + Item_func *item_func= (Item_func*)item; + Item **child; + Item **item_end= (item_func->arguments()) + item_func->argument_count(); + for (child= item_func->arguments(); child != item_end; child++) + { + if (!uses_only_table_name_fields(*child, table)) + return 0; + } + } + else if (item->type() == Item::FIELD_ITEM) + { + Item_field *item_field= (Item_field*)item; + CHARSET_INFO *cs= system_charset_info; + ST_SCHEMA_TABLE *schema_table= table->schema_table; + ST_FIELD_INFO *field_info= schema_table->fields_info; + const char *field_name1= schema_table->idx_field1 >= 0 ? field_info[schema_table->idx_field1].field_name : ""; + const char *field_name2= schema_table->idx_field2 >= 0 ? 
field_info[schema_table->idx_field2].field_name : ""; + if (table->table != item_field->field->table || + (cs->coll->strnncollsp(cs, (uchar *) field_name1, strlen(field_name1), + (uchar *) item_field->field_name, + strlen(item_field->field_name), 0) && + cs->coll->strnncollsp(cs, (uchar *) field_name2, strlen(field_name2), + (uchar *) item_field->field_name, + strlen(item_field->field_name), 0))) + return 0; + } + else if (item->type() == Item::REF_ITEM) + return uses_only_table_name_fields(item->real_item(), table); + if (item->type() == Item::SUBSELECT_ITEM && + !item->const_item()) + return 0; + + return 1; +} + + +static COND * make_cond_for_info_schema(COND *cond, TABLE_LIST *table) +{ + if (!cond) + return (COND*) 0; + if (cond->type() == Item::COND_ITEM) + { + if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) + { + /* Create new top level AND item */ + Item_cond_and *new_cond=new Item_cond_and; + if (!new_cond) + return (COND*) 0; + List_iterator<Item> li(*((Item_cond*) cond)->argument_list()); + Item *item; + while ((item=li++)) + { + Item *fix= make_cond_for_info_schema(item, table); + if (fix) + new_cond->argument_list()->push_back(fix); + } + switch (new_cond->argument_list()->elements) { + case 0: + return (COND*) 0; + case 1: + return new_cond->argument_list()->head(); + default: + new_cond->quick_fix_field(); + return new_cond; + } + } + else + { // Or list + Item_cond_or *new_cond=new Item_cond_or; + if (!new_cond) + return (COND*) 0; + List_iterator<Item> li(*((Item_cond*) cond)->argument_list()); + Item *item; + while ((item=li++)) + { + Item *fix=make_cond_for_info_schema(item, table); + if (!fix) + return (COND*) 0; + new_cond->argument_list()->push_back(fix); + } + new_cond->quick_fix_field(); + new_cond->top_level_item(); + return new_cond; + } + } + + if (!uses_only_table_name_fields(cond, table)) + return (COND*) 0; + return cond; +} + + +enum enum_schema_tables get_schema_table_idx(ST_SCHEMA_TABLE *schema_table) +{ + return 
(enum enum_schema_tables) (schema_table - &schema_tables[0]); +} + + +/* + Create db names list. Information schema name always is first in list + + SYNOPSIS + make_db_list() + thd thread handler + files list of db names + wild wild string + idx_field_vals idx_field_vals->db_name contains db name or + wild string + with_i_schema returns 1 if we added 'IS' name to list + otherwise returns 0 + is_wild_value if value is 1 then idx_field_vals->db_name is + wild string otherwise it's db name; + + RETURN + zero success + non-zero error +*/ + +int make_db_list(THD *thd, List<char> *files, + INDEX_FIELD_VALUES *idx_field_vals, + bool *with_i_schema, bool is_wild_value) +{ + LEX *lex= thd->lex; + *with_i_schema= 0; + get_index_field_values(lex, idx_field_vals); + if (is_wild_value) + { + /* + This part of code is only for SHOW DATABASES command. + idx_field_vals->db_value can be 0 when we don't use + LIKE clause (see also get_index_field_values() function) + */ + if (!idx_field_vals->db_value || + !wild_case_compare(system_charset_info, + information_schema_name.str, + idx_field_vals->db_value)) + { + *with_i_schema= 1; + if (files->push_back(thd->strdup(information_schema_name.str))) + return 1; + } + return (find_files(thd, files, NullS, mysql_data_home, + idx_field_vals->db_value, 1) != FIND_FILES_OK); + } + + /* + This part of code is for SHOW TABLES, SHOW TABLE STATUS commands. + idx_field_vals->db_value can't be 0 (see get_index_field_values() + function). lex->orig_sql_command can be not equal to SQLCOM_END + only in case of executing of SHOW commands. + */ + if (lex->orig_sql_command != SQLCOM_END) + { + if (!my_strcasecmp(system_charset_info, information_schema_name.str, + idx_field_vals->db_value)) + { + *with_i_schema= 1; + return files->push_back(thd->strdup(information_schema_name.str)); + } + return files->push_back(thd->strdup(idx_field_vals->db_value)); + } + + /* + Create list of existing databases. 
It is used in case + of select from information schema table + */ + if (files->push_back(thd->strdup(information_schema_name.str))) + return 1; + *with_i_schema= 1; + return (find_files(thd, files, NullS, + mysql_data_home, NullS, 1) != FIND_FILES_OK); +} + + +int schema_tables_add(THD *thd, List<char> *files, const char *wild) +{ + ST_SCHEMA_TABLE *tmp_schema_table= schema_tables; + for (; tmp_schema_table->table_name; tmp_schema_table++) + { + if (tmp_schema_table->hidden) continue; - for ( cl= all_charsets; cl < all_charsets+255 ;cl ++) + if (wild) { - if (!cl[0] || !(cl[0]->state & MY_CS_AVAILABLE) || - !my_charset_same(cs[0],cl[0])) - continue; - if (!(wild && wild[0] && - wild_case_compare(system_charset_info,cl[0]->name,wild))) + if (lower_case_table_names) { - if (write_collation(protocol, cl[0])) - goto err; + if (wild_case_compare(files_charset_info, + tmp_schema_table->table_name, + wild)) + continue; } + else if (wild_compare(tmp_schema_table->table_name, wild, 0)) + continue; } + if (files->push_back(thd->strdup(tmp_schema_table->table_name))) + return 1; } - send_eof(thd); - DBUG_RETURN(0); -err: - DBUG_RETURN(1); + return 0; } -static bool write_charset(Protocol *protocol, CHARSET_INFO *cs) + +int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) { - protocol->prepare_for_resend(); - protocol->store(cs->csname, system_charset_info); - protocol->store(cs->comment ? 
cs->comment : "", system_charset_info); - protocol->store(cs->name, system_charset_info); - protocol->store_short((longlong) cs->mbmaxlen); - return protocol->write(); + LEX *lex= thd->lex; + TABLE *table= tables->table; + SELECT_LEX *select_lex= &lex->select_lex; + SELECT_LEX *old_all_select_lex= lex->all_selects_list; + enum_sql_command save_sql_command= lex->sql_command; + SELECT_LEX *lsel= tables->schema_select_lex; + ST_SCHEMA_TABLE *schema_table= tables->schema_table; + SELECT_LEX sel; + INDEX_FIELD_VALUES idx_field_vals; + char path[FN_REFLEN], *end, *base_name, *orig_base_name, *file_name; + uint len; + bool with_i_schema; + enum enum_schema_tables schema_table_idx; + List<char> bases; + List_iterator_fast<char> it(bases); + COND *partial_cond; + uint derived_tables= lex->derived_tables; + int error= 1; + db_type not_used; + Open_tables_state open_tables_state_backup; + bool save_view_prepare_mode= lex->view_prepare_mode; + Query_tables_list query_tables_list_backup; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *sctx= thd->security_ctx; +#endif + DBUG_ENTER("get_all_tables"); + + LINT_INIT(end); + LINT_INIT(len); + + lex->view_prepare_mode= TRUE; + lex->reset_n_backup_query_tables_list(&query_tables_list_backup); + + /* + We should not introduce deadlocks even if we already have some + tables open and locked, since we won't lock tables which we will + open and will ignore possible name-locks for these tables. + */ + thd->reset_n_backup_open_tables_state(&open_tables_state_backup); + + if (lsel) + { + TABLE_LIST *show_table_list= (TABLE_LIST*) lsel->table_list.first; + bool res; + + lex->all_selects_list= lsel; + /* + Restore thd->temporary_tables to be able to process + temporary tables(only for 'show index' & 'show columns'). + This should be changed when processing of temporary tables for + I_S tables will be done. 
+ */ + thd->temporary_tables= open_tables_state_backup.temporary_tables; + /* + Let us set fake sql_command so views won't try to merge + themselves into main statement. If we don't do this, + SELECT * from information_schema.xxxx will cause problems. + SQLCOM_SHOW_FIELDS is used because it satisfies 'only_view_structure()' + */ + lex->sql_command= SQLCOM_SHOW_FIELDS; + res= open_normal_and_derived_tables(thd, show_table_list, + MYSQL_LOCK_IGNORE_FLUSH); + lex->sql_command= save_sql_command; + /* + get_all_tables() returns 1 on failure and 0 on success thus + return only these and not the result code of ::process_table() + + We should use show_table_list->alias instead of + show_table_list->table_name because table_name + could be changed during opening of I_S tables. It's safe + to use alias because alias contains original table name + in this case(this part of code is used only for + 'show columns' & 'show statistics' commands). + */ + error= test(schema_table->process_table(thd, show_table_list, + table, res, + (show_table_list->view ? + show_table_list->view_db.str : + show_table_list->db), + show_table_list->alias)); + thd->temporary_tables= 0; + close_tables_for_reopen(thd, &show_table_list); + goto err; + } + + schema_table_idx= get_schema_table_idx(schema_table); + + if (make_db_list(thd, &bases, &idx_field_vals, + &with_i_schema, 0)) + goto err; + + partial_cond= make_cond_for_info_schema(cond, tables); + it.rewind(); /* To get access to new elements in basis list */ + while ((orig_base_name= base_name= it++) || + /* + generate error for non existing database. 
+ (to save old behaviour for SHOW TABLES FROM db) + */ + ((lex->orig_sql_command == SQLCOM_SHOW_TABLES || + lex->orig_sql_command == SQLCOM_SHOW_TABLE_STATUS) && + (base_name= select_lex->db) && !bases.elements)) + { +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (!check_access(thd,SELECT_ACL, base_name, + &thd->col_access, 0, 1, with_i_schema) || + sctx->master_access & (DB_ACLS | SHOW_DB_ACL) || + acl_get(sctx->host, sctx->ip, sctx->priv_user, base_name,0) || + (grant_option && !check_grant_db(thd, base_name))) +#endif + { + List<char> files; + if (with_i_schema) // information schema table names + { + if (schema_tables_add(thd, &files, idx_field_vals.table_value)) + goto err; + } + else + { + strxmov(path, mysql_data_home, "/", base_name, NullS); + end= path + (len= unpack_dirname(path,path)); + len= FN_LEN - len; + find_files_result res= find_files(thd, &files, base_name, + path, idx_field_vals.table_value, 0); + if (res != FIND_FILES_OK) + { + /* + Downgrade errors about problems with database directory to + warnings if this is not a 'SHOW' command. Another thread + may have dropped database, and we may still have a name + for that directory. 
+ */ + if (res == FIND_FILES_DIR && lex->orig_sql_command == SQLCOM_END) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + continue; + } + else + { + goto err; + } + } + if (lower_case_table_names) + orig_base_name= thd->strdup(base_name); + } + + List_iterator_fast<char> it_files(files); + while ((file_name= it_files++)) + { + restore_record(table, s->default_values); + table->field[schema_table->idx_field1]-> + store(base_name, strlen(base_name), system_charset_info); + table->field[schema_table->idx_field2]-> + store(file_name, strlen(file_name),system_charset_info); + if (!partial_cond || partial_cond->val_int()) + { + if (schema_table_idx == SCH_TABLE_NAMES) + { + if (lex->verbose || lex->orig_sql_command == SQLCOM_END) + { + if (with_i_schema) + { + table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), + system_charset_info); + } + else + { + my_snprintf(end, len, "/%s%s", file_name, reg_ext); + switch (mysql_frm_type(thd, path, ¬_used)) { + case FRMTYPE_ERROR: + table->field[3]->store(STRING_WITH_LEN("ERROR"), + system_charset_info); + break; + case FRMTYPE_TABLE: + table->field[3]->store(STRING_WITH_LEN("BASE TABLE"), + system_charset_info); + break; + case FRMTYPE_VIEW: + table->field[3]->store(STRING_WITH_LEN("VIEW"), + system_charset_info); + break; + default: + DBUG_ASSERT(0); + } + } + } + if (schema_table_store_record(thd, table)) + goto err; + } + else + { + int res; + /* + Set the parent lex of 'sel' because it is needed by sel.init_query() + which is called inside make_table_list. 
+ */ + sel.parent_lex= lex; + if (make_table_list(thd, &sel, base_name, file_name)) + goto err; + TABLE_LIST *show_table_list= (TABLE_LIST*) sel.table_list.first; + lex->all_selects_list= &sel; + lex->derived_tables= 0; + lex->sql_command= SQLCOM_SHOW_FIELDS; + res= open_normal_and_derived_tables(thd, show_table_list, + MYSQL_LOCK_IGNORE_FLUSH); + lex->sql_command= save_sql_command; + /* + We should use show_table_list->alias instead of + show_table_list->table_name because table_name + could be changed during opening of I_S tables. It's safe + to use alias because alias contains original table name + in this case. + */ + res= schema_table->process_table(thd, show_table_list, table, + res, orig_base_name, + show_table_list->alias); + close_tables_for_reopen(thd, &show_table_list); + DBUG_ASSERT(!lex->query_tables_own_last); + if (res) + goto err; + } + } + } + /* + If we have information schema its always the first table and only + the first table. Reset for other tables. + */ + with_i_schema= 0; + } + } + + error= 0; +err: + thd->restore_backup_open_tables_state(&open_tables_state_backup); + lex->restore_backup_query_tables_list(&query_tables_list_backup); + lex->derived_tables= derived_tables; + lex->all_selects_list= old_all_select_lex; + lex->view_prepare_mode= save_view_prepare_mode; + lex->sql_command= save_sql_command; + DBUG_RETURN(error); } -int mysqld_show_charsets(THD *thd, const char *wild) + +bool store_schema_shemata(THD* thd, TABLE *table, const char *db_name, + CHARSET_INFO *cs) { - char buff[8192]; - String packet2(buff,sizeof(buff),thd->charset()); - List<Item> field_list; - CHARSET_INFO **cs; - Protocol *protocol= thd->protocol; + restore_record(table, s->default_values); + table->field[1]->store(db_name, strlen(db_name), system_charset_info); + table->field[2]->store(cs->csname, strlen(cs->csname), system_charset_info); + table->field[3]->store(cs->name, strlen(cs->name), system_charset_info); + return schema_table_store_record(thd, table); +} + 
- DBUG_ENTER("mysqld_show_charsets"); +int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond) +{ + /* + TODO: fill_schema_shemata() is called when new client is connected. + Returning error status in this case leads to client hangup. + */ - field_list.push_back(new Item_empty_string("Charset",30)); - field_list.push_back(new Item_empty_string("Description",60)); - field_list.push_back(new Item_empty_string("Default collation",60)); - field_list.push_back(new Item_return_int("Maxlen",3, FIELD_TYPE_SHORT)); + INDEX_FIELD_VALUES idx_field_vals; + List<char> files; + char *file_name; + bool with_i_schema; + HA_CREATE_INFO create; + TABLE *table= tables->table; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *sctx= thd->security_ctx; +#endif + DBUG_ENTER("fill_schema_shemata"); - if (protocol->send_fields(&field_list, 1)) + if (make_db_list(thd, &files, &idx_field_vals, + &with_i_schema, 1)) DBUG_RETURN(1); - for ( cs= all_charsets ; cs < all_charsets+255 ; cs++ ) + List_iterator_fast<char> it(files); + while ((file_name=it++)) { - if (cs[0] && (cs[0]->state & MY_CS_PRIMARY) && - (cs[0]->state & MY_CS_AVAILABLE) && - !(wild && wild[0] && - wild_case_compare(system_charset_info,cs[0]->csname,wild))) + if (with_i_schema) // information schema name is always first in list { - if (write_charset(protocol, cs[0])) - goto err; + if (store_schema_shemata(thd, table, file_name, + system_charset_info)) + DBUG_RETURN(1); + with_i_schema= 0; + continue; + } +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (sctx->master_access & (DB_ACLS | SHOW_DB_ACL) || + acl_get(sctx->host, sctx->ip, sctx->priv_user, file_name,0) || + (grant_option && !check_grant_db(thd, file_name))) +#endif + { + load_db_opt_by_name(thd, file_name, &create); + + if (store_schema_shemata(thd, table, file_name, + create.default_table_charset)) + DBUG_RETURN(1); } } - send_eof(thd); DBUG_RETURN(0); -err: - DBUG_RETURN(1); } - -int mysqld_show(THD *thd, const char *wild, show_var_st *variables, - enum 
enum_var_type value_type, - pthread_mutex_t *mutex) +static int get_schema_tables_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) { - char buff[1024]; - List<Item> field_list; - Protocol *protocol= thd->protocol; - LEX_STRING null_lex_str; - DBUG_ENTER("mysqld_show"); + const char *tmp_buff; + TIME time; + CHARSET_INFO *cs= system_charset_info; + DBUG_ENTER("get_schema_tables_record"); - field_list.push_back(new Item_empty_string("Variable_name",30)); - field_list.push_back(new Item_empty_string("Value",256)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); /* purecov: inspected */ - null_lex_str.str= 0; // For sys_var->value_ptr() - null_lex_str.length= 0; + restore_record(table, s->default_values); + table->field[1]->store(base_name, strlen(base_name), cs); + table->field[2]->store(file_name, strlen(file_name), cs); + if (res) + { + /* + there was errors during opening tables + */ + const char *error= thd->net.last_error; + if (tables->view) + table->field[3]->store(STRING_WITH_LEN("VIEW"), cs); + else if (tables->schema_table) + table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), cs); + else + table->field[3]->store(STRING_WITH_LEN("BASE TABLE"), cs); + table->field[20]->store(error, strlen(error), cs); + thd->clear_error(); + } + else if (tables->view) + { + table->field[3]->store(STRING_WITH_LEN("VIEW"), cs); + table->field[20]->store(STRING_WITH_LEN("VIEW"), cs); + } + else + { + TABLE *show_table= tables->table; + TABLE_SHARE *share= show_table->s; + handler *file= show_table->file; + + file->info(HA_STATUS_VARIABLE | HA_STATUS_TIME | HA_STATUS_AUTO | + HA_STATUS_NO_LOCK); + if (share->tmp_table == SYSTEM_TMP_TABLE) + table->field[3]->store(STRING_WITH_LEN("SYSTEM VIEW"), cs); + else if (share->tmp_table) + table->field[3]->store(STRING_WITH_LEN("LOCAL TEMPORARY"), cs); + else + table->field[3]->store(STRING_WITH_LEN("BASE TABLE"), cs); - pthread_mutex_lock(mutex); - 
for (; variables->name; variables++) + for (int i= 4; i < 20; i++) + { + if (i == 7 || (i > 12 && i < 17) || i == 18) + continue; + table->field[i]->set_notnull(); + } + tmp_buff= file->table_type(); + table->field[4]->store(tmp_buff, strlen(tmp_buff), cs); + table->field[5]->store((longlong) share->frm_version, TRUE); + enum row_type row_type = file->get_row_type(); + switch (row_type) { + case ROW_TYPE_NOT_USED: + case ROW_TYPE_DEFAULT: + tmp_buff= ((share->db_options_in_use & + HA_OPTION_COMPRESS_RECORD) ? "Compressed" : + (share->db_options_in_use & HA_OPTION_PACK_RECORD) ? + "Dynamic" : "Fixed"); + break; + case ROW_TYPE_FIXED: + tmp_buff= "Fixed"; + break; + case ROW_TYPE_DYNAMIC: + tmp_buff= "Dynamic"; + break; + case ROW_TYPE_COMPRESSED: + tmp_buff= "Compressed"; + break; + case ROW_TYPE_REDUNDANT: + tmp_buff= "Redundant"; + break; + case ROW_TYPE_COMPACT: + tmp_buff= "Compact"; + break; + } + table->field[6]->store(tmp_buff, strlen(tmp_buff), cs); + if (!tables->schema_table) + { + table->field[7]->store((longlong) file->records, TRUE); + table->field[7]->set_notnull(); + } + table->field[8]->store((longlong) file->mean_rec_length, TRUE); + table->field[9]->store((longlong) file->data_file_length, TRUE); + if (file->max_data_file_length) + { + table->field[10]->store((longlong) file->max_data_file_length, TRUE); + } + table->field[11]->store((longlong) file->index_file_length, TRUE); + table->field[12]->store((longlong) file->delete_length, TRUE); + if (show_table->found_next_number_field) + { + table->field[13]->store((longlong) file->auto_increment_value, TRUE); + table->field[13]->set_notnull(); + } + if (file->create_time) + { + thd->variables.time_zone->gmt_sec_to_TIME(&time, + (my_time_t) file->create_time); + table->field[14]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + table->field[14]->set_notnull(); + } + if (file->update_time) + { + thd->variables.time_zone->gmt_sec_to_TIME(&time, + (my_time_t) file->update_time); + 
table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + table->field[15]->set_notnull(); + } + if (file->check_time) + { + thd->variables.time_zone->gmt_sec_to_TIME(&time, + (my_time_t) file->check_time); + table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + table->field[16]->set_notnull(); + } + tmp_buff= (share->table_charset ? + share->table_charset->name : "default"); + table->field[17]->store(tmp_buff, strlen(tmp_buff), cs); + if (file->table_flags() & (ulong) HA_HAS_CHECKSUM) + { + table->field[18]->store((longlong) file->checksum(), TRUE); + table->field[18]->set_notnull(); + } + + char option_buff[350],*ptr; + ptr=option_buff; + if (share->min_rows) + { + ptr=strmov(ptr," min_rows="); + ptr=longlong10_to_str(share->min_rows,ptr,10); + } + if (share->max_rows) + { + ptr=strmov(ptr," max_rows="); + ptr=longlong10_to_str(share->max_rows,ptr,10); + } + if (share->avg_row_length) + { + ptr=strmov(ptr," avg_row_length="); + ptr=longlong10_to_str(share->avg_row_length,ptr,10); + } + if (share->db_create_options & HA_OPTION_PACK_KEYS) + ptr=strmov(ptr," pack_keys=1"); + if (share->db_create_options & HA_OPTION_NO_PACK_KEYS) + ptr=strmov(ptr," pack_keys=0"); + if (share->db_create_options & HA_OPTION_CHECKSUM) + ptr=strmov(ptr," checksum=1"); + if (share->db_create_options & HA_OPTION_DELAY_KEY_WRITE) + ptr=strmov(ptr," delay_key_write=1"); + if (share->row_type != ROW_TYPE_DEFAULT) + ptr=strxmov(ptr, " row_format=", + ha_row_type[(uint) share->row_type], + NullS); + if (file->raid_type) + { + char buff[100]; + my_snprintf(buff,sizeof(buff), + " raid_type=%s raid_chunks=%d raid_chunksize=%ld", + my_raid_type(file->raid_type), file->raid_chunks, + file->raid_chunksize/RAID_BLOCK_SIZE); + ptr=strmov(ptr,buff); + } + table->field[19]->store(option_buff+1, + (ptr == option_buff ? 
0 : + (uint) (ptr-option_buff)-1), cs); + { + char *comment; + comment= show_table->file->update_table_comment(share->comment.str); + if (comment) + { + table->field[20]->store(comment, + (comment == share->comment.str ? + share->comment.length : + strlen(comment)), cs); + if (comment != share->comment.str) + my_free(comment, MYF(0)); + } + } + } + DBUG_RETURN(schema_table_store_record(thd, table)); +} + + +static int get_schema_column_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + LEX *lex= thd->lex; + const char *wild= lex->wild ? lex->wild->ptr() : NullS; + CHARSET_INFO *cs= system_charset_info; + TABLE *show_table; + handler *file; + Field **ptr,*field; + int count; + uint base_name_length, file_name_length; + DBUG_ENTER("get_schema_column_record"); + + if (res) { - if (!(wild && wild[0] && wild_case_compare(system_charset_info, - variables->name,wild))) + if (lex->orig_sql_command != SQLCOM_SHOW_FIELDS) { - protocol->prepare_for_resend(); - protocol->store(variables->name, system_charset_info); - SHOW_TYPE show_type=variables->type; - char *value=variables->value; - const char *pos, *end; - long nr; + /* + I.e. 
we are in SELECT FROM INFORMATION_SCHEMA.COLUMS + rather than in SHOW COLUMNS + */ + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + res= 0; + } + DBUG_RETURN(res); + } + + show_table= tables->table; + file= show_table->file; + count= 0; + file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); + restore_record(show_table, s->default_values); + base_name_length= strlen(base_name); + file_name_length= strlen(file_name); + + for (ptr=show_table->field; (field= *ptr) ; ptr++) + { + const char *tmp_buff; + byte *pos; + bool is_blob; + uint flags=field->flags; + char tmp[MAX_FIELD_WIDTH]; + char tmp1[MAX_FIELD_WIDTH]; + String type(tmp,sizeof(tmp), system_charset_info); + char *end; + int decimals, field_length; + + if (wild && wild[0] && + wild_case_compare(system_charset_info, field->field_name,wild)) + continue; + + flags= field->flags; + count++; + /* Get default row, with all NULL fields set to NULL */ + restore_record(table, s->default_values); - if (show_type == SHOW_SYS) +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint col_access; + check_access(thd,SELECT_ACL | EXTRA_ACL, base_name, + &tables->grant.privilege, 0, 0, test(tables->schema_table)); + col_access= get_column_grant(thd, &tables->grant, + base_name, file_name, + field->field_name) & COL_ACLS; + if (lex->orig_sql_command != SQLCOM_SHOW_FIELDS && + !tables->schema_table && !col_access) + continue; + end= tmp; + for (uint bitnr=0; col_access ; col_access>>=1,bitnr++) + { + if (col_access & 1) { - show_type= ((sys_var*) value)->type(); - value= (char*) ((sys_var*) value)->value_ptr(thd, value_type, - &null_lex_str); + *end++=','; + end=strmov(end,grant_types.type_names[bitnr]); } + } + table->field[17]->store(tmp+1,end == tmp ? 
0 : (uint) (end-tmp-1), cs); - pos= end= buff; - switch (show_type) { - case SHOW_LONG: - case SHOW_LONG_CONST: - end= int10_to_str(*(long*) value, buff, 10); - break; - case SHOW_LONGLONG: - end= longlong10_to_str(*(longlong*) value, buff, 10); - break; - case SHOW_HA_ROWS: - end= longlong10_to_str((longlong) *(ha_rows*) value, buff, 10); - break; - case SHOW_BOOL: - end= strmov(buff, *(bool*) value ? "ON" : "OFF"); - break; - case SHOW_MY_BOOL: - end= strmov(buff, *(my_bool*) value ? "ON" : "OFF"); - break; - case SHOW_INT_CONST: - case SHOW_INT: - end= int10_to_str((long) *(uint32*) value, buff, 10); - break; - case SHOW_HAVE: +#endif + table->field[1]->store(base_name, base_name_length, cs); + table->field[2]->store(file_name, file_name_length, cs); + table->field[3]->store(field->field_name, strlen(field->field_name), + cs); + table->field[4]->store((longlong) count, TRUE); + field->sql_type(type); + table->field[14]->store(type.ptr(), type.length(), cs); + tmp_buff= strchr(type.ptr(), '('); + table->field[7]->store(type.ptr(), + (tmp_buff ? 
tmp_buff - type.ptr() : + type.length()), cs); + if (show_table->timestamp_field == field && + field->unireg_check != Field::TIMESTAMP_UN_FIELD) + { + table->field[5]->store(STRING_WITH_LEN("CURRENT_TIMESTAMP"), cs); + table->field[5]->set_notnull(); + } + else if (field->unireg_check != Field::NEXT_NUMBER && + !field->is_null() && + !(field->flags & NO_DEFAULT_VALUE_FLAG)) + { + String def(tmp1,sizeof(tmp1), cs); + type.set(tmp, sizeof(tmp), field->charset()); + field->val_str(&type); + uint dummy_errors; + def.copy(type.ptr(), type.length(), type.charset(), cs, &dummy_errors); + table->field[5]->store(def.ptr(), def.length(), def.charset()); + table->field[5]->set_notnull(); + } + else if (field->unireg_check == Field::NEXT_NUMBER || + lex->orig_sql_command != SQLCOM_SHOW_FIELDS || + field->maybe_null()) + table->field[5]->set_null(); // Null as default + else + { + table->field[5]->store("",0, cs); + table->field[5]->set_notnull(); + } + pos=(byte*) ((flags & NOT_NULL_FLAG) ? "NO" : "YES"); + table->field[6]->store((const char*) pos, + strlen((const char*) pos), cs); + is_blob= (field->type() == FIELD_TYPE_BLOB); + if (field->has_charset() || is_blob || + field->real_type() == MYSQL_TYPE_VARCHAR || // For varbinary type + field->real_type() == MYSQL_TYPE_STRING) // For binary type + { + uint32 octet_max_length= field->max_display_length(); + if (is_blob && octet_max_length != (uint32) 4294967295U) + octet_max_length /= field->charset()->mbmaxlen; + longlong char_max_len= is_blob ? + (longlong) octet_max_length / field->charset()->mbminlen : + (longlong) octet_max_length / field->charset()->mbmaxlen; + table->field[8]->store(char_max_len, TRUE); + table->field[8]->set_notnull(); + table->field[9]->store((longlong) octet_max_length, TRUE); + table->field[9]->set_notnull(); + } + + /* + Calculate field_length and decimals. 
+ They are set to -1 if they should not be set (we should return NULL) + */ + + decimals= field->decimals(); + switch (field->type()) { + case FIELD_TYPE_NEWDECIMAL: + field_length= ((Field_new_decimal*) field)->precision; + break; + case FIELD_TYPE_DECIMAL: + field_length= field->field_length - (decimals ? 2 : 1); + break; + case FIELD_TYPE_TINY: + case FIELD_TYPE_SHORT: + case FIELD_TYPE_LONG: + case FIELD_TYPE_LONGLONG: + case FIELD_TYPE_INT24: + field_length= field->max_display_length() - 1; + break; + case FIELD_TYPE_BIT: + field_length= field->max_display_length(); + decimals= -1; // return NULL + break; + case FIELD_TYPE_FLOAT: + case FIELD_TYPE_DOUBLE: + field_length= field->field_length; + if (decimals == NOT_FIXED_DEC) + decimals= -1; // return NULL + break; + default: + field_length= decimals= -1; + break; + } + + if (field_length >= 0) + { + table->field[10]->store((longlong) field_length, TRUE); + table->field[10]->set_notnull(); + } + if (decimals >= 0) + { + table->field[11]->store((longlong) decimals, TRUE); + table->field[11]->set_notnull(); + } + + if (field->has_charset()) + { + pos=(byte*) field->charset()->csname; + table->field[12]->store((const char*) pos, + strlen((const char*) pos), cs); + table->field[12]->set_notnull(); + pos=(byte*) field->charset()->name; + table->field[13]->store((const char*) pos, + strlen((const char*) pos), cs); + table->field[13]->set_notnull(); + } + pos=(byte*) ((field->flags & PRI_KEY_FLAG) ? "PRI" : + (field->flags & UNIQUE_KEY_FLAG) ? "UNI" : + (field->flags & MULTIPLE_KEY_FLAG) ? 
"MUL":""); + table->field[15]->store((const char*) pos, + strlen((const char*) pos), cs); + + end= tmp; + if (field->unireg_check == Field::NEXT_NUMBER) + end=strmov(tmp,"auto_increment"); + table->field[16]->store(tmp, (uint) (end-tmp), cs); + + table->field[18]->store(field->comment.str, field->comment.length, cs); + if (schema_table_store_record(thd, table)) + DBUG_RETURN(1); + } + DBUG_RETURN(0); +} + + + +int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond) +{ + CHARSET_INFO **cs; + const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS; + TABLE *table= tables->table; + CHARSET_INFO *scs= system_charset_info; + + for (cs= all_charsets ; cs < all_charsets+255 ; cs++) + { + CHARSET_INFO *tmp_cs= cs[0]; + if (tmp_cs && (tmp_cs->state & MY_CS_PRIMARY) && + (tmp_cs->state & MY_CS_AVAILABLE) && + !(wild && wild[0] && + wild_case_compare(scs, tmp_cs->csname,wild))) + { + const char *comment; + restore_record(table, s->default_values); + table->field[0]->store(tmp_cs->csname, strlen(tmp_cs->csname), scs); + table->field[1]->store(tmp_cs->name, strlen(tmp_cs->name), scs); + comment= tmp_cs->comment ? tmp_cs->comment : ""; + table->field[2]->store(comment, strlen(comment), scs); + table->field[3]->store((longlong) tmp_cs->mbmaxlen, TRUE); + if (schema_table_store_record(thd, table)) + return 1; + } + } + return 0; +} + + +int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond) +{ + CHARSET_INFO **cs; + const char *wild= thd->lex->wild ? 
thd->lex->wild->ptr() : NullS; + TABLE *table= tables->table; + CHARSET_INFO *scs= system_charset_info; + for (cs= all_charsets ; cs < all_charsets+255 ; cs++ ) + { + CHARSET_INFO **cl; + CHARSET_INFO *tmp_cs= cs[0]; + if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) || + !(tmp_cs->state & MY_CS_PRIMARY)) + continue; + for (cl= all_charsets; cl < all_charsets+255 ;cl ++) + { + CHARSET_INFO *tmp_cl= cl[0]; + if (!tmp_cl || !(tmp_cl->state & MY_CS_AVAILABLE) || + !my_charset_same(tmp_cs, tmp_cl)) + continue; + if (!(wild && wild[0] && + wild_case_compare(scs, tmp_cl->name,wild))) { - SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) value; - pos= show_comp_option_name[(int) tmp]; - end= strend(pos); - break; + const char *tmp_buff; + restore_record(table, s->default_values); + table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs); + table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs); + table->field[2]->store((longlong) tmp_cl->number, TRUE); + tmp_buff= (tmp_cl->state & MY_CS_PRIMARY) ? "Yes" : ""; + table->field[3]->store(tmp_buff, strlen(tmp_buff), scs); + tmp_buff= (tmp_cl->state & MY_CS_COMPILED)? 
"Yes" : ""; + table->field[4]->store(tmp_buff, strlen(tmp_buff), scs); + table->field[5]->store((longlong) tmp_cl->strxfrm_multiply, TRUE); + if (schema_table_store_record(thd, table)) + return 1; } - case SHOW_CHAR: + } + } + return 0; +} + + +int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond) +{ + CHARSET_INFO **cs; + TABLE *table= tables->table; + CHARSET_INFO *scs= system_charset_info; + for (cs= all_charsets ; cs < all_charsets+255 ; cs++ ) + { + CHARSET_INFO **cl; + CHARSET_INFO *tmp_cs= cs[0]; + if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) || + !(tmp_cs->state & MY_CS_PRIMARY)) + continue; + for (cl= all_charsets; cl < all_charsets+255 ;cl ++) + { + CHARSET_INFO *tmp_cl= cl[0]; + if (!tmp_cl || !(tmp_cl->state & MY_CS_AVAILABLE) || + !my_charset_same(tmp_cs,tmp_cl)) + continue; + restore_record(table, s->default_values); + table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs); + table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs); + if (schema_table_store_record(thd, table)) + return 1; + } + } + return 0; +} + + +bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table, + const char *wild, bool full_access, const char *sp_user) +{ + String tmp_string; + String sp_db, sp_name, definer; + TIME time; + LEX *lex= thd->lex; + CHARSET_INFO *cs= system_charset_info; + get_field(thd->mem_root, proc_table->field[0], &sp_db); + get_field(thd->mem_root, proc_table->field[1], &sp_name); + get_field(thd->mem_root, proc_table->field[11], &definer); + if (!full_access) + full_access= !strcmp(sp_user, definer.ptr()); + if (!full_access && check_some_routine_access(thd, sp_db.ptr(), sp_name.ptr(), + proc_table->field[2]->val_int() == + TYPE_ENUM_PROCEDURE)) + return 0; + + if (lex->orig_sql_command == SQLCOM_SHOW_STATUS_PROC && + proc_table->field[2]->val_int() == TYPE_ENUM_PROCEDURE || + lex->orig_sql_command == SQLCOM_SHOW_STATUS_FUNC && + proc_table->field[2]->val_int() == TYPE_ENUM_FUNCTION || + 
lex->orig_sql_command == SQLCOM_END) + { + restore_record(table, s->default_values); + if (!wild || !wild[0] || !wild_compare(sp_name.ptr(), wild, 0)) + { + int enum_idx= (int) proc_table->field[5]->val_int(); + table->field[3]->store(sp_name.ptr(), sp_name.length(), cs); + get_field(thd->mem_root, proc_table->field[3], &tmp_string); + table->field[0]->store(tmp_string.ptr(), tmp_string.length(), cs); + table->field[2]->store(sp_db.ptr(), sp_db.length(), cs); + get_field(thd->mem_root, proc_table->field[2], &tmp_string); + table->field[4]->store(tmp_string.ptr(), tmp_string.length(), cs); + if (proc_table->field[2]->val_int() == TYPE_ENUM_FUNCTION) { - if (!(pos= value)) - pos= ""; - end= strend(pos); - break; - } - case SHOW_STARTTIME: - nr= (long) (thd->query_start() - start_time); - end= int10_to_str(nr, buff, 10); - break; - case SHOW_QUESTION: - end= int10_to_str((long) thd->query_id, buff, 10); - break; -#ifdef HAVE_REPLICATION - case SHOW_RPL_STATUS: - end= strmov(buff, rpl_status_type[(int)rpl_status]); - break; - case SHOW_SLAVE_RUNNING: + get_field(thd->mem_root, proc_table->field[9], &tmp_string); + table->field[5]->store(tmp_string.ptr(), tmp_string.length(), cs); + table->field[5]->set_notnull(); + } + if (full_access) { - pthread_mutex_lock(&LOCK_active_mi); - end= strmov(buff, (active_mi && active_mi->slave_running && - active_mi->rli.slave_running) ? 
"ON" : "OFF"); - pthread_mutex_unlock(&LOCK_active_mi); - break; + get_field(thd->mem_root, proc_table->field[10], &tmp_string); + table->field[7]->store(tmp_string.ptr(), tmp_string.length(), cs); + table->field[7]->set_notnull(); } - case SHOW_SLAVE_RETRIED_TRANS: + table->field[6]->store(STRING_WITH_LEN("SQL"), cs); + table->field[10]->store(STRING_WITH_LEN("SQL"), cs); + get_field(thd->mem_root, proc_table->field[6], &tmp_string); + table->field[11]->store(tmp_string.ptr(), tmp_string.length(), cs); + table->field[12]->store(sp_data_access_name[enum_idx].str, + sp_data_access_name[enum_idx].length , cs); + get_field(thd->mem_root, proc_table->field[7], &tmp_string); + table->field[14]->store(tmp_string.ptr(), tmp_string.length(), cs); + bzero((char *)&time, sizeof(time)); + ((Field_timestamp *) proc_table->field[12])->get_time(&time); + table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + bzero((char *)&time, sizeof(time)); + ((Field_timestamp *) proc_table->field[13])->get_time(&time); + table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + get_field(thd->mem_root, proc_table->field[14], &tmp_string); + table->field[17]->store(tmp_string.ptr(), tmp_string.length(), cs); + get_field(thd->mem_root, proc_table->field[15], &tmp_string); + table->field[18]->store(tmp_string.ptr(), tmp_string.length(), cs); + table->field[19]->store(definer.ptr(), definer.length(), cs); + return schema_table_store_record(thd, table); + } + } + return 0; +} + + +int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond) +{ + TABLE *proc_table; + TABLE_LIST proc_tables; + const char *wild= thd->lex->wild ? 
thd->lex->wild->ptr() : NullS; + int res= 0; + TABLE *table= tables->table; + bool full_access; + char definer[USER_HOST_BUFF_SIZE]; + Open_tables_state open_tables_state_backup; + DBUG_ENTER("fill_schema_proc"); + + strxmov(definer, thd->security_ctx->priv_user, "@", + thd->security_ctx->priv_host, NullS); + /* We use this TABLE_LIST instance only for checking of privileges. */ + bzero((char*) &proc_tables,sizeof(proc_tables)); + proc_tables.db= (char*) "mysql"; + proc_tables.db_length= 5; + proc_tables.table_name= proc_tables.alias= (char*) "proc"; + proc_tables.table_name_length= 4; + proc_tables.lock_type= TL_READ; + full_access= !check_table_access(thd, SELECT_ACL, &proc_tables, 1); + if (!(proc_table= open_proc_table_for_read(thd, &open_tables_state_backup))) + { + DBUG_RETURN(1); + } + proc_table->file->ha_index_init(0); + if ((res= proc_table->file->index_first(proc_table->record[0]))) + { + res= (res == HA_ERR_END_OF_FILE) ? 0 : 1; + goto err; + } + if (store_schema_proc(thd, table, proc_table, wild, full_access, definer)) + { + res= 1; + goto err; + } + while (!proc_table->file->index_next(proc_table->record[0])) + { + if (store_schema_proc(thd, table, proc_table, wild, full_access, definer)) + { + res= 1; + goto err; + } + } + +err: + proc_table->file->ha_index_end(); + close_proc_table(thd, &open_tables_state_backup); + DBUG_RETURN(res); +} + + +static int get_schema_stat_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + CHARSET_INFO *cs= system_charset_info; + DBUG_ENTER("get_schema_stat_record"); + if (res) + { + if (thd->lex->orig_sql_command != SQLCOM_SHOW_KEYS) + { + /* + I.e. 
we are in SELECT FROM INFORMATION_SCHEMA.STATISTICS + rather than in SHOW KEYS + */ + if (!tables->view) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + res= 0; + } + DBUG_RETURN(res); + } + else if (!tables->view) + { + TABLE *show_table= tables->table; + KEY *key_info=show_table->key_info; + show_table->file->info(HA_STATUS_VARIABLE | + HA_STATUS_NO_LOCK | + HA_STATUS_TIME); + for (uint i=0 ; i < show_table->s->keys ; i++,key_info++) + { + KEY_PART_INFO *key_part= key_info->key_part; + const char *str; + for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) { - /* - TODO: in 5.1 with multimaster, have one such counter per line in SHOW - SLAVE STATUS, and have the sum over all lines here. - */ - pthread_mutex_lock(&LOCK_active_mi); - if (active_mi) + restore_record(table, s->default_values); + table->field[1]->store(base_name, strlen(base_name), cs); + table->field[2]->store(file_name, strlen(file_name), cs); + table->field[3]->store((longlong) ((key_info->flags & + HA_NOSAME) ? 0 : 1), TRUE); + table->field[4]->store(base_name, strlen(base_name), cs); + table->field[5]->store(key_info->name, strlen(key_info->name), cs); + table->field[6]->store((longlong) (j+1), TRUE); + str=(key_part->field ? key_part->field->field_name : + "?unknown field?"); + table->field[7]->store(str, strlen(str), cs); + if (show_table->file->index_flags(i, j, 0) & HA_READ_ORDER) + { + table->field[8]->store(((key_part->key_part_flag & + HA_REVERSE_SORT) ? 
+ "D" : "A"), 1, cs); + table->field[8]->set_notnull(); + } + KEY *key=show_table->key_info+i; + if (key->rec_per_key[j]) + { + ha_rows records=(show_table->file->records / + key->rec_per_key[j]); + table->field[9]->store((longlong) records, TRUE); + table->field[9]->set_notnull(); + } + if (!(key_info->flags & HA_FULLTEXT) && + (key_part->field && + key_part->length != + show_table->field[key_part->fieldnr-1]->key_length())) { - pthread_mutex_lock(&active_mi->rli.data_lock); - end= int10_to_str(active_mi->rli.retried_trans, buff, 10); - pthread_mutex_unlock(&active_mi->rli.data_lock); + table->field[10]->store((longlong) key_part->length / + key_part->field->charset()->mbmaxlen, 1); + table->field[10]->set_notnull(); } - pthread_mutex_unlock(&LOCK_active_mi); - break; + uint flags= key_part->field ? key_part->field->flags : 0; + const char *pos=(char*) ((flags & NOT_NULL_FLAG) ? "" : "YES"); + table->field[12]->store(pos, strlen(pos), cs); + pos= show_table->file->index_type(i); + table->field[13]->store(pos, strlen(pos), cs); + if (!show_table->s->keys_in_use.is_set(i)) + table->field[14]->store(STRING_WITH_LEN("disabled"), cs); + else + table->field[14]->store("", 0, cs); + table->field[14]->set_notnull(); + if (schema_table_store_record(thd, table)) + DBUG_RETURN(1); } -#endif /* HAVE_REPLICATION */ - case SHOW_OPENTABLES: - end= int10_to_str((long) cached_tables(), buff, 10); - break; - case SHOW_CHAR_PTR: + } + } + DBUG_RETURN(res); +} + + +static int get_schema_views_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + CHARSET_INFO *cs= system_charset_info; + DBUG_ENTER("get_schema_views_record"); + char definer[USER_HOST_BUFF_SIZE]; + uint definer_len; + + if (tables->view) + { + Security_context *sctx= thd->security_ctx; + if (!tables->allowed_show) + { + if (!my_strcasecmp(system_charset_info, tables->definer.user.str, + sctx->priv_user) && + !my_strcasecmp(system_charset_info, 
tables->definer.host.str, + sctx->priv_host)) + tables->allowed_show= TRUE; + } + restore_record(table, s->default_values); + table->field[1]->store(tables->view_db.str, tables->view_db.length, cs); + table->field[2]->store(tables->view_name.str, tables->view_name.length, cs); + if (tables->allowed_show) + { + char buff[2048]; + String qwe_str(buff, sizeof(buff), cs); + qwe_str.length(0); + qwe_str.append(STRING_WITH_LEN("/* ")); + append_algorithm(tables, &qwe_str); + qwe_str.append(STRING_WITH_LEN("*/ ")); + qwe_str.append(tables->query.str, tables->query.length); + table->field[3]->store(qwe_str.ptr(), qwe_str.length(), cs); + } + + if (tables->with_check != VIEW_CHECK_NONE) + { + if (tables->with_check == VIEW_CHECK_LOCAL) + table->field[4]->store(STRING_WITH_LEN("LOCAL"), cs); + else + table->field[4]->store(STRING_WITH_LEN("CASCADED"), cs); + } + else + table->field[4]->store(STRING_WITH_LEN("NONE"), cs); + + if (tables->updatable_view) + table->field[5]->store(STRING_WITH_LEN("YES"), cs); + else + table->field[5]->store(STRING_WITH_LEN("NO"), cs); + definer_len= (strxmov(definer, tables->definer.user.str, "@", + tables->definer.host.str, NullS) - definer); + table->field[6]->store(definer, definer_len, cs); + if (tables->view_suid) + table->field[7]->store(STRING_WITH_LEN("DEFINER"), cs); + else + table->field[7]->store(STRING_WITH_LEN("INVOKER"), cs); + if (schema_table_store_record(thd, table)) + DBUG_RETURN(1); + if (res) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + } + if (res) + thd->clear_error(); + DBUG_RETURN(0); +} + + +bool store_constraints(THD *thd, TABLE *table, const char *db, + const char *tname, const char *key_name, + uint key_len, const char *con_type, uint con_len) +{ + CHARSET_INFO *cs= system_charset_info; + restore_record(table, s->default_values); + table->field[1]->store(db, strlen(db), cs); + table->field[2]->store(key_name, key_len, cs); + table->field[3]->store(db, strlen(db), 
cs); + table->field[4]->store(tname, strlen(tname), cs); + table->field[5]->store(con_type, con_len, cs); + return schema_table_store_record(thd, table); +} + + +static int get_schema_constraints_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + DBUG_ENTER("get_schema_constraints_record"); + if (res) + { + if (!tables->view) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + DBUG_RETURN(0); + } + else if (!tables->view) + { + List<FOREIGN_KEY_INFO> f_key_list; + TABLE *show_table= tables->table; + KEY *key_info=show_table->key_info; + uint primary_key= show_table->s->primary_key; + show_table->file->info(HA_STATUS_VARIABLE | + HA_STATUS_NO_LOCK | + HA_STATUS_TIME); + for (uint i=0 ; i < show_table->s->keys ; i++, key_info++) + { + if (i != primary_key && !(key_info->flags & HA_NOSAME)) + continue; + + if (i == primary_key && !strcmp(key_info->name, primary_key_name)) { - if (!(pos= *(char**) value)) - pos= ""; - end= strend(pos); - break; + if (store_constraints(thd, table, base_name, file_name, key_info->name, + strlen(key_info->name), + STRING_WITH_LEN("PRIMARY KEY"))) + DBUG_RETURN(1); } -#ifdef HAVE_OPENSSL - /* First group - functions relying on CTX */ - case SHOW_SSL_CTX_SESS_ACCEPT: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_accept(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_ACCEPT_GOOD: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_accept_good(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_CONNECT_GOOD: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_connect_good(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE: - end= int10_to_str((long) (!ssl_acceptor_fd ? 
0 : - SSL_CTX_sess_accept_renegotiate(ssl_acceptor_fd->ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_CONNECT_RENEGOTIATE: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_connect_renegotiate(ssl_acceptor_fd-> ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_CB_HITS: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_cb_hits(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_HITS: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_hits(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_CACHE_FULL: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_cache_full(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_MISSES: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_misses(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_TIMEOUTS: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_timeouts(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_SESS_NUMBER: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_number(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_SESS_CONNECT: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_connect(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_SESS_GET_CACHE_SIZE: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_get_cache_size(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_GET_VERIFY_MODE: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_get_verify_mode(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_GET_VERIFY_DEPTH: - end= int10_to_str((long) (!ssl_acceptor_fd ? 
0 : - SSL_CTX_get_verify_depth(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_GET_SESSION_CACHE_MODE: - if (!ssl_acceptor_fd) - { - pos= "NONE"; - end= pos+4; - break; - } - switch (SSL_CTX_get_session_cache_mode(ssl_acceptor_fd->ssl_context)) - { - case SSL_SESS_CACHE_OFF: - pos= "OFF"; - break; - case SSL_SESS_CACHE_CLIENT: - pos= "CLIENT"; - break; - case SSL_SESS_CACHE_SERVER: - pos= "SERVER"; - break; - case SSL_SESS_CACHE_BOTH: - pos= "BOTH"; - break; - case SSL_SESS_CACHE_NO_AUTO_CLEAR: - pos= "NO_AUTO_CLEAR"; - break; - case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP: - pos= "NO_INTERNAL_LOOKUP"; - break; - default: - pos= "Unknown"; - break; - } - end= strend(pos); - break; - /* First group - functions relying on SSL */ - case SHOW_SSL_GET_VERSION: - pos= (thd->net.vio->ssl_arg ? - SSL_get_version((SSL*) thd->net.vio->ssl_arg) : ""); - end= strend(pos); - break; - case SHOW_SSL_SESSION_REUSED: - end= int10_to_str((long) (thd->net.vio->ssl_arg ? - SSL_session_reused((SSL*) thd->net.vio-> - ssl_arg) : - 0), - buff, 10); - break; - case SHOW_SSL_GET_DEFAULT_TIMEOUT: - end= int10_to_str((long) (thd->net.vio->ssl_arg ? - SSL_get_default_timeout((SSL*) thd->net.vio-> - ssl_arg) : - 0), - buff, 10); - break; - case SHOW_SSL_GET_VERIFY_MODE: - end= int10_to_str((long) (thd->net.vio->ssl_arg ? - SSL_get_verify_mode((SSL*) thd->net.vio-> - ssl_arg): - 0), - buff, 10); - break; - case SHOW_SSL_GET_VERIFY_DEPTH: - end= int10_to_str((long) (thd->net.vio->ssl_arg ? - SSL_get_verify_depth((SSL*) thd->net.vio-> - ssl_arg): - 0), - buff, 10); - break; - case SHOW_SSL_GET_CIPHER: - pos= (thd->net.vio->ssl_arg ? 
- SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : "" ); - end= strend(pos); - break; - case SHOW_SSL_GET_CIPHER_LIST: - if (thd->net.vio->ssl_arg) - { - char *to= buff; - for (int i=0 ; i++ ;) - { - const char *p= SSL_get_cipher_list((SSL*) thd->net.vio->ssl_arg,i); - if (p == NULL) - break; - to= strmov(to, p); - *to++= ':'; - } - if (to != buff) - to--; // Remove last ':' - end= to; + else if (key_info->flags & HA_NOSAME) + { + if (store_constraints(thd, table, base_name, file_name, key_info->name, + strlen(key_info->name), + STRING_WITH_LEN("UNIQUE"))) + DBUG_RETURN(1); + } + } + + show_table->file->get_foreign_key_list(thd, &f_key_list); + FOREIGN_KEY_INFO *f_key_info; + List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list); + while ((f_key_info=it++)) + { + if (store_constraints(thd, table, base_name, file_name, + f_key_info->forein_id->str, + strlen(f_key_info->forein_id->str), + "FOREIGN KEY", 11)) + DBUG_RETURN(1); + } + } + DBUG_RETURN(res); +} + + +static bool store_trigger(THD *thd, TABLE *table, const char *db, + const char *tname, LEX_STRING *trigger_name, + enum trg_event_type event, + enum trg_action_time_type timing, + LEX_STRING *trigger_stmt, + ulong sql_mode, + LEX_STRING *definer_buffer) +{ + CHARSET_INFO *cs= system_charset_info; + byte *sql_mode_str; + ulong sql_mode_len; + + restore_record(table, s->default_values); + table->field[1]->store(db, strlen(db), cs); + table->field[2]->store(trigger_name->str, trigger_name->length, cs); + table->field[3]->store(trg_event_type_names[event].str, + trg_event_type_names[event].length, cs); + table->field[5]->store(db, strlen(db), cs); + table->field[6]->store(tname, strlen(tname), cs); + table->field[9]->store(trigger_stmt->str, trigger_stmt->length, cs); + table->field[10]->store(STRING_WITH_LEN("ROW"), cs); + table->field[11]->store(trg_action_time_type_names[timing].str, + trg_action_time_type_names[timing].length, cs); + table->field[14]->store(STRING_WITH_LEN("OLD"), cs); + 
table->field[15]->store(STRING_WITH_LEN("NEW"), cs); + + sql_mode_str= + sys_var_thd_sql_mode::symbolic_mode_representation(thd, + sql_mode, + &sql_mode_len); + table->field[17]->store((const char*)sql_mode_str, sql_mode_len, cs); + table->field[18]->store((const char *)definer_buffer->str, definer_buffer->length, cs); + return schema_table_store_record(thd, table); +} + + +static int get_schema_triggers_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + DBUG_ENTER("get_schema_triggers_record"); + /* + res can be non zero value when processed table is a view or + error happened during opening of processed table. + */ + if (res) + { + if (!tables->view) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + DBUG_RETURN(0); + } + if (!tables->view && tables->table->triggers) + { + Table_triggers_list *triggers= tables->table->triggers; + int event, timing; + for (event= 0; event < (int)TRG_EVENT_MAX; event++) + { + for (timing= 0; timing < (int)TRG_ACTION_MAX; timing++) + { + LEX_STRING trigger_name; + LEX_STRING trigger_stmt; + ulong sql_mode; + char definer_holder[USER_HOST_BUFF_SIZE]; + LEX_STRING definer_buffer; + definer_buffer.str= definer_holder; + if (triggers->get_trigger_info(thd, (enum trg_event_type) event, + (enum trg_action_time_type)timing, + &trigger_name, &trigger_stmt, + &sql_mode, + &definer_buffer)) + continue; + + if (store_trigger(thd, table, base_name, file_name, &trigger_name, + (enum trg_event_type) event, + (enum trg_action_time_type) timing, &trigger_stmt, + sql_mode, + &definer_buffer)) + DBUG_RETURN(1); + } + } + } + DBUG_RETURN(0); +} + + +void store_key_column_usage(TABLE *table, const char*db, const char *tname, + const char *key_name, uint key_len, + const char *con_type, uint con_len, longlong idx) +{ + CHARSET_INFO *cs= system_charset_info; + table->field[1]->store(db, strlen(db), cs); + 
table->field[2]->store(key_name, key_len, cs); + table->field[4]->store(db, strlen(db), cs); + table->field[5]->store(tname, strlen(tname), cs); + table->field[6]->store(con_type, con_len, cs); + table->field[7]->store((longlong) idx, TRUE); +} + + +static int get_schema_key_column_usage_record(THD *thd, + struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + DBUG_ENTER("get_schema_key_column_usage_record"); + if (res) + { + if (!tables->view) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + DBUG_RETURN(0); + } + else if (!tables->view) + { + List<FOREIGN_KEY_INFO> f_key_list; + TABLE *show_table= tables->table; + KEY *key_info=show_table->key_info; + uint primary_key= show_table->s->primary_key; + show_table->file->info(HA_STATUS_VARIABLE | + HA_STATUS_NO_LOCK | + HA_STATUS_TIME); + for (uint i=0 ; i < show_table->s->keys ; i++, key_info++) + { + if (i != primary_key && !(key_info->flags & HA_NOSAME)) + continue; + uint f_idx= 0; + KEY_PART_INFO *key_part= key_info->key_part; + for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) + { + if (key_part->field) + { + f_idx++; + restore_record(table, s->default_values); + store_key_column_usage(table, base_name, file_name, + key_info->name, + strlen(key_info->name), + key_part->field->field_name, + strlen(key_part->field->field_name), + (longlong) f_idx); + if (schema_table_store_record(thd, table)) + DBUG_RETURN(1); } - break; + } + } -#endif /* HAVE_OPENSSL */ - case SHOW_KEY_CACHE_LONG: - case SHOW_KEY_CACHE_CONST_LONG: - value= (value-(char*) &dflt_key_cache_var)+ (char*) dflt_key_cache; - end= int10_to_str(*(long*) value, buff, 10); - break; - case SHOW_KEY_CACHE_LONGLONG: - value= (value-(char*) &dflt_key_cache_var)+ (char*) dflt_key_cache; - end= longlong10_to_str(*(longlong*) value, buff, 10); - break; - case SHOW_UNDEF: // Show never happen - case SHOW_SYS: - break; // Return empty 
string - default: - break; + show_table->file->get_foreign_key_list(thd, &f_key_list); + FOREIGN_KEY_INFO *f_key_info; + List_iterator_fast<FOREIGN_KEY_INFO> fkey_it(f_key_list); + while ((f_key_info= fkey_it++)) + { + LEX_STRING *f_info; + LEX_STRING *r_info; + List_iterator_fast<LEX_STRING> it(f_key_info->foreign_fields), + it1(f_key_info->referenced_fields); + uint f_idx= 0; + while ((f_info= it++)) + { + r_info= it1++; + f_idx++; + restore_record(table, s->default_values); + store_key_column_usage(table, base_name, file_name, + f_key_info->forein_id->str, + f_key_info->forein_id->length, + f_info->str, f_info->length, + (longlong) f_idx); + table->field[8]->store((longlong) f_idx, TRUE); + table->field[8]->set_notnull(); + table->field[9]->store(f_key_info->referenced_db->str, + f_key_info->referenced_db->length, + system_charset_info); + table->field[9]->set_notnull(); + table->field[10]->store(f_key_info->referenced_table->str, + f_key_info->referenced_table->length, + system_charset_info); + table->field[10]->set_notnull(); + table->field[11]->store(r_info->str, r_info->length, + system_charset_info); + table->field[11]->set_notnull(); + if (schema_table_store_record(thd, table)) + DBUG_RETURN(1); } - if (protocol->store(pos, (uint32) (end - pos), system_charset_info) || - protocol->write()) - goto err; /* purecov: inspected */ } } - pthread_mutex_unlock(mutex); - send_eof(thd); + DBUG_RETURN(res); +} + + +int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond) +{ + DBUG_ENTER("fill_open_tables"); + const char *wild= thd->lex->wild ? 
thd->lex->wild->ptr() : NullS; + TABLE *table= tables->table; + CHARSET_INFO *cs= system_charset_info; + OPEN_TABLE_LIST *open_list; + if (!(open_list=list_open_tables(thd,thd->lex->select_lex.db, wild)) + && thd->is_fatal_error) + DBUG_RETURN(1); + + for (; open_list ; open_list=open_list->next) + { + restore_record(table, s->default_values); + table->field[0]->store(open_list->db, strlen(open_list->db), cs); + table->field[1]->store(open_list->table, strlen(open_list->table), cs); + table->field[2]->store((longlong) open_list->in_use, TRUE); + table->field[3]->store((longlong) open_list->locked, TRUE); + if (schema_table_store_record(thd, table)) + DBUG_RETURN(1); + } DBUG_RETURN(0); +} + + +int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond) +{ + DBUG_ENTER("fill_variables"); + int res= 0; + LEX *lex= thd->lex; + const char *wild= lex->wild ? lex->wild->ptr() : NullS; + pthread_mutex_lock(&LOCK_global_system_variables); + res= show_status_array(thd, wild, init_vars, + lex->option_type, 0, "", tables->table); + pthread_mutex_unlock(&LOCK_global_system_variables); + DBUG_RETURN(res); +} + + +int fill_status(THD *thd, TABLE_LIST *tables, COND *cond) +{ + DBUG_ENTER("fill_status"); + LEX *lex= thd->lex; + const char *wild= lex->wild ? lex->wild->ptr() : NullS; + int res= 0; + STATUS_VAR tmp; + ha_update_statistics(); /* Export engines statistics */ + pthread_mutex_lock(&LOCK_status); + if (lex->option_type == OPT_GLOBAL) + calc_sum_of_all_status(&tmp); + res= show_status_array(thd, wild, status_vars, OPT_GLOBAL, + (lex->option_type == OPT_GLOBAL ? 
+ &tmp: &thd->status_var), "",tables->table); + pthread_mutex_unlock(&LOCK_status); + DBUG_RETURN(res); +} + + +/* + Find schema_tables elment by name + + SYNOPSIS + find_schema_table() + thd thread handler + table_name table name + + RETURN + 0 table not found + # pointer to 'shema_tables' element +*/ + +ST_SCHEMA_TABLE *find_schema_table(THD *thd, const char* table_name) +{ + ST_SCHEMA_TABLE *schema_table= schema_tables; + for (; schema_table->table_name; schema_table++) + { + if (!my_strcasecmp(system_charset_info, + schema_table->table_name, + table_name)) + return schema_table; + } + return 0; +} - err: - pthread_mutex_unlock(mutex); - DBUG_RETURN(1); + +ST_SCHEMA_TABLE *get_schema_table(enum enum_schema_tables schema_table_idx) +{ + return &schema_tables[schema_table_idx]; } -#ifdef __GNUC__ + +/* + Create information_schema table using schema_table data + + SYNOPSIS + create_schema_table() + thd thread handler + schema_table pointer to 'shema_tables' element + + RETURN + # Pointer to created table + 0 Can't create table +*/ + +TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list) +{ + int field_count= 0; + Item *item; + TABLE *table; + List<Item> field_list; + ST_SCHEMA_TABLE *schema_table= table_list->schema_table; + ST_FIELD_INFO *fields_info= schema_table->fields_info; + CHARSET_INFO *cs= system_charset_info; + DBUG_ENTER("create_schema_table"); + + for (; fields_info->field_name; fields_info++) + { + switch (fields_info->field_type) { + case MYSQL_TYPE_LONG: + if (!(item= new Item_int(fields_info->field_name, + fields_info->value, + fields_info->field_length))) + { + DBUG_RETURN(0); + } + break; + case MYSQL_TYPE_TIMESTAMP: + if (!(item=new Item_datetime(fields_info->field_name))) + { + DBUG_RETURN(0); + } + break; + default: + /* this should be changed when Item_empty_string is fixed(in 4.1) */ + if (!(item= new Item_empty_string("", 0, cs))) + { + DBUG_RETURN(0); + } + item->max_length= fields_info->field_length * cs->mbmaxlen; + 
item->set_name(fields_info->field_name, + strlen(fields_info->field_name), cs); + break; + } + field_list.push_back(item); + item->maybe_null= fields_info->maybe_null; + field_count++; + } + TMP_TABLE_PARAM *tmp_table_param = + (TMP_TABLE_PARAM*) (thd->calloc(sizeof(TMP_TABLE_PARAM))); + tmp_table_param->init(); + tmp_table_param->table_charset= cs; + tmp_table_param->field_count= field_count; + tmp_table_param->schema_table= 1; + SELECT_LEX *select_lex= thd->lex->current_select; + if (!(table= create_tmp_table(thd, tmp_table_param, + field_list, (ORDER*) 0, 0, 0, + (select_lex->options | thd->options | + TMP_TABLE_ALL_COLUMNS), + HA_POS_ERROR, table_list->alias))) + DBUG_RETURN(0); + table_list->schema_table_param= tmp_table_param; + DBUG_RETURN(table); +} + + +/* + For old SHOW compatibility. It is used when + old SHOW doesn't have generated column names + Make list of fields for SHOW + + SYNOPSIS + make_old_format() + thd thread handler + schema_table pointer to 'schema_tables' element + + RETURN + -1 errror + 0 success +*/ + +int make_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + ST_FIELD_INFO *field_info= schema_table->fields_info; + Name_resolution_context *context= &thd->lex->select_lex.context; + for (; field_info->field_name; field_info++) + { + if (field_info->old_name) + { + Item_field *field= new Item_field(context, + NullS, NullS, field_info->field_name); + if (field) + { + field->set_name(field_info->old_name, + strlen(field_info->old_name), + system_charset_info); + if (add_item_to_list(thd, field)) + return 1; + } + } + } + return 0; +} + + +int make_schemata_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + char tmp[128]; + LEX *lex= thd->lex; + SELECT_LEX *sel= lex->current_select; + Name_resolution_context *context= &sel->context; + + if (!sel->item_list.elements) + { + ST_FIELD_INFO *field_info= &schema_table->fields_info[1]; + String buffer(tmp,sizeof(tmp), system_charset_info); + Item_field *field= new Item_field(context, + 
NullS, NullS, field_info->field_name); + if (!field || add_item_to_list(thd, field)) + return 1; + buffer.length(0); + buffer.append(field_info->old_name); + if (lex->wild && lex->wild->ptr()) + { + buffer.append(STRING_WITH_LEN(" (")); + buffer.append(lex->wild->ptr()); + buffer.append(')'); + } + field->set_name(buffer.ptr(), buffer.length(), system_charset_info); + } + return 0; +} + + +int make_table_names_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + char tmp[128]; + String buffer(tmp,sizeof(tmp), thd->charset()); + LEX *lex= thd->lex; + Name_resolution_context *context= &lex->select_lex.context; + + ST_FIELD_INFO *field_info= &schema_table->fields_info[2]; + buffer.length(0); + buffer.append(field_info->old_name); + buffer.append(lex->select_lex.db); + if (lex->wild && lex->wild->ptr()) + { + buffer.append(STRING_WITH_LEN(" (")); + buffer.append(lex->wild->ptr()); + buffer.append(')'); + } + Item_field *field= new Item_field(context, + NullS, NullS, field_info->field_name); + if (add_item_to_list(thd, field)) + return 1; + field->set_name(buffer.ptr(), buffer.length(), system_charset_info); + if (thd->lex->verbose) + { + field->set_name(buffer.ptr(), buffer.length(), system_charset_info); + field_info= &schema_table->fields_info[3]; + field= new Item_field(context, NullS, NullS, field_info->field_name); + if (add_item_to_list(thd, field)) + return 1; + field->set_name(field_info->old_name, strlen(field_info->old_name), + system_charset_info); + } + return 0; +} + + +int make_columns_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + int fields_arr[]= {3, 14, 13, 6, 15, 5, 16, 17, 18, -1}; + int *field_num= fields_arr; + ST_FIELD_INFO *field_info; + Name_resolution_context *context= &thd->lex->select_lex.context; + + for (; *field_num >= 0; field_num++) + { + field_info= &schema_table->fields_info[*field_num]; + if (!thd->lex->verbose && (*field_num == 13 || + *field_num == 17 || + *field_num == 18)) + continue; + Item_field *field= new 
Item_field(context, + NullS, NullS, field_info->field_name); + if (field) + { + field->set_name(field_info->old_name, + strlen(field_info->old_name), + system_charset_info); + if (add_item_to_list(thd, field)) + return 1; + } + } + return 0; +} + + +int make_character_sets_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + int fields_arr[]= {0, 2, 1, 3, -1}; + int *field_num= fields_arr; + ST_FIELD_INFO *field_info; + Name_resolution_context *context= &thd->lex->select_lex.context; + + for (; *field_num >= 0; field_num++) + { + field_info= &schema_table->fields_info[*field_num]; + Item_field *field= new Item_field(context, + NullS, NullS, field_info->field_name); + if (field) + { + field->set_name(field_info->old_name, + strlen(field_info->old_name), + system_charset_info); + if (add_item_to_list(thd, field)) + return 1; + } + } + return 0; +} + + +int make_proc_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + int fields_arr[]= {2, 3, 4, 19, 16, 15, 14, 18, -1}; + int *field_num= fields_arr; + ST_FIELD_INFO *field_info; + Name_resolution_context *context= &thd->lex->select_lex.context; + + for (; *field_num >= 0; field_num++) + { + field_info= &schema_table->fields_info[*field_num]; + Item_field *field= new Item_field(context, + NullS, NullS, field_info->field_name); + if (field) + { + field->set_name(field_info->old_name, + strlen(field_info->old_name), + system_charset_info); + if (add_item_to_list(thd, field)) + return 1; + } + } + return 0; +} + + +/* + Create information_schema table + + SYNOPSIS + mysql_schema_table() + thd thread handler + lex pointer to LEX + table_list pointer to table_list + + RETURN + 0 success + 1 error +*/ + +int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list) +{ + TABLE *table; + DBUG_ENTER("mysql_schema_table"); + if (!(table= table_list->schema_table->create_table(thd, table_list))) + { + DBUG_RETURN(1); + } + table->s->tmp_table= SYSTEM_TMP_TABLE; + table->grant.privilege= SELECT_ACL; + /* + This test 
is necessary to make + case insensitive file systems + + upper case table names(information schema tables) + + views + working correctly + */ + if (table_list->schema_table_name) + table->alias_name_used= my_strcasecmp(table_alias_charset, + table_list->schema_table_name, + table_list->alias); + table_list->table_name= (char*) table->s->table_name; + table_list->table_name_length= strlen(table->s->table_name); + table_list->table= table; + table->next= thd->derived_tables; + thd->derived_tables= table; + table_list->select_lex->options |= OPTION_SCHEMA_TABLE; + lex->safe_to_cache_query= 0; + + if (table_list->schema_table_reformed) // show command + { + SELECT_LEX *sel= lex->current_select; + Item *item; + Field_translator *transl, *org_transl; + + if (table_list->field_translation) + { + Field_translator *end= table_list->field_translation_end; + for (transl= table_list->field_translation; transl < end; transl++) + { + if (!transl->item->fixed && + transl->item->fix_fields(thd, &transl->item)) + DBUG_RETURN(1); + } + DBUG_RETURN(0); + } + List_iterator_fast<Item> it(sel->item_list); + if (!(transl= + (Field_translator*)(thd->stmt_arena-> + alloc(sel->item_list.elements * + sizeof(Field_translator))))) + { + DBUG_RETURN(1); + } + for (org_transl= transl; (item= it++); transl++) + { + transl->item= item; + transl->name= item->name; + if (!item->fixed && item->fix_fields(thd, &transl->item)) + { + DBUG_RETURN(1); + } + } + table_list->field_translation= org_transl; + table_list->field_translation_end= transl; + } + + DBUG_RETURN(0); +} + + +/* + Generate select from information_schema table + + SYNOPSIS + make_schema_select() + thd thread handler + sel pointer to SELECT_LEX + schema_table_idx index of 'schema_tables' element + + RETURN + 0 success + 1 error +*/ + +int make_schema_select(THD *thd, SELECT_LEX *sel, + enum enum_schema_tables schema_table_idx) +{ + ST_SCHEMA_TABLE *schema_table= get_schema_table(schema_table_idx); + LEX_STRING db, table; + 
DBUG_ENTER("mysql_schema_select"); + /* + We have to make non const db_name & table_name + because of lower_case_table_names + */ + make_lex_string(thd, &db, information_schema_name.str, + information_schema_name.length, 0); + make_lex_string(thd, &table, schema_table->table_name, + strlen(schema_table->table_name), 0); + if (schema_table->old_format(thd, schema_table) || /* Handle old syntax */ + !sel->add_table_to_list(thd, new Table_ident(thd, db, table, 0), + 0, 0, TL_READ, (List<String> *) 0, + (List<String> *) 0)) + { + DBUG_RETURN(1); + } + DBUG_RETURN(0); +} + + +/* + Fill temporary schema tables before SELECT + + SYNOPSIS + get_schema_tables_result() + join join which use schema tables + executed_place place where I_S table processed + + RETURN + FALSE success + TRUE error +*/ + +bool get_schema_tables_result(JOIN *join, + enum enum_schema_table_state executed_place) +{ + JOIN_TAB *tmp_join_tab= join->join_tab+join->tables; + THD *thd= join->thd; + LEX *lex= thd->lex; + bool result= 0; + DBUG_ENTER("get_schema_tables_result"); + + thd->no_warnings_for_error= 1; + for (JOIN_TAB *tab= join->join_tab; tab < tmp_join_tab; tab++) + { + if (!tab->table || !tab->table->pos_in_table_list) + break; + + TABLE_LIST *table_list= tab->table->pos_in_table_list; + if (table_list->schema_table && thd->fill_derived_tables()) + { + bool is_subselect= (&lex->unit != lex->current_select->master_unit() && + lex->current_select->master_unit()->item); + /* + If schema table is already processed and + the statement is not a subselect then + we don't need to fill this table again. + If schema table is already processed and + schema_table_state != executed_place then + table is already processed and + we should skip second data processing. 
+ */ + if (table_list->schema_table_state && + (!is_subselect || table_list->schema_table_state != executed_place)) + continue; + + /* + if table is used in a subselect and + table has been processed earlier with the same + 'executed_place' value then we should refresh the table. + */ + if (table_list->schema_table_state && is_subselect) + { + table_list->table->file->extra(HA_EXTRA_NO_CACHE); + table_list->table->file->extra(HA_EXTRA_RESET_STATE); + table_list->table->file->delete_all_rows(); + free_io_cache(table_list->table); + filesort_free_buffers(table_list->table,1); + table_list->table->null_row= 0; + } + else + table_list->table->file->records= 0; + + if (table_list->schema_table->fill_table(thd, table_list, + tab->select_cond)) + { + result= 1; + join->error= 1; + table_list->schema_table_state= executed_place; + break; + } + table_list->schema_table_state= executed_place; + } + } + thd->no_warnings_for_error= 0; + DBUG_RETURN(result); +} + + +ST_FIELD_INFO schema_fields_info[]= +{ + {"CATALOG_NAME", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"SCHEMA_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Database"}, + {"DEFAULT_CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, 0}, + {"DEFAULT_COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 0, 0}, + {"SQL_PATH", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO tables_fields_info[]= +{ + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, + {"TABLE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ENGINE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Engine"}, + {"VERSION", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Version"}, + {"ROW_FORMAT", 10, MYSQL_TYPE_STRING, 0, 1, "Row_format"}, + {"TABLE_ROWS", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Rows"}, + {"AVG_ROW_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + 
"Avg_row_length"}, + {"DATA_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Data_length"}, + {"MAX_DATA_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Max_data_length"}, + {"INDEX_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Index_length"}, + {"DATA_FREE", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Data_free"}, + {"AUTO_INCREMENT", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + "Auto_increment"}, + {"CREATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Create_time"}, + {"UPDATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Update_time"}, + {"CHECK_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Check_time"}, + {"TABLE_COLLATION", 64, MYSQL_TYPE_STRING, 0, 1, "Collation"}, + {"CHECKSUM", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, "Checksum"}, + {"CREATE_OPTIONS", 255, MYSQL_TYPE_STRING, 0, 1, "Create_options"}, + {"TABLE_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, "Comment"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO columns_fields_info[]= +{ + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Field"}, + {"ORDINAL_POSITION", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 0, 0}, + {"COLUMN_DEFAULT", MAX_FIELD_VARCHARLENGTH, MYSQL_TYPE_STRING, 0, 1, "Default"}, + {"IS_NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null"}, + {"DATA_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CHARACTER_MAXIMUM_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, + 0}, + {"CHARACTER_OCTET_LENGTH", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, 0}, + {"NUMERIC_PRECISION", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, 0}, + {"NUMERIC_SCALE", MY_INT64_NUM_DECIMAL_DIGITS , MYSQL_TYPE_LONG, 0, 1, 0}, + {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 1, 0}, + {"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 1, "Collation"}, + 
{"COLUMN_TYPE", 65535, MYSQL_TYPE_STRING, 0, 0, "Type"}, + {"COLUMN_KEY", 3, MYSQL_TYPE_STRING, 0, 0, "Key"}, + {"EXTRA", 20, MYSQL_TYPE_STRING, 0, 0, "Extra"}, + {"PRIVILEGES", 80, MYSQL_TYPE_STRING, 0, 0, "Privileges"}, + {"COLUMN_COMMENT", 255, MYSQL_TYPE_STRING, 0, 0, "Comment"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO charsets_fields_info[]= +{ + {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Charset"}, + {"DEFAULT_COLLATE_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Default collation"}, + {"DESCRIPTION", 60, MYSQL_TYPE_STRING, 0, 0, "Description"}, + {"MAXLEN", 3 ,MYSQL_TYPE_LONG, 0, 0, "Maxlen"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO collation_fields_info[]= +{ + {"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Collation"}, + {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Charset"}, + {"ID", MY_INT32_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 0, "Id"}, + {"IS_DEFAULT", 3, MYSQL_TYPE_STRING, 0, 0, "Default"}, + {"IS_COMPILED", 3, MYSQL_TYPE_STRING, 0, 0, "Compiled"}, + {"SORTLEN", 3 ,MYSQL_TYPE_LONG, 0, 0, "Sortlen"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO coll_charset_app_fields_info[]= +{ + {"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO proc_fields_info[]= +{ + {"SPECIFIC_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ROUTINE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"ROUTINE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Db"}, + {"ROUTINE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, + {"ROUTINE_TYPE", 9, MYSQL_TYPE_STRING, 0, 0, "Type"}, + {"DTD_IDENTIFIER", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"ROUTINE_BODY", 8, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ROUTINE_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 1, 0}, + {"EXTERNAL_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"EXTERNAL_LANGUAGE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"PARAMETER_STYLE", 8, 
MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_DETERMINISTIC", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {"SQL_DATA_ACCESS", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"SQL_PATH", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"SECURITY_TYPE", 7, MYSQL_TYPE_STRING, 0, 0, "Security_type"}, + {"CREATED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, "Created"}, + {"LAST_ALTERED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, "Modified"}, + {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ROUTINE_COMMENT", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Comment"}, + {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO stat_fields_info[]= +{ + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, + {"NON_UNIQUE", 1, MYSQL_TYPE_LONG, 0, 0, "Non_unique"}, + {"INDEX_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"INDEX_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Key_name"}, + {"SEQ_IN_INDEX", 2, MYSQL_TYPE_LONG, 0, 0, "Seq_in_index"}, + {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Column_name"}, + {"COLLATION", 1, MYSQL_TYPE_STRING, 0, 1, "Collation"}, + {"CARDINALITY", MY_INT64_NUM_DECIMAL_DIGITS, MYSQL_TYPE_LONG, 0, 1, "Cardinality"}, + {"SUB_PART", 3, MYSQL_TYPE_LONG, 0, 1, "Sub_part"}, + {"PACKED", 10, MYSQL_TYPE_STRING, 0, 1, "Packed"}, + {"NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null"}, + {"INDEX_TYPE", 16, MYSQL_TYPE_STRING, 0, 0, "Index_type"}, + {"COMMENT", 16, MYSQL_TYPE_STRING, 0, 1, "Comment"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO view_fields_info[]= +{ + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"VIEW_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CHECK_OPTION", 8, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_UPDATABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {"DEFINER", 77, 
MYSQL_TYPE_STRING, 0, 0, 0}, + {"SECURITY_TYPE", 7, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO user_privileges_fields_info[]= +{ + {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO schema_privileges_fields_info[]= +{ + {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO table_privileges_fields_info[]= +{ + {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO column_privileges_fields_info[]= +{ + {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO table_constraints_fields_info[]= +{ + {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"CONSTRAINT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_SCHEMA", NAME_LEN, 
MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO key_column_usage_fields_info[]= +{ + {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"CONSTRAINT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ORDINAL_POSITION", 10 ,MYSQL_TYPE_LONG, 0, 0, 0}, + {"POSITION_IN_UNIQUE_CONSTRAINT", 10 ,MYSQL_TYPE_LONG, 0, 1, 0}, + {"REFERENCED_TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"REFERENCED_TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"REFERENCED_COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO table_names_fields_info[]= +{ + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Tables_in_"}, + {"TABLE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table_type"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO open_tables_fields_info[]= +{ + {"Database", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Database"}, + {"Table",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, + {"In_use", 1, MYSQL_TYPE_LONG, 0, 0, "In_use"}, + {"Name_locked", 4, MYSQL_TYPE_LONG, 0, 0, "Name_locked"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO triggers_fields_info[]= +{ + {"TRIGGER_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TRIGGER_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TRIGGER_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Trigger"}, + {"EVENT_MANIPULATION", 6, MYSQL_TYPE_STRING, 0, 0, "Event"}, + 
{"EVENT_OBJECT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"EVENT_OBJECT_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"EVENT_OBJECT_TABLE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, + {"ACTION_ORDER", 4, MYSQL_TYPE_LONG, 0, 0, 0}, + {"ACTION_CONDITION", 65535, MYSQL_TYPE_STRING, 0, 1, 0}, + {"ACTION_STATEMENT", 65535, MYSQL_TYPE_STRING, 0, 0, "Statement"}, + {"ACTION_ORIENTATION", 9, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ACTION_TIMING", 6, MYSQL_TYPE_STRING, 0, 0, "Timing"}, + {"ACTION_REFERENCE_OLD_TABLE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"ACTION_REFERENCE_NEW_TABLE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"ACTION_REFERENCE_OLD_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ACTION_REFERENCE_NEW_ROW", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CREATED", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Created"}, + {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, "sql_mode"}, + {"DEFINER", 65535, MYSQL_TYPE_STRING, 0, 0, "Definer"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO variables_fields_info[]= +{ + {"Variable_name", 80, MYSQL_TYPE_STRING, 0, 0, "Variable_name"}, + {"Value", FN_REFLEN, MYSQL_TYPE_STRING, 0, 0, "Value"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +/* + Description of ST_FIELD_INFO in table.h +*/ + +ST_SCHEMA_TABLE schema_tables[]= +{ + {"CHARACTER_SETS", charsets_fields_info, create_schema_table, + fill_schema_charsets, make_character_sets_old_format, 0, -1, -1, 0}, + {"COLLATIONS", collation_fields_info, create_schema_table, + fill_schema_collation, make_old_format, 0, -1, -1, 0}, + {"COLLATION_CHARACTER_SET_APPLICABILITY", coll_charset_app_fields_info, + create_schema_table, fill_schema_coll_charset_app, 0, 0, -1, -1, 0}, + {"COLUMNS", columns_fields_info, create_schema_table, + get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2, 0}, + {"COLUMN_PRIVILEGES", column_privileges_fields_info, create_schema_table, + fill_schema_column_privileges, 0, 0, -1, -1, 0}, + {"KEY_COLUMN_USAGE", key_column_usage_fields_info, 
create_schema_table, + get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0}, + {"OPEN_TABLES", open_tables_fields_info, create_schema_table, + fill_open_tables, make_old_format, 0, -1, -1, 1}, + {"ROUTINES", proc_fields_info, create_schema_table, + fill_schema_proc, make_proc_old_format, 0, -1, -1, 0}, + {"SCHEMATA", schema_fields_info, create_schema_table, + fill_schema_shemata, make_schemata_old_format, 0, 1, -1, 0}, + {"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table, + fill_schema_schema_privileges, 0, 0, -1, -1, 0}, + {"STATISTICS", stat_fields_info, create_schema_table, + get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0}, + {"STATUS", variables_fields_info, create_schema_table, fill_status, + make_old_format, 0, -1, -1, 1}, + {"TABLES", tables_fields_info, create_schema_table, + get_all_tables, make_old_format, get_schema_tables_record, 1, 2, 0}, + {"TABLE_CONSTRAINTS", table_constraints_fields_info, create_schema_table, + get_all_tables, 0, get_schema_constraints_record, 3, 4, 0}, + {"TABLE_NAMES", table_names_fields_info, create_schema_table, + get_all_tables, make_table_names_old_format, 0, 1, 2, 1}, + {"TABLE_PRIVILEGES", table_privileges_fields_info, create_schema_table, + fill_schema_table_privileges, 0, 0, -1, -1, 0}, + {"TRIGGERS", triggers_fields_info, create_schema_table, + get_all_tables, make_old_format, get_schema_triggers_record, 5, 6, 0}, + {"USER_PRIVILEGES", user_privileges_fields_info, create_schema_table, + fill_schema_user_privileges, 0, 0, -1, -1, 0}, + {"VARIABLES", variables_fields_info, create_schema_table, fill_variables, + make_old_format, 0, -1, -1, 1}, + {"VIEWS", view_fields_info, create_schema_table, + get_all_tables, 0, get_schema_views_record, 1, 2, 0}, + {0, 0, 0, 0, 0, 0, 0, 0, 0} +}; + + +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class List_iterator_fast<char>; template class List<char>; #endif diff --git a/sql/sql_sort.h b/sql/sql_sort.h index 
9f95ffa4884..da28ca07e2c 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -78,3 +77,4 @@ int merge_buffers(SORTPARAM *param,IO_CACHE *from_file, IO_CACHE *to_file, uchar *sort_buffer, BUFFPEK *lastbuff,BUFFPEK *Fb, BUFFPEK *Tb,int flag); +void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length); diff --git a/sql/sql_state.c b/sql/sql_state.c index 355b847f239..511dc65917b 100644 --- a/sql/sql_state.c +++ b/sql/sql_state.c @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/sql_string.cc b/sql/sql_string.cc index aaa85b0d96c..9d7df73cd7a 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -618,27 +617,26 @@ skip: } /* -** replace substring with string -** If wrong parameter or not enough memory, do nothing + Replace substring with string + If wrong parameter or not enough memory, do nothing */ - bool String::replace(uint32 offset,uint32 arg_length,const String &to) { return replace(offset,arg_length,to.ptr(),to.length()); } bool String::replace(uint32 offset,uint32 arg_length, - const char *to,uint32 length) + const char *to, uint32 to_length) { - long diff = (long) length-(long) arg_length; + long diff = (long) to_length-(long) arg_length; if (offset+arg_length <= str_length) { if (diff < 0) { - if (length) - memcpy(Ptr+offset,to,length); - bmove(Ptr+offset+length,Ptr+offset+arg_length, + if (to_length) + memcpy(Ptr+offset,to,to_length); + bmove(Ptr+offset+to_length,Ptr+offset+arg_length, str_length-offset-arg_length); } else @@ -650,8 +648,8 @@ bool String::replace(uint32 offset,uint32 arg_length, bmove_upp(Ptr+str_length+diff,Ptr+str_length, str_length-offset-arg_length); } - if (length) - memcpy(Ptr+offset,to,length); + if (to_length) + memcpy(Ptr+offset,to,to_length); } str_length+=(uint32) diff; } @@ -689,6 +687,19 @@ void String::qs_append(double *d) qs_append(ld); } +void String::qs_append(int i) +{ + char *buff= Ptr + str_length; + char *end= int10_to_str(i, buff, -10); + str_length+= (int) (end-buff); +} + +void String::qs_append(uint i) +{ + char *buff= Ptr + str_length; + char *end= int10_to_str(i, buff, 10); + str_length+= (int) (end-buff); +} /* Compare strings according to collation, without end space. 
@@ -712,8 +723,8 @@ void String::qs_append(double *d) int sortcmp(const String *s,const String *t, CHARSET_INFO *cs) { return cs->coll->strnncollsp(cs, - (unsigned char *) s->ptr(),s->length(), - (unsigned char *) t->ptr(),t->length()); + (unsigned char *) s->ptr(),s->length(), + (unsigned char *) t->ptr(),t->length(), 0); } @@ -841,6 +852,162 @@ outp: } +/* + copy a string, + with optional character set conversion, + with optional left padding (for binary -> UCS2 conversion) + + SYNOPSIS + well_formed_copy_nhars() + to Store result here + to_length Maxinum length of "to" string + to_cs Character set of "to" string + from Copy from here + from_length Length of from string + from_cs From character set + nchars Copy not more that nchars characters + well_formed_error_pos Return position when "from" is not well formed + or NULL otherwise. + cannot_convert_error_pos Return position where a not convertable + character met, or NULL otherwise. + from_end_pos Return position where scanning of "from" + string stopped. 
+ NOTES + + RETURN + length of bytes copied to 'to' +*/ + + +uint32 +well_formed_copy_nchars(CHARSET_INFO *to_cs, + char *to, uint to_length, + CHARSET_INFO *from_cs, + const char *from, uint from_length, + uint nchars, + const char **well_formed_error_pos, + const char **cannot_convert_error_pos, + const char **from_end_pos) +{ + uint res; + + if ((to_cs == &my_charset_bin) || + (from_cs == &my_charset_bin) || + (to_cs == from_cs) || + my_charset_same(from_cs, to_cs)) + { + if (to_length < to_cs->mbminlen || !nchars) + { + *from_end_pos= from; + *cannot_convert_error_pos= NULL; + *well_formed_error_pos= NULL; + return 0; + } + + if (to_cs == &my_charset_bin) + { + res= min(min(nchars, to_length), from_length); + memmove(to, from, res); + *from_end_pos= from + res; + *well_formed_error_pos= NULL; + *cannot_convert_error_pos= NULL; + } + else + { + int well_formed_error; + uint from_offset; + + if ((from_offset= (from_length % to_cs->mbminlen)) && + (from_cs == &my_charset_bin)) + { + /* + Copying from BINARY to UCS2 needs to prepend zeros sometimes: + INSERT INTO t1 (ucs2_column) VALUES (0x01); + 0x01 -> 0x0001 + */ + uint pad_length= to_cs->mbminlen - from_offset; + bzero(to, pad_length); + memmove(to + pad_length, from, from_offset); + nchars--; + from+= from_offset; + from_length-= from_offset; + to+= to_cs->mbminlen; + to_length-= to_cs->mbminlen; + } + + set_if_smaller(from_length, to_length); + res= to_cs->cset->well_formed_len(to_cs, from, from + from_length, + nchars, &well_formed_error); + memmove(to, from, res); + *from_end_pos= from + res; + *well_formed_error_pos= well_formed_error ? 
from + res : NULL; + *cannot_convert_error_pos= NULL; + if (from_offset) + res+= to_cs->mbminlen; + } + } + else + { + int cnvres; + my_wc_t wc; + int (*mb_wc)(struct charset_info_st *, my_wc_t *, + const uchar *, const uchar *)= from_cs->cset->mb_wc; + int (*wc_mb)(struct charset_info_st *, my_wc_t, + uchar *s, uchar *e)= to_cs->cset->wc_mb; + const uchar *from_end= (const uchar*) from + from_length; + uchar *to_end= (uchar*) to + to_length; + char *to_start= to; + *well_formed_error_pos= NULL; + *cannot_convert_error_pos= NULL; + + for ( ; nchars; nchars--) + { + const char *from_prev= from; + if ((cnvres= (*mb_wc)(from_cs, &wc, (uchar*) from, from_end)) > 0) + from+= cnvres; + else if (cnvres == MY_CS_ILSEQ) + { + if (!*well_formed_error_pos) + *well_formed_error_pos= from; + from++; + wc= '?'; + } + else if (cnvres > MY_CS_TOOSMALL) + { + /* + A correct multibyte sequence detected + But it doesn't have Unicode mapping. + */ + if (!*cannot_convert_error_pos) + *cannot_convert_error_pos= from; + from+= (-cnvres); + wc= '?'; + } + else + break; // Not enough characters + +outp: + if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0) + to+= cnvres; + else if (cnvres == MY_CS_ILUNI && wc != '?') + { + if (!*cannot_convert_error_pos) + *cannot_convert_error_pos= from_prev; + wc= '?'; + goto outp; + } + else + break; + } + *from_end_pos= from; + res= to - to_start; + } + return (uint32) res; +} + + + + void String::print(String *str) { char *st= (char*)Ptr, *end= st+str_length; @@ -850,22 +1017,22 @@ void String::print(String *str) switch (c) { case '\\': - str->append("\\\\", 2); + str->append(STRING_WITH_LEN("\\\\")); break; case '\0': - str->append("\\0", 2); + str->append(STRING_WITH_LEN("\\0")); break; case '\'': - str->append("\\'", 2); + str->append(STRING_WITH_LEN("\\'")); break; case '\n': - str->append("\\n", 2); + str->append(STRING_WITH_LEN("\\n")); break; case '\r': - str->append("\\r", 2); + str->append(STRING_WITH_LEN("\\r")); break; - case 26: 
//Ctrl-Z - str->append("\\z", 2); + case '\032': // Ctrl-Z + str->append(STRING_WITH_LEN("\\Z")); break; default: str->append(c); diff --git a/sql/sql_string.h b/sql/sql_string.h index 31cdd6efb8a..c1d27cb1791 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -24,12 +23,22 @@ #define NOT_FIXED_DEC 31 #endif +#define STRING_WITH_LEN(X) ((const char*) X), ((uint) (sizeof(X) - 1)) + class String; int sortcmp(const String *a,const String *b, CHARSET_INFO *cs); String *copy_if_not_alloced(String *a,String *b,uint32 arg_length); uint32 copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs, const char *from, uint32 from_length, CHARSET_INFO *from_cs, uint *errors); +uint32 well_formed_copy_nchars(CHARSET_INFO *to_cs, + char *to, uint to_length, + CHARSET_INFO *from_cs, + const char *from, uint from_length, + uint nchars, + const char **well_formed_error_pos, + const char **cannot_convert_error_pos, + const char **from_end_pos); class String { @@ -72,18 +81,20 @@ public: static void *operator new(size_t size, MEM_ROOT *mem_root) { return (void*) alloc_root(mem_root, (uint) size); } static void operator delete(void *ptr_arg,size_t size) - {} + { TRASH(ptr_arg, size); } static void operator delete(void *ptr_arg, MEM_ROOT *mem_root) - {} + { /* never called */ } ~String() { free(); } - inline void set_charset(CHARSET_INFO *charset) { str_charset= charset; } + inline void set_charset(CHARSET_INFO *charset_arg) + { str_charset= charset_arg; } inline CHARSET_INFO *charset() const { return str_charset; } inline uint32 length() 
const { return str_length;} inline uint32 alloced_length() const { return Alloced_length;} inline char& operator [] (uint32 i) const { return Ptr[i]; } inline void length(uint32 len) { str_length=len ; } inline bool is_empty() { return (str_length == 0); } + inline void mark_as_const() { Alloced_length= 0;} inline const char *ptr() const { return Ptr; } inline char *c_ptr() { @@ -141,6 +152,34 @@ public: bool set(longlong num, CHARSET_INFO *cs); bool set(ulonglong num, CHARSET_INFO *cs); bool set(double num,uint decimals, CHARSET_INFO *cs); + + /* + PMG 2004.11.12 + This is a method that works the same as perl's "chop". It simply + drops the last character of a string. This is useful in the case + of the federated storage handler where I'm building a unknown + number, list of values and fields to be used in a sql insert + statement to be run on the remote server, and have a comma after each. + When the list is complete, I "chop" off the trailing comma + + ex. + String stringobj; + stringobj.append("VALUES ('foo', 'fi', 'fo',"); + stringobj.chop(); + stringobj.append(")"); + + In this case, the value of string was: + + VALUES ('foo', 'fi', 'fo', + VALUES ('foo', 'fi', 'fo' + VALUES ('foo', 'fi', 'fo') + + */ + inline void chop() + { + Ptr[str_length--]= '\0'; + } + inline void free() { if (alloced) @@ -177,10 +216,6 @@ public: } } } - inline void shrink_to_length() - { - Alloced_length= str_length; - } bool is_alloced() { return alloced; } inline String& operator = (const String &s) { @@ -236,8 +271,6 @@ public: } bool fill(uint32 max_length,char fill); void strip_sp(); - inline void caseup() { my_caseup(str_charset,Ptr,str_length); } - inline void casedn() { my_casedn(str_charset,Ptr,str_length); } friend int sortcmp(const String *a,const String *b, CHARSET_INFO *cs); friend int stringcmp(const String *a,const String *b); friend String *copy_if_not_alloced(String *a,String *b,uint32 arg_length); @@ -293,6 +326,8 @@ public: Ptr[str_length]= c; str_length++; } + void 
qs_append(int i); + void qs_append(uint i); /* Inline (general) functions used by the protocol functions */ @@ -328,3 +363,9 @@ public: return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length); } }; + +static inline bool check_if_only_end_space(CHARSET_INFO *cs, char *str, + char *end) +{ + return str+ cs->cset->scan(cs, str, end, MY_SEQ_SPACES) == end; +} diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 0316d6a3c10..8b3028f5370 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -14,7 +13,6 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - /* drop and alter of tables */ #include "mysql_priv.h" @@ -24,6 +22,8 @@ #include <hash.h> #include <myisam.h> #include <my_dir.h> +#include "sp_head.h" +#include "sql_trigger.h" #ifdef __WIN__ #include <io.h> @@ -34,13 +34,15 @@ const char *primary_key_name="PRIMARY"; static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end); static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end); static int copy_data_between_tables(TABLE *from,TABLE *to, - List<create_field> &create, - enum enum_duplicates handle_duplicates, - bool ignore, - uint order_num, ORDER *order, - ha_rows *copied, ha_rows *deleted, + List<create_field> &create, bool ignore, + uint order_num, ORDER *order, + ha_rows *copied,ha_rows *deleted, enum enum_enable_or_disable keys_onoff); +static bool prepare_blob_field(THD *thd, create_field *sql_field); +static bool check_engine(THD *thd, 
const char *table_name, + enum db_type *new_engine); + /* Build the path to a file for a table (or the base path that can @@ -87,41 +89,41 @@ static uint build_table_path(char *buff, size_t bufflen, const char *db, Wait if global_read_lock (FLUSH TABLES WITH READ LOCK) is set. RETURN - 0 ok. In this case ok packet is sent to user - -1 Error (Error message given but not sent to user) + FALSE OK. In this case ok packet is sent to user + TRUE Error */ -int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, - my_bool drop_temporary) +bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, + my_bool drop_temporary) { - int error= 0; + bool error= FALSE, need_start_waiters= FALSE; DBUG_ENTER("mysql_rm_table"); /* mark for close and remove all cached entries */ - thd->mysys_var->current_mutex= &LOCK_open; - thd->mysys_var->current_cond= &COND_refresh; - VOID(pthread_mutex_lock(&LOCK_open)); - - if (!drop_temporary && global_read_lock) + if (!drop_temporary) { - if (thd->global_read_lock) + if ((error= wait_if_global_read_lock(thd, 0, 1))) { - my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE,MYF(0), - tables->real_name); - error= 1; - goto err; - } - while (global_read_lock && ! thd->killed) - { - (void) pthread_cond_wait(&COND_refresh,&LOCK_open); + my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), tables->table_name); + DBUG_RETURN(TRUE); } - + else + need_start_waiters= TRUE; } - error=mysql_rm_table_part2(thd,tables, if_exists, drop_temporary, 0); - err: + /* + Acquire LOCK_open after wait_if_global_read_lock(). If we would hold + LOCK_open during wait_if_global_read_lock(), other threads could not + close their tables. This would make a pretty deadlock. 
+ */ + thd->mysys_var->current_mutex= &LOCK_open; + thd->mysys_var->current_cond= &COND_refresh; + VOID(pthread_mutex_lock(&LOCK_open)); + + error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0); + pthread_mutex_unlock(&LOCK_open); pthread_mutex_lock(&thd->mysys_var->mutex); @@ -129,10 +131,13 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, thd->mysys_var->current_cond= 0; pthread_mutex_unlock(&thd->mysys_var->mutex); + if (need_start_waiters) + start_waiting_global_read_lock(thd); + if (error) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); send_ok(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -140,11 +145,12 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, delete (drop) tables. SYNOPSIS - mysql_rm_table_part2_with_lock() - thd Thread handle - tables List of tables to delete - if_exists If 1, don't give error if one table doesn't exists - dont_log_query Don't write query to log files + mysql_rm_table_part2_with_lock() + thd Thread handle + tables List of tables to delete + if_exists If 1, don't give error if one table doesn't exists + dont_log_query Don't write query to log files. This will also not + generate warnings if the handler files doesn't exists NOTES Works like documented in mysql_rm_table(), but don't check @@ -164,8 +170,8 @@ int mysql_rm_table_part2_with_lock(THD *thd, thd->mysys_var->current_cond= &COND_refresh; VOID(pthread_mutex_lock(&LOCK_open)); - error=mysql_rm_table_part2(thd,tables, if_exists, drop_temporary, - dont_log_query); + error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 1, + dont_log_query); pthread_mutex_unlock(&LOCK_open); @@ -187,7 +193,9 @@ int mysql_rm_table_part2_with_lock(THD *thd, if_exists If set, don't give an error if table doesn't exists. 
In this case we give an warning of level 'NOTE' drop_temporary Only drop temporary tables - dont_log_query Don't log the query + drop_view Allow to delete VIEW .frm + dont_log_query Don't write query to log files. This will also not + generate warnings if the handler files doesn't exists TODO: When logging to the binary log, we should log @@ -206,7 +214,8 @@ int mysql_rm_table_part2_with_lock(THD *thd, */ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, - bool drop_temporary, bool dont_log_query) + bool drop_temporary, bool drop_view, + bool dont_log_query) { TABLE_LIST *table; char path[FN_REFLEN], *alias; @@ -215,14 +224,21 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, bool some_tables_deleted=0, tmp_table_deleted=0, foreign_key_error=0; DBUG_ENTER("mysql_rm_table_part2"); + LINT_INIT(alias); + if (!drop_temporary && lock_table_names(thd, tables)) DBUG_RETURN(1); - for (table=tables ; table ; table=table->next) + /* Don't give warnings for not found errors, as we already generate notes */ + thd->no_warnings_for_error= 1; + + for (table= tables; table; table= table->next_local) { char *db=table->db; + db_type table_type= DB_TYPE_UNKNOWN; + mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL, TRUE); - if (!close_temporary_table(thd, db, table->real_name)) + if (!close_temporary_table(thd, db, table->table_name)) { tmp_table_deleted=1; continue; // removed temporary table @@ -231,55 +247,69 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, error=0; if (!drop_temporary) { - abort_locked_tables(thd,db,table->real_name); - remove_table_from_cache(thd,db,table->real_name, - RTFC_WAIT_OTHER_THREAD_FLAG | - RTFC_CHECK_KILLED_FLAG); - drop_locked_tables(thd,db,table->real_name); + abort_locked_tables(thd, db, table->table_name); + remove_table_from_cache(thd, db, table->table_name, + RTFC_WAIT_OTHER_THREAD_FLAG | + RTFC_CHECK_KILLED_FLAG); + drop_locked_tables(thd, db, table->table_name); if 
(thd->killed) + { + thd->no_warnings_for_error= 0; DBUG_RETURN(-1); - alias= (lower_case_table_names == 2) ? table->alias : table->real_name; + } + alias= (lower_case_table_names == 2) ? table->alias : table->table_name; /* remove form file and isam files */ build_table_path(path, sizeof(path), db, alias, reg_ext); } if (drop_temporary || - (access(path,F_OK) && - ha_create_table_from_engine(thd, db, alias))) + (access(path,F_OK) && + ha_create_table_from_engine(thd,db,alias)) || + (!drop_view && + mysql_frm_type(thd, path, &table_type) != FRMTYPE_TABLE)) { // Table was not found on disk and table can't be created from engine if (if_exists) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), - table->real_name); + table->table_name); else error= 1; } else { char *end; - db_type table_type= get_table_type(path); + if (table_type == DB_TYPE_UNKNOWN) + mysql_frm_type(thd, path, &table_type); *(end=fn_ext(path))=0; // Remove extension for delete - error=ha_delete_table(table_type, path); - if (error == ENOENT && if_exists) - error = 0; + error= ha_delete_table(thd, table_type, path, table->table_name, + !dont_log_query); + if ((error == ENOENT || error == HA_ERR_NO_SUCH_TABLE) && + (if_exists || table_type == DB_TYPE_UNKNOWN)) + error= 0; if (error == HA_ERR_ROW_IS_REFERENCED) { /* the table is referenced by a foreign key constraint */ foreign_key_error=1; } - if (!error || error == ENOENT) + if (!error || error == ENOENT || error == HA_ERR_NO_SUCH_TABLE) { + int new_error; /* Delete the table definition file */ strmov(end,reg_ext); - if (!(error=my_delete(path,MYF(MY_WME)))) + if (!(new_error=my_delete(path,MYF(MY_WME)))) + { some_tables_deleted=1; + new_error= Table_triggers_list::drop_all_triggers(thd, db, + table->table_name); + } + error|= new_error; } } if (error) { if (wrong_tables.length()) wrong_tables.append(','); - wrong_tables.append(String(table->real_name,system_charset_info)); + 
wrong_tables.append(String(table->table_name,system_charset_info)); } } thd->tmp_table_used= tmp_table_deleted; @@ -288,32 +318,27 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, { if (!foreign_key_error) my_printf_error(ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), MYF(0), - wrong_tables.c_ptr()); + wrong_tables.c_ptr()); else - my_error(ER_ROW_IS_REFERENCED, MYF(0)); + my_message(ER_ROW_IS_REFERENCED, ER(ER_ROW_IS_REFERENCED), MYF(0)); error= 1; } if (some_tables_deleted || tmp_table_deleted || !error) { query_cache_invalidate3(thd, tables, 0); - if (!dont_log_query) + if (!dont_log_query && mysql_bin_log.is_open()) { - mysql_update_log.write(thd, thd->query,thd->query_length); - if (mysql_bin_log.is_open()) - { - if (!error) - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, - tmp_table_deleted && !some_tables_deleted, - FALSE); - mysql_bin_log.write(&qinfo); - } + if (!error) + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE); + mysql_bin_log.write(&qinfo); } } if (!drop_temporary) - unlock_table_names(thd, tables); + unlock_table_names(thd, tables, (TABLE_LIST*) 0); + thd->no_warnings_for_error= 0; DBUG_RETURN(error); } @@ -327,7 +352,7 @@ int quick_rm_table(enum db_type base,const char *db, if (my_delete(path,MYF(0))) error=1; /* purecov: inspected */ *fn_ext(path)= 0; // Remove reg_ext - return ha_delete_table(base,path) || error; + return ha_delete_table(current_thd, base, path, table_name, 0) || error; } /* @@ -450,6 +475,171 @@ void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval, /* + Prepare a create_table instance for packing + + SYNOPSIS + prepare_create_field() + sql_field field to prepare for packing + blob_columns count for BLOBs + timestamps count for timestamps + table_flags table flags + + DESCRIPTION + This function prepares a create_field instance. + Fields such as pack_flag are valid after this call. 
+ + RETURN VALUES + 0 ok + 1 Error +*/ + +int prepare_create_field(create_field *sql_field, + uint *blob_columns, + int *timestamps, int *timestamps_with_niladic, + uint table_flags) +{ + DBUG_ENTER("prepare_field"); + + /* + This code came from mysql_prepare_table. + Indent preserved to make patching easier + */ + DBUG_ASSERT(sql_field->charset); + + switch (sql_field->sql_type) { + case FIELD_TYPE_BLOB: + case FIELD_TYPE_MEDIUM_BLOB: + case FIELD_TYPE_TINY_BLOB: + case FIELD_TYPE_LONG_BLOB: + sql_field->pack_flag=FIELDFLAG_BLOB | + pack_length_to_packflag(sql_field->pack_length - + portable_sizeof_char_ptr); + if (sql_field->charset->state & MY_CS_BINSORT) + sql_field->pack_flag|=FIELDFLAG_BINARY; + sql_field->length=8; // Unireg field length + sql_field->unireg_check=Field::BLOB_FIELD; + (*blob_columns)++; + break; + case FIELD_TYPE_GEOMETRY: +#ifdef HAVE_SPATIAL + if (!(table_flags & HA_CAN_GEOMETRY)) + { + my_printf_error(ER_CHECK_NOT_IMPLEMENTED, ER(ER_CHECK_NOT_IMPLEMENTED), + MYF(0), "GEOMETRY"); + DBUG_RETURN(1); + } + sql_field->pack_flag=FIELDFLAG_GEOM | + pack_length_to_packflag(sql_field->pack_length - + portable_sizeof_char_ptr); + if (sql_field->charset->state & MY_CS_BINSORT) + sql_field->pack_flag|=FIELDFLAG_BINARY; + sql_field->length=8; // Unireg field length + sql_field->unireg_check=Field::BLOB_FIELD; + (*blob_columns)++; + break; +#else + my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED), MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); + DBUG_RETURN(1); +#endif /*HAVE_SPATIAL*/ + case MYSQL_TYPE_VARCHAR: +#ifndef QQ_ALL_HANDLERS_SUPPORT_VARCHAR + if (table_flags & HA_NO_VARCHAR) + { + /* convert VARCHAR to CHAR because handler is not yet up to date */ + sql_field->sql_type= MYSQL_TYPE_VAR_STRING; + sql_field->pack_length= calc_pack_length(sql_field->sql_type, + (uint) sql_field->length); + if ((sql_field->length / sql_field->charset->mbmaxlen) > + MAX_FIELD_CHARLENGTH) + { + my_printf_error(ER_TOO_BIG_FIELDLENGTH, 
ER(ER_TOO_BIG_FIELDLENGTH), + MYF(0), sql_field->field_name, MAX_FIELD_CHARLENGTH); + DBUG_RETURN(1); + } + } +#endif + /* fall through */ + case FIELD_TYPE_STRING: + sql_field->pack_flag=0; + if (sql_field->charset->state & MY_CS_BINSORT) + sql_field->pack_flag|=FIELDFLAG_BINARY; + break; + case FIELD_TYPE_ENUM: + sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) | + FIELDFLAG_INTERVAL; + if (sql_field->charset->state & MY_CS_BINSORT) + sql_field->pack_flag|=FIELDFLAG_BINARY; + sql_field->unireg_check=Field::INTERVAL_FIELD; + check_duplicates_in_interval("ENUM",sql_field->field_name, + sql_field->interval, + sql_field->charset); + break; + case FIELD_TYPE_SET: + sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) | + FIELDFLAG_BITFIELD; + if (sql_field->charset->state & MY_CS_BINSORT) + sql_field->pack_flag|=FIELDFLAG_BINARY; + sql_field->unireg_check=Field::BIT_FIELD; + check_duplicates_in_interval("SET",sql_field->field_name, + sql_field->interval, + sql_field->charset); + break; + case FIELD_TYPE_DATE: // Rest of string types + case FIELD_TYPE_NEWDATE: + case FIELD_TYPE_TIME: + case FIELD_TYPE_DATETIME: + case FIELD_TYPE_NULL: + sql_field->pack_flag=f_settype((uint) sql_field->sql_type); + break; + case FIELD_TYPE_BIT: + /* + We have sql_field->pack_flag already set here, see mysql_prepare_table(). + */ + break; + case FIELD_TYPE_NEWDECIMAL: + sql_field->pack_flag=(FIELDFLAG_NUMBER | + (sql_field->flags & UNSIGNED_FLAG ? 0 : + FIELDFLAG_DECIMAL) | + (sql_field->flags & ZEROFILL_FLAG ? 
+ FIELDFLAG_ZEROFILL : 0) | + (sql_field->decimals << FIELDFLAG_DEC_SHIFT)); + break; + case FIELD_TYPE_TIMESTAMP: + /* We should replace old TIMESTAMP fields with their newer analogs */ + if (sql_field->unireg_check == Field::TIMESTAMP_OLD_FIELD) + { + if (!*timestamps) + { + sql_field->unireg_check= Field::TIMESTAMP_DNUN_FIELD; + (*timestamps_with_niladic)++; + } + else + sql_field->unireg_check= Field::NONE; + } + else if (sql_field->unireg_check != Field::NONE) + (*timestamps_with_niladic)++; + + (*timestamps)++; + /* fall-through */ + default: + sql_field->pack_flag=(FIELDFLAG_NUMBER | + (sql_field->flags & UNSIGNED_FLAG ? 0 : + FIELDFLAG_DECIMAL) | + (sql_field->flags & ZEROFILL_FLAG ? + FIELDFLAG_ZEROFILL : 0) | + f_settype((uint) sql_field->sql_type) | + (sql_field->decimals << FIELDFLAG_DEC_SHIFT)); + break; + } + if (!(sql_field->flags & NOT_NULL_FLAG)) + sql_field->pack_flag|= FIELDFLAG_MAYBE_NULL; + if (sql_field->flags & NO_DEFAULT_VALUE_FLAG) + sql_field->pack_flag|= FIELDFLAG_NO_DEFAULT; + DBUG_RETURN(0); +} + +/* Preparation for table creation SYNOPSIS @@ -461,20 +651,24 @@ void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval, DESCRIPTION Prepares the table and key structures for table creation. 
+ NOTES + sets create_info->varchar if the table has a varchar + RETURN VALUES 0 ok -1 error */ -int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, - Alter_info *alter_info, - bool tmp_table, uint &db_options, - handler *file, KEY *&key_info_buffer, - uint *key_count, int select_field_count) +static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, + Alter_info *alter_info, + bool tmp_table, + uint *db_options, + handler *file, KEY **key_info_buffer, + uint *key_count, int select_field_count) { const char *key_name; create_field *sql_field,*dup_field; - uint field,null_fields,blob_columns; + uint field,null_fields,blob_columns,max_key_length; ulong record_offset= 0; KEY *key_info; KEY_PART_INFO *key_part_info; @@ -483,13 +677,18 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, int select_field_pos,auto_increment=0; List_iterator<create_field> it(alter_info->create_list); List_iterator<create_field> it2(alter_info->create_list); + uint total_uneven_bit_length= 0; DBUG_ENTER("mysql_prepare_table"); select_field_pos= alter_info->create_list.elements - select_field_count; null_fields=blob_columns=0; + create_info->varchar= 0; + max_key_length= file->max_key_length(); for (field_no=0; (sql_field=it++) ; field_no++) { + CHARSET_INFO *save_cs; + /* Initialize length from its original value (number of characters), which was set in the parser. 
This is necessary if we're @@ -507,42 +706,43 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, if (create_info->table_charset && sql_field->charset != &my_charset_bin) sql_field->charset= create_info->table_charset; - CHARSET_INFO *savecs= sql_field->charset; + save_cs= sql_field->charset; if ((sql_field->flags & BINCMP_FLAG) && !(sql_field->charset= get_charset_by_csname(sql_field->charset->csname, MY_CS_BINSORT,MYF(0)))) { char tmp[64]; - strmake(strmake(tmp, savecs->csname, sizeof(tmp)-4), "_bin", 4); + strmake(strmake(tmp, save_cs->csname, sizeof(tmp)-4), + STRING_WITH_LEN("_bin")); my_error(ER_UNKNOWN_COLLATION, MYF(0), tmp); DBUG_RETURN(-1); } /* - Convert the default value character + Convert the default value from client character set into the column character set if necessary. */ if (sql_field->def && - savecs != sql_field->def->collation.collation && + save_cs != sql_field->def->collation.collation && (sql_field->sql_type == FIELD_TYPE_VAR_STRING || sql_field->sql_type == FIELD_TYPE_STRING || sql_field->sql_type == FIELD_TYPE_SET || sql_field->sql_type == FIELD_TYPE_ENUM)) { - Item_arena backup_arena; - bool need_to_change_arena= - !thd->current_arena->is_conventional_execution(); + Query_arena backup_arena; + bool need_to_change_arena= !thd->stmt_arena->is_conventional(); if (need_to_change_arena) { - /* Assert that we don't do that at every PS execute */ - DBUG_ASSERT(thd->current_arena->is_first_stmt_execute()); - thd->set_n_backup_item_arena(thd->current_arena, &backup_arena); + /* Asser that we don't do that at every PS execute */ + DBUG_ASSERT(thd->stmt_arena->is_first_stmt_execute() || + thd->stmt_arena->is_first_sp_execute()); + thd->set_n_backup_active_arena(thd->stmt_arena, &backup_arena); } - sql_field->def= sql_field->def->safe_charset_converter(savecs); + sql_field->def= sql_field->def->safe_charset_converter(save_cs); if (need_to_change_arena) - thd->restore_backup_item_arena(thd->current_arena, &backup_arena); + 
thd->restore_active_arena(thd->stmt_arena, &backup_arena); if (sql_field->def == NULL) { @@ -570,34 +770,33 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, Create the typelib in prepared statement memory if we're executing one. */ - MEM_ROOT *stmt_root= thd->current_arena->mem_root; + MEM_ROOT *stmt_root= thd->stmt_arena->mem_root; interval= sql_field->interval= typelib(stmt_root, sql_field->interval_list); - List_iterator<String> it(sql_field->interval_list); + List_iterator<String> int_it(sql_field->interval_list); String conv, *tmp; char comma_buf[2]; int comma_length= cs->cset->wc_mb(cs, ',', (uchar*) comma_buf, (uchar*) comma_buf + sizeof(comma_buf)); DBUG_ASSERT(comma_length > 0); - for (uint i= 0; (tmp= it++); i++) + for (uint i= 0; (tmp= int_it++); i++) { + uint lengthsp; if (String::needs_conversion(tmp->length(), tmp->charset(), cs, &dummy)) { uint cnv_errs; conv.copy(tmp->ptr(), tmp->length(), tmp->charset(), cs, &cnv_errs); - char *buf= (char*) alloc_root(stmt_root, conv.length()+1); - memcpy(buf, conv.ptr(), conv.length()); - buf[conv.length()]= '\0'; - interval->type_names[i]= buf; + interval->type_names[i]= strmake_root(stmt_root, conv.ptr(), + conv.length()); interval->type_lengths[i]= conv.length(); } // Strip trailing spaces. 
- uint lengthsp= cs->cset->lengthsp(cs, interval->type_names[i], - interval->type_lengths[i]); + lengthsp= cs->cset->lengthsp(cs, interval->type_names[i], + interval->type_lengths[i]); interval->type_lengths[i]= lengthsp; ((uchar *)interval->type_names[i])[lengthsp]= '\0'; if (sql_field->sql_type == FIELD_TYPE_SET) @@ -606,9 +805,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, interval->type_lengths[i], comma_buf, comma_length, NULL, 0)) { - my_printf_error(ER_UNKNOWN_ERROR, - "Illegal %s '%-.64s' value found during parsing", - MYF(0), "set", tmp->ptr()); + my_error(ER_ILLEGAL_VALUE_FOR_TYPE, MYF(0), "set", tmp->ptr()); DBUG_RETURN(-1); } } @@ -618,6 +815,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, if (sql_field->sql_type == FIELD_TYPE_SET) { + uint32 field_length; if (sql_field->def != NULL) { char *not_used; @@ -648,11 +846,12 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, DBUG_RETURN(-1); } } - calculate_interval_lengths(cs, interval, &dummy, &sql_field->length); - sql_field->length+= (interval->count - 1); + calculate_interval_lengths(cs, interval, &dummy, &field_length); + sql_field->length= field_length + (interval->count - 1); } else /* FIELD_TYPE_ENUM */ { + uint32 field_length; DBUG_ASSERT(sql_field->sql_type == FIELD_TYPE_ENUM); if (sql_field->def != NULL) { @@ -677,20 +876,25 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } } } - calculate_interval_lengths(cs, interval, &sql_field->length, &dummy); + calculate_interval_lengths(cs, interval, &field_length, &dummy); + sql_field->length= field_length; } set_if_smaller(sql_field->length, MAX_FIELD_WIDTH-1); } + if (sql_field->sql_type == FIELD_TYPE_BIT) + { + sql_field->pack_flag= FIELDFLAG_NUMBER; + if (file->table_flags() & HA_CAN_BIT_FIELD) + total_uneven_bit_length+= sql_field->length & 7; + else + sql_field->pack_flag|= FIELDFLAG_TREAT_BIT_AS_CHAR; + } + sql_field->create_length_to_internal_length(); + if 
(prepare_blob_field(thd, sql_field)) + DBUG_RETURN(-1); - /* Don't pack keys in old tables if the user has requested this */ - if ((sql_field->flags & BLOB_FLAG) || - sql_field->sql_type == FIELD_TYPE_VAR_STRING && - create_info->row_type != ROW_TYPE_FIXED) - { - db_options|=HA_OPTION_PACK_RECORD; - } if (!(sql_field->flags & NOT_NULL_FLAG)) null_fields++; @@ -713,7 +917,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, */ if (field_no < select_field_pos || dup_no >= select_field_pos) { - my_error(ER_DUP_FIELDNAME,MYF(0),sql_field->field_name); + my_error(ER_DUP_FIELDNAME, MYF(0), sql_field->field_name); DBUG_RETURN(-1); } else @@ -726,6 +930,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, create_info->default_table_charset); sql_field->length= dup_field->char_length; sql_field->pack_length= dup_field->pack_length; + sql_field->key_length= dup_field->key_length; sql_field->create_length_to_internal_length(); sql_field->decimals= dup_field->decimals; sql_field->unireg_check= dup_field->unireg_check; @@ -744,115 +949,29 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } } } + /* Don't pack rows in old tables if the user has requested this */ + if ((sql_field->flags & BLOB_FLAG) || + sql_field->sql_type == MYSQL_TYPE_VARCHAR && + create_info->row_type != ROW_TYPE_FIXED) + (*db_options)|= HA_OPTION_PACK_RECORD; it2.rewind(); } /* record_offset will be increased with 'length-of-null-bits' later */ record_offset= 0; + null_fields+= total_uneven_bit_length; it.rewind(); while ((sql_field=it++)) { - DBUG_ASSERT(sql_field->charset); - - switch (sql_field->sql_type) { - case FIELD_TYPE_BLOB: - case FIELD_TYPE_MEDIUM_BLOB: - case FIELD_TYPE_TINY_BLOB: - case FIELD_TYPE_LONG_BLOB: - sql_field->pack_flag=FIELDFLAG_BLOB | - pack_length_to_packflag(sql_field->pack_length - - portable_sizeof_char_ptr); - if (sql_field->charset->state & MY_CS_BINSORT) - sql_field->pack_flag|=FIELDFLAG_BINARY; - sql_field->length=8; // Unireg 
field length - sql_field->unireg_check=Field::BLOB_FIELD; - blob_columns++; - break; - case FIELD_TYPE_GEOMETRY: -#ifdef HAVE_SPATIAL - if (!(file->table_flags() & HA_CAN_GEOMETRY)) - { - my_printf_error(ER_CHECK_NOT_IMPLEMENTED, ER(ER_CHECK_NOT_IMPLEMENTED), - MYF(0), "GEOMETRY"); - DBUG_RETURN(-1); - } - sql_field->pack_flag=FIELDFLAG_GEOM | - pack_length_to_packflag(sql_field->pack_length - - portable_sizeof_char_ptr); - if (sql_field->charset->state & MY_CS_BINSORT) - sql_field->pack_flag|=FIELDFLAG_BINARY; - sql_field->length=8; // Unireg field length - sql_field->unireg_check=Field::BLOB_FIELD; - blob_columns++; - break; -#else - my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED), MYF(0), - sym_group_geom.name, sym_group_geom.needed_define); - DBUG_RETURN(-1); -#endif /*HAVE_SPATIAL*/ - case FIELD_TYPE_VAR_STRING: - case FIELD_TYPE_STRING: - sql_field->pack_flag=0; - if (sql_field->charset->state & MY_CS_BINSORT) - sql_field->pack_flag|=FIELDFLAG_BINARY; - break; - case FIELD_TYPE_ENUM: - sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) | - FIELDFLAG_INTERVAL; - if (sql_field->charset->state & MY_CS_BINSORT) - sql_field->pack_flag|=FIELDFLAG_BINARY; - sql_field->unireg_check=Field::INTERVAL_FIELD; - check_duplicates_in_interval("ENUM",sql_field->field_name, - sql_field->interval, - sql_field->charset); - break; - case FIELD_TYPE_SET: - sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) | - FIELDFLAG_BITFIELD; - if (sql_field->charset->state & MY_CS_BINSORT) - sql_field->pack_flag|=FIELDFLAG_BINARY; - sql_field->unireg_check=Field::BIT_FIELD; - check_duplicates_in_interval("SET",sql_field->field_name, - sql_field->interval, - sql_field->charset); - break; - case FIELD_TYPE_DATE: // Rest of string types - case FIELD_TYPE_NEWDATE: - case FIELD_TYPE_TIME: - case FIELD_TYPE_DATETIME: - case FIELD_TYPE_NULL: - sql_field->pack_flag=f_settype((uint) sql_field->sql_type); - break; - case FIELD_TYPE_TIMESTAMP: - /* We 
should replace old TIMESTAMP fields with their newer analogs */ - if (sql_field->unireg_check == Field::TIMESTAMP_OLD_FIELD) - { - if (!timestamps) - { - sql_field->unireg_check= Field::TIMESTAMP_DNUN_FIELD; - timestamps_with_niladic++; - } - else - sql_field->unireg_check= Field::NONE; - } - else if (sql_field->unireg_check != Field::NONE) - timestamps_with_niladic++; + DBUG_ASSERT(sql_field->charset != 0); - timestamps++; - /* fall-through */ - default: - sql_field->pack_flag=(FIELDFLAG_NUMBER | - (sql_field->flags & UNSIGNED_FLAG ? 0 : - FIELDFLAG_DECIMAL) | - (sql_field->flags & ZEROFILL_FLAG ? - FIELDFLAG_ZEROFILL : 0) | - f_settype((uint) sql_field->sql_type) | - (sql_field->decimals << FIELDFLAG_DEC_SHIFT)); - break; - } - if (!(sql_field->flags & NOT_NULL_FLAG)) - sql_field->pack_flag|=FIELDFLAG_MAYBE_NULL; + if (prepare_create_field(sql_field, &blob_columns, + ×tamps, ×tamps_with_niladic, + file->table_flags())) + DBUG_RETURN(-1); + if (sql_field->sql_type == MYSQL_TYPE_VARCHAR) + create_info->varchar= 1; sql_field->offset= record_offset; if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER) auto_increment++; @@ -860,24 +979,27 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } if (timestamps_with_niladic > 1) { - my_error(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS,MYF(0)); + my_message(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS, + ER(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS), MYF(0)); DBUG_RETURN(-1); } if (auto_increment > 1) { - my_error(ER_WRONG_AUTO_KEY,MYF(0)); + my_message(ER_WRONG_AUTO_KEY, ER(ER_WRONG_AUTO_KEY), MYF(0)); DBUG_RETURN(-1); } if (auto_increment && (file->table_flags() & HA_NO_AUTO_INCREMENT)) { - my_error(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT,MYF(0)); + my_message(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT, + ER(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT), MYF(0)); DBUG_RETURN(-1); } if (blob_columns && (file->table_flags() & HA_NO_BLOBS)) { - my_error(ER_TABLE_CANT_HANDLE_BLOB,MYF(0)); + my_message(ER_TABLE_CANT_HANDLE_BLOB, 
ER(ER_TABLE_CANT_HANDLE_BLOB), + MYF(0)); DBUG_RETURN(-1); } @@ -904,9 +1026,9 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, if (fk_key->ref_columns.elements && fk_key->ref_columns.elements != fk_key->columns.elements) { - my_error(ER_WRONG_FK_DEF, MYF(0), fk_key->name ? fk_key->name : - "foreign key without name", - ER(ER_KEY_REF_DO_NOT_MATCH_TABLE_REF)); + my_error(ER_WRONG_FK_DEF, MYF(0), + (fk_key->name ? fk_key->name : "foreign key without name"), + ER(ER_KEY_REF_DO_NOT_MATCH_TABLE_REF)); DBUG_RETURN(-1); } continue; @@ -971,9 +1093,9 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, DBUG_RETURN(-1); } - key_info_buffer=key_info=(KEY*) sql_calloc(sizeof(KEY)* *key_count); + (*key_info_buffer) = key_info= (KEY*) sql_calloc(sizeof(KEY)* *key_count); key_part_info=(KEY_PART_INFO*) sql_calloc(sizeof(KEY_PART_INFO)*key_parts); - if (!key_info_buffer || ! key_part_info) + if (!*key_info_buffer || ! key_part_info) DBUG_RETURN(-1); // Out of memory key_iterator.rewind(); @@ -1005,8 +1127,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, key_info->flags= HA_SPATIAL; break; #else - my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED),MYF(0), - sym_group_geom.name, sym_group_geom.needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); DBUG_RETURN(-1); #endif case Key::FOREIGN_KEY: @@ -1028,7 +1150,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, { if (!(file->table_flags() & HA_CAN_FULLTEXT)) { - my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0)); + my_message(ER_TABLE_CANT_HANDLE_FT, ER(ER_TABLE_CANT_HANDLE_FT), + MYF(0)); DBUG_RETURN(-1); } } @@ -1043,10 +1166,15 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, /* TODO: Add proper checks if handler supports key_type and algorithm */ if (key_info->flags & HA_SPATIAL) { + if (!(file->table_flags() & HA_CAN_RTREEKEYS)) + { + my_message(ER_TABLE_CANT_HANDLE_SPKEYS, 
ER(ER_TABLE_CANT_HANDLE_SPKEYS), + MYF(0)); + DBUG_RETURN(-1); + } if (key_info->key_parts != 1) { - my_printf_error(ER_WRONG_ARGUMENTS, - ER(ER_WRONG_ARGUMENTS),MYF(0),"SPATIAL INDEX"); + my_error(ER_WRONG_ARGUMENTS, MYF(0), "SPATIAL INDEX"); DBUG_RETURN(-1); } } @@ -1055,17 +1183,15 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, #ifdef HAVE_RTREE_KEYS if ((key_info->key_parts & 1) == 1) { - my_printf_error(ER_WRONG_ARGUMENTS, - ER(ER_WRONG_ARGUMENTS),MYF(0),"RTREE INDEX"); + my_error(ER_WRONG_ARGUMENTS, MYF(0), "RTREE INDEX"); DBUG_RETURN(-1); } /* TODO: To be deleted */ - my_printf_error(ER_NOT_SUPPORTED_YET, ER(ER_NOT_SUPPORTED_YET), - MYF(0), "RTREE INDEX"); + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "RTREE INDEX"); DBUG_RETURN(-1); #else - my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED),MYF(0), - sym_group_rtree.name, sym_group_rtree.needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_rtree.name, sym_group_rtree.needed_define); DBUG_RETURN(-1); #endif } @@ -1074,6 +1200,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, CHARSET_INFO *ft_key_charset=0; // for FULLTEXT for (uint column_nr=0 ; (column=cols++) ; column_nr++) { + uint length; key_part_spec *dup_column; it.rewind(); @@ -1085,9 +1212,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, field++; if (!sql_field) { - my_printf_error(ER_KEY_COLUMN_DOES_NOT_EXITS, - ER(ER_KEY_COLUMN_DOES_NOT_EXITS),MYF(0), - column->field_name); + my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), column->field_name); DBUG_RETURN(-1); } while ((dup_column= cols2++) != column) @@ -1104,15 +1229,14 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, cols2.rewind(); if (key->type == Key::FULLTEXT) { - if ((sql_field->sql_type != FIELD_TYPE_STRING && - sql_field->sql_type != FIELD_TYPE_VAR_STRING && + if ((sql_field->sql_type != MYSQL_TYPE_STRING && + sql_field->sql_type != MYSQL_TYPE_VARCHAR && !f_is_blob(sql_field->pack_flag)) || 
sql_field->charset == &my_charset_bin || sql_field->charset->mbminlen > 1 || // ucs2 doesn't work yet (ft_key_charset && sql_field->charset != ft_key_charset)) { - my_printf_error(ER_BAD_FT_COLUMN,ER(ER_BAD_FT_COLUMN),MYF(0), - column->field_name); + my_error(ER_BAD_FT_COLUMN, MYF(0), column->field_name); DBUG_RETURN(-1); } ft_key_charset=sql_field->charset; @@ -1129,32 +1253,33 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, { column->length*= sql_field->charset->mbmaxlen; - if (f_is_blob(sql_field->pack_flag)) + if (f_is_blob(sql_field->pack_flag) || + (f_is_geom(sql_field->pack_flag) && key->type != Key::SPATIAL)) { if (!(file->table_flags() & HA_CAN_INDEX_BLOBS)) { - my_printf_error(ER_BLOB_USED_AS_KEY,ER(ER_BLOB_USED_AS_KEY),MYF(0), - column->field_name); + my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name); DBUG_RETURN(-1); } + if (f_is_geom(sql_field->pack_flag) && sql_field->geom_type == + Field::GEOM_POINT) + column->length= 21; if (!column->length) { - my_printf_error(ER_BLOB_KEY_WITHOUT_LENGTH, - ER(ER_BLOB_KEY_WITHOUT_LENGTH),MYF(0), - column->field_name); + my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name); DBUG_RETURN(-1); } } #ifdef HAVE_SPATIAL - if (key->type == Key::SPATIAL) + if (key->type == Key::SPATIAL) { - if (!column->length ) + if (!column->length) { /* - BAR: 4 is: (Xmin,Xmax,Ymin,Ymax), this is for 2D case - Lately we'll extend this code to support more dimensions + 4 is: (Xmin,Xmax,Ymin,Ymax), this is for 2D case + Lately we'll extend this code to support more dimensions */ - column->length=4*sizeof(double); + column->length= 4*sizeof(double); } } #endif @@ -1171,13 +1296,13 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, key_info->flags|= HA_NULL_PART_KEY; if (!(file->table_flags() & HA_NULL_IN_KEY)) { - my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX), - MYF(0),column->field_name); + my_error(ER_NULL_COLUMN_IN_INDEX, MYF(0), column->field_name); 
DBUG_RETURN(-1); } if (key->type == Key::SPATIAL) { - my_error(ER_SPATIAL_CANT_HAVE_NULL, MYF(0)); + my_message(ER_SPATIAL_CANT_HAVE_NULL, + ER(ER_SPATIAL_CANT_HAVE_NULL), MYF(0)); DBUG_RETURN(-1); } } @@ -1191,15 +1316,16 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, key_part_info->fieldnr= field; key_part_info->offset= (uint16) sql_field->offset; key_part_info->key_type=sql_field->pack_flag; - uint length=sql_field->pack_length; + length= sql_field->key_length; + if (column->length) { if (f_is_blob(sql_field->pack_flag)) { - if ((length=column->length) > file->max_key_length() || + if ((length=column->length) > max_key_length || length > file->max_key_part_length()) { - length=min(file->max_key_length(), file->max_key_part_length()); + length=min(max_key_length, file->max_key_part_length()); if (key->type == Key::MULTIPLE) { /* not a critical problem */ @@ -1218,12 +1344,13 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } else if (!f_is_geom(sql_field->pack_flag) && (column->length > length || + !Field::type_can_have_key_part (sql_field->sql_type) || ((f_is_packed(sql_field->pack_flag) || ((file->table_flags() & HA_NO_PREFIX_CHAR_KEYS) && (key_info->flags & HA_NOSAME))) && column->length != length))) { - my_error(ER_WRONG_SUB_KEY,MYF(0)); + my_message(ER_WRONG_SUB_KEY, ER(ER_WRONG_SUB_KEY), MYF(0)); DBUG_RETURN(-1); } else if (!(file->table_flags() & HA_NO_PREFIX_CHAR_KEYS)) @@ -1231,13 +1358,14 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } else if (length == 0) { - my_printf_error(ER_WRONG_KEY_COLUMN, ER(ER_WRONG_KEY_COLUMN), MYF(0), - column->field_name); + my_error(ER_WRONG_KEY_COLUMN, MYF(0), column->field_name); DBUG_RETURN(-1); } - if (length > file->max_key_part_length()) + if (length > file->max_key_part_length() && key->type != Key::FULLTEXT) { - length=file->max_key_part_length(); + length= file->max_key_part_length(); + /* Align key length to multibyte char boundary */ + length-= length % 
sql_field->charset->mbmaxlen; if (key->type == Key::MULTIPLE) { /* not a critical problem */ @@ -1255,14 +1383,15 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } key_part_info->length=(uint16) length; /* Use packed keys for long strings on the first column */ - if (!(db_options & HA_OPTION_NO_PACK_KEYS) && + if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) && (length >= KEY_DEFAULT_PACK_LENGTH && - (sql_field->sql_type == FIELD_TYPE_STRING || - sql_field->sql_type == FIELD_TYPE_VAR_STRING || + (sql_field->sql_type == MYSQL_TYPE_STRING || + sql_field->sql_type == MYSQL_TYPE_VARCHAR || sql_field->pack_flag & FIELDFLAG_BLOB))) { - if (column_nr == 0 && (sql_field->pack_flag & FIELDFLAG_BLOB)) - key_info->flags|= HA_BINARY_PACK_KEY; + if (column_nr == 0 && (sql_field->pack_flag & FIELDFLAG_BLOB) || + sql_field->sql_type == MYSQL_TYPE_VARCHAR) + key_info->flags|= HA_BINARY_PACK_KEY | HA_VAR_LENGTH_KEY; else key_info->flags|= HA_PACK_KEY; } @@ -1276,7 +1405,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, { if (primary_key) { - my_error(ER_MULTIPLE_PRI_KEY,MYF(0)); + my_message(ER_MULTIPLE_PRI_KEY, ER(ER_MULTIPLE_PRI_KEY), + MYF(0)); DBUG_RETURN(-1); } key_name=primary_key_name; @@ -1284,10 +1414,10 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } else if (!(key_name = key->name)) key_name=make_unique_key_name(sql_field->field_name, - key_info_buffer,key_info); - if (check_if_keyname_exists(key_name,key_info_buffer,key_info)) + *key_info_buffer, key_info); + if (check_if_keyname_exists(key_name, *key_info_buffer, key_info)) { - my_error(ER_DUP_KEYNAME,MYF(0),key_name); + my_error(ER_DUP_KEYNAME, MYF(0), key_name); DBUG_RETURN(-1); } key_info->name=(char*) key_name; @@ -1301,7 +1431,6 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, if (!(key_info->flags & HA_NULL_PART_KEY)) unique_key=1; key_info->key_length=(uint16) key_length; - uint max_key_length= file->max_key_length(); if (key_length > 
max_key_length && key->type != Key::FULLTEXT) { my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length); @@ -1312,16 +1441,16 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, if (!unique_key && !primary_key && (file->table_flags() & HA_REQUIRE_PRIMARY_KEY)) { - my_error(ER_REQUIRES_PRIMARY_KEY,MYF(0)); + my_message(ER_REQUIRES_PRIMARY_KEY, ER(ER_REQUIRES_PRIMARY_KEY), MYF(0)); DBUG_RETURN(-1); } if (auto_increment > 0) { - my_error(ER_WRONG_AUTO_KEY,MYF(0)); + my_message(ER_WRONG_AUTO_KEY, ER(ER_WRONG_AUTO_KEY), MYF(0)); DBUG_RETURN(-1); } /* Sort keys in optimized order */ - qsort((gptr) key_info_buffer, *key_count, sizeof(KEY), + qsort((gptr) *key_info_buffer, *key_count, sizeof(KEY), (qsort_cmp) sort_keys); create_info->null_bits= null_fields; @@ -1330,6 +1459,109 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, /* + Extend long VARCHAR fields to blob & prepare field if it's a blob + + SYNOPSIS + prepare_blob_field() + sql_field Field to check + + RETURN + 0 ok + 1 Error (sql_field can't be converted to blob) + In this case the error is given +*/ + +static bool prepare_blob_field(THD *thd, create_field *sql_field) +{ + DBUG_ENTER("prepare_blob_field"); + + if (sql_field->length > MAX_FIELD_VARCHARLENGTH && + !(sql_field->flags & BLOB_FLAG)) + { + /* Convert long VARCHAR columns to TEXT or BLOB */ + char warn_buff[MYSQL_ERRMSG_SIZE]; + + if (sql_field->def || (thd->variables.sql_mode & (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))) + { + my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), sql_field->field_name, + MAX_FIELD_VARCHARLENGTH / sql_field->charset->mbmaxlen); + DBUG_RETURN(1); + } + sql_field->sql_type= FIELD_TYPE_BLOB; + sql_field->flags|= BLOB_FLAG; + sprintf(warn_buff, ER(ER_AUTO_CONVERT), sql_field->field_name, + (sql_field->charset == &my_charset_bin) ? "VARBINARY" : "VARCHAR", + (sql_field->charset == &my_charset_bin) ? 
"BLOB" : "TEXT"); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_AUTO_CONVERT, + warn_buff); + } + + if ((sql_field->flags & BLOB_FLAG) && sql_field->length) + { + if (sql_field->sql_type == FIELD_TYPE_BLOB) + { + /* The user has given a length to the blob column */ + sql_field->sql_type= get_blob_type_from_length(sql_field->length); + sql_field->pack_length= calc_pack_length(sql_field->sql_type, 0); + } + sql_field->length= 0; + } + DBUG_RETURN(0); +} + + +/* + Preparation of create_field for SP function return values. + Based on code used in the inner loop of mysql_prepare_table() above + + SYNOPSIS + sp_prepare_create_field() + thd Thread object + sql_field Field to prepare + + DESCRIPTION + Prepares the field structures for field creation. + +*/ + +void sp_prepare_create_field(THD *thd, create_field *sql_field) +{ + if (sql_field->sql_type == FIELD_TYPE_SET || + sql_field->sql_type == FIELD_TYPE_ENUM) + { + uint32 field_length, dummy; + if (sql_field->sql_type == FIELD_TYPE_SET) + { + calculate_interval_lengths(sql_field->charset, + sql_field->interval, &dummy, + &field_length); + sql_field->length= field_length + + (sql_field->interval->count - 1); + } + else /* FIELD_TYPE_ENUM */ + { + calculate_interval_lengths(sql_field->charset, + sql_field->interval, + &field_length, &dummy); + sql_field->length= field_length; + } + set_if_smaller(sql_field->length, MAX_FIELD_WIDTH-1); + } + + if (sql_field->sql_type == FIELD_TYPE_BIT) + { + sql_field->pack_flag= FIELDFLAG_NUMBER | + FIELDFLAG_TREAT_BIT_AS_CHAR; + } + sql_field->create_length_to_internal_length(); + DBUG_ASSERT(sql_field->def == 0); + /* Can't go wrong as sql_field->def is not defined */ + (void) prepare_blob_field(thd, sql_field); +} + + +/* Create a table SYNOPSIS @@ -1339,7 +1571,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, table_name Table name create_info [in/out] Create information (like MAX_ROWS) alter_info [in/out] List of columns and indexes to create - tmp_table Set 
to 1 if this is an internal temporary table + internal_tmp_table Set to 1 if this is an internal temporary table (From ALTER TABLE) DESCRIPTION @@ -1356,46 +1588,38 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, prepared statements/stored routines. RETURN VALUES - 0 ok - -1 error + FALSE OK + TRUE error */ -int mysql_create_table(THD *thd,const char *db, const char *table_name, - HA_CREATE_INFO *create_info, - Alter_info *alter_info, - bool tmp_table, - uint select_field_count) +bool mysql_create_table(THD *thd,const char *db, const char *table_name, + HA_CREATE_INFO *create_info, + Alter_info *alter_info, + bool internal_tmp_table, + uint select_field_count) { char path[FN_REFLEN]; const char *alias; - int error= -1; uint db_options, key_count; KEY *key_info_buffer; handler *file; - enum db_type new_db_type; + bool error= TRUE; DBUG_ENTER("mysql_create_table"); /* Check for duplicate fields and check type of table to create */ if (!alter_info->create_list.elements) { - my_error(ER_TABLE_MUST_HAVE_COLUMNS,MYF(0)); - DBUG_RETURN(-1); - } - if ((new_db_type= ha_checktype(create_info->db_type)) != - create_info->db_type) - { - create_info->db_type= new_db_type; - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_USING_OTHER_HANDLER, - ER(ER_WARN_USING_OTHER_HANDLER), - ha_get_storage_engine(new_db_type), - table_name); + my_message(ER_TABLE_MUST_HAVE_COLUMNS, ER(ER_TABLE_MUST_HAVE_COLUMNS), + MYF(0)); + DBUG_RETURN(TRUE); } - db_options=create_info->table_options; + if (check_engine(thd, table_name, &create_info->db_type)) + DBUG_RETURN(TRUE); + db_options= create_info->table_options; if (create_info->row_type == ROW_TYPE_DYNAMIC) db_options|=HA_OPTION_PACK_RECORD; alias= table_case_name(create_info, table_name); - file=get_new_handler((TABLE*) 0, create_info->db_type); + file= get_new_handler((TABLE*) 0, thd->mem_root, create_info->db_type); #ifdef NOT_USED /* @@ -1408,8 +1632,8 @@ int mysql_create_table(THD *thd,const char *db, 
const char *table_name, if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) && (file->table_flags() & HA_NO_TEMP_TABLES)) { - my_error(ER_ILLEGAL_HA,MYF(0),table_name); - DBUG_RETURN(-1); + my_error(ER_ILLEGAL_HA, MYF(0), table_name); + DBUG_RETURN(TRUE); } #endif @@ -1421,18 +1645,17 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, if (!create_info->default_table_charset) { HA_CREATE_INFO db_info; - char path[FN_REFLEN]; - /* Abuse build_table_path() to build the path to the db.opt file */ - build_table_path(path, sizeof(path), db, MY_DB_OPT_FILE, ""); - load_db_opt(thd, path, &db_info); + + load_db_opt_by_name(thd, db, &db_info); + create_info->default_table_charset= db_info.default_table_charset; } - if (mysql_prepare_table(thd, create_info, alter_info, tmp_table, - db_options, file, - key_info_buffer, &key_count, + if (mysql_prepare_table(thd, create_info, alter_info, internal_tmp_table, + &db_options, file, + &key_info_buffer, &key_count, select_field_count)) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); /* Check if table exists */ if (create_info->options & HA_LEX_CREATE_TMP_TABLE) @@ -1444,8 +1667,23 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, my_casedn_str(files_charset_info, path); create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE; } - else + else + { + #ifdef FN_DEVCHAR + /* check if the table name contains FN_DEVCHAR when defined */ + const char *start= alias; + while (*start != '\0') + { + if (*start == FN_DEVCHAR) + { + my_error(ER_WRONG_TABLE_NAME, MYF(0), alias); + DBUG_RETURN(TRUE); + } + start++; + } + #endif build_table_path(path, sizeof(path), db, alias, reg_ext); + } /* Check if table already exists */ if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) @@ -1457,15 +1695,13 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR), alias); - DBUG_RETURN(0); + 
DBUG_RETURN(FALSE); } my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } - if (wait_if_global_read_lock(thd, 0, 1)) - DBUG_RETURN(error); VOID(pthread_mutex_lock(&LOCK_open)); - if (!tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE)) + if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE)) { if (!access(path,F_OK)) { @@ -1521,35 +1757,26 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, } thd->tmp_table_used= 1; } - if (!tmp_table) + if (!internal_tmp_table && mysql_bin_log.is_open()) { - // Must be written before unlock - mysql_update_log.write(thd,thd->query, thd->query_length); - if (mysql_bin_log.is_open()) - { - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, - test(create_info->options & - HA_LEX_CREATE_TMP_TABLE), - FALSE); - mysql_bin_log.write(&qinfo); - } + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE); + mysql_bin_log.write(&qinfo); } - error=0; - goto end; + error= FALSE; + +end: + VOID(pthread_mutex_unlock(&LOCK_open)); + thd->proc_info="After create"; + DBUG_RETURN(error); warn: - error= 0; + error= FALSE; push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR), alias); create_info->table_existed= 1; // Mark that table existed - -end: - VOID(pthread_mutex_unlock(&LOCK_open)); - start_waiting_global_read_lock(thd); - thd->proc_info="After create"; - DBUG_RETURN(error); + goto end; } /* @@ -1592,83 +1819,6 @@ make_unique_key_name(const char *field_name,KEY *start,KEY *end) /**************************************************************************** -** Create table from a list of fields and items -****************************************************************************/ - -TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, - const char *db, const char *name, - Alter_info *alter_info, - List<Item> *items, - 
MYSQL_LOCK **lock) -{ - TABLE tmp_table; // Used during 'create_field()' - TABLE *table= 0; - tmp_table.table_name=0; - uint select_field_count= items->elements; - DBUG_ENTER("create_table_from_items"); - - /* Add selected items to field list */ - List_iterator_fast<Item> it(*items); - Item *item; - Field *tmp_field; - tmp_table.db_create_options=0; - tmp_table.null_row=tmp_table.maybe_null=0; - tmp_table.blob_ptr_size=portable_sizeof_char_ptr; - tmp_table.db_low_byte_first= test(create_info->db_type == DB_TYPE_MYISAM || - create_info->db_type == DB_TYPE_HEAP); - - while ((item=it++)) - { - create_field *cr_field; - Field *field; - if (item->type() == Item::FUNC_ITEM) - field=item->tmp_table_field(&tmp_table); - else - field=create_tmp_field(thd, &tmp_table, item, item->type(), - (Item ***) 0, &tmp_field, 0, 0, 0, 0); - if (!field || - !(cr_field=new create_field(field,(item->type() == Item::FIELD_ITEM ? - ((Item_field *)item)->field : - (Field*) 0)))) - DBUG_RETURN(0); - alter_info->create_list.push_back(cr_field); - } - /* create and lock table */ - /* QQ: create and open should be done atomic ! */ - /* - We don't log the statement, it will be logged later. - If this is a HEAP table, the automatic DELETE FROM which is written to the - binlog when a HEAP table is opened for the first time since startup, must - not be written: 1) it would be wrong (imagine we're in CREATE SELECT: we - don't want to delete from it) 2) it would be written before the CREATE - TABLE, which is a wrong order. So we keep binary logging disabled when we - open_table(). - */ - tmp_disable_binlog(thd); - if (!mysql_create_table(thd, db, name, create_info, alter_info, - 0, select_field_count)) - { - if (!(table=open_table(thd,db,name,name,(bool*) 0))) - quick_rm_table(create_info->db_type,db,table_case_name(create_info,name)); - } - reenable_binlog(thd); - if (!table) - DBUG_RETURN(0); - table->reginfo.lock_type=TL_WRITE; - if (! 
((*lock)= mysql_lock_tables(thd, &table, 1, MYSQL_LOCK_IGNORE_FLUSH))) - { - VOID(pthread_mutex_lock(&LOCK_open)); - hash_delete(&open_cache,(byte*) table); - VOID(pthread_mutex_unlock(&LOCK_open)); - quick_rm_table(create_info->db_type,db,table_case_name(create_info, name)); - DBUG_RETURN(0); - } - table->file->extra(HA_EXTRA_WRITE_CACHE); - DBUG_RETURN(table); -} - - -/**************************************************************************** ** Alter a table definition ****************************************************************************/ @@ -1679,10 +1829,12 @@ mysql_rename_table(enum db_type base, const char *new_db, const char *new_name) { + THD *thd= current_thd; char from[FN_REFLEN], to[FN_REFLEN], lc_from[FN_REFLEN], lc_to[FN_REFLEN]; char *from_base= from, *to_base= to; char tmp_name[NAME_LEN+1]; - handler *file=get_new_handler((TABLE*) 0, base); + handler *file= (base == DB_TYPE_UNKNOWN ? 0 : + get_new_handler((TABLE*) 0, thd->mem_root, base)); int error=0; DBUG_ENTER("mysql_rename_table"); @@ -1694,7 +1846,8 @@ mysql_rename_table(enum db_type base, file system) and the storage is not HA_FILE_BASED, we need to provide a lowercase file name, but we leave the .frm in mixed case. 
*/ - if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED)) + if (lower_case_table_names == 2 && file && + !(file->table_flags() & HA_FILE_BASED)) { strmov(tmp_name, old_name); my_casedn_str(files_charset_info, tmp_name); @@ -1707,13 +1860,14 @@ mysql_rename_table(enum db_type base, to_base= lc_to; } - if (!(error=file->rename_table(from_base, to_base))) + if (!file || !(error=file->rename_table(from_base, to_base))) { if (rename_file_ext(from,to,reg_ext)) { error=my_errno; /* Restore old file name */ - file->rename_table(to_base, from_base); + if (file) + file->rename_table(to_base, from_base); } } delete file; @@ -1746,7 +1900,7 @@ mysql_rename_table(enum db_type base, static void wait_while_table_is_used(THD *thd,TABLE *table, enum ha_extra_function function) { - DBUG_PRINT("enter",("table: %s", table->real_name)); + DBUG_PRINT("enter",("table: %s", table->s->table_name)); DBUG_ENTER("wait_while_table_is_used"); safe_mutex_assert_owner(&LOCK_open); @@ -1755,8 +1909,8 @@ static void wait_while_table_is_used(THD *thd,TABLE *table, mysql_lock_abort(thd, table); // end threads waiting on lock /* Wait until all there are no other threads that has this table open */ - remove_table_from_cache(thd,table->table_cache_key, - table->real_name, RTFC_WAIT_OTHER_THREAD_FLAG); + remove_table_from_cache(thd, table->s->db, + table->s->table_name, RTFC_WAIT_OTHER_THREAD_FLAG); DBUG_VOID_RETURN; } @@ -1777,7 +1931,7 @@ static void wait_while_table_is_used(THD *thd,TABLE *table, Win32 clients must also have a WRITE LOCK on the table ! 
*/ -static bool close_cached_table(THD *thd, TABLE *table) +void close_cached_table(THD *thd, TABLE *table) { DBUG_ENTER("close_cached_table"); @@ -1792,8 +1946,8 @@ static bool close_cached_table(THD *thd, TABLE *table) thd->open_tables=unlink_open_table(thd,thd->open_tables,table); /* When lock on LOCK_open is freed other threads can continue */ - pthread_cond_broadcast(&COND_refresh); - DBUG_RETURN(0); + broadcast_refresh(); + DBUG_VOID_RETURN; } static int send_check_errmsg(THD *thd, TABLE_LIST* table, @@ -1804,9 +1958,9 @@ static int send_check_errmsg(THD *thd, TABLE_LIST* table, protocol->prepare_for_resend(); protocol->store(table->alias, system_charset_info); protocol->store((char*) operator_name, system_charset_info); - protocol->store("error", 5, system_charset_info); + protocol->store(STRING_WITH_LEN("error"), system_charset_info); protocol->store(errmsg, system_charset_info); - thd->net.last_error[0]=0; + thd->clear_error(); if (protocol->write()) return -1; return 1; @@ -1828,8 +1982,8 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table, { char* backup_dir= thd->lex->backup_dir; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; - char* table_name = table->real_name; - char* db = thd->db ? 
thd->db : table->db; + char* table_name= table->table_name; + char* db= table->db; if (fn_format_relative_to_data_home(src_path, table_name, backup_dir, reg_ext)) @@ -1865,12 +2019,15 @@ static int prepare_for_restore(THD* thd, TABLE_LIST* table, Now we should be able to open the partially restored table to finish the restore in the handler later on */ - if (!(table->table = reopen_name_locked_table(thd, table))) + pthread_mutex_lock(&LOCK_open); + if (reopen_name_locked_table(thd, table)) { - pthread_mutex_lock(&LOCK_open); unlock_table_name(thd, table); pthread_mutex_unlock(&LOCK_open); + DBUG_RETURN(send_check_errmsg(thd, table, "restore", + "Failed to open partially restored table")); } + pthread_mutex_unlock(&LOCK_open); DBUG_RETURN(0); } @@ -1889,8 +2046,8 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list, { char name[FN_REFLEN]; build_table_path(name, sizeof(name), table_list->db, - table_list->real_name, ""); - if (openfrm(name, "", 0, 0, 0, &tmp_table)) + table_list->table_name, ""); + if (openfrm(thd, name, "", 0, 0, 0, &tmp_table)) DBUG_RETURN(0); // Can't open frm file table= &tmp_table; } @@ -1916,7 +2073,7 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list, if (!ext[0] || !ext[1]) goto end; // No data file - strxmov(from, table->path, ext[1], NullS); // Name of data file + strxmov(from, table->s->path, ext[1], NullS); // Name of data file if (!my_stat(from, &stat_info, MYF(0))) goto end; // Can't use USE_FRM flag @@ -1967,12 +2124,16 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list, Now we should be able to open the partially repaired table to finish the repair in the handler later on. 
*/ - if (!(table_list->table = reopen_name_locked_table(thd, table_list))) + pthread_mutex_lock(&LOCK_open); + if (reopen_name_locked_table(thd, table_list)) { - pthread_mutex_lock(&LOCK_open); unlock_table_name(thd, table_list); pthread_mutex_unlock(&LOCK_open); + error= send_check_errmsg(thd, table_list, "repair", + "Failed to open partially repaired table"); + goto end; } + pthread_mutex_unlock(&LOCK_open); end: if (table == &tmp_table) @@ -1981,27 +2142,33 @@ end: } + /* RETURN VALUES - 0 Message sent to net (admin operation went ok) - -1 Message should be sent by caller - (admin operation or network communication failed) + FALSE Message sent to net (admin operation went ok) + TRUE Message should be sent by caller + (admin operation or network communication failed) */ -static int mysql_admin_table(THD* thd, TABLE_LIST* tables, - HA_CHECK_OPT* check_opt, - const char *operator_name, - thr_lock_type lock_type, - bool open_for_modify, - uint extra_open_options, - int (*prepare_func)(THD *, TABLE_LIST *, - HA_CHECK_OPT *), - int (handler::*operator_func) - (THD *, HA_CHECK_OPT *)) +static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, + HA_CHECK_OPT* check_opt, + const char *operator_name, + thr_lock_type lock_type, + bool open_for_modify, + bool no_warnings_for_error, + uint extra_open_options, + int (*prepare_func)(THD *, TABLE_LIST *, + HA_CHECK_OPT *), + int (handler::*operator_func)(THD *, + HA_CHECK_OPT *), + int (view_operator_func)(THD *, TABLE_LIST*)) { TABLE_LIST *table; + SELECT_LEX *select= &thd->lex->select_lex; List<Item> field_list; Item *item; Protocol *protocol= thd->protocol; + LEX *lex= thd->lex; + int result_code; DBUG_ENTER("mysql_admin_table"); field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2)); @@ -2012,24 +2179,47 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, item->maybe_null = 1; field_list.push_back(item = new Item_empty_string("Msg_text", 255)); item->maybe_null = 1; - if 
(protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); mysql_ha_flush(thd, tables, MYSQL_HA_CLOSE_FINAL, FALSE); - for (table = tables; table; table = table->next) + for (table= tables; table; table= table->next_local) { char table_name[NAME_LEN*2+2]; char* db = table->db; bool fatal_error=0; - strxmov(table_name, db, ".", table->real_name, NullS); + strxmov(table_name, db, ".", table->table_name, NullS); thd->open_options|= extra_open_options; - table->table = open_ltable(thd, table, lock_type); -#ifdef EMBEDDED_LIBRARY - thd->net.last_errno= 0; // these errors shouldn't get client -#endif - thd->open_options&= ~extra_open_options; + table->lock_type= lock_type; + /* open only one table from local list of command */ + { + TABLE_LIST *save_next_global, *save_next_local; + save_next_global= table->next_global; + table->next_global= 0; + save_next_local= table->next_local; + table->next_local= 0; + select->table_list.first= (byte*)table; + /* + Time zone tables and SP tables can be add to lex->query_tables list, + so it have to be prepared. 
+ TODO: Investigate if we can put extra tables into argument instead of + using lex->query_tables + */ + lex->query_tables= table; + lex->query_tables_last= &table->next_global; + lex->query_tables_own_last= 0; + thd->no_warnings_for_error= no_warnings_for_error; + if (view_operator_func == NULL) + table->required_type=FRMTYPE_TABLE; + open_and_lock_tables(thd, table); + thd->no_warnings_for_error= 0; + table->next_global= save_next_global; + table->next_local= save_next_local; + thd->open_options&= ~extra_open_options; + } if (prepare_func) { switch ((*prepare_func)(thd, table, check_opt)) { @@ -2043,31 +2233,62 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, } } + /* + CHECK TABLE command is only command where VIEW allowed here and this + command use only temporary teble method for VIEWs resolving => there + can't be VIEW tree substitition of join view => if opening table + succeed then table->table will have real TABLE pointer as value (in + case of join view substitution table->table can be 0, but here it is + impossible) + */ if (!table->table) { + char buf[ERRMSGSIZE+ERRMSGSIZE+2]; const char *err_msg; protocol->prepare_for_resend(); protocol->store(table_name, system_charset_info); protocol->store(operator_name, system_charset_info); - protocol->store("error",5, system_charset_info); + protocol->store(STRING_WITH_LEN("error"), system_charset_info); if (!(err_msg=thd->net.last_error)) err_msg=ER(ER_CHECK_NO_SUCH_TABLE); + /* if it was a view will check md5 sum */ + if (table->view && + view_checksum(thd, table) == HA_ADMIN_WRONG_CHECKSUM) + { + strxmov(buf, err_msg, "; ", ER(ER_VIEW_CHECKSUM), NullS); + err_msg= (const char *)buf; + } protocol->store(err_msg, system_charset_info); - thd->net.last_error[0]=0; + lex->cleanup_after_one_table_open(); + thd->clear_error(); + /* + View opening can be interrupted in the middle of process so some + tables can be left opening + */ + close_thread_tables(thd); + lex->reset_query_tables_list(FALSE); if 
(protocol->write()) goto err; continue; } - table->table->pos_in_table_list= table; + + if (table->view) + { + result_code= (*view_operator_func)(thd, table); + goto send_result; + } + if ((table->table->db_stat & HA_READ_ONLY) && open_for_modify) { char buff[FN_REFLEN + MYSQL_ERRMSG_SIZE]; + uint length; protocol->prepare_for_resend(); protocol->store(table_name, system_charset_info); protocol->store(operator_name, system_charset_info); - protocol->store("error", 5, system_charset_info); - my_snprintf(buff, sizeof(buff), ER(ER_OPEN_AS_READONLY), table_name); - protocol->store(buff, system_charset_info); + protocol->store(STRING_WITH_LEN("error"), system_charset_info); + length= my_snprintf(buff, sizeof(buff), ER(ER_OPEN_AS_READONLY), + table_name); + protocol->store(buff, length, system_charset_info); close_thread_tables(thd); table->table=0; // For query cache if (protocol->write()) @@ -2076,14 +2297,14 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, } /* Close all instances of the table to allow repair to rename files */ - if (lock_type == TL_WRITE && table->table->version) + if (lock_type == TL_WRITE && table->table->s->version) { pthread_mutex_lock(&LOCK_open); const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open, "Waiting to get writelock"); mysql_lock_abort(thd,table->table); - remove_table_from_cache(thd, table->table->table_cache_key, - table->table->real_name, + remove_table_from_cache(thd, table->table->s->db, + table->table->s->table_name, RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG); thd->exit_cond(old_message); @@ -2094,10 +2315,42 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, open_for_modify= 0; } - int result_code = (table->table->file->*operator_func)(thd, check_opt); -#ifdef EMBEDDED_LIBRARY - thd->net.last_errno= 0; // these errors shouldn't get client -#endif + if (table->table->s->crashed && operator_func == &handler::ha_check) + { + protocol->prepare_for_resend(); + 
protocol->store(table_name, system_charset_info); + protocol->store(operator_name, system_charset_info); + protocol->store(STRING_WITH_LEN("warning"), system_charset_info); + protocol->store(STRING_WITH_LEN("Table is marked as crashed"), + system_charset_info); + if (protocol->write()) + goto err; + } + + if (operator_func == &handler::ha_repair) + { + if ((table->table->file->check_old_types() == HA_ADMIN_NEEDS_ALTER) || + (table->table->file->ha_check_for_upgrade(check_opt) == + HA_ADMIN_NEEDS_ALTER)) + { + my_bool save_no_send_ok= thd->net.no_send_ok; + close_thread_tables(thd); + tmp_disable_binlog(thd); // binlogging is done by caller if wanted + thd->net.no_send_ok= TRUE; + result_code= mysql_recreate_table(thd, table); + thd->net.no_send_ok= save_no_send_ok; + reenable_binlog(thd); + goto send_result; + } + + } + + result_code = (table->table->file->*operator_func)(thd, check_opt); + +send_result: + + lex->cleanup_after_one_table_open(); + thd->clear_error(); // these errors shouldn't get client protocol->prepare_for_resend(); protocol->store(table_name, system_charset_info); protocol->store(operator_name, system_charset_info); @@ -2111,41 +2364,55 @@ send_result_message: char buf[ERRMSGSIZE+20]; uint length=my_snprintf(buf, ERRMSGSIZE, ER(ER_CHECK_NOT_IMPLEMENTED), operator_name); - protocol->store("note", 4, system_charset_info); + protocol->store(STRING_WITH_LEN("note"), system_charset_info); protocol->store(buf, length, system_charset_info); } break; + case HA_ADMIN_NOT_BASE_TABLE: + { + char buf[ERRMSGSIZE+20]; + uint length= my_snprintf(buf, ERRMSGSIZE, + ER(ER_BAD_TABLE_ERROR), table_name); + protocol->store(STRING_WITH_LEN("note"), system_charset_info); + protocol->store(buf, length, system_charset_info); + } + break; + case HA_ADMIN_OK: - protocol->store("status", 6, system_charset_info); - protocol->store("OK",2, system_charset_info); + protocol->store(STRING_WITH_LEN("status"), system_charset_info); + protocol->store(STRING_WITH_LEN("OK"), 
system_charset_info); break; case HA_ADMIN_FAILED: - protocol->store("status", 6, system_charset_info); - protocol->store("Operation failed",16, system_charset_info); + protocol->store(STRING_WITH_LEN("status"), system_charset_info); + protocol->store(STRING_WITH_LEN("Operation failed"), + system_charset_info); break; case HA_ADMIN_REJECT: - protocol->store("status", 6, system_charset_info); - protocol->store("Operation need committed state",30, system_charset_info); + protocol->store(STRING_WITH_LEN("status"), system_charset_info); + protocol->store(STRING_WITH_LEN("Operation need committed state"), + system_charset_info); open_for_modify= FALSE; break; case HA_ADMIN_ALREADY_DONE: - protocol->store("status", 6, system_charset_info); - protocol->store("Table is already up to date", 27, system_charset_info); + protocol->store(STRING_WITH_LEN("status"), system_charset_info); + protocol->store(STRING_WITH_LEN("Table is already up to date"), + system_charset_info); break; case HA_ADMIN_CORRUPT: - protocol->store("error", 5, system_charset_info); - protocol->store("Corrupt", 7, system_charset_info); + protocol->store(STRING_WITH_LEN("error"), system_charset_info); + protocol->store(STRING_WITH_LEN("Corrupt"), system_charset_info); fatal_error=1; break; case HA_ADMIN_INVALID: - protocol->store("error", 5, system_charset_info); - protocol->store("Invalid argument",16, system_charset_info); + protocol->store(STRING_WITH_LEN("error"), system_charset_info); + protocol->store(STRING_WITH_LEN("Invalid argument"), + system_charset_info); break; case HA_ADMIN_TRY_ALTER: @@ -2157,8 +2424,9 @@ send_result_message: reopen the table and do ha_innobase::analyze() on it. 
*/ close_thread_tables(thd); - TABLE_LIST *save_next= table->next; - table->next= 0; + TABLE_LIST *save_next_local= table->next_local, + *save_next_global= table->next_global; + table->next_local= table->next_global= 0; tmp_disable_binlog(thd); // binlogging is done by caller if wanted thd->net.no_send_ok= TRUE; result_code= mysql_recreate_table(thd, table); @@ -2183,7 +2451,7 @@ send_result_message: else { /* Hijack the row already in-progress. */ - protocol->store("error", 5, system_charset_info); + protocol->store(STRING_WITH_LEN("error"), system_charset_info); protocol->store(err_msg, system_charset_info); (void)protocol->write(); /* Start off another row for HA_ADMIN_FAILED */ @@ -2194,84 +2462,116 @@ send_result_message: } } result_code= result_code ? HA_ADMIN_FAILED : HA_ADMIN_OK; - table->next= save_next; + table->next_local= save_next_local; + table->next_global= save_next_global; goto send_result_message; } + case HA_ADMIN_WRONG_CHECKSUM: + { + protocol->store(STRING_WITH_LEN("note"), system_charset_info); + protocol->store(ER(ER_VIEW_CHECKSUM), strlen(ER(ER_VIEW_CHECKSUM)), + system_charset_info); + break; + } - default: // Probably HA_ADMIN_INTERNAL_ERROR - protocol->store("error", 5, system_charset_info); - protocol->store("Unknown - internal error during operation", 41 - , system_charset_info); + case HA_ADMIN_NEEDS_UPGRADE: + case HA_ADMIN_NEEDS_ALTER: + { + char buf[ERRMSGSIZE]; + uint length; + + protocol->store(STRING_WITH_LEN("error"), system_charset_info); + length=my_snprintf(buf, ERRMSGSIZE, ER(ER_TABLE_NEEDS_UPGRADE), table->table_name); + protocol->store(buf, length, system_charset_info); fatal_error=1; break; } - if (fatal_error) - table->table->version=0; // Force close of table - else if (open_for_modify) + + default: // Probably HA_ADMIN_INTERNAL_ERROR + { + char buf[ERRMSGSIZE+20]; + uint length=my_snprintf(buf, ERRMSGSIZE, + "Unknown - internal error %d during operation", + result_code); + protocol->store(STRING_WITH_LEN("error"), 
system_charset_info); + protocol->store(buf, length, system_charset_info); + fatal_error=1; + break; + } + } + if (table->table) { - if (table->table->tmp_table) - table->table->file->info(HA_STATUS_CONST); - else + if (fatal_error) + table->table->s->version=0; // Force close of table + else if (open_for_modify) { - pthread_mutex_lock(&LOCK_open); - remove_table_from_cache(thd, table->table->table_cache_key, - table->table->real_name, RTFC_NO_FLAG); - pthread_mutex_unlock(&LOCK_open); + if (table->table->s->tmp_table) + table->table->file->info(HA_STATUS_CONST); + else + { + pthread_mutex_lock(&LOCK_open); + remove_table_from_cache(thd, table->table->s->db, + table->table->s->table_name, RTFC_NO_FLAG); + pthread_mutex_unlock(&LOCK_open); + } + /* May be something modified consequently we have to invalidate cache */ + query_cache_invalidate3(thd, table->table, 0); } - /* May be something modified consequently we have to invalidate cache */ - query_cache_invalidate3(thd, table->table, 0); } close_thread_tables(thd); + lex->reset_query_tables_list(FALSE); table->table=0; // For query cache if (protocol->write()) goto err; } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: close_thread_tables(thd); // Shouldn't be needed if (table) table->table=0; - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } -int mysql_backup_table(THD* thd, TABLE_LIST* table_list) +bool mysql_backup_table(THD* thd, TABLE_LIST* table_list) { DBUG_ENTER("mysql_backup_table"); DBUG_RETURN(mysql_admin_table(thd, table_list, 0, - "backup", TL_READ, 0, 0, 0, - &handler::backup)); + "backup", TL_READ, 0, 0, 0, 0, + &handler::backup, 0)); } -int mysql_restore_table(THD* thd, TABLE_LIST* table_list) +bool mysql_restore_table(THD* thd, TABLE_LIST* table_list) { DBUG_ENTER("mysql_restore_table"); DBUG_RETURN(mysql_admin_table(thd, table_list, 0, - "restore", TL_WRITE, 1, 0, + "restore", TL_WRITE, 1, 1, 0, &prepare_for_restore, - &handler::restore)); + &handler::restore, 0)); } -int 
mysql_repair_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) +bool mysql_repair_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) { DBUG_ENTER("mysql_repair_table"); DBUG_RETURN(mysql_admin_table(thd, tables, check_opt, - "repair", TL_WRITE, 1, HA_OPEN_FOR_REPAIR, + "repair", TL_WRITE, 1, + test(check_opt->sql_flags & TT_USEFRM), + HA_OPEN_FOR_REPAIR, &prepare_for_repair, - &handler::repair)); + &handler::ha_repair, 0)); } -int mysql_optimize_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) +bool mysql_optimize_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) { DBUG_ENTER("mysql_optimize_table"); DBUG_RETURN(mysql_admin_table(thd, tables, check_opt, - "optimize", TL_WRITE, 1,0,0, - &handler::optimize)); + "optimize", TL_WRITE, 1,0,0,0, + &handler::optimize, 0)); } @@ -2284,11 +2584,11 @@ int mysql_optimize_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) tables Table list (one table only) RETURN VALUES - 0 ok - -1 error + FALSE ok + TRUE error */ -int mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables, +bool mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables, LEX_STRING *key_cache_name) { HA_CHECK_OPT check_opt; @@ -2301,13 +2601,13 @@ int mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables, { pthread_mutex_unlock(&LOCK_global_system_variables); my_error(ER_UNKNOWN_KEY_CACHE, MYF(0), key_cache_name->str); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } pthread_mutex_unlock(&LOCK_global_system_variables); check_opt.key_cache= key_cache; DBUG_RETURN(mysql_admin_table(thd, tables, &check_opt, - "assign_to_keycache", TL_READ_NO_INSERT, 0, - 0, 0, &handler::assign_to_keycache)); + "assign_to_keycache", TL_READ_NO_INSERT, 0, 0, + 0, 0, &handler::assign_to_keycache, 0)); } @@ -2359,16 +2659,16 @@ int reassign_keycache_tables(THD *thd, KEY_CACHE *src_cache, tables Table list (one table only) RETURN VALUES - 0 ok - -1 error + FALSE ok + TRUE error */ -int mysql_preload_keys(THD* thd, TABLE_LIST* tables) +bool 
mysql_preload_keys(THD* thd, TABLE_LIST* tables) { DBUG_ENTER("mysql_preload_keys"); DBUG_RETURN(mysql_admin_table(thd, tables, 0, - "preload_keys", TL_READ, 0, 0, 0, - &handler::preload_keys)); + "preload_keys", TL_READ, 0, 0, 0, 0, + &handler::preload_keys, 0)); } @@ -2383,24 +2683,28 @@ int mysql_preload_keys(THD* thd, TABLE_LIST* tables) table_ident Src table_ident RETURN VALUES - 0 ok - -1 error + FALSE OK + TRUE error */ -int mysql_create_like_table(THD* thd, TABLE_LIST* table, - HA_CREATE_INFO *create_info, - Table_ident *table_ident) +bool mysql_create_like_table(THD* thd, TABLE_LIST* table, + HA_CREATE_INFO *create_info, + Table_ident *table_ident) { TABLE **tmp_table; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; char *db= table->db; - char *table_name= table->real_name; + char *table_name= table->table_name; char *src_db; char *src_table= table_ident->table.str; - int err, res= -1; + int err; + bool res= TRUE; + db_type not_used; + TABLE_LIST src_tables_list; DBUG_ENTER("mysql_create_like_table"); - src_db= table_ident->db.str ? 
table_ident->db.str : thd->db; + DBUG_ASSERT(table_ident->db.str); /* Must be set in the parser */ + src_db= table_ident->db.str; /* Validate the source table @@ -2410,7 +2714,7 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, check_table_name(src_table,table_ident->table.length))) { my_error(ER_WRONG_TABLE_NAME, MYF(0), src_table); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } if (!src_db || check_db_name(src_db)) { @@ -2418,15 +2722,15 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, DBUG_RETURN(-1); } + bzero((gptr)&src_tables_list, sizeof(src_tables_list)); src_tables_list.db= src_db; - src_tables_list.real_name= src_table; - src_tables_list.next= 0; + src_tables_list.table_name= src_table; if (lock_and_wait_for_table_name(thd, &src_tables_list)) goto err; if ((tmp_table= find_temporary_table(thd, src_db, src_table))) - strxmov(src_path, (*tmp_table)->path, reg_ext, NullS); + strxmov(src_path, (*tmp_table)->s->path, reg_ext, NullS); else { strxmov(src_path, mysql_data_home, "/", src_db, "/", src_table, @@ -2442,6 +2746,15 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, } } + /* + create like should be not allowed for Views, Triggers, ... 
+ */ + if (mysql_frm_type(thd, src_path, ¬_used) != FRMTYPE_TABLE) + { + my_error(ER_WRONG_OBJECT, MYF(0), src_db, src_table, "BASE TABLE"); + goto err; + } + /* Validate the destination table @@ -2505,17 +2818,13 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, } // Must be written before unlock - mysql_update_log.write(thd,thd->query, thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, - test(create_info->options & - HA_LEX_CREATE_TMP_TABLE), - FALSE); + Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE); mysql_bin_log.write(&qinfo); } - res= 0; + res= FALSE; goto err; table_exists: @@ -2524,9 +2833,9 @@ table_exists: char warn_buff[MYSQL_ERRMSG_SIZE]; my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TABLE_EXISTS_ERROR), table_name); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_TABLE_EXISTS_ERROR,warn_buff); - res= 0; + res= FALSE; } else my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name); @@ -2539,7 +2848,7 @@ err: } -int mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) +bool mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) { #ifdef OS2 thr_lock_type lock_type = TL_WRITE; @@ -2549,12 +2858,12 @@ int mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) DBUG_ENTER("mysql_analyze_table"); DBUG_RETURN(mysql_admin_table(thd, tables, check_opt, - "analyze", lock_type, 1,0,0, - &handler::analyze)); + "analyze", lock_type, 1, 0, 0, 0, + &handler::analyze, 0)); } -int mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt) +bool mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt) { #ifdef OS2 thr_lock_type lock_type = TL_WRITE; @@ -2565,8 +2874,8 @@ int mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt) DBUG_ENTER("mysql_check_table"); DBUG_RETURN(mysql_admin_table(thd, 
tables, check_opt, "check", lock_type, - 0, HA_OPEN_FOR_REPAIR, 0, - &handler::check)); + 0, HA_OPEN_FOR_REPAIR, 0, 0, + &handler::ha_check, &view_checksum)); } @@ -2620,30 +2929,27 @@ mysql_discard_or_import_tablespace(THD *thd, error=1; if (error) goto err; - mysql_update_log.write(thd, thd->query,thd->query_length); if (mysql_bin_log.is_open()) { - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE); mysql_bin_log.write(&qinfo); } err: close_thread_tables(thd); thd->tablespace_op=FALSE; + if (error == 0) { send_ok(thd); DBUG_RETURN(0); } - if (error == HA_ERR_ROW_IS_REFERENCED) - my_error(ER_ROW_IS_REFERENCED, MYF(0)); - + table->file->print_error(error, MYF(0)); + DBUG_RETURN(-1); } - - /* Manages enabling/disabling of indexes for ALTER TABLE @@ -2683,7 +2989,7 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled, if (error == HA_ERR_WRONG_COMMAND) { push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE, - ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), table->table_name); + ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), table->s->table_name); error= 0; } else if (error) table->file->print_error(error, MYF(0)); @@ -2703,26 +3009,28 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled, statements/stored routines. 
*/ -int mysql_alter_table(THD *thd,char *new_db, char *new_name, - HA_CREATE_INFO *create_info, - TABLE_LIST *table_list, - Alter_info *alter_info, - uint order_num, ORDER *order, - enum enum_duplicates handle_duplicates, bool ignore) +bool mysql_alter_table(THD *thd,char *new_db, char *new_name, + HA_CREATE_INFO *create_info, + TABLE_LIST *table_list, + Alter_info *alter_info, + uint order_num, ORDER *order, bool ignore) { - TABLE *table,*new_table; - int error; + TABLE *table,*new_table=0; + int error= 0; char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN]; char new_alias_buff[FN_REFLEN], *table_name, *db, *new_alias, *alias; char index_file[FN_REFLEN], data_file[FN_REFLEN]; ha_rows copied,deleted; ulonglong next_insert_id; uint db_create_options, used_fields; - enum db_type old_db_type,new_db_type; + enum db_type old_db_type, new_db_type, table_type; + bool need_copy_table; + bool no_table_reopen= FALSE, varchar= FALSE; + frm_type_enum frm_type; DBUG_ENTER("mysql_alter_table"); thd->proc_info="init"; - table_name=table_list->real_name; + table_name=table_list->table_name; alias= (lower_case_table_names == 2) ? table_list->alias : table_name; db=table_list->db; @@ -2734,10 +3042,58 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, /* DISCARD/IMPORT TABLESPACE is always alone in an ALTER TABLE */ if (alter_info->tablespace_op != NO_TABLESPACE_OP) + /* Conditionally writes to binlog. 
*/ DBUG_RETURN(mysql_discard_or_import_tablespace(thd,table_list, alter_info->tablespace_op)); + sprintf(new_name_buff,"%s/%s/%s%s",mysql_data_home, db, table_name, reg_ext); + unpack_filename(new_name_buff, new_name_buff); + if (lower_case_table_names != 2) + my_casedn_str(files_charset_info, new_name_buff); + frm_type= mysql_frm_type(thd, new_name_buff, &table_type); + /* Rename a view */ + if (frm_type == FRMTYPE_VIEW && !(alter_info->flags & ~ALTER_RENAME)) + { + /* + Avoid problems with a rename on a table that we have locked or + if the user is trying to to do this in a transcation context + */ + + if (thd->locked_tables || thd->active_transaction()) + { + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); + DBUG_RETURN(1); + } + + if (wait_if_global_read_lock(thd,0,1)) + DBUG_RETURN(1); + VOID(pthread_mutex_lock(&LOCK_open)); + if (lock_table_names(thd, table_list)) + { + error= 1; + goto view_err; + } + + if (!do_rename(thd, table_list, new_db, new_name, new_name, 1)) + { + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + send_ok(thd); + } + + unlock_table_names(thd, table_list, (TABLE_LIST*) 0); + +view_err: + pthread_mutex_unlock(&LOCK_open); + start_waiting_global_read_lock(thd); + DBUG_RETURN(error); + } if (!(table=open_ltable(thd,table_list,TL_WRITE_ALLOW_READ))) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); /* Check that we are not trying to rename to an existing table */ if (new_name) @@ -2764,12 +3120,12 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } else { - if (table->tmp_table) + if (table->s->tmp_table) { if (find_temporary_table(thd,new_db,new_name_buff)) { - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),new_name_buff); - DBUG_RETURN(-1); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name_buff); + DBUG_RETURN(TRUE); } } else @@ -2780,8 +3136,8 @@ int mysql_alter_table(THD *thd,char *new_db, 
char *new_name, F_OK)) { /* Table will be closed in do_command() */ - my_error(ER_TABLE_EXISTS_ERROR,MYF(0), new_alias); - DBUG_RETURN(-1); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias); + DBUG_RETURN(TRUE); } } } @@ -2792,30 +3148,41 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, new_name= table_name; } - old_db_type=table->db_type; + old_db_type= table->s->db_type; if (create_info->db_type == DB_TYPE_DEFAULT) - create_info->db_type=old_db_type; - if ((new_db_type= ha_checktype(create_info->db_type)) != - create_info->db_type) - { - create_info->db_type= new_db_type; - push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - ER_WARN_USING_OTHER_HANDLER, - ER(ER_WARN_USING_OTHER_HANDLER), - ha_get_storage_engine(new_db_type), - new_name); - } + create_info->db_type= old_db_type; + if (check_engine(thd, new_name, &create_info->db_type)) + DBUG_RETURN(TRUE); + new_db_type= create_info->db_type; if (create_info->row_type == ROW_TYPE_NOT_USED) - create_info->row_type=table->row_type; + create_info->row_type= table->s->row_type; + DBUG_PRINT("info", ("old type: %d new type: %d", old_db_type, new_db_type)); + if (ha_check_storage_engine_flag(old_db_type, HTON_ALTER_NOT_SUPPORTED) || + ha_check_storage_engine_flag(new_db_type, HTON_ALTER_NOT_SUPPORTED)) + { + DBUG_PRINT("info", ("doesn't support alter")); + my_error(ER_ILLEGAL_HA, MYF(0), table_name); + DBUG_RETURN(TRUE); + } + thd->proc_info="setup"; - if (alter_info->is_simple && !table->tmp_table) + if (!(alter_info->flags & ~(ALTER_RENAME | ALTER_KEYS_ONOFF)) && + !table->s->tmp_table) // no need to touch frm { switch (alter_info->keys_onoff) { case LEAVE_AS_IS: - error= 0; break; case ENABLE: + /* + wait_while_table_is_used() ensures that table being altered is + opened only by this thread and that TABLE::TABLE_SHARE::version + of TABLE object corresponding to this table is 0. + The latter guarantees that no DML statement will open this table + until ALTER TABLE finishes (i.e. 
until close_thread_tables()) + while the fact that the table is still open gives us protection + from concurrent DDL statements. + */ VOID(pthread_mutex_lock(&LOCK_open)); wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); VOID(pthread_mutex_unlock(&LOCK_open)); @@ -2826,28 +3193,35 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, VOID(pthread_mutex_lock(&LOCK_open)); wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); VOID(pthread_mutex_unlock(&LOCK_open)); - error= table->file->disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); + error=table->file->disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); /* COND_refresh will be signaled in close_thread_tables() */ break; } - if (error == HA_ERR_WRONG_COMMAND) { + error= 0; push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), - table->table_name); - error= 0; + table->alias); } + VOID(pthread_mutex_lock(&LOCK_open)); + /* + Unlike to the above case close_cached_table() below will remove ALL + instances of TABLE from table cache (it will also remove table lock + held by this thread). So to make actual table renaming and writing + to binlog atomic we have to put them into the same critical section + protected by LOCK_open mutex. This also removes gap for races between + access() and mysql_rename_table() calls. 
+ */ + if (!error && (new_name != table_name || new_db != db)) { thd->proc_info="rename"; - VOID(pthread_mutex_lock(&LOCK_open)); /* Then do a 'simple' rename of the table */ - error=0; if (!access(new_name_buff,F_OK)) { - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),new_name); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name); error= -1; } else @@ -2856,25 +3230,30 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, close_cached_table(thd, table); if (mysql_rename_table(old_db_type,db,table_name,new_db,new_alias)) error= -1; + else if (Table_triggers_list::change_table_name(thd, db, table_name, + new_db, new_alias)) + { + VOID(mysql_rename_table(old_db_type, new_db, new_alias, db, + table_name)); + error= -1; + } } - VOID(pthread_mutex_unlock(&LOCK_open)); } if (error == HA_ERR_WRONG_COMMAND) { + error= 0; push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), - table->table_name); - error=0; + table->alias); } if (!error) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE); mysql_bin_log.write(&qinfo); } send_ok(thd); @@ -2884,24 +3263,25 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, table->file->print_error(error, MYF(0)); error= -1; } - table_list->table=0; // For query cache + VOID(pthread_mutex_unlock(&LOCK_open)); + table_list->table= NULL; // For query cache query_cache_invalidate3(thd, table_list, 0); DBUG_RETURN(error); } /* Full alter table */ - /* let new create options override the old ones */ + /* Let new create options override the old ones */ if (!(used_fields & HA_CREATE_USED_MIN_ROWS)) - create_info->min_rows=table->min_rows; + create_info->min_rows= table->s->min_rows; if (!(used_fields & HA_CREATE_USED_MAX_ROWS)) - create_info->max_rows=table->max_rows; + create_info->max_rows= 
table->s->max_rows; if (!(used_fields & HA_CREATE_USED_AVG_ROW_LENGTH)) - create_info->avg_row_length=table->avg_row_length; + create_info->avg_row_length= table->s->avg_row_length; if (!(used_fields & HA_CREATE_USED_DEFAULT_CHARSET)) - create_info->default_table_charset= table->table_charset; + create_info->default_table_charset= table->s->table_charset; - restore_record(table,default_values); // Empty record for DEFAULT + restore_record(table, s->default_values); // Empty record for DEFAULT List_iterator<Alter_drop> drop_it(alter_info->drop_list); List_iterator<create_field> def_it(alter_info->create_list); List_iterator<Alter_column> alter_it(alter_info->alter_list); @@ -2915,6 +3295,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, Field **f_ptr,*field; for (f_ptr=table->field ; (field= *f_ptr) ; f_ptr++) { + if (field->type() == MYSQL_TYPE_STRING) + varchar= TRUE; /* Check if field should be dropped */ Alter_drop *drop; drop_it.rewind(); @@ -2969,10 +3351,13 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { if (def->sql_type == FIELD_TYPE_BLOB) { - my_error(ER_BLOB_CANT_HAVE_DEFAULT,MYF(0),def->change); - DBUG_RETURN(-1); + my_error(ER_BLOB_CANT_HAVE_DEFAULT, MYF(0), def->change); + DBUG_RETURN(TRUE); } - def->def=alter->def; // Use new default + if ((def->def=alter->def)) // Use new default + def->flags&= ~NO_DEFAULT_VALUE_FLAG; + else + def->flags|= NO_DEFAULT_VALUE_FLAG; alter_it.remove(); } } @@ -2983,8 +3368,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { if (def->change && ! 
def->field) { - my_error(ER_BAD_FIELD_ERROR,MYF(0),def->change,table_name); - DBUG_RETURN(-1); + my_error(ER_BAD_FIELD_ERROR, MYF(0), def->change, table_name); + DBUG_RETURN(TRUE); } if (!def->after) new_info.create_list.push_back(def); @@ -3001,22 +3386,23 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } if (!find) { - my_error(ER_BAD_FIELD_ERROR,MYF(0),def->after,table_name); - DBUG_RETURN(-1); + my_error(ER_BAD_FIELD_ERROR, MYF(0), def->after, table_name); + DBUG_RETURN(TRUE); } find_it.after(def); // Put element after this } } if (alter_info->alter_list.elements) { - my_error(ER_BAD_FIELD_ERROR,MYF(0),alter_info->alter_list.head()->name, - table_name); - DBUG_RETURN(-1); + my_error(ER_BAD_FIELD_ERROR, MYF(0), + alter_info->alter_list.head()->name, table_name); + DBUG_RETURN(TRUE); } if (!new_info.create_list.elements) { - my_error(ER_CANT_REMOVE_ALL_FIELDS,MYF(0)); - DBUG_RETURN(-1); + my_message(ER_CANT_REMOVE_ALL_FIELDS, ER(ER_CANT_REMOVE_ALL_FIELDS), + MYF(0)); + DBUG_RETURN(TRUE); } /* @@ -3029,7 +3415,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, List<key_part_spec> key_parts; KEY *key_info=table->key_info; - for (uint i=0 ; i < table->keys ; i++,key_info++) + for (uint i=0 ; i < table->s->keys ; i++,key_info++) { char *key_name= key_info->name; Alter_drop *drop; @@ -3071,12 +3457,26 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, continue; // Field is removed uint key_part_length=key_part->length; if (cfield->field) // Not new field - { // Check if sub key - if (cfield->field->type() != FIELD_TYPE_BLOB && - (cfield->field->pack_length() == key_part_length || - cfield->length <= key_part_length / - key_part->field->charset()->mbmaxlen)) - key_part_length=0; // Use whole field + { + /* + If the field can't have only a part used in a key according to its + new type, or should not be used partially according to its + previous type, or the field length is less than the key part + length, unset the key part 
length. + + We also unset the key part length if it is the same as the + old field's length, so the whole new field will be used. + + BLOBs may have cfield->length == 0, which is why we test it before + checking whether cfield->length < key_part_length (in chars). + */ + if (!Field::type_can_have_key_part(cfield->field->type()) || + !Field::type_can_have_key_part(cfield->sql_type) || + (cfield->field->field_length == key_part_length && + !f_is_blob(key_part->key_type)) || + (cfield->length && (cfield->length < key_part_length / + key_part->field->charset()->mbmaxlen))) + key_part_length= 0; // Use whole field } key_part_length /= key_part->field->charset()->mbmaxlen; key_parts.push_back(new key_part_spec(cfield->field_name, @@ -3118,25 +3518,25 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, !my_strcasecmp(system_charset_info,key->name,primary_key_name)) { my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } } if (alter_info->drop_list.elements) { - my_error(ER_CANT_DROP_FIELD_OR_KEY,MYF(0), - alter_info->drop_list.head()->name); + my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), + alter_info->drop_list.head()->name); goto err; } if (alter_info->alter_list.elements) { - my_error(ER_CANT_DROP_FIELD_OR_KEY,MYF(0), - alter_info->alter_list.head()->name); + my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), + alter_info->alter_list.head()->name); goto err; } - db_create_options=table->db_create_options & ~(HA_OPTION_PACK_RECORD); + db_create_options= table->s->db_create_options & ~(HA_OPTION_PACK_RECORD); my_snprintf(tmp_name, sizeof(tmp_name), "%s-%lx_%lx", tmp_file_prefix, current_pid, thd->thread_id); /* Safety fix for innodb */ @@ -3147,8 +3547,11 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, goto err; } create_info->db_type=new_db_type; - if (!create_info->comment) - create_info->comment=table->comment; + if (!create_info->comment.str) + { + create_info->comment.str= table->s->comment.str; + 
create_info->comment.length= table->s->comment.length; + } table->file->update_create_info(create_info); if ((create_info->table_options & @@ -3164,10 +3567,37 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, HA_OPTION_NO_DELAY_KEY_WRITE); create_info->table_options|= db_create_options; - if (table->tmp_table) + if (table->s->tmp_table) create_info->options|=HA_LEX_CREATE_TMP_TABLE; /* + better have a negative test here, instead of positive, like + alter_info->flags & ALTER_ADD_COLUMN|ALTER_ADD_INDEX|... + so that ALTER TABLE won't break when somebody will add new flag + + MySQL uses frm version to determine the type of the data fields and + their layout. See Field_string::type() for details. + Thus, if the table is too old we may have to rebuild the data to + update the layout. + + There was a bug prior to mysql-4.0.25. Number of null fields was + calculated incorrectly. As a result frm and data files gets out of + sync after fast alter table. There is no way to determine by which + mysql version (in 4.0 and 4.1 branches) table was created, thus we + disable fast alter table for all tables created by mysql versions + prior to 5.0 branch. + See BUG#6236. + */ + need_copy_table= (alter_info->flags & + ~(ALTER_CHANGE_COLUMN_DEFAULT|ALTER_OPTIONS) || + (create_info->used_fields & + ~(HA_CREATE_USED_COMMENT|HA_CREATE_USED_PASSWORD)) || + table->s->tmp_table || + !table->s->mysql_version || + (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar)); + create_info->frm_only= !need_copy_table; + + /* Handling of symlinked tables: If no rename: Create new data file and index file on the same disk as the @@ -3213,8 +3643,9 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } else create_info->data_file_name=create_info->index_file_name=0; + + /* We don't log the statement, it will be logged later. */ { - /* We don't log the statement, it will be logged later. 
*/ tmp_disable_binlog(thd); error= mysql_create_table(thd, new_db, tmp_name, create_info, &new_info, 1, 0); @@ -3222,44 +3653,61 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, if (error) DBUG_RETURN(error); } - if (table->tmp_table) - new_table=open_table(thd,new_db,tmp_name,tmp_name,0); - else - { - char path[FN_REFLEN]; - build_table_path(path, sizeof(path), new_db, tmp_name, ""); - new_table=open_temporary_table(thd, path, new_db, tmp_name,0); - } - if (!new_table) + if (need_copy_table) { - VOID(quick_rm_table(new_db_type,new_db,tmp_name)); - goto err; + if (table->s->tmp_table) + { + TABLE_LIST tbl; + bzero((void*) &tbl, sizeof(tbl)); + tbl.db= new_db; + tbl.table_name= tbl.alias= tmp_name; + new_table= open_table(thd, &tbl, thd->mem_root, (bool*) 0, + MYSQL_LOCK_IGNORE_FLUSH); + } + else + { + char path[FN_REFLEN]; + my_snprintf(path, sizeof(path), "%s/%s/%s", mysql_data_home, + new_db, tmp_name); + fn_format(path,path,"","",4); + new_table=open_temporary_table(thd, path, new_db, tmp_name,0); + } + if (!new_table) + { + VOID(quick_rm_table(new_db_type,new_db,tmp_name)); + goto err; + } } - /* We don't want update TIMESTAMP fields during ALTER TABLE. 
*/ - new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; - new_table->next_number_field=new_table->found_next_number_field; thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields thd->cuted_fields=0L; thd->proc_info="copy to tmp table"; - next_insert_id=thd->next_insert_id; // Remember for loggin + next_insert_id=thd->next_insert_id; // Remember for logging copied=deleted=0; - if (!new_table->is_view) + if (new_table && !new_table->s->is_view) + { + new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + new_table->next_number_field=new_table->found_next_number_field; error= copy_data_between_tables(table, new_table, new_info.create_list, - handle_duplicates, ignore, - order_num, order, &copied, &deleted, - alter_info->keys_onoff); - /* - No need to have call to alter_table_manage_keys() in the else because - in 4.1 we always copy data, except for views. In 5.0 it could happen - that no data is copied and only frm is modified. Then we have to handle - alter_info->keys_onoff outside of copy_data_between_tables - */ + ignore, order_num, order, + &copied, &deleted, alter_info->keys_onoff); + } + else if (!new_table) + { + VOID(pthread_mutex_lock(&LOCK_open)); + wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); + table->file->external_lock(thd, F_WRLCK); + alter_table_manage_keys(table, table->file->indexes_are_disabled(), + alter_info->keys_onoff); + table->file->external_lock(thd, F_UNLCK); + VOID(pthread_mutex_unlock(&LOCK_open)); + } + thd->last_insert_id=next_insert_id; // Needed for correct log thd->count_cuted_fields= CHECK_FIELD_IGNORE; - if (table->tmp_table) + if (table->s->tmp_table) { /* We changed a temporary table */ if (error) @@ -3278,7 +3726,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, thd->lock=0; } /* Remove link to old table and rename the new one */ - close_temporary_table(thd,table->table_cache_key,table_name); + close_temporary_table(thd, table->s->db, table_name); /* Should pass the 'new_name' as we 
store table name in the cache */ if (rename_temporary_table(thd, new_table, new_db, new_name)) { // Fatal error @@ -3286,18 +3734,24 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, my_free((gptr) new_table,MYF(0)); goto err; } - mysql_update_log.write(thd, thd->query,thd->query_length); + /* + Writing to the binlog does not need to be synchronized for temporary tables, + which are thread-specific. + */ if (mysql_bin_log.is_open()) { thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE); mysql_bin_log.write(&qinfo); } goto end_temporary; } - intern_close_table(new_table); /* close temporary table */ - my_free((gptr) new_table,MYF(0)); + if (new_table) + { + intern_close_table(new_table); /* close temporary table */ + my_free((gptr) new_table,MYF(0)); + } VOID(pthread_mutex_lock(&LOCK_open)); if (error) { @@ -3309,7 +3763,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, /* Data is copied. Now we rename the old table to a temp name, rename the new one to the old name, remove all entries from the old table - from the cash, free all locks, close the old table and remove it. + from the cache, free all locks, close the old table and remove it. 
*/ thd->proc_info="rename result table"; @@ -3322,7 +3776,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, if (!access(new_name_buff,F_OK)) { error=1; - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),new_name_buff); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name_buff); VOID(quick_rm_table(new_db_type,new_db,tmp_name)); VOID(pthread_mutex_unlock(&LOCK_open)); goto err; @@ -3337,14 +3791,9 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, Win32 and InnoDB can't drop a table that is in use, so we must close the original table at before doing the rename */ - table_name=thd->strdup(table_name); // must be saved - if (close_cached_table(thd, table)) - { // Aborted - VOID(quick_rm_table(new_db_type,new_db,tmp_name)); - VOID(pthread_mutex_unlock(&LOCK_open)); - goto err; - } + close_cached_table(thd, table); table=0; // Marker that table is closed + no_table_reopen= TRUE; } #if (!defined( __WIN__) && !defined( __EMX__) && !defined( OS2)) else @@ -3353,13 +3802,19 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, error=0; + if (!need_copy_table) + new_db_type=old_db_type=DB_TYPE_UNKNOWN; // this type cannot happen in regular ALTER if (mysql_rename_table(old_db_type,db,table_name,db,old_name)) { error=1; VOID(quick_rm_table(new_db_type,new_db,tmp_name)); } else if (mysql_rename_table(new_db_type,new_db,tmp_name,new_db, - new_alias)) + new_alias) || + (new_name != table_name || new_db != db) && // we also do rename + Table_triggers_list::change_table_name(thd, db, table_name, + new_db, new_alias)) + { // Try to get everything back error=1; VOID(quick_rm_table(new_db_type,new_db,new_alias)); @@ -3377,7 +3832,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, VOID(pthread_mutex_unlock(&LOCK_open)); goto err; } - if (thd->lock || new_name != table_name) // True if WIN32 + if (thd->lock || new_name != table_name || no_table_reopen) // True if WIN32 { /* Not table locking or alter table with rename @@ -3419,18 +3874,17 @@ int 
mysql_alter_table(THD *thd,char *new_db, char *new_name, if (error) { VOID(pthread_mutex_unlock(&LOCK_open)); - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); goto err; } thd->proc_info="end"; - mysql_update_log.write(thd, thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE); mysql_bin_log.write(&qinfo); } - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); VOID(pthread_mutex_unlock(&LOCK_open)); #ifdef HAVE_BERKELEY_DB if (old_db_type == DB_TYPE_BERKELEY_DB) @@ -3463,17 +3917,16 @@ end_temporary: (ulong) thd->cuted_fields); send_ok(thd, copied + deleted, 0L, tmp_name); thd->some_tables_deleted=0; - DBUG_RETURN(0); + DBUG_RETURN(FALSE); - err: - DBUG_RETURN(-1); +err: + DBUG_RETURN(TRUE); } static int copy_data_between_tables(TABLE *from,TABLE *to, List<create_field> &create, - enum enum_duplicates handle_duplicates, bool ignore, uint order_num, ORDER *order, ha_rows *copied, @@ -3484,7 +3937,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, Copy_field *copy,*copy_end; ulong found_count,delete_count; THD *thd= current_thd; - uint length; + uint length= 0; SORT_FIELD *sortorder; READ_RECORD info; TABLE_LIST tables; @@ -3505,7 +3958,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, if (error) DBUG_RETURN(-1); - if (!(copy= new Copy_field[to->fields])) + if (!(copy= new Copy_field[to->s->fields])) DBUG_RETURN(-1); /* purecov: inspected */ if (to->file->external_lock(thd, F_WRLCK)) @@ -3514,6 +3967,12 @@ copy_data_between_tables(TABLE *from,TABLE *to, /* We need external lock before we can disable/enable keys */ alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff); + /* We can abort alter table for any table type */ + thd->no_trans_update= 0; + thd->abort_on_warning= !ignore && test(thd->variables.sql_mode & + 
(MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)); + from->file->info(HA_STATUS_VARIABLE); to->file->start_bulk_insert(from->file->records); @@ -3551,36 +4010,38 @@ copy_data_between_tables(TABLE *from,TABLE *to, from->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), MYF(MY_FAE | MY_ZEROFILL)); bzero((char*) &tables,sizeof(tables)); - tables.table = from; - tables.alias = tables.real_name= from->real_name; - tables.db = from->table_cache_key; + tables.table= from; + tables.alias= tables.table_name= (char*) from->s->table_name; + tables.db= (char*) from->s->db; error=1; if (thd->lex->select_lex.setup_ref_array(thd, order_num) || setup_order(thd, thd->lex->select_lex.ref_pointer_array, &tables, fields, all_fields, order) || - !(sortorder=make_unireg_sortorder(order, &length)) || + !(sortorder=make_unireg_sortorder(order, &length, NULL)) || (from->sort.found_records = filesort(thd, from, sortorder, length, (SQL_SELECT *) 0, HA_POS_ERROR, - &examined_rows)) - == HA_POS_ERROR) + &examined_rows)) == + HA_POS_ERROR) goto err; }; - /* Handler must be told explicitly to retrieve all columns, because - this function does not set field->query_id in the columns to the - current query id */ + /* + Handler must be told explicitly to retrieve all columns, because + this function does not set field->query_id in the columns to the + current query id + */ from->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1,1); - if (ignore || - handle_duplicates == DUP_REPLACE) + if (ignore) to->file->extra(HA_EXTRA_IGNORE_DUP_KEY); thd->row_count= 0; + restore_record(to, s->default_values); // Create empty record while (!(error=info.read_record(&info))) { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); error= 1; break; } @@ -3592,20 +4053,23 @@ copy_data_between_tables(TABLE *from,TABLE *to, else to->next_number_field->reset(); } + for (Copy_field *copy_ptr=copy ; copy_ptr != copy_end ; copy_ptr++) { 
copy_ptr->do_copy(copy_ptr); } - if ((error=to->file->write_row((byte*) to->record[0]))) + error=to->file->write_row((byte*) to->record[0]); + to->auto_increment_field_not_null= FALSE; + if (error) { - if ((!ignore && - handle_duplicates != DUP_REPLACE) || + if (!ignore || (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)) { to->file->print_error(error,MYF(0)); break; } + to->file->restore_auto_increment(); delete_count++; } else @@ -3615,7 +4079,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, free_io_cache(from); delete [] copy; // This is never 0 - if (to->file->end_bulk_insert() && !error) + if (to->file->end_bulk_insert() && error <= 0) { to->file->print_error(my_errno,MYF(0)); error=1; @@ -3635,6 +4099,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, err: thd->variables.sql_mode= save_sql_mode; + thd->abort_on_warning= 0; free_io_cache(from); *copied= found_count; *deleted=delete_count; @@ -3655,26 +4120,26 @@ copy_data_between_tables(TABLE *from,TABLE *to, RETURN Like mysql_alter_table(). 
*/ -int mysql_recreate_table(THD *thd, TABLE_LIST *table_list) +bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list) { - LEX *lex= thd->lex; HA_CREATE_INFO create_info; Alter_info alter_info; DBUG_ENTER("mysql_recreate_table"); - bzero((char*) &create_info,sizeof(create_info)); + bzero((char*) &create_info, sizeof(create_info)); create_info.db_type=DB_TYPE_DEFAULT; - create_info.row_type=ROW_TYPE_DEFAULT; + create_info.row_type=ROW_TYPE_NOT_USED; create_info.default_table_charset=default_charset_info; - alter_info.is_simple= 0; // Force full recreate + /* Force alter table to recreate table */ + alter_info.flags= ALTER_CHANGE_COLUMN; DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info, table_list, &alter_info, - 0, (ORDER *) 0, DUP_ERROR, 0)); + 0, (ORDER *) 0, 0)); } -int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) +bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) { TABLE_LIST *table; List<Item> field_list; @@ -3684,19 +4149,21 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2)); item->maybe_null= 1; - field_list.push_back(item=new Item_int("Checksum",(longlong) 1,21)); + field_list.push_back(item= new Item_int("Checksum", (longlong) 1, + MY_INT64_NUM_DECIMAL_DIGITS)); item->maybe_null= 1; - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); - for (table= tables; table; table= table->next) + for (table= tables; table; table= table->next_local) { char table_name[NAME_LEN*2+2]; TABLE *t; - strxmov(table_name, table->db ,".", table->real_name, NullS); + strxmov(table_name, table->db ,".", table->table_name, NullS); - t= table->table= open_ltable(thd, table, TL_READ_NO_INSERT); + t= table->table= open_ltable(thd, table, TL_READ); thd->clear_error(); // these errors 
shouldn't get client protocol->prepare_for_resend(); @@ -3706,12 +4173,10 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) { /* Table didn't exist */ protocol->store_null(); - thd->net.last_error[0]=0; + thd->clear_error(); } else { - t->pos_in_table_list= table; - if (t->file->table_flags() & HA_HAS_CHECKSUM && !(check_opt->flags & T_EXTEND)) protocol->store((ulonglong)t->file->checksum()); @@ -3722,6 +4187,7 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) { /* calculating table's checksum */ ha_checksum crc= 0; + uchar null_mask=256 - (1 << t->s->last_null_bit_pos); /* InnoDB must be told explicitly to retrieve all columns, because this function does not set field->query_id in the columns to the @@ -3742,14 +4208,21 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) continue; break; } - if (t->record[0] != (byte*) t->field[0]->ptr) - row_crc= my_checksum(row_crc, t->record[0], - ((byte*) t->field[0]->ptr) - t->record[0]); + if (t->s->null_bytes) + { + /* fix undefined null bits */ + t->record[0][t->s->null_bytes-1] |= null_mask; + if (!(t->s->db_create_options & HA_OPTION_PACK_RECORD)) + t->record[0][0] |= 1; - for (uint i= 0; i < t->fields; i++ ) + row_crc= my_checksum(row_crc, t->record[0], t->s->null_bytes); + } + + for (uint i= 0; i < t->s->fields; i++ ) { Field *f= t->field[i]; - if (f->type() == FIELD_TYPE_BLOB) + if ((f->type() == FIELD_TYPE_BLOB) || + (f->type() == MYSQL_TYPE_VARCHAR)) { String tmp; f->val_str(&tmp); @@ -3775,11 +4248,32 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: close_thread_tables(thd); // Shouldn't be needed if (table) table->table=0; - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); +} + +static bool check_engine(THD *thd, const char *table_name, + enum db_type *new_engine) +{ + enum db_type req_engine= *new_engine; + bool no_substitution= + 
test(thd->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION); + if ((*new_engine= + ha_checktype(thd, req_engine, no_substitution, 1)) == DB_TYPE_UNKNOWN) + return TRUE; + + if (req_engine != *new_engine) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_USING_OTHER_HANDLER, + ER(ER_WARN_USING_OTHER_HANDLER), + ha_get_storage_engine(*new_engine), + table_name); + } + return FALSE; } diff --git a/sql/sql_test.cc b/sql/sql_test.cc index d6afc888be2..465f53cc30c 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -79,7 +78,7 @@ void print_cached_tables(void) { TABLE *entry=(TABLE*) hash_element(&open_cache,idx); printf("%-14.14s %-32s%6ld%8ld%10ld%6d %s\n", - entry->table_cache_key,entry->real_name,entry->version, + entry->s->db, entry->s->table_name, entry->s->version, entry->in_use ? entry->in_use->thread_id : 0L, entry->in_use ? entry->in_use->dbug_thread_id : 0L, entry->db_stat ? 1 : 0, entry->in_use ? lock_descriptions[(int)entry->reginfo.lock_type] : "Not in use"); @@ -131,7 +130,7 @@ void TEST_filesort(SORT_FIELD *sortorder,uint s_length) { if (sortorder->field->table_name) { - out.append(sortorder->field->table_name); + out.append(*sortorder->field->table_name); out.append('.'); } out.append(sortorder->field->field_name ? 
sortorder->field->field_name: @@ -167,7 +166,7 @@ TEST_join(JOIN *join) TABLE *form=tab->table; char key_map_buff[128]; fprintf(DBUG_FILE,"%-16.16s type: %-7s q_keys: %s refs: %d key: %d len: %d\n", - form->table_name, + form->alias, join_type_str[tab->type], tab->keys.print(key_map_buff), tab->ref.key_parts, @@ -181,9 +180,10 @@ TEST_join(JOIN *join) " quick select checked for each record (keys: %s)\n", tab->select->quick_keys.print(buf)); else if (tab->select->quick) - fprintf(DBUG_FILE," quick select used on key %s, length: %d\n", - form->key_info[tab->select->quick->index].name, - tab->select->quick->max_used_key_length); + { + fprintf(DBUG_FILE, " quick select used:\n"); + tab->select->quick->dbug_dump(18, FALSE); + } else VOID(fputs(" select used\n",DBUG_FILE)); } @@ -202,6 +202,106 @@ TEST_join(JOIN *join) DBUG_VOID_RETURN; } + +/* + Print the current state during query optimization. + + SYNOPSIS + print_plan() + join pointer to the structure providing all context info for + the query + read_time the cost of the best partial plan + record_count estimate for the number of records returned by the best + partial plan + idx length of the partial QEP in 'join->positions'; + also an index in the array 'join->best_ref'; + info comment string to appear above the printout + + DESCRIPTION + This function prints to the log file DBUG_FILE the members of 'join' that + are used during query optimization (join->positions, join->best_positions, + and join->best_ref) and few other related variables (read_time, + record_count). + Useful to trace query optimizer functions. 
+ + RETURN + None +*/ + +void +print_plan(JOIN* join, uint idx, double record_count, double read_time, + double current_read_time, const char *info) +{ + uint i; + POSITION pos; + JOIN_TAB *join_table; + JOIN_TAB **plan_nodes; + TABLE* table; + + if (info == 0) + info= ""; + + DBUG_LOCK_FILE; + if (join->best_read == DBL_MAX) + { + fprintf(DBUG_FILE, + "%s; idx:%u, best: DBL_MAX, atime: %g, itime: %g, count: %g\n", + info, idx, current_read_time, read_time, record_count); + } + else + { + fprintf(DBUG_FILE, + "%s; idx:%u, best: %g, accumulated: %g, increment: %g, count: %g\n", + info, idx, join->best_read, current_read_time, read_time, record_count); + } + + /* Print the tables in JOIN->positions */ + fputs(" POSITIONS: ", DBUG_FILE); + for (i= 0; i < idx ; i++) + { + pos = join->positions[i]; + table= pos.table->table; + if (table) + fputs(table->s->table_name, DBUG_FILE); + fputc(' ', DBUG_FILE); + } + fputc('\n', DBUG_FILE); + + /* + Print the tables in JOIN->best_positions only if at least one complete plan + has been found. An indicator for this is the value of 'join->best_read'. 
+ */ + if (join->best_read < DBL_MAX) + { + fputs("BEST_POSITIONS: ", DBUG_FILE); + for (i= 0; i < idx ; i++) + { + pos= join->best_positions[i]; + table= pos.table->table; + if (table) + fputs(table->s->table_name, DBUG_FILE); + fputc(' ', DBUG_FILE); + } + } + fputc('\n', DBUG_FILE); + + /* Print the tables in JOIN->best_ref */ + fputs(" BEST_REF: ", DBUG_FILE); + for (plan_nodes= join->best_ref ; *plan_nodes ; plan_nodes++) + { + join_table= (*plan_nodes); + fputs(join_table->table->s->table_name, DBUG_FILE); + fprintf(DBUG_FILE, "(%lu,%lu,%lu)", + (ulong) join_table->found_records, + (ulong) join_table->records, + (ulong) join_table->read_time); + fputc(' ', DBUG_FILE); + } + fputc('\n', DBUG_FILE); + + DBUG_UNLOCK_FILE; +} + #endif typedef struct st_debug_lock @@ -233,12 +333,12 @@ static void push_locks_into_array(DYNAMIC_ARRAY *ar, THR_LOCK_DATA *data, if (data) { TABLE *table=(TABLE *)data->debug_print_param; - if (table && table->tmp_table == NO_TMP_TABLE) + if (table && table->s->tmp_table == NO_TMP_TABLE) { TABLE_LOCK_INFO table_lock_info; - table_lock_info.thread_id=table->in_use->thread_id; - memcpy(table_lock_info.table_name, table->table_cache_key, - table->key_length); + table_lock_info.thread_id= table->in_use->thread_id; + memcpy(table_lock_info.table_name, table->s->table_cache_key, + table->s->key_length); table_lock_info.table_name[strlen(table_lock_info.table_name)]='.'; table_lock_info.waiting=wait; table_lock_info.lock_text=text; @@ -344,29 +444,24 @@ reads: %10s\n\n", } -void mysql_print_status(THD *thd) +void mysql_print_status() { char current_dir[FN_REFLEN]; + STATUS_VAR tmp; + + calc_sum_of_all_status(&tmp); printf("\nStatus information:\n\n"); - my_getwd(current_dir, sizeof(current_dir),MYF(0)); + VOID(my_getwd(current_dir, sizeof(current_dir),MYF(0))); printf("Current dir: %s\n", current_dir); printf("Running threads: %d Stack size: %ld\n", thread_count, (long) thread_stack); - if (thd) - thd->proc_info="locks"; thr_print_locks(); // 
Write some debug info #ifndef DBUG_OFF - if (thd) - thd->proc_info="table cache"; print_cached_tables(); #endif /* Print key cache status */ - if (thd) - thd->proc_info="key cache"; puts("\nKey caches:"); process_key_caches(print_key_cache_status); - if (thd) - thd->proc_info="status"; pthread_mutex_lock(&LOCK_status); printf("\nhandler status:\n\ read_key: %10lu\n\ @@ -376,16 +471,20 @@ read_first: %10lu\n\ write: %10lu\n\ delete %10lu\n\ update: %10lu\n", - ha_read_key_count, ha_read_next_count, - ha_read_rnd_count, ha_read_first_count, - ha_write_count, ha_delete_count, ha_update_count); + tmp.ha_read_key_count, + tmp.ha_read_next_count, + tmp.ha_read_rnd_count, + tmp.ha_read_first_count, + tmp.ha_write_count, + tmp.ha_delete_count, + tmp.ha_update_count); pthread_mutex_unlock(&LOCK_status); printf("\nTable status:\n\ Opened tables: %10lu\n\ Open tables: %10lu\n\ Open files: %10lu\n\ Open streams: %10lu\n", - opened_tables, + tmp.opened_tables, (ulong) cached_tables(), (ulong) my_file_opened, (ulong) my_stream_opened); @@ -403,8 +502,6 @@ Next alarm time: %lu\n", #endif display_table_locks(); fflush(stdout); - if (thd) - thd->proc_info="malloc"; my_checkmalloc(); TERMINATE(stdout); // Write malloc information @@ -435,6 +532,4 @@ Estimated memory (with thread stack): %ld\n", (long) (thread_count * thread_stack + info.hblkhd + info.arena)); #endif puts(""); - if (thd) - thd->proc_info=0; } diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc new file mode 100644 index 00000000000..55d51ad07b7 --- /dev/null +++ b/sql/sql_trigger.cc @@ -0,0 +1,1723 @@ +/* Copyright (C) 2004-2005 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +#define MYSQL_LEX 1 +#include "mysql_priv.h" +#include "sp_head.h" +#include "sql_trigger.h" +#include "parse_file.h" + +static const LEX_STRING triggers_file_type= + {(char *) STRING_WITH_LEN("TRIGGERS")}; + +const char * const triggers_file_ext= ".TRG"; + +/* + Table of .TRG file field descriptors. + We have here only one field now because in nearest future .TRG + files will be merged into .FRM files (so we don't need something + like md5 or created fields). +*/ +static File_option triggers_file_parameters[]= +{ + { + {(char *) STRING_WITH_LEN("triggers") }, + my_offsetof(class Table_triggers_list, definitions_list), + FILE_OPTIONS_STRLIST + }, + { + {(char *) STRING_WITH_LEN("sql_modes") }, + my_offsetof(class Table_triggers_list, definition_modes_list), + FILE_OPTIONS_ULLLIST + }, + { + {(char *) STRING_WITH_LEN("definers") }, + my_offsetof(class Table_triggers_list, definers_list), + FILE_OPTIONS_STRLIST + }, + { { 0, 0 }, 0, FILE_OPTIONS_STRING } +}; + +File_option sql_modes_parameters= +{ + {(char*) STRING_WITH_LEN("sql_modes") }, + my_offsetof(class Table_triggers_list, definition_modes_list), + FILE_OPTIONS_ULLLIST +}; + +/* + This must be kept up to date whenever a new option is added to the list + above, as it specifies the number of required parameters of the trigger in + .trg file. +*/ + +static const int TRG_NUM_REQUIRED_PARAMETERS= 4; + +/* + Structure representing contents of .TRN file which are used to support + database wide trigger namespace. 
+*/ + +struct st_trigname +{ + LEX_STRING trigger_table; +}; + +static const LEX_STRING trigname_file_type= + {(char *) STRING_WITH_LEN("TRIGGERNAME")}; + +const char * const trigname_file_ext= ".TRN"; + +static File_option trigname_file_parameters[]= +{ + { + {(char *) STRING_WITH_LEN("trigger_table")}, + offsetof(struct st_trigname, trigger_table), + FILE_OPTIONS_ESTRING + }, + { { 0, 0 }, 0, FILE_OPTIONS_STRING } +}; + + +const LEX_STRING trg_action_time_type_names[]= +{ + { (char *) STRING_WITH_LEN("BEFORE") }, + { (char *) STRING_WITH_LEN("AFTER") } +}; + +const LEX_STRING trg_event_type_names[]= +{ + { (char *) STRING_WITH_LEN("INSERT") }, + { (char *) STRING_WITH_LEN("UPDATE") }, + { (char *) STRING_WITH_LEN("DELETE") } +}; + + +class Handle_old_incorrect_sql_modes_hook: public Unknown_key_hook +{ +private: + char *path; +public: + Handle_old_incorrect_sql_modes_hook(char *file_path) + :path(file_path) + {}; + virtual bool process_unknown_string(char *&unknown_key, gptr base, + MEM_ROOT *mem_root, char *end); +}; + +class Handle_old_incorrect_trigger_table_hook: public Unknown_key_hook +{ +public: + Handle_old_incorrect_trigger_table_hook(char *file_path, + LEX_STRING *trigger_table_arg) + :path(file_path), trigger_table_value(trigger_table_arg) + {}; + virtual bool process_unknown_string(char *&unknown_key, gptr base, + MEM_ROOT *mem_root, char *end); +private: + char *path; + LEX_STRING *trigger_table_value; +}; + +/* + Create or drop trigger for table. + + SYNOPSIS + mysql_create_or_drop_trigger() + thd - current thread context (including trigger definition in LEX) + tables - table list containing one table for which trigger is created. + create - whenever we create (TRUE) or drop (FALSE) trigger + + NOTE + This function is mainly responsible for opening and locking of table and + invalidation of all its instances in table cache after trigger creation. + Real work on trigger creation/dropping is done inside Table_triggers_list + methods. 
+ + RETURN VALUE + FALSE Success + TRUE error +*/ +bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) +{ + /* + FIXME: The code below takes too many different paths depending on the + 'create' flag, so that the justification for a single function + 'mysql_create_or_drop_trigger', compared to two separate functions + 'mysql_create_trigger' and 'mysql_drop_trigger' is not apparent. + This is a good candidate for a minor refactoring. + */ + TABLE *table; + bool result= TRUE; + String stmt_query; + + DBUG_ENTER("mysql_create_or_drop_trigger"); + + /* Charset of the buffer for statement must be system one. */ + stmt_query.set_charset(system_charset_info); + + /* + QQ: This function could be merged in mysql_alter_table() function + But do we want this ? + */ + + /* + Note that once we will have check for TRIGGER privilege in place we won't + need second part of condition below, since check_access() function also + checks that db is specified. + */ + if (!thd->lex->spname->m_db.length || create && !tables->db_length) + { + my_error(ER_NO_DB_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + + /* + We don't allow creating triggers on tables in the 'mysql' schema + */ + if (create && !my_strcasecmp(system_charset_info, "mysql", tables->db)) + { + my_error(ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA, MYF(0)); + DBUG_RETURN(TRUE); + } + + /* + TODO: We should check if user has TRIGGER privilege for table here. + Now we just require SUPER privilege for creating/dropping because + we don't have proper privilege checking for triggers in place yet. + */ + if (check_global_access(thd, SUPER_ACL)) + DBUG_RETURN(TRUE); + + /* + There is no DETERMINISTIC clause for triggers, so can't check it. + But a trigger can in theory be used to do nasty things (if it supported + DROP for example) so we do the check for privileges. For now there is + already a stronger test right above; but when this stronger test will + be removed, the test below will hold. 
Because triggers have the same + nature as functions regarding binlogging: their body is implicitly + binlogged, so they share the same danger, so trust_function_creators + applies to them too. + */ + if (!trust_function_creators && mysql_bin_log.is_open() && + !(thd->security_ctx->master_access & SUPER_ACL)) + { + my_error(ER_BINLOG_CREATE_ROUTINE_NEED_SUPER, MYF(0)); + DBUG_RETURN(TRUE); + } + + /* + We don't want perform our operations while global read lock is held + so we have to wait until its end and then prevent it from occurring + again until we are done. (Acquiring LOCK_open is not enough because + global read lock is held without holding LOCK_open). + */ + if (wait_if_global_read_lock(thd, 0, 1)) + DBUG_RETURN(TRUE); + + VOID(pthread_mutex_lock(&LOCK_open)); + + if (!create) + { + bool if_exists= thd->lex->drop_if_exists; + + if (add_table_for_trigger(thd, thd->lex->spname, if_exists, & tables)) + goto end; + + if (!tables) + { + DBUG_ASSERT(if_exists); + /* + Since the trigger does not exist, there is no associated table, + and therefore : + - no TRIGGER privileges to check, + - no trigger to drop, + - no table to lock/modify, + so the drop statement is successful. + */ + result= FALSE; + /* Still, we need to log the query ... */ + stmt_query.append(thd->query, thd->query_length); + goto end; + } + } + + /* We should have only one table in table list. */ + DBUG_ASSERT(tables->next_global == 0); + + /* We do not allow creation of triggers on temporary tables. */ + if (create && find_temporary_table(thd, tables->db, tables->table_name)) + { + my_error(ER_TRG_ON_VIEW_OR_TEMP_TABLE, MYF(0), tables->alias); + goto end; + } + + if (lock_table_names(thd, tables)) + goto end; + + /* We also don't allow creation of triggers on views. 
*/ + tables->required_type= FRMTYPE_TABLE; + + if (reopen_name_locked_table(thd, tables)) + { + unlock_table_name(thd, tables); + goto end; + } + table= tables->table; + + if (!table->triggers) + { + if (!create) + { + my_error(ER_TRG_DOES_NOT_EXIST, MYF(0)); + goto end; + } + + if (!(table->triggers= new (&table->mem_root) Table_triggers_list(table))) + goto end; + } + + result= (create ? + table->triggers->create_trigger(thd, tables, &stmt_query): + table->triggers->drop_trigger(thd, tables, &stmt_query)); + +end: + + if (!result) + { + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + + /* Such a statement can always go directly to binlog, no trans cache. */ + Query_log_event qinfo(thd, stmt_query.ptr(), stmt_query.length(), 0, + FALSE); + mysql_bin_log.write(&qinfo); + } + } + + VOID(pthread_mutex_unlock(&LOCK_open)); + start_waiting_global_read_lock(thd); + + if (!result) + send_ok(thd); + + DBUG_RETURN(result); +} + + +/* + Create trigger for table. + + SYNOPSIS + create_trigger() + thd - current thread context (including trigger definition in + LEX) + tables - table list containing one open table for which the + trigger is created. + stmt_query - [OUT] after successful return, this string contains + well-formed statement for creation this trigger. + + NOTE + - Assumes that trigger name is fully qualified. + - NULL-string means the following LEX_STRING instance: + { str = 0; length = 0 }. + - In other words, definer_user and definer_host should contain + simultaneously NULL-strings (non-SUID/old trigger) or valid strings + (SUID/new trigger). 
+ + RETURN VALUE + False - success + True - error +*/ +bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, + String *stmt_query) +{ + LEX *lex= thd->lex; + TABLE *table= tables->table; + char dir_buff[FN_REFLEN], file_buff[FN_REFLEN], trigname_buff[FN_REFLEN], + trigname_path[FN_REFLEN]; + LEX_STRING dir, file, trigname_file; + LEX_STRING *trg_def; + LEX_STRING definer_user; + LEX_STRING definer_host; + ulonglong *trg_sql_mode; + char trg_definer_holder[USER_HOST_BUFF_SIZE]; + LEX_STRING *trg_definer; + Item_trigger_field *trg_field; + struct st_trigname trigname; + + + /* Trigger must be in the same schema as target table. */ + if (my_strcasecmp(table_alias_charset, table->s->db, lex->spname->m_db.str)) + { + my_error(ER_TRG_IN_WRONG_SCHEMA, MYF(0)); + return 1; + } + + /* We don't allow creation of several triggers of the same type yet */ + if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time]) + { + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "multiple triggers with the same action time" + " and event for one table"); + return 1; + } + + if (!lex->definer) + { + /* + DEFINER-clause is missing. + + If we are in slave thread, this means that we received CREATE TRIGGER + from the master, that does not support definer in triggers. So, we + should mark this trigger as non-SUID. Note that this does not happen + when we parse triggers' definitions during opening .TRG file. + LEX::definer is ignored in that case. + + Otherwise, we should use CURRENT_USER() as definer. + + NOTE: when CREATE TRIGGER statement is allowed to be executed in PS/SP, + it will be required to create the definer below in persistent MEM_ROOT + of PS/SP. + */ + + if (!thd->slave_thread) + { + if (!(lex->definer= create_default_definer(thd))) + return 1; + } + } + + /* + If the specified definer differs from the current user, we should check + that the current user has SUPER privilege (in order to create trigger + under another user one must have SUPER privilege). 
+ */ + + if (lex->definer && + (strcmp(lex->definer->user.str, thd->security_ctx->priv_user) || + my_strcasecmp(system_charset_info, + lex->definer->host.str, + thd->security_ctx->priv_host))) + { + if (check_global_access(thd, SUPER_ACL)) + { + my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); + return TRUE; + } + } + + /* + Let us check if all references to fields in old/new versions of row in + this trigger are ok. + + NOTE: We do it here more from ease of use standpoint. We still have to + do some checks on each execution. E.g. we can catch privilege changes + only during execution. Also in near future, when we will allow access + to other tables from trigger we won't be able to catch changes in other + tables... + + Since we don't plan to access to contents of the fields it does not + matter that we choose for both OLD and NEW values the same versions + of Field objects here. + */ + old_field= new_field= table->field; + + for (trg_field= (Item_trigger_field *)(lex->trg_table_fields.first); + trg_field; trg_field= trg_field->next_trg_field) + { + /* + NOTE: now we do not check privileges at CREATE TRIGGER time. This will + be changed in the future. + */ + trg_field->setup_field(thd, table, NULL); + + if (!trg_field->fixed && + trg_field->fix_fields(thd, (Item **)0)) + return 1; + } + + /* + Here we are creating file with triggers and save all triggers in it. 
+ sql_create_definition_file() files handles renaming and backup of older + versions + */ + strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", tables->db, "/", NullS); + dir.length= unpack_filename(dir_buff, dir_buff); + dir.str= dir_buff; + file.length= strxnmov(file_buff, FN_REFLEN, tables->table_name, + triggers_file_ext, NullS) - file_buff; + file.str= file_buff; + trigname_file.length= strxnmov(trigname_buff, FN_REFLEN, + lex->spname->m_name.str, + trigname_file_ext, NullS) - trigname_buff; + trigname_file.str= trigname_buff; + strxnmov(trigname_path, FN_REFLEN, dir_buff, trigname_buff, NullS); + + /* Use the filesystem to enforce trigger namespace constraints. */ + if (!access(trigname_path, F_OK)) + { + my_error(ER_TRG_ALREADY_EXISTS, MYF(0)); + return 1; + } + + trigname.trigger_table.str= tables->table_name; + trigname.trigger_table.length= tables->table_name_length; + + if (sql_create_definition_file(&dir, &trigname_file, &trigname_file_type, + (gptr)&trigname, trigname_file_parameters, 0)) + return 1; + + /* + Soon we will invalidate table object and thus Table_triggers_list object + so don't care about place to which trg_def->ptr points and other + invariants (e.g. we don't bother to update names_list) + + QQ: Hmm... probably we should not care about setting up active thread + mem_root too. 
+ */ + if (!(trg_def= (LEX_STRING *)alloc_root(&table->mem_root, + sizeof(LEX_STRING))) || + definitions_list.push_back(trg_def, &table->mem_root) || + !(trg_sql_mode= (ulonglong*)alloc_root(&table->mem_root, + sizeof(ulonglong))) || + definition_modes_list.push_back(trg_sql_mode, &table->mem_root) || + !(trg_definer= (LEX_STRING*) alloc_root(&table->mem_root, + sizeof(LEX_STRING))) || + definers_list.push_back(trg_definer, &table->mem_root)) + goto err_with_cleanup; + + *trg_sql_mode= thd->variables.sql_mode; + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (lex->definer && !is_acl_user(lex->definer->host.str, + lex->definer->user.str)) + { + push_warning_printf(thd, + MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_NO_SUCH_USER, + ER(ER_NO_SUCH_USER), + lex->definer->user.str, + lex->definer->host.str); + } +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + + if (lex->definer) + { + /* SUID trigger. */ + + definer_user= lex->definer->user; + definer_host= lex->definer->host; + + trg_definer->str= trg_definer_holder; + trg_definer->length= strxmov(trg_definer->str, definer_user.str, "@", + definer_host.str, NullS) - trg_definer->str; + } + else + { + /* non-SUID trigger. */ + + definer_user.str= 0; + definer_user.length= 0; + + definer_host.str= 0; + definer_host.length= 0; + + trg_definer->str= (char*) ""; + trg_definer->length= 0; + } + + /* + Create well-formed trigger definition query. Original query is not + appropriated, because definer-clause can be not truncated. + */ + + stmt_query->append(STRING_WITH_LEN("CREATE ")); + + if (trg_definer) + { + /* + Append definer-clause if the trigger is SUID (a usual trigger in + new MySQL versions). 
+ */ + + append_definer(thd, stmt_query, &definer_user, &definer_host); + } + + stmt_query->append(thd->lex->stmt_definition_begin, + (char *) thd->lex->sphead->m_body_begin - + thd->lex->stmt_definition_begin + + thd->lex->sphead->m_body.length); + + trg_def->str= stmt_query->c_ptr(); + trg_def->length= stmt_query->length(); + + /* Create trigger definition file. */ + + if (!sql_create_definition_file(&dir, &file, &triggers_file_type, + (gptr)this, triggers_file_parameters, 0)) + return 0; + +err_with_cleanup: + my_delete(trigname_path, MYF(MY_WME)); + return 1; +} + + +/* + Deletes the .TRG file for a table + + SYNOPSIS + rm_trigger_file() + path - char buffer of size FN_REFLEN to be used + for constructing path to .TRG file. + db - table's database name + table_name - table's name + + RETURN VALUE + False - success + True - error +*/ + +static bool rm_trigger_file(char *path, const char *db, + const char *table_name) +{ + strxnmov(path, FN_REFLEN, mysql_data_home, "/", db, "/", table_name, + triggers_file_ext, NullS); + unpack_filename(path, path); + return my_delete(path, MYF(MY_WME)); +} + + +/* + Deletes the .TRN file for a trigger + + SYNOPSIS + rm_trigname_file() + path - char buffer of size FN_REFLEN to be used + for constructing path to .TRN file. + db - trigger's database name + table_name - trigger's name + + RETURN VALUE + False - success + True - error +*/ + +static bool rm_trigname_file(char *path, const char *db, + const char *trigger_name) +{ + strxnmov(path, FN_REFLEN, mysql_data_home, "/", db, "/", trigger_name, + trigname_file_ext, NullS); + unpack_filename(path, path); + return my_delete(path, MYF(MY_WME)); +} + + +/* + Helper function that saves .TRG file for Table_triggers_list object. 
+ + SYNOPSIS + save_trigger_file() + triggers Table_triggers_list object for which file should be saved + db Name of database for subject table + table_name Name of subject table + + RETURN VALUE + FALSE Success + TRUE Error +*/ + +static bool save_trigger_file(Table_triggers_list *triggers, const char *db, + const char *table_name) +{ + char dir_buff[FN_REFLEN], file_buff[FN_REFLEN]; + LEX_STRING dir, file; + + strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", db, "/", NullS); + dir.length= unpack_filename(dir_buff, dir_buff); + dir.str= dir_buff; + file.length= strxnmov(file_buff, FN_REFLEN, table_name, triggers_file_ext, + NullS) - file_buff; + file.str= file_buff; + + return sql_create_definition_file(&dir, &file, &triggers_file_type, + (gptr)triggers, triggers_file_parameters, 0); +} + + +/* + Drop trigger for table. + + SYNOPSIS + drop_trigger() + thd - current thread context + (including trigger definition in LEX) + tables - table list containing one open table for which trigger + is dropped. + stmt_query - [OUT] after successful return, this string contains + well-formed statement for creation this trigger. + + RETURN VALUE + False - success + True - error +*/ +bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables, + String *stmt_query) +{ + LEX *lex= thd->lex; + LEX_STRING *name; + List_iterator_fast<LEX_STRING> it_name(names_list); + List_iterator<LEX_STRING> it_def(definitions_list); + List_iterator<ulonglong> it_mod(definition_modes_list); + List_iterator<LEX_STRING> it_definer(definers_list); + char path[FN_REFLEN]; + + stmt_query->append(thd->query, thd->query_length); + + while ((name= it_name++)) + { + it_def++; + it_mod++; + it_definer++; + + if (my_strcasecmp(table_alias_charset, lex->spname->m_name.str, + name->str) == 0) + { + /* + Again we don't care much about other things required for + clean trigger removing since table will be reopened anyway. 
+ */ + it_def.remove(); + it_mod.remove(); + it_definer.remove(); + + if (definitions_list.is_empty()) + { + /* + TODO: Probably instead of removing .TRG file we should move + to archive directory but this should be done as part of + parse_file.cc functionality (because we will need it + elsewhere). + */ + if (rm_trigger_file(path, tables->db, tables->table_name)) + return 1; + } + else + { + if (save_trigger_file(this, tables->db, tables->table_name)) + return 1; + } + + if (rm_trigname_file(path, tables->db, lex->spname->m_name.str)) + return 1; + return 0; + } + } + + my_message(ER_TRG_DOES_NOT_EXIST, ER(ER_TRG_DOES_NOT_EXIST), MYF(0)); + return 1; +} + + +Table_triggers_list::~Table_triggers_list() +{ + for (int i= 0; i < (int)TRG_EVENT_MAX; i++) + for (int j= 0; j < (int)TRG_ACTION_MAX; j++) + delete bodies[i][j]; + + if (record1_field) + for (Field **fld_ptr= record1_field; *fld_ptr; fld_ptr++) + delete *fld_ptr; +} + + +/* + Prepare array of Field objects referencing to TABLE::record[1] instead + of record[0] (they will represent OLD.* row values in ON UPDATE trigger + and in ON DELETE trigger which will be called during REPLACE execution). + + SYNOPSIS + prepare_record1_accessors() + table - pointer to TABLE object for which we are creating fields. + + RETURN VALUE + False - success + True - error +*/ +bool Table_triggers_list::prepare_record1_accessors(TABLE *table) +{ + Field **fld, **old_fld; + + if (!(record1_field= (Field **)alloc_root(&table->mem_root, + (table->s->fields + 1) * + sizeof(Field*)))) + return 1; + + for (fld= table->field, old_fld= record1_field; *fld; fld++, old_fld++) + { + /* + QQ: it is supposed that it is ok to use this function for field + cloning... + */ + if (!(*old_fld= (*fld)->new_field(&table->mem_root, table, + table == (*fld)->table))) + return 1; + (*old_fld)->move_field((my_ptrdiff_t)(table->record[1] - + table->record[0])); + } + *old_fld= 0; + + return 0; +} + + +/* + Adjust Table_triggers_list with new TABLE pointer. 
+ + SYNOPSIS + set_table() + new_table - new pointer to TABLE instance +*/ + +void Table_triggers_list::set_table(TABLE *new_table) +{ + trigger_table= new_table; + for (Field **field= new_table->triggers->record1_field ; *field ; field++) + { + (*field)->table= (*field)->orig_table= new_table; + (*field)->table_name= &new_table->alias; + } +} + + +/* + Check whenever .TRG file for table exist and load all triggers it contains. + + SYNOPSIS + check_n_load() + thd - current thread context + db - table's database name + table_name - table's name + table - pointer to table object + names_only - stop after loading trigger names + + RETURN VALUE + False - success + True - error +*/ + +bool Table_triggers_list::check_n_load(THD *thd, const char *db, + const char *table_name, TABLE *table, + bool names_only) +{ + char path_buff[FN_REFLEN]; + LEX_STRING path; + File_parser *parser; + LEX_STRING save_db; + + DBUG_ENTER("Table_triggers_list::check_n_load"); + + strxnmov(path_buff, FN_REFLEN, mysql_data_home, "/", db, "/", table_name, + triggers_file_ext, NullS); + path.length= unpack_filename(path_buff, path_buff); + path.str= path_buff; + + // QQ: should we analyze errno somehow ? + if (access(path_buff, F_OK)) + DBUG_RETURN(0); + + /* + File exists so we got to load triggers. + FIXME: A lot of things to do here e.g. how about other funcs and being + more paranoical ? 
+ */ + + if ((parser= sql_parse_prepare(&path, &table->mem_root, 1))) + { + if (is_equal(&triggers_file_type, parser->type())) + { + Table_triggers_list *triggers= + new (&table->mem_root) Table_triggers_list(table); + Handle_old_incorrect_sql_modes_hook sql_modes_hook(path.str); + + if (!triggers) + DBUG_RETURN(1); + + /* + We don't have the following attributes in old versions of .TRG file, so + we should initialize the list for safety: + - sql_modes; + - definers; + */ + triggers->definition_modes_list.empty(); + triggers->definers_list.empty(); + + if (parser->parse((gptr)triggers, &table->mem_root, + triggers_file_parameters, + TRG_NUM_REQUIRED_PARAMETERS, + &sql_modes_hook)) + DBUG_RETURN(1); + + List_iterator_fast<LEX_STRING> it(triggers->definitions_list); + LEX_STRING *trg_create_str; + ulonglong *trg_sql_mode; + + if (triggers->definition_modes_list.is_empty() && + !triggers->definitions_list.is_empty()) + { + /* + It is old file format => we should fill list of sql_modes. + + We use one mode (current) for all triggers, because we have not + information about mode in old format. + */ + if (!(trg_sql_mode= (ulonglong*)alloc_root(&table->mem_root, + sizeof(ulonglong)))) + { + DBUG_RETURN(1); // EOM + } + *trg_sql_mode= global_system_variables.sql_mode; + while (it++) + { + if (triggers->definition_modes_list.push_back(trg_sql_mode, + &table->mem_root)) + { + DBUG_RETURN(1); // EOM + } + } + it.rewind(); + } + + if (triggers->definers_list.is_empty() && + !triggers->definitions_list.is_empty()) + { + /* + It is old file format => we should fill list of definers. + + If there is no definer information, we should not switch context to + definer when checking privileges. I.e. privileges for such triggers + are checked for "invoker" rather than for "definer". + */ + + LEX_STRING *trg_definer; + + if (! 
(trg_definer= (LEX_STRING*)alloc_root(&table->mem_root, + sizeof(LEX_STRING)))) + DBUG_RETURN(1); // EOM + + trg_definer->str= (char*) ""; + trg_definer->length= 0; + + while (it++) + { + if (triggers->definers_list.push_back(trg_definer, + &table->mem_root)) + { + DBUG_RETURN(1); // EOM + } + } + + it.rewind(); + } + + DBUG_ASSERT(triggers->definition_modes_list.elements == + triggers->definitions_list.elements); + DBUG_ASSERT(triggers->definers_list.elements == + triggers->definitions_list.elements); + + table->triggers= triggers; + + /* + Construct key that will represent triggers for this table in the set + of routines used by statement. + */ + triggers->sroutines_key.length= 1+strlen(db)+1+strlen(table_name)+1; + if (!(triggers->sroutines_key.str= + alloc_root(&table->mem_root, triggers->sroutines_key.length))) + DBUG_RETURN(1); + triggers->sroutines_key.str[0]= TYPE_ENUM_TRIGGER; + strxmov(triggers->sroutines_key.str+1, db, ".", table_name, NullS); + + /* + TODO: This could be avoided if there is no triggers + for UPDATE and DELETE. 
+ */ + if (!names_only && triggers->prepare_record1_accessors(table)) + DBUG_RETURN(1); + + List_iterator_fast<ulonglong> itm(triggers->definition_modes_list); + List_iterator_fast<LEX_STRING> it_definer(triggers->definers_list); + LEX *old_lex= thd->lex, lex; + sp_rcontext *save_spcont= thd->spcont; + ulong save_sql_mode= thd->variables.sql_mode; + LEX_STRING *on_table_name; + + thd->lex= &lex; + + save_db.str= thd->db; + save_db.length= thd->db_length; + thd->reset_db((char*) db, strlen(db)); + while ((trg_create_str= it++)) + { + trg_sql_mode= itm++; + LEX_STRING *trg_definer= it_definer++; + + thd->variables.sql_mode= (ulong)*trg_sql_mode; + lex_start(thd, (uchar*)trg_create_str->str, trg_create_str->length); + + thd->spcont= 0; + if (MYSQLparse((void *)thd) || thd->is_fatal_error) + { + /* Currently sphead is always deleted in case of a parse error */ + DBUG_ASSERT(lex.sphead == 0); + goto err_with_lex_cleanup; + } + + lex.sphead->set_info(0, 0, &lex.sp_chistics, (ulong) *trg_sql_mode); + + triggers->bodies[lex.trg_chistics.event] + [lex.trg_chistics.action_time]= lex.sphead; + + if (!trg_definer->length) + { + /* + This trigger was created/imported from the previous version of + MySQL, which does not support triggers definers. We should emit + warning here. + */ + + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRG_NO_DEFINER, ER(ER_TRG_NO_DEFINER), + (const char*) db, + (const char*) lex.sphead->m_name.str); + + /* + Set definer to the '' to correct displaying in the information + schema. + */ + + lex.sphead->set_definer("", 0); + + /* + Triggers without definer information are executed under the + authorization of the invoker. 
+ */ + + lex.sphead->m_chistics->suid= SP_IS_NOT_SUID; + } + else + lex.sphead->set_definer(trg_definer->str, trg_definer->length); + + if (triggers->names_list.push_back(&lex.sphead->m_name, + &table->mem_root)) + goto err_with_lex_cleanup; + + if (!(on_table_name= (LEX_STRING*) alloc_root(&table->mem_root, + sizeof(LEX_STRING)))) + goto err_with_lex_cleanup; + *on_table_name= lex.ident; + if (triggers->on_table_names_list.push_back(on_table_name, &table->mem_root)) + goto err_with_lex_cleanup; + + /* + Let us check that we correctly update trigger definitions when we + rename tables with triggers. + */ + DBUG_ASSERT(!my_strcasecmp(table_alias_charset, lex.query_tables->db, db) && + !my_strcasecmp(table_alias_charset, lex.query_tables->table_name, + table_name)); + + if (names_only) + { + lex_end(&lex); + continue; + } + + /* + Gather all Item_trigger_field objects representing access to fields + in old/new versions of row in trigger into lists containing all such + objects for the triggers with same action and timing. + */ + triggers->trigger_fields[lex.trg_chistics.event] + [lex.trg_chistics.action_time]= + (Item_trigger_field *)(lex.trg_table_fields.first); + /* + Also let us bind these objects to Field objects in table being + opened. + + We ignore errors here, because if even something is wrong we still + will be willing to open table to perform some operations (e.g. + SELECT)... + Anyway some things can be checked only during trigger execution. + */ + for (Item_trigger_field *trg_field= + (Item_trigger_field *)(lex.trg_table_fields.first); + trg_field; + trg_field= trg_field->next_trg_field) + { + trg_field->setup_field(thd, table, + &triggers->subject_table_grants[lex.trg_chistics.event] + [lex.trg_chistics.action_time]); + } + + lex_end(&lex); + } + thd->reset_db(save_db.str, save_db.length); + thd->lex= old_lex; + thd->spcont= save_spcont; + thd->variables.sql_mode= save_sql_mode; + + DBUG_RETURN(0); + +err_with_lex_cleanup: + // QQ: anything else ? 
+ lex_end(&lex); + thd->lex= old_lex; + thd->spcont= save_spcont; + thd->variables.sql_mode= save_sql_mode; + thd->reset_db(save_db.str, save_db.length); + DBUG_RETURN(1); + } + + /* + We don't care about this error message much because .TRG files will + be merged into .FRM anyway. + */ + my_error(ER_WRONG_OBJECT, MYF(0), + table_name, triggers_file_ext+1, "TRIGGER"); + DBUG_RETURN(1); + } + + DBUG_RETURN(1); +} + + +/* + Obtains and returns trigger metadata + + SYNOPSIS + get_trigger_info() + thd - current thread context + event - trigger event type + time_type - trigger action time + name - returns name of trigger + stmt - returns statement of trigger + sql_mode - returns sql_mode of trigger + definer_user - returns definer/creator of trigger. The caller is + responsible to allocate enough space for storing definer + information. + + RETURN VALUE + False - success + True - error +*/ + +bool Table_triggers_list::get_trigger_info(THD *thd, trg_event_type event, + trg_action_time_type time_type, + LEX_STRING *trigger_name, + LEX_STRING *trigger_stmt, + ulong *sql_mode, + LEX_STRING *definer) +{ + sp_head *body; + DBUG_ENTER("get_trigger_info"); + if ((body= bodies[event][time_type])) + { + *trigger_name= body->m_name; + *trigger_stmt= body->m_body; + *sql_mode= body->m_sql_mode; + + if (body->m_chistics->suid == SP_IS_NOT_SUID) + { + definer->str[0]= 0; + definer->length= 0; + } + else + { + definer->length= strxmov(definer->str, body->m_definer_user.str, "@", + body->m_definer_host.str, NullS) - definer->str; + } + + DBUG_RETURN(0); + } + DBUG_RETURN(1); +} + + +/* + Find trigger's table from trigger identifier and add it to + the statement table list. 
+ + SYNOPSIS + mysql_table_for_trigger() + thd - current thread context + trig - identifier for trigger + if_exists - treat a not existing trigger as a warning if TRUE + table - pointer to TABLE_LIST object for the table trigger (output) + + RETURN VALUE + 0 Success + 1 Error +*/ + +int +add_table_for_trigger(THD *thd, sp_name *trig, bool if_exists, + TABLE_LIST **table) +{ + LEX *lex= thd->lex; + char path_buff[FN_REFLEN]; + LEX_STRING path; + File_parser *parser; + struct st_trigname trigname; + Handle_old_incorrect_trigger_table_hook trigger_table_hook( + path_buff, &trigname.trigger_table); + + DBUG_ENTER("add_table_for_trigger"); + DBUG_ASSERT(table != NULL); + + strxnmov(path_buff, FN_REFLEN, mysql_data_home, "/", trig->m_db.str, "/", + trig->m_name.str, trigname_file_ext, NullS); + path.length= unpack_filename(path_buff, path_buff); + path.str= path_buff; + + if (access(path_buff, F_OK)) + { + if (if_exists) + { + push_warning_printf(thd, + MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_TRG_DOES_NOT_EXIST, + ER(ER_TRG_DOES_NOT_EXIST)); + *table= NULL; + DBUG_RETURN(0); + } + + my_error(ER_TRG_DOES_NOT_EXIST, MYF(0)); + DBUG_RETURN(1); + } + + if (!(parser= sql_parse_prepare(&path, thd->mem_root, 1))) + DBUG_RETURN(1); + + if (!is_equal(&trigname_file_type, parser->type())) + { + my_error(ER_WRONG_OBJECT, MYF(0), trig->m_name.str, trigname_file_ext+1, + "TRIGGERNAME"); + DBUG_RETURN(1); + } + + if (parser->parse((gptr)&trigname, thd->mem_root, + trigname_file_parameters, 1, + &trigger_table_hook)) + DBUG_RETURN(1); + + /* We need to reset statement table list to be PS/SP friendly. */ + lex->query_tables= 0; + lex->query_tables_last= &lex->query_tables; + *table= sp_add_to_query_tables(thd, lex, trig->m_db.str, + trigname.trigger_table.str, TL_IGNORE); + + if (! *table) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + + +/* + Drop all triggers for table. 
+ + SYNOPSIS + drop_all_triggers() + thd - current thread context + db - schema for table + name - name for table + + NOTE + The calling thread should hold the LOCK_open mutex; + + RETURN VALUE + False - success + True - error +*/ + +bool Table_triggers_list::drop_all_triggers(THD *thd, char *db, char *name) +{ + TABLE table; + char path[FN_REFLEN]; + bool result= 0; + DBUG_ENTER("drop_all_triggers"); + + bzero(&table, sizeof(table)); + init_alloc_root(&table.mem_root, 8192, 0); + + safe_mutex_assert_owner(&LOCK_open); + + if (Table_triggers_list::check_n_load(thd, db, name, &table, 1)) + { + result= 1; + goto end; + } + if (table.triggers) + { + LEX_STRING *trigger; + List_iterator_fast<LEX_STRING> it_name(table.triggers->names_list); + + while ((trigger= it_name++)) + { + if (rm_trigname_file(path, db, trigger->str)) + { + /* + Instead of immediately bailing out with error if we were unable + to remove .TRN file we will try to drop other files. + */ + result= 1; + continue; + } + } + + if (rm_trigger_file(path, db, name)) + { + result= 1; + goto end; + } + } +end: + if (table.triggers) + delete table.triggers; + free_root(&table.mem_root, MYF(0)); + DBUG_RETURN(result); +} + + +/* + Update .TRG file after renaming triggers' subject table + (change name of table in triggers' definitions). 
+ + SYNOPSIS + change_table_name_in_triggers() + thd Thread context + db_name Database of subject table + old_table_name Old subject table's name + new_table_name New subject table's name + + RETURN VALUE + FALSE Success + TRUE Failure +*/ + +bool +Table_triggers_list::change_table_name_in_triggers(THD *thd, + const char *db_name, + LEX_STRING *old_table_name, + LEX_STRING *new_table_name) +{ + char path_buff[FN_REFLEN]; + LEX_STRING *def, *on_table_name, new_def; + ulong save_sql_mode= thd->variables.sql_mode; + List_iterator_fast<LEX_STRING> it_def(definitions_list); + List_iterator_fast<LEX_STRING> it_on_table_name(on_table_names_list); + List_iterator_fast<ulonglong> it_mode(definition_modes_list); + uint on_q_table_name_len, before_on_len; + String buff; + + DBUG_ASSERT(definitions_list.elements == on_table_names_list.elements && + definitions_list.elements == definition_modes_list.elements); + + while ((def= it_def++)) + { + on_table_name= it_on_table_name++; + thd->variables.sql_mode= (ulong) *(it_mode++); + + /* Construct CREATE TRIGGER statement with new table name. */ + buff.length(0); + before_on_len= on_table_name->str - def->str; + buff.append(def->str, before_on_len); + buff.append(STRING_WITH_LEN("ON ")); + append_identifier(thd, &buff, new_table_name->str, new_table_name->length); + buff.append(STRING_WITH_LEN(" ")); + on_q_table_name_len= buff.length() - before_on_len; + buff.append(on_table_name->str + on_table_name->length, + def->length - (before_on_len + on_table_name->length)); + /* + It is OK to allocate some memory on table's MEM_ROOT since this + table instance will be thrown out at the end of rename anyway. 
+ */ + new_def.str= memdup_root(&trigger_table->mem_root, buff.ptr(), + buff.length()); + new_def.length= buff.length(); + on_table_name->str= new_def.str + before_on_len; + on_table_name->length= on_q_table_name_len; + *def= new_def; + } + + thd->variables.sql_mode= save_sql_mode; + + if (thd->is_fatal_error) + return TRUE; /* OOM */ + + if (save_trigger_file(this, db_name, new_table_name->str)) + return TRUE; + if (rm_trigger_file(path_buff, db_name, old_table_name->str)) + { + (void) rm_trigger_file(path_buff, db_name, new_table_name->str); + return TRUE; + } + return FALSE; +} + + +/* + Iterate though Table_triggers_list::names_list list and update .TRN files + after renaming triggers' subject table. + + SYNOPSIS + change_table_name_in_trignames() + db_name Database of subject table + new_table_name New subject table's name + stopper Pointer to Table_triggers_list::names_list at + which we should stop updating. + + RETURN VALUE + 0 Success + non-0 Failure, pointer to Table_triggers_list::names_list element + for which update failed. 
+*/ + +LEX_STRING* +Table_triggers_list::change_table_name_in_trignames(const char *db_name, + LEX_STRING *new_table_name, + LEX_STRING *stopper) +{ + char dir_buff[FN_REFLEN], trigname_buff[FN_REFLEN]; + struct st_trigname trigname; + LEX_STRING dir, trigname_file; + LEX_STRING *trigger; + List_iterator_fast<LEX_STRING> it_name(names_list); + + strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", db_name, "/", NullS); + dir.length= unpack_filename(dir_buff, dir_buff); + dir.str= dir_buff; + + while ((trigger= it_name++) != stopper) + { + trigname_file.length= strxnmov(trigname_buff, FN_REFLEN, trigger->str, + trigname_file_ext, NullS) - trigname_buff; + trigname_file.str= trigname_buff; + + trigname.trigger_table= *new_table_name; + + if (sql_create_definition_file(&dir, &trigname_file, &trigname_file_type, + (gptr)&trigname, trigname_file_parameters, 0)) + return trigger; + } + + return 0; +} + + +/* + Update .TRG and .TRN files after renaming triggers' subject table. + + SYNOPSIS + change_table_name() + thd Thread context + db Old database of subject table + old_table Old name of subject table + new_db New database for subject table + new_table New name of subject table + + NOTE + This method tries to leave trigger related files in consistent state, + i.e. it either will complete successfully, or will fail leaving files + in their initial state. + Also this method assumes that subject table is not renamed to itself. 
+ + RETURN VALUE + FALSE Success + TRUE Error +*/ + +bool Table_triggers_list::change_table_name(THD *thd, const char *db, + const char *old_table, + const char *new_db, + const char *new_table) +{ + TABLE table; + bool result= 0; + LEX_STRING *err_trigname; + DBUG_ENTER("change_table_name"); + + bzero(&table, sizeof(table)); + init_alloc_root(&table.mem_root, 8192, 0); + + safe_mutex_assert_owner(&LOCK_open); + + DBUG_ASSERT(my_strcasecmp(table_alias_charset, db, new_db) || + my_strcasecmp(table_alias_charset, old_table, new_table)); + + if (Table_triggers_list::check_n_load(thd, db, old_table, &table, TRUE)) + { + result= 1; + goto end; + } + if (table.triggers) + { + LEX_STRING_WITH_INIT old_table_name(old_table, strlen(old_table)); + LEX_STRING_WITH_INIT new_table_name(new_table, strlen(new_table)); + /* + Since triggers should be in the same schema as their subject tables + moving table with them between two schemas raises too many questions. + (E.g. what should happen if in new schema we already have trigger + with same name ?). + */ + if (my_strcasecmp(table_alias_charset, db, new_db)) + { + my_error(ER_TRG_IN_WRONG_SCHEMA, MYF(0)); + result= 1; + goto end; + } + if (table.triggers->change_table_name_in_triggers(thd, db, + &old_table_name, + &new_table_name)) + { + result= 1; + goto end; + } + if ((err_trigname= table.triggers->change_table_name_in_trignames( + db, &new_table_name, 0))) + { + /* + If we were unable to update one of .TRN files properly we will + revert all changes that we have done and report about error. + We assume that we will be able to undo our changes without errors + (we can't do much if there will be an error anyway). 
+ */ + (void) table.triggers->change_table_name_in_trignames(db, + &old_table_name, + err_trigname); + (void) table.triggers->change_table_name_in_triggers(thd, db, + &new_table_name, + &old_table_name); + result= 1; + goto end; + } + } +end: + delete table.triggers; + free_root(&table.mem_root, MYF(0)); + DBUG_RETURN(result); +} + + +bool Table_triggers_list::process_triggers(THD *thd, trg_event_type event, + trg_action_time_type time_type, + bool old_row_is_record1) +{ + bool err_status= FALSE; + sp_head *sp_trigger= bodies[event][time_type]; + + if (sp_trigger) + { + Sub_statement_state statement_state; + + if (old_row_is_record1) + { + old_field= record1_field; + new_field= trigger_table->field; + } + else + { + new_field= record1_field; + old_field= trigger_table->field; + } + + thd->reset_sub_statement_state(&statement_state, SUB_STMT_TRIGGER); + err_status= sp_trigger->execute_trigger + (thd, trigger_table->s->db, trigger_table->s->table_name, + &subject_table_grants[event][time_type]); + thd->restore_sub_statement_state(&statement_state); + } + + return err_status; +} + + +/* + Mark fields of subject table which we read/set in its triggers as such. + + SYNOPSIS + mark_fields_used() + thd Current thread context + event Type of event triggers for which we are going to inspect + + DESCRIPTION + This method marks fields of subject table which are read/set in its + triggers as such (by setting Field::query_id equal to THD::query_id) + and thus informs handler that values for these fields should be + retrieved/stored during execution of statement. +*/ + +void Table_triggers_list::mark_fields_used(THD *thd, trg_event_type event) +{ + int action_time; + Item_trigger_field *trg_field; + + for (action_time= 0; action_time < (int)TRG_ACTION_MAX; action_time++) + { + for (trg_field= trigger_fields[event][action_time]; trg_field; + trg_field= trg_field->next_trg_field) + { + /* We cannot mark fields which does not present in table. 
*/ + if (trg_field->field_idx != (uint)-1) + trigger_table->field[trg_field->field_idx]->query_id = thd->query_id; + } + } +} + + +/* + Check if field of subject table can be changed in before update trigger. + + SYNOPSIS + is_updated_in_before_update_triggers() + field Field object for field to be checked + + NOTE + Field passed to this function should be bound to the same + TABLE object as Table_triggers_list. + + RETURN VALUE + TRUE Field is changed + FALSE Otherwise +*/ + +bool Table_triggers_list::is_updated_in_before_update_triggers(Field *fld) +{ + Item_trigger_field *trg_fld; + for (trg_fld= trigger_fields[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE]; + trg_fld != 0; + trg_fld= trg_fld->next_trg_field) + { + if (trg_fld->get_settable_routine_parameter() && + trg_fld->field_idx != (uint)-1 && + trigger_table->field[trg_fld->field_idx]->eq(fld)) + return TRUE; + } + return FALSE; +} + + +/* + Trigger BUG#14090 compatibility hook + + SYNOPSIS + Handle_old_incorrect_sql_modes_hook::process_unknown_string() + unknown_key [in/out] reference on the line with unknown + parameter and the parsing point + base [in] base address for parameter writing (structure + like TABLE) + mem_root [in] MEM_ROOT for parameters allocation + end [in] the end of the configuration + + NOTE: this hook process back compatibility for incorrectly written + sql_modes parameter (see BUG#14090). 
+ + RETURN + FALSE OK + TRUE Error +*/ + +#define INVALID_SQL_MODES_LENGTH 13 + +bool +Handle_old_incorrect_sql_modes_hook::process_unknown_string(char *&unknown_key, + gptr base, + MEM_ROOT *mem_root, + char *end) +{ + DBUG_ENTER("Handle_old_incorrect_sql_modes_hook::process_unknown_string"); + DBUG_PRINT("info", ("unknown key:%60s", unknown_key)); + + if (unknown_key + INVALID_SQL_MODES_LENGTH + 1 < end && + unknown_key[INVALID_SQL_MODES_LENGTH] == '=' && + !memcmp(unknown_key, STRING_WITH_LEN("sql_modes"))) + { + char *ptr= unknown_key + INVALID_SQL_MODES_LENGTH + 1; + + DBUG_PRINT("info", ("sql_modes affected by BUG#14090 detected")); + push_warning_printf(current_thd, + MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_OLD_FILE_FORMAT, + ER(ER_OLD_FILE_FORMAT), + (char *)path, "TRIGGER"); + if (get_file_options_ulllist(ptr, end, unknown_key, base, + &sql_modes_parameters, mem_root)) + { + DBUG_RETURN(TRUE); + } + /* + Set parsing pointer to the last symbol of string (\n) + 1) to avoid problem with \0 in the junk after sql_modes + 2) to speed up skipping this line by parser. + */ + unknown_key= ptr-1; + } + DBUG_RETURN(FALSE); +} + +/* + Trigger BUG#15921 compatibility hook. For details see + Handle_old_incorrect_sql_modes_hook::process_unknown_string(). 
+*/ + +#define INVALID_TRIGGER_TABLE_LENGTH 15 + +bool +Handle_old_incorrect_trigger_table_hook:: +process_unknown_string(char *&unknown_key, gptr base, MEM_ROOT *mem_root, + char *end) +{ + DBUG_ENTER("Handle_old_incorrect_trigger_table_hook::process_unknown_string"); + DBUG_PRINT("info", ("unknown key:%60s", unknown_key)); + + if (unknown_key + INVALID_TRIGGER_TABLE_LENGTH + 1 < end && + unknown_key[INVALID_TRIGGER_TABLE_LENGTH] == '=' && + !memcmp(unknown_key, STRING_WITH_LEN("trigger_table"))) + { + char *ptr= unknown_key + INVALID_TRIGGER_TABLE_LENGTH + 1; + + DBUG_PRINT("info", ("trigger_table affected by BUG#15921 detected")); + push_warning_printf(current_thd, + MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_OLD_FILE_FORMAT, + ER(ER_OLD_FILE_FORMAT), + (char *)path, "TRIGGER"); + + if (!(ptr= parse_escaped_string(ptr, end, mem_root, trigger_table_value))) + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), "trigger_table", + unknown_key); + DBUG_RETURN(TRUE); + } + + /* Set parsing pointer to the last symbol of string (\n). */ + unknown_key= ptr-1; + } + DBUG_RETURN(FALSE); +} diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h new file mode 100644 index 00000000000..19b2b24a3fe --- /dev/null +++ b/sql/sql_trigger.h @@ -0,0 +1,145 @@ +/* Copyright (C) 2004-2005 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +/* + This class holds all information about triggers of table. + + QQ: Will it be merged into TABLE in future ? +*/ + +class Table_triggers_list: public Sql_alloc +{ + /* Triggers as SPs grouped by event, action_time */ + sp_head *bodies[TRG_EVENT_MAX][TRG_ACTION_MAX]; + /* + Heads of the lists linking items for all fields used in triggers + grouped by event and action_time. + */ + Item_trigger_field *trigger_fields[TRG_EVENT_MAX][TRG_ACTION_MAX]; + /* + Copy of TABLE::Field array with field pointers set to TABLE::record[1] + buffer instead of TABLE::record[0] (used for OLD values in on UPDATE + trigger and DELETE trigger when it is called for REPLACE). + */ + Field **record1_field; + /* + During execution of trigger new_field and old_field should point to the + array of fields representing new or old version of row correspondingly + (so it can point to TABLE::field or to Tale_triggers_list::record1_field) + */ + Field **new_field; + Field **old_field; + + /* TABLE instance for which this triggers list object was created */ + TABLE *trigger_table; + /* + Names of triggers. + Should correspond to order of triggers on definitions_list, + used in CREATE/DROP TRIGGER for looking up trigger by name. + */ + List<LEX_STRING> names_list; + /* + List of "ON table_name" parts in trigger definitions, used for + updating trigger definitions during RENAME TABLE. + */ + List<LEX_STRING> on_table_names_list; + /* + Key representing triggers for this table in set of all stored + routines used by statement. + TODO: We won't need this member once triggers namespace will be + database-wide instead of table-wide because then we will be able + to use key based on sp_name as for other stored routines. 
+ */ + LEX_STRING sroutines_key; + + /* + Grant information for each trigger (pair: subject table, trigger definer). + */ + GRANT_INFO subject_table_grants[TRG_EVENT_MAX][TRG_ACTION_MAX]; + +public: + /* + Field responsible for storing triggers definitions in file. + It have to be public because we are using it directly from parser. + */ + List<LEX_STRING> definitions_list; + /* + List of sql modes for triggers + */ + List<ulonglong> definition_modes_list; + + List<LEX_STRING> definers_list; + + Table_triggers_list(TABLE *table_arg): + record1_field(0), trigger_table(table_arg) + { + bzero((char *)bodies, sizeof(bodies)); + bzero((char *)trigger_fields, sizeof(trigger_fields)); + bzero((char *)&subject_table_grants, sizeof(subject_table_grants)); + } + ~Table_triggers_list(); + + bool create_trigger(THD *thd, TABLE_LIST *table, String *stmt_query); + bool drop_trigger(THD *thd, TABLE_LIST *table, String *stmt_query); + bool process_triggers(THD *thd, trg_event_type event, + trg_action_time_type time_type, + bool old_row_is_record1); + bool get_trigger_info(THD *thd, trg_event_type event, + trg_action_time_type time_type, + LEX_STRING *trigger_name, LEX_STRING *trigger_stmt, + ulong *sql_mode, + LEX_STRING *definer); + + static bool check_n_load(THD *thd, const char *db, const char *table_name, + TABLE *table, bool names_only); + static bool drop_all_triggers(THD *thd, char *db, char *table_name); + static bool change_table_name(THD *thd, const char *db, + const char *old_table, + const char *new_db, + const char *new_table); + bool has_delete_triggers() + { + return (bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] || + bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER]); + } + + void set_table(TABLE *new_table); + + void mark_fields_used(THD *thd, trg_event_type event); + + bool is_updated_in_before_update_triggers(Field *fld); + + friend class Item_trigger_field; + friend int sp_cache_routines_and_add_tables_for_triggers(THD *thd, LEX *lex, + TABLE_LIST *table); + +private: + 
bool prepare_record1_accessors(TABLE *table); + LEX_STRING* change_table_name_in_trignames(const char *db_name, + LEX_STRING *new_table_name, + LEX_STRING *stopper); + bool change_table_name_in_triggers(THD *thd, + const char *db_name, + LEX_STRING *old_table_name, + LEX_STRING *new_table_name); +}; + +extern const LEX_STRING trg_action_time_type_names[]; +extern const LEX_STRING trg_event_type_names[]; + +int +add_table_for_trigger(THD *thd, sp_name *trig, bool if_exists, + TABLE_LIST **table); diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 0b84d1b5fb3..077660f0bb9 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -17,15 +16,15 @@ /* This implements 'user defined functions' */ /* -** Known bugs: -** -** Memory for functions are never freed! -** Shared libraries are not closed before mysqld exists; -** - This is because we can't be sure if some threads is using -** a functions. -** -** The buggs only affects applications that creates and frees a lot of -** dynamic functions, so this shouldn't be a real problem. + Known bugs: + + Memory for functions is never freed! + Shared libraries are not closed before mysqld exits; + - This is because we can't be sure if some threads are using + a function. + + The bugs only affect applications that create and free a lot of + dynamic functions, so this shouldn't be a real problem. 
*/ #ifdef USE_PRAGMA_IMPLEMENTATION @@ -83,7 +82,7 @@ static char *init_syms(udf_func *tmp, char *nm) { char *end; - if (!((tmp->func= dlsym(tmp->dlhandle, tmp->name.str)))) + if (!((tmp->func= (Udf_func_any) dlsym(tmp->dlhandle, tmp->name.str)))) return tmp->name.str; end=strmov(nm,tmp->name.str); @@ -91,18 +90,18 @@ static char *init_syms(udf_func *tmp, char *nm) if (tmp->type == UDFTYPE_AGGREGATE) { (void)strmov(end, "_clear"); - if (!((tmp->func_clear= dlsym(tmp->dlhandle, nm)))) + if (!((tmp->func_clear= (Udf_func_clear) dlsym(tmp->dlhandle, nm)))) return nm; (void)strmov(end, "_add"); - if (!((tmp->func_add= dlsym(tmp->dlhandle, nm)))) + if (!((tmp->func_add= (Udf_func_add) dlsym(tmp->dlhandle, nm)))) return nm; } (void) strmov(end,"_deinit"); - tmp->func_deinit= dlsym(tmp->dlhandle, nm); + tmp->func_deinit= (Udf_func_deinit) dlsym(tmp->dlhandle, nm); (void) strmov(end,"_init"); - tmp->func_init= dlsym(tmp->dlhandle, nm); + tmp->func_init= (Udf_func_init) dlsym(tmp->dlhandle, nm); /* to prefent loading "udf" from, e.g. libc.so @@ -110,15 +109,15 @@ static char *init_syms(udf_func *tmp, char *nm) */ if (!tmp->func_init && !tmp->func_deinit && tmp->type != UDFTYPE_AGGREGATE) { - if (opt_allow_suspicious_udfs) - sql_print_error(ER(ER_CANT_FIND_DL_ENTRY), nm); - else + if (!opt_allow_suspicious_udfs) return nm; + if (current_thd->variables.log_warnings) + sql_print_warning(ER(ER_CANT_FIND_DL_ENTRY), nm); } - return 0; } + extern "C" byte* get_hash_key(const byte *buff,uint *length, my_bool not_used __attribute__((unused))) { @@ -127,9 +126,10 @@ extern "C" byte* get_hash_key(const byte *buff,uint *length, return (byte*) udf->name.str; } + /* -** Read all predeclared functions from mysql.func and accept all that -** can be used. + Read all predeclared functions from mysql.func and accept all that + can be used. 
*/ void udf_init() @@ -139,6 +139,7 @@ void udf_init() READ_RECORD read_record_info; TABLE *table; int error; + char db[]= "mysql"; /* A subject to casednstr, can't be constant */ DBUG_ENTER("ufd_init"); if (initialized) @@ -158,14 +159,14 @@ void udf_init() DBUG_VOID_RETURN; } initialized = 1; + new_thd->thread_stack= (char*) &new_thd; new_thd->store_globals(); - new_thd->db= my_strdup("mysql", MYF(0)); - new_thd->db_length=5; + new_thd->set_db(db, sizeof(db)-1); bzero((gptr) &tables,sizeof(tables)); - tables.alias= tables.real_name= (char*) "func"; + tables.alias= tables.table_name= (char*) "func"; tables.lock_type = TL_READ; - tables.db=new_thd->db; + tables.db= db; if (simple_open_n_lock_tables(new_thd, &tables)) { @@ -185,7 +186,7 @@ void udf_init() char *dl_name= get_field(&mem, table->field[2]); bool new_dl=0; Item_udftype udftype=UDFTYPE_FUNCTION; - if (table->fields >= 4) // New func table + if (table->s->fields >= 4) // New func table udftype=(Item_udftype) table->field[3]->val_int(); /* @@ -235,7 +236,7 @@ void udf_init() } } if (error > 0) - sql_print_error(ER(ER_GET_ERRNO), my_errno); + sql_print_error("Got unknown error: %d", my_errno); end_read_record(&read_record_info); new_thd->version--; // Force close to free memory @@ -306,6 +307,10 @@ static void del_udf(udf_func *udf) void free_udf(udf_func *udf) { DBUG_ENTER("free_udf"); + + if (!initialized) + DBUG_VOID_RETURN; + rw_wrlock(&THR_LOCK_udf); if (!--udf->usage_count) { @@ -330,6 +335,9 @@ udf_func *find_udf(const char *name,uint length,bool mark_used) udf_func *udf=0; DBUG_ENTER("find_udf"); + if (!initialized) + DBUG_RETURN(NULL); + /* TODO: This should be changed to reader locks someday! 
*/ if (mark_used) rw_wrlock(&THR_LOCK_udf); /* Called during fix_fields */ @@ -402,7 +410,7 @@ int mysql_create_function(THD *thd,udf_func *udf) if (!initialized) { - send_error(thd, ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES)); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); DBUG_RETURN(1); } @@ -413,28 +421,30 @@ int mysql_create_function(THD *thd,udf_func *udf) */ if (strchr(udf->dl, '/') || IF_WIN(strchr(udf->dl, '\\'),0)) { - send_error(thd, ER_UDF_NO_PATHS,ER(ER_UDF_NO_PATHS)); + my_message(ER_UDF_NO_PATHS, ER(ER_UDF_NO_PATHS), MYF(0)); DBUG_RETURN(1); } if (udf->name.length > NAME_LEN) { - net_printf(thd, ER_TOO_LONG_IDENT,udf->name); + my_error(ER_TOO_LONG_IDENT, MYF(0), udf->name); DBUG_RETURN(1); } rw_wrlock(&THR_LOCK_udf); if ((hash_search(&udf_hash,(byte*) udf->name.str, udf->name.length))) { - net_printf(thd, ER_UDF_EXISTS, udf->name); + my_error(ER_UDF_EXISTS, MYF(0), udf->name); goto err; } if (!(dl = find_udf_dl(udf->dl))) { + DBUG_PRINT("info", ("Calling dlopen, udf->dl: %s", udf->dl)); if (!(dl = dlopen(udf->dl, RTLD_NOW))) { DBUG_PRINT("error",("dlopen of %s failed, error: %d (%s)", udf->dl,errno,dlerror())); - net_printf(thd, ER_CANT_OPEN_LIBRARY, udf->dl, errno, dlerror()); + my_error(ER_CANT_OPEN_LIBRARY, MYF(0), + udf->dl, errno, dlerror()); goto err; } new_dl=1; @@ -444,17 +454,14 @@ int mysql_create_function(THD *thd,udf_func *udf) char buf[NAME_LEN+16], *missing; if ((missing= init_syms(udf, buf))) { - net_printf(thd, ER_CANT_FIND_DL_ENTRY, missing); + my_error(ER_CANT_FIND_DL_ENTRY, MYF(0), missing); goto err; } } udf->name.str=strdup_root(&mem,udf->name.str); udf->dl=strdup_root(&mem,udf->dl); if (!(u_d=add_udf(&udf->name,udf->returns,udf->dl,udf->type))) - { - send_error(thd,0); // End of memory goto err; - } u_d->dlhandle = dl; u_d->func=udf->func; u_d->func_init=udf->func_init; @@ -466,23 +473,23 @@ int mysql_create_function(THD *thd,udf_func *udf) bzero((char*) &tables,sizeof(tables)); tables.db= (char*) "mysql"; 
- tables.real_name= tables.alias= (char*) "func"; + tables.table_name= tables.alias= (char*) "func"; /* Allow creation of functions even if we can't open func table */ if (!(table = open_ltable(thd,&tables,TL_WRITE))) goto err; - restore_record(table,default_values); // Default values for fields + restore_record(table, s->default_values); // Default values for fields table->field[0]->store(u_d->name.str, u_d->name.length, system_charset_info); - table->field[1]->store((longlong) u_d->returns); + table->field[1]->store((longlong) u_d->returns, TRUE); table->field[2]->store(u_d->dl,(uint) strlen(u_d->dl), system_charset_info); - if (table->fields >= 4) // If not old func format - table->field[3]->store((longlong) u_d->type); + if (table->s->fields >= 4) // If not old func format + table->field[3]->store((longlong) u_d->type, TRUE); error = table->file->write_row(table->record[0]); close_thread_tables(thd); if (error) { - net_printf(thd, ER_ERROR_ON_WRITE, "mysql.func",error); + my_error(ER_ERROR_ON_WRITE, MYF(0), "mysql.func", error); del_udf(u_d); goto err; } @@ -502,19 +509,23 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) TABLE *table; TABLE_LIST tables; udf_func *udf; + char *exact_name_str; + uint exact_name_len; DBUG_ENTER("mysql_drop_function"); if (!initialized) { - send_error(thd, ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES)); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); DBUG_RETURN(1); } rw_wrlock(&THR_LOCK_udf); if (!(udf=(udf_func*) hash_search(&udf_hash,(byte*) udf_name->str, (uint) udf_name->length))) { - net_printf(thd, ER_FUNCTION_NOT_DEFINED, udf_name->str); + my_error(ER_FUNCTION_NOT_DEFINED, MYF(0), udf_name->str); goto err; } + exact_name_str= udf->name.str; + exact_name_len= udf->name.length; del_udf(udf); /* Close the handle if this was function that was found during boot or @@ -525,10 +536,10 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) bzero((char*) &tables,sizeof(tables)); 
tables.db=(char*) "mysql"; - tables.real_name= tables.alias= (char*) "func"; + tables.table_name= tables.alias= (char*) "func"; if (!(table = open_ltable(thd,&tables,TL_WRITE))) goto err; - table->field[0]->store(udf_name->str, udf_name->length, system_charset_info); + table->field[0]->store(exact_name_str, exact_name_len, &my_charset_bin); table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (!table->file->index_read_idx(table->record[0], 0, (byte*) table->field[0]->ptr, diff --git a/sql/sql_udf.h b/sql/sql_udf.h index ca00901ea67..3cd9343610c 100644 --- a/sql/sql_udf.h +++ b/sql/sql_udf.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001, 2003-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -23,6 +22,15 @@ enum Item_udftype {UDFTYPE_FUNCTION=1,UDFTYPE_AGGREGATE}; +typedef void (*Udf_func_clear)(UDF_INIT *, uchar *, uchar *); +typedef void (*Udf_func_add)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *); +typedef void (*Udf_func_deinit)(UDF_INIT*); +typedef my_bool (*Udf_func_init)(UDF_INIT *, UDF_ARGS *, char *); +typedef void (*Udf_func_any)(); +typedef double (*Udf_func_double)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *); +typedef longlong (*Udf_func_longlong)(UDF_INIT *, UDF_ARGS *, uchar *, + uchar *); + typedef struct st_udf_func { LEX_STRING name; @@ -30,11 +38,11 @@ typedef struct st_udf_func Item_udftype type; char *dl; void *dlhandle; - void *func; - void *func_init; - void *func_deinit; - void *func_clear; - void *func_add; + Udf_func_any func; + Udf_func_init func_init; + Udf_func_deinit func_deinit; + Udf_func_clear func_clear; + Udf_func_add func_add; ulong usage_count; } udf_func; @@ -65,18 +73,18 @@ class udf_handler :public Sql_alloc Item_result result_type () const { return u_d ? 
u_d->returns : STRING_RESULT;} bool get_arguments(); - bool fix_fields(THD *thd,struct st_table_list *tlist,Item_result_field *item, - uint arg_count,Item **args); + bool fix_fields(THD *thd, Item_result_field *item, + uint arg_count, Item **args); void cleanup(); double val(my_bool *null_value) { + is_null= 0; if (get_arguments()) { *null_value=1; return 0.0; } - double (*func)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)= - (double (*)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)) u_d->func; + Udf_func_double func= (Udf_func_double) u_d->func; double tmp=func(&initid, &f_args, &is_null, &error); if (is_null || error) { @@ -88,13 +96,13 @@ class udf_handler :public Sql_alloc } longlong val_int(my_bool *null_value) { + is_null= 0; if (get_arguments()) { *null_value=1; return LL(0); } - longlong (*func)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)= - (longlong (*)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)) u_d->func; + Udf_func_longlong func= (Udf_func_longlong) u_d->func; longlong tmp=func(&initid, &f_args, &is_null, &error); if (is_null || error) { @@ -104,11 +112,11 @@ class udf_handler :public Sql_alloc *null_value=0; return tmp; } + my_decimal *val_decimal(my_bool *null_value, my_decimal *dec_buf); void clear() { is_null= 0; - void (*func)(UDF_INIT *, uchar *, uchar *)= - (void (*)(UDF_INIT *, uchar *, uchar *)) u_d->func_clear; + Udf_func_clear func= u_d->func_clear; func(&initid, &is_null, &error); } void add(my_bool *null_value) @@ -118,8 +126,7 @@ class udf_handler :public Sql_alloc *null_value=1; return; } - void (*func)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)= - (void (*)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *)) u_d->func_add; + Udf_func_add func= u_d->func_add; func(&initid, &f_args, &is_null, &error); *null_value= (my_bool) (is_null || error); } diff --git a/sql/sql_union.cc b/sql/sql_union.cc index f3f814831f5..1ec724a6338 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or 
modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -23,15 +22,18 @@ #include "mysql_priv.h" #include "sql_select.h" +#include "sql_cursor.h" -int mysql_union(THD *thd, LEX *lex, select_result *result, - SELECT_LEX_UNIT *unit) +bool mysql_union(THD *thd, LEX *lex, select_result *result, + SELECT_LEX_UNIT *unit, ulong setup_tables_done_option) { DBUG_ENTER("mysql_union"); - int res= 0; - if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK, ""))) + bool res; + if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK | + setup_tables_done_option))) res= unit->exec(); - res|= unit->cleanup(); + if (res || !thd->cursor || !thd->cursor->is_open()) + res|= unit->cleanup(); DBUG_RETURN(res); } @@ -40,22 +42,6 @@ int mysql_union(THD *thd, LEX *lex, select_result *result, ** store records in temporary table for UNION ***************************************************************************/ -select_union::select_union(TABLE *table_par) - :table(table_par) -{ - bzero((char*) &info,sizeof(info)); - /* - We can always use IGNORE because the temporary table will only - contain a unique key if we are using not using UNION ALL - */ - info.ignore= 1; -} - -select_union::~select_union() -{ -} - - int select_union::prepare(List<Item> &list, SELECT_LEX_UNIT *u) { unit= u; @@ -65,22 +51,21 @@ int select_union::prepare(List<Item> &list, SELECT_LEX_UNIT *u) bool select_union::send_data(List<Item> &values) { + int error= 0; if (unit->offset_limit_cnt) { // using limit offset,count unit->offset_limit_cnt--; return 0; } - fill_record(table->field, values, 1); - if (thd->net.report_error || write_record(table,&info)) + fill_record(thd, table->field, values, 1); + if 
(thd->net.report_error) + return 1; + + if ((error= table->file->write_row(table->record[0]))) { - if (thd->net.last_errno == ER_RECORD_FILE_FULL) - { - thd->clear_error(); // do not report user about table overflow - if (create_myisam_from_heap(thd, table, &tmp_table_param, - info.last_errno, 1)) - return 1; - } - else + /* create_myisam_from_heap will generate error if needed */ + if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE && + create_myisam_from_heap(thd, table, &tmp_table_param, error, 1)) return 1; } return 0; @@ -98,13 +83,51 @@ bool select_union::flush() int error; if ((error=table->file->extra(HA_EXTRA_NO_CACHE))) { - table->file->print_error(error,MYF(0)); - ::send_error(thd); + table->file->print_error(error, MYF(0)); return 1; } return 0; } +/* + Create a temporary table to store the result of select_union. + + SYNOPSIS + select_union::create_result_table() + thd thread handle + column_types a list of items used to define columns of the + temporary table + is_union_distinct if set, the temporary table will eliminate + duplicates on insert + options create options + + DESCRIPTION + Create a temporary table that is used to store the result of a UNION, + derived table, or a materialized cursor. + + RETURN VALUE + 0 The table has been created successfully. + 1 create_tmp_table failed. +*/ + +bool +select_union::create_result_table(THD *thd_arg, List<Item> *column_types, + bool is_union_distinct, ulonglong options, + const char *alias) +{ + DBUG_ASSERT(table == 0); + tmp_table_param.init(); + tmp_table_param.field_count= column_types->elements; + + if (! 
(table= create_tmp_table(thd_arg, &tmp_table_param, *column_types, + (ORDER*) 0, is_union_distinct, 1, + options, HA_POS_ERROR, (char*) alias))) + return TRUE; + table->file->extra(HA_EXTRA_WRITE_CACHE); + table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + return FALSE; +} + /* initialization procedures before fake_select_lex preparation() @@ -117,34 +140,30 @@ bool select_union::flush() options of SELECT */ -ulong -st_select_lex_unit::init_prepare_fake_select_lex(THD *thd) +void +st_select_lex_unit::init_prepare_fake_select_lex(THD *thd_arg) { - ulong options_tmp= thd->options | fake_select_lex->options; - thd->lex->current_select= fake_select_lex; - offset_limit_cnt= global_parameters->offset_limit; - select_limit_cnt= global_parameters->select_limit + - global_parameters->offset_limit; - - if (select_limit_cnt < global_parameters->select_limit) - select_limit_cnt= HA_POS_ERROR; // no limit - if (select_limit_cnt == HA_POS_ERROR) - options_tmp&= ~OPTION_FOUND_ROWS; - else if (found_rows_for_union && !thd->lex->describe) - options_tmp|= OPTION_FOUND_ROWS; + thd_arg->lex->current_select= fake_select_lex; fake_select_lex->table_list.link_in_list((byte *)&result_table_list, (byte **) - &result_table_list.next); - return options_tmp; + &result_table_list.next_local); + fake_select_lex->context.table_list= fake_select_lex->context.first_name_resolution_table= + fake_select_lex->get_table_list(); + for (ORDER *order= (ORDER *)global_parameters->order_list.first; + order; + order=order->next) + { + (*order->item)->walk(&Item::change_context_processor, + (byte *) &fake_select_lex->context); + } } -int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, - ulong additional_options, - const char *tmp_table_alias) +bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, + ulong additional_options) { SELECT_LEX *lex_select_save= thd_arg->lex->current_select; - SELECT_LEX *sl, *first_select; + SELECT_LEX *sl, *first_sl= first_select(); 
select_result *tmp_result; bool is_union; TABLE *empty_table= 0; @@ -163,7 +182,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, if (describe) { /* fast reinit for EXPLAIN */ - for (sl= first_select_in_union(); sl; sl= sl->next_select()) + for (sl= first_sl; sl; sl= sl->next_select()) { sl->join->result= result; select_limit_cnt= HA_POS_ERROR; @@ -171,34 +190,35 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, if (!sl->join->procedure && result->prepare(sl->join->fields_list, this)) { - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } sl->join->select_options|= SELECT_DESCRIBE; sl->join->reinit(); } } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } prepared= 1; - res= 0; + saved_error= FALSE; - thd_arg->lex->current_select= sl= first_select= first_select_in_union(); - found_rows_for_union= first_select->options & OPTION_FOUND_ROWS; - is_union= test(first_select->next_select() || fake_select_lex); + thd_arg->lex->current_select= sl= first_sl; + found_rows_for_union= first_sl->options & OPTION_FOUND_ROWS; + is_union= first_sl->next_select() || fake_select_lex; /* Global option */ if (is_union) { - if (!(tmp_result= union_result= new select_union(0))) + if (!(tmp_result= union_result= new select_union)) goto err; - union_result->tmp_table_param.init(); if (describe) tmp_result= sel_result; } else tmp_result= sel_result; + sl->context.resolve_in_select_list= TRUE; + for (;sl; sl= sl->next_select()) { bool can_skip_order_by; @@ -206,36 +226,47 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, JOIN *join= new JOIN(thd_arg, sl->item_list, sl->options | thd_arg->options | additional_options, tmp_result); + /* + setup_tables_done_option should be set only for very first SELECT, + because it protect from secont setup_tables call for select-like non + select commands (DELETE/INSERT/...) and they use only very first + SELECT (for union it can be only INSERT ... SELECT). 
+ */ + additional_options&= ~OPTION_SETUP_TABLES_DONE; if (!join) goto err; thd_arg->lex->current_select= sl; - offset_limit_cnt= sl->offset_limit; - select_limit_cnt= sl->select_limit+sl->offset_limit; - if (select_limit_cnt < sl->select_limit) - select_limit_cnt= HA_POS_ERROR; // no limit - - can_skip_order_by= is_union && - (!sl->braces || select_limit_cnt == HA_POS_ERROR); - - res= join->prepare(&sl->ref_pointer_array, - (TABLE_LIST*) sl->table_list.first, sl->with_wild, - sl->where, - (can_skip_order_by ? 0 : sl->order_list.elements) + - sl->group_list.elements, - can_skip_order_by ? - (ORDER*) 0 : (ORDER *)sl->order_list.first, - (ORDER*) sl->group_list.first, - sl->having, - (is_union ? (ORDER*) 0 : - (ORDER*) thd_arg->lex->proc_list.first), - sl, this); + + can_skip_order_by= is_union && !(sl->braces && sl->explicit_limit); + + saved_error= join->prepare(&sl->ref_pointer_array, + (TABLE_LIST*) sl->table_list.first, + sl->with_wild, + sl->where, + (can_skip_order_by ? 0 : + sl->order_list.elements) + + sl->group_list.elements, + can_skip_order_by ? + (ORDER*) 0 : (ORDER *)sl->order_list.first, + (ORDER*) sl->group_list.first, + sl->having, + (is_union ? (ORDER*) 0 : + (ORDER*) thd_arg->lex->proc_list.first), + sl, this); /* There are no * in the statement anymore (for PS) */ sl->with_wild= 0; last_procedure= join->procedure; - if (res || thd_arg->is_fatal_error) + + if (saved_error || (saved_error= thd_arg->is_fatal_error)) goto err; - if (sl == first_select) + /* + Use items list of underlaid select for derived tables to preserve + information about fields lengths and exact types + */ + if (!is_union) + types= first_sl->item_list; + else if (sl == first_sl) { /* We need to create an empty table object. 
It is used @@ -271,7 +302,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, while ((type= tp++, item_tmp= it++)) { if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp)) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } } @@ -283,9 +314,8 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, all collations together for UNION. */ List_iterator_fast<Item> tp(types); - Item_arena *arena= thd->current_arena; Item *type; - ulong create_options; + ulonglong create_options; while ((type= tp++)) { @@ -297,7 +327,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, } } - create_options= (first_select_in_union()->options | thd_arg->options | + create_options= (first_sl->options | thd_arg->options | TMP_TABLE_ALL_COLUMNS); /* Force the temporary table to be a MyISAM table if we're going to use @@ -308,58 +338,46 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, if (global_parameters->ftfunc_list->elements) create_options= create_options | TMP_TABLE_FORCE_MYISAM; - union_result->tmp_table_param.field_count= types.elements; - if (!(table= create_tmp_table(thd_arg, - &union_result->tmp_table_param, types, - (ORDER*) 0, (bool) union_distinct, 1, - create_options, HA_POS_ERROR, - (char *) tmp_table_alias))) + if (union_result->create_result_table(thd, &types, test(union_distinct), + create_options, "")) goto err; - table->file->extra(HA_EXTRA_WRITE_CACHE); - table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); bzero((char*) &result_table_list, sizeof(result_table_list)); result_table_list.db= (char*) ""; - result_table_list.real_name= result_table_list.alias= (char*) "union"; - result_table_list.table= table; - union_result->set_table(table); + result_table_list.table_name= result_table_list.alias= (char*) "union"; + result_table_list.table= table= union_result->table; thd_arg->lex->current_select= lex_select_save; if (!item_list.elements) { - /* - We're in statement prepare or in 
execution - of a conventional statement. - */ - Item_arena *tmp_arena,backup; - tmp_arena= thd->change_arena_if_needed(&backup); + Query_arena *arena, backup_arena; - Field **field; - for (field= table->field; *field; field++) - { - Item_field *item= new Item_field(*field); - if (!item || item_list.push_back(item)) - { - if (tmp_arena) - thd->restore_backup_item_arena(tmp_arena, &backup); - DBUG_RETURN(-1); - } - } - if (tmp_arena) - thd->restore_backup_item_arena(tmp_arena, &backup); - if (arena->is_stmt_prepare()) + arena= thd->activate_stmt_arena_if_needed(&backup_arena); + + saved_error= table->fill_item_list(&item_list); + + if (arena) + thd->restore_active_arena(arena, &backup_arena); + + if (saved_error) + goto err; + + if (thd->stmt_arena->is_stmt_prepare()) { - /* prepare fake select to initialize it correctly */ - (void) init_prepare_fake_select_lex(thd); + /* Validate the global parameters of this union */ + + init_prepare_fake_select_lex(thd); + /* Should be done only once (the only item_list per statement) */ + DBUG_ASSERT(fake_select_lex->join == 0); if (!(fake_select_lex->join= new JOIN(thd, item_list, thd->options, result))) { fake_select_lex->table_list.empty(); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } fake_select_lex->item_list= item_list; thd_arg->lex->current_select= fake_select_lex; - res= fake_select_lex->join-> + saved_error= fake_select_lex->join-> prepare(&fake_select_lex->ref_pointer_array, (TABLE_LIST*) fake_select_lex->table_list.first, 0, 0, @@ -371,42 +389,37 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, fake_select_lex->table_list.empty(); } } - else if (arena->is_stmt_execute()) + else { + DBUG_ASSERT(!thd->stmt_arena->is_conventional()); /* - We're in execution of a prepared statement: reset field items - to point at fields from the created temporary table. + We're in execution of a prepared statement or stored procedure: + reset field items to point at fields from the created temporary table. 
*/ - List_iterator_fast<Item> it(item_list); - for (Field **field= table->field; *field; field++) - { - Item_field *item_field= (Item_field*) it++; - DBUG_ASSERT(item_field); - item_field->reset_field(*field); - } + table->reset_item_list(&item_list); } } thd_arg->lex->current_select= lex_select_save; - DBUG_RETURN(res || thd_arg->is_fatal_error ? 1 : 0); + DBUG_RETURN(saved_error || thd_arg->is_fatal_error); err: thd_arg->lex->current_select= lex_select_save; - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } -int st_select_lex_unit::exec() +bool st_select_lex_unit::exec() { SELECT_LEX *lex_select_save= thd->lex->current_select; - SELECT_LEX *select_cursor=first_select_in_union(); + SELECT_LEX *select_cursor=first_select(); ulonglong add_rows=0; ha_rows examined_rows= 0; DBUG_ENTER("st_select_lex_unit::exec"); if (executed && !uncacheable && !describe) - DBUG_RETURN(0); + DBUG_RETURN(FALSE); executed= 1; if (uncacheable || !item || !item->assigned() || describe) @@ -431,15 +444,11 @@ int st_select_lex_unit::exec() thd->lex->current_select= sl; if (optimized) - res= sl->join->reinit(); + saved_error= sl->join->reinit(); else { - if (sl != global_parameters && !describe) - { - offset_limit_cnt= sl->offset_limit; - select_limit_cnt= sl->select_limit+sl->offset_limit; - } - else + set_limit(sl); + if (sl == global_parameters || describe) { offset_limit_cnt= 0; /* @@ -448,11 +457,7 @@ int st_select_lex_unit::exec() */ if (sl->order_list.first || describe) select_limit_cnt= HA_POS_ERROR; - else - select_limit_cnt= sl->select_limit+sl->offset_limit; - } - if (select_limit_cnt < sl->select_limit) - select_limit_cnt= HA_POS_ERROR; // no limit + } /* When using braces, SQL_CALC_FOUND_ROWS affects the whole query: @@ -462,21 +467,23 @@ int st_select_lex_unit::exec() sl->join->select_options= (select_limit_cnt == HA_POS_ERROR || sl->braces) ? 
sl->options & ~OPTION_FOUND_ROWS : sl->options | found_rows_for_union; - res= sl->join->optimize(); + saved_error= sl->join->optimize(); } - if (!res) + if (!saved_error) { records_at_start= table->file->records; sl->join->exec(); if (sl == union_distinct) { if (table->file->disable_indexes(HA_KEY_SWITCH_ALL)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); table->no_keyread=1; } - res= sl->join->error; - offset_limit_cnt= sl->offset_limit; - if (!res) + saved_error= sl->join->error; + offset_limit_cnt= (ha_rows)(sl->offset_limit ? + sl->offset_limit->val_uint() : + 0); + if (!saved_error) { examined_rows+= thd->examined_row_count; if (union_result->flush()) @@ -486,10 +493,10 @@ int st_select_lex_unit::exec() } } } - if (res) + if (saved_error) { thd->lex->current_select= lex_select_save; - DBUG_RETURN(res); + DBUG_RETURN(saved_error); } /* Needed for the following test and for records_at_start in next loop */ int error= table->file->info(HA_STATUS_VARIABLE); @@ -515,14 +522,15 @@ int st_select_lex_unit::exec() optimized= 1; /* Send result to 'result' */ - res= -1; + saved_error= TRUE; { List<Item_func_match> empty_list; empty_list.empty(); if (!thd->is_fatal_error) // Check if EOM { - ulong options_tmp= init_prepare_fake_select_lex(thd); + set_limit(global_parameters); + init_prepare_fake_select_lex(thd); JOIN *join= fake_select_lex->join; if (!join) { @@ -530,11 +538,11 @@ int st_select_lex_unit::exec() allocate JOIN for fake select only once (prevent mysql_select automatic allocation) */ - if (!(fake_select_lex->join= new JOIN(thd, item_list, thd->options, - result))) + if (!(fake_select_lex->join= new JOIN(thd, item_list, + fake_select_lex->options, result))) { fake_select_lex->table_list.empty(); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } /* @@ -546,24 +554,26 @@ int st_select_lex_unit::exec() else { JOIN_TAB *tab,*end; - for (tab=join->join_tab,end=tab+join->tables ; tab != end ; tab++) + for (tab=join->join_tab, end=tab+join->tables ; + tab && tab != end ; + tab++) { 
delete tab->select; delete tab->quick; } - join->init(thd, item_list, thd->options, result); + join->init(thd, item_list, fake_select_lex->options, result); } - res= mysql_select(thd, &fake_select_lex->ref_pointer_array, - &result_table_list, - 0, item_list, NULL, - global_parameters->order_list.elements, - (ORDER*)global_parameters->order_list.first, - (ORDER*) NULL, NULL, (ORDER*) NULL, - options_tmp | SELECT_NO_UNLOCK, - result, this, fake_select_lex); + saved_error= mysql_select(thd, &fake_select_lex->ref_pointer_array, + &result_table_list, + 0, item_list, NULL, + global_parameters->order_list.elements, + (ORDER*)global_parameters->order_list.first, + (ORDER*) NULL, NULL, (ORDER*) NULL, + fake_select_lex->options | SELECT_NO_UNLOCK, + result, this, fake_select_lex); fake_select_lex->table_list.empty(); - if (!res) + if (!saved_error) { thd->limit_found_rows = (ulonglong)table->file->records + add_rows; thd->examined_row_count+= examined_rows; @@ -575,18 +585,18 @@ int st_select_lex_unit::exec() } } thd->lex->current_select= lex_select_save; - DBUG_RETURN(res); + DBUG_RETURN(saved_error); } -int st_select_lex_unit::cleanup() +bool st_select_lex_unit::cleanup() { int error= 0; DBUG_ENTER("st_select_lex_unit::cleanup"); if (cleaned) { - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } cleaned= 1; @@ -598,33 +608,27 @@ int st_select_lex_unit::cleanup() free_tmp_table(thd, table); table= 0; // Safety } - JOIN *join; - SELECT_LEX *sl= first_select_in_union(); - for (; sl; sl= sl->next_select()) + + for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) + error|= sl->cleanup(); + + if (fake_select_lex) { - if ((join= sl->join)) + JOIN *join; + if ((join= fake_select_lex->join)) { - error|= sl->join->cleanup(); - delete join; + join->tables_list= 0; + join->tables= 0; } - else + error|= fake_select_lex->cleanup(); + if (fake_select_lex->order_list.elements) { - // it can be DO/SET with subqueries - for (SELECT_LEX_UNIT *lex_unit= sl->first_inner_unit(); - lex_unit != 
0; - lex_unit= lex_unit->next_unit()) - { - error|= lex_unit->cleanup(); - } + ORDER *ord; + for (ord= (ORDER*)fake_select_lex->order_list.first; ord; ord= ord->next) + (*ord->item)->cleanup(); } } - if (fake_select_lex && (join= fake_select_lex->join)) - { - join->tables_list= 0; - join->tables= 0; - error|= join->cleanup(); - delete join; - } + DBUG_RETURN(error); } @@ -660,21 +664,98 @@ void st_select_lex_unit::reinit_exec_mechanism() old_result old select_result object RETURN - 0 - OK - -1 - error + FALSE - OK + TRUE - error */ -int st_select_lex_unit::change_result(select_subselect *result, - select_subselect *old_result) +bool st_select_lex_unit::change_result(select_subselect *new_result, + select_subselect *old_result) { - int res= 0; - for (SELECT_LEX *sl= first_select_in_union(); sl; sl= sl->next_select()) + bool res= FALSE; + for (SELECT_LEX *sl= first_select(); sl; sl= sl->next_select()) { if (sl->join && sl->join->result == old_result) - if ((res= sl->join->change_result(result))) - return (res); + if (sl->join->change_result(new_result)) + return TRUE; } if (fake_select_lex && fake_select_lex->join) - res= fake_select_lex->join->change_result(result); + res= fake_select_lex->join->change_result(new_result); return (res); } + +/* + Get column type information for this unit. + + SYNOPSIS + st_select_lex_unit::get_unit_column_types() + + DESCRIPTION + For a single-select the column types are taken + from the list of selected items. For a union this function + assumes that st_select_lex_unit::prepare has been called + and returns the type holders that were created for unioned + column types of all selects. 
+ + NOTES + The implementation of this function should be in sync with + st_select_lex_unit::prepare() +*/ + +List<Item> *st_select_lex_unit::get_unit_column_types() +{ + SELECT_LEX *sl= first_select(); + bool is_union= test(sl->next_select()); + bool is_procedure= test(sl->join->procedure); + + if (is_procedure) + { + /* Types for "SELECT * FROM t1 procedure analyse()" + are generated during execute */ + return &sl->join->procedure_fields_list; + } + + + if (is_union) + { + DBUG_ASSERT(prepared); + /* Types are generated during prepare */ + return &types; + } + + return &sl->item_list; +} + +bool st_select_lex::cleanup() +{ + bool error= FALSE; + DBUG_ENTER("st_select_lex::cleanup()"); + + if (join) + { + DBUG_ASSERT((st_select_lex*)join->select_lex == this); + error= join->destroy(); + delete join; + join= 0; + } + for (SELECT_LEX_UNIT *lex_unit= first_inner_unit(); lex_unit ; + lex_unit= lex_unit->next_unit()) + { + error= (bool) ((uint) error | (uint) lex_unit->cleanup()); + } + non_agg_fields.empty(); + DBUG_RETURN(error); +} + + +void st_select_lex::cleanup_all_joins(bool full) +{ + SELECT_LEX_UNIT *unit; + SELECT_LEX *sl; + + if (join) + join->cleanup(full); + + for (unit= first_inner_unit(); unit; unit= unit->next_unit()) + for (sl= unit->first_select(); sl; sl= sl->next_select()) + sl->cleanup_all_joins(full); +} diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 7ed1b48d7aa..27d38114885 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -22,29 +21,89 @@ #include "mysql_priv.h" #include "sql_select.h" +#include "sp_head.h" +#include "sql_trigger.h" /* Return 0 if row hasn't changed */ -static bool compare_record(TABLE *table, ulong query_id) +bool compare_record(TABLE *table, query_id_t query_id) { - if (!table->blob_fields) + if (table->s->blob_fields + table->s->varchar_fields == 0) return cmp_record(table,record[1]); /* Compare null bits */ if (memcmp(table->null_flags, - table->null_flags+table->rec_buff_length, - table->null_bytes)) - return 1; // Diff in NULL value + table->null_flags+table->s->rec_buff_length, + table->s->null_bytes)) + return TRUE; // Diff in NULL value /* Compare updated fields */ for (Field **ptr=table->field ; *ptr ; ptr++) { if ((*ptr)->query_id == query_id && - (*ptr)->cmp_binary_offset(table->rec_buff_length)) - return 1; + (*ptr)->cmp_binary_offset(table->s->rec_buff_length)) + return TRUE; } - return 0; + return FALSE; } +/* + check that all fields are real fields + + SYNOPSIS + check_fields() + thd thread handler + items Items for check + + RETURN + TRUE Items can't be used in UPDATE + FALSE Items are OK +*/ + +static bool check_fields(THD *thd, List<Item> &items) +{ + List_iterator<Item> it(items); + Item *item; + Item_field *field; + + while ((item= it++)) + { + if (!(field= item->filed_for_view_update())) + { + /* item has name, because it comes from VIEW SELECT list */ + my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name); + return TRUE; + } + /* + we make temporary copy of Item_field, to avoid influence of changing + result_field on Item_ref which refer on this field + */ + thd->change_item_tree(it.ref(), new Item_field(thd, field)); + } + return FALSE; +} + + +/* + Process usual UPDATE + + SYNOPSIS + mysql_update() + thd thread handler + fields fields for update + values values of fields for update + conds WHERE clause expression 
+ order_num number of elemen in ORDER BY clause + order ORDER BY clause list + limit limit clause + handle_duplicates how to handle duplicates + + RETURN + 0 - OK + 2 - privilege check and openning table passed, but we need to convert to + multi-update because of view substitution + 1 - error +*/ + int mysql_update(THD *thd, TABLE_LIST *table_list, List<Item> &fields, @@ -52,46 +111,73 @@ int mysql_update(THD *thd, COND *conds, uint order_num, ORDER *order, ha_rows limit, - enum enum_duplicates handle_duplicates, - bool ignore) + enum enum_duplicates handle_duplicates, bool ignore) { - bool using_limit=limit != HA_POS_ERROR; - bool safe_update= thd->options & OPTION_SAFE_UPDATES; - bool used_key_is_modified, transactional_table, log_delayed; - int error=0; + bool using_limit= limit != HA_POS_ERROR; + bool safe_update= test(thd->options & OPTION_SAFE_UPDATES); + bool used_key_is_modified, transactional_table; + bool can_compare_record; + int res; + int error; uint used_index= MAX_KEY; bool need_sort= TRUE; #ifndef NO_EMBEDDED_ACCESS_CHECKS uint want_privilege; #endif - ulong query_id=thd->query_id, timestamp_query_id; + uint table_count= 0; + query_id_t query_id=thd->query_id, timestamp_query_id; ha_rows updated, found; key_map old_used_keys; TABLE *table; - SQL_SELECT *select= 0; + SQL_SELECT *select; READ_RECORD info; - TABLE_LIST *update_table_list= ((TABLE_LIST*) - thd->lex->select_lex.table_list.first); + SELECT_LEX *select_lex= &thd->lex->select_lex; + bool need_reopen; + List<Item> all_fields; DBUG_ENTER("mysql_update"); LINT_INIT(timestamp_query_id); - if ((open_and_lock_tables(thd, table_list))) - DBUG_RETURN(-1); + for ( ; ; ) + { + if (open_tables(thd, &table_list, &table_count, 0)) + DBUG_RETURN(1); + + if (table_list->multitable_view) + { + DBUG_ASSERT(table_list->view != 0); + DBUG_PRINT("info", ("Switch to multi-update")); + /* pass counter value */ + thd->lex->table_count= table_count; + /* convert to multiupdate */ + DBUG_RETURN(2); + } + if 
(!lock_tables(thd, table_list, table_count, &need_reopen)) + break; + if (!need_reopen) + DBUG_RETURN(1); + close_tables_for_reopen(thd, &table_list); + } + + if (mysql_handle_derived(thd->lex, &mysql_derived_prepare) || + (thd->fill_derived_tables() && + mysql_handle_derived(thd->lex, &mysql_derived_filling))) + DBUG_RETURN(1); + thd->proc_info="init"; table= table_list->table; table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); /* Calculate "table->used_keys" based on the WHERE */ - table->used_keys=table->keys_in_use; + table->used_keys= table->s->keys_in_use; table->quick_keys.clear_all(); #ifndef NO_EMBEDDED_ACCESS_CHECKS - want_privilege= table->grant.want_privilege; + /* TABLE_LIST contain right privilages request */ + want_privilege= table_list->grant.want_privilege; #endif - if ((error= mysql_prepare_update(thd, table_list, update_table_list, - &conds, order_num, order))) - DBUG_RETURN(error); + if (mysql_prepare_update(thd, table_list, &conds, order_num, order)) + DBUG_RETURN(1); old_used_keys= table->used_keys; // Keys used in WHERE /* @@ -106,10 +192,20 @@ int mysql_update(THD *thd, /* Check the fields we are going to modify */ #ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.want_privilege=want_privilege; + table_list->grant.want_privilege= table->grant.want_privilege= want_privilege; + table_list->register_want_access(want_privilege); #endif - if (setup_fields(thd, 0, update_table_list, fields, 1, 0, 0)) - DBUG_RETURN(-1); /* purecov: inspected */ + if (setup_fields_with_no_wrap(thd, 0, fields, 1, 0, 0)) + DBUG_RETURN(1); /* purecov: inspected */ + if (table_list->view && check_fields(thd, fields)) + { + DBUG_RETURN(1); + } + if (!table_list->updatable || check_key_in_view(thd, table_list)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE"); + DBUG_RETURN(1); + } if (table->timestamp_field) { // Don't set timestamp column if this is modified @@ -121,14 +217,19 @@ int mysql_update(THD *thd, #ifndef 
NO_EMBEDDED_ACCESS_CHECKS /* Check values */ - table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege); + table_list->grant.want_privilege= table->grant.want_privilege= + (SELECT_ACL & ~table->grant.privilege); #endif - if (setup_fields(thd, 0, update_table_list, values, 1, 0, 0)) + if (setup_fields(thd, 0, values, 1, 0, 0)) { - free_underlaid_joins(thd, &thd->lex->select_lex); - DBUG_RETURN(-1); /* purecov: inspected */ + free_underlaid_joins(thd, select_lex); + DBUG_RETURN(1); /* purecov: inspected */ } + if (select_lex->inner_refs_list.elements && + fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array)) + DBUG_RETURN(-1); + if (conds) { Item::cond_result cond_value; @@ -138,16 +239,15 @@ int mysql_update(THD *thd, } // Don't count on usage of 'only index' when calculating which key to use table->used_keys.clear_all(); - if (limit) - select=make_select(table,0,0,conds,&error); + select= make_select(table, 0, 0, conds, 0, &error); if (error || !limit || (select && select->check_quick(thd, safe_update, limit))) { delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); + free_underlaid_joins(thd, select_lex); if (error) { - DBUG_RETURN(-1); // Error in where + DBUG_RETURN(1); // Error in where } send_ok(thd); // No matching records DBUG_RETURN(0); @@ -168,14 +268,14 @@ int mysql_update(THD *thd, goto err; } } - init_ftfuncs(thd, &thd->lex->select_lex, 1); - + init_ftfuncs(thd, select_lex, 1); /* Check if we are modifying a key that we are used to search with */ + if (select && select->quick) { - used_index=select->quick->index; + used_index= select->quick->index; used_key_is_modified= (!select->quick->unique_key_range() && - check_if_key_used(table, used_index, fields)); + select->quick->is_keys_used(&fields)); } else { @@ -183,7 +283,7 @@ int mysql_update(THD *thd, if (used_index == MAX_KEY) // no index for sort order used_index= table->file->key_used_on_scan; if (used_index != MAX_KEY) - used_key_is_modified= 
check_if_key_used(table, used_index, fields); + used_key_is_modified= is_key_used(table, used_index, fields); } if (used_key_is_modified || order) @@ -206,13 +306,13 @@ int mysql_update(THD *thd, Doing an ORDER BY; Let filesort find and sort the rows we are going to update */ - uint length; + uint length= 0; SORT_FIELD *sortorder; ha_rows examined_rows; table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE), MYF(MY_FAE | MY_ZEROFILL)); - if (!(sortorder=make_unireg_sortorder(order, &length)) || + if (!(sortorder=make_unireg_sortorder(order, &length, NULL)) || (table->sort.found_records = filesort(thd, table, sortorder, length, select, limit, &examined_rows)) @@ -241,6 +341,10 @@ int mysql_update(THD *thd, DISK_BUFFER_SIZE, MYF(MY_WME))) goto err; + /* If quick select is used, initialize it before retrieving rows. */ + if (select && select->quick && select->quick->reset()) + goto err; + /* When we get here, we have one of the following options: A. used_index == MAX_KEY @@ -276,6 +380,8 @@ int mysql_update(THD *thd, break; } } + else + table->file->unlock_row(); } if (thd->killed && !error) error= 1; // Aborted @@ -311,6 +417,9 @@ int mysql_update(THD *thd, if (ignore) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + + if (select && select->quick && select->quick->reset()) + goto err; init_read_record(&info,thd,table,select,0,1); updated= found= 0; @@ -319,29 +428,76 @@ int mysql_update(THD *thd, thd->proc_info="Updating"; query_id=thd->query_id; + transactional_table= table->file->has_transactions(); + thd->no_trans_update= 0; + thd->abort_on_warning= test(!ignore && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + + if (table->triggers) + table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE); + + /* + We can use compare_record() to optimize away updates if + the table handler is returning all columns + */ + can_compare_record= !(table->file->table_flags() & + HA_PARTIAL_COLUMN_READ); while 
(!(error=info.read_record(&info)) && !thd->killed) { if (!(select && select->skip_record())) { store_record(table,record[1]); - if (fill_record(fields,values, 0) || thd->net.report_error) + if (fill_record_n_invoke_before_triggers(thd, fields, values, 0, + table->triggers, + TRG_EVENT_UPDATE)) break; /* purecov: inspected */ + found++; - if (compare_record(table, query_id)) + + if (!can_compare_record || compare_record(table, query_id)) { + if ((res= table_list->view_check_option(thd, ignore)) != + VIEW_CHECK_OK) + { + found--; + if (res == VIEW_CHECK_SKIP) + continue; + else if (res == VIEW_CHECK_ERROR) + { + error= 1; + break; + } + } if (!(error=table->file->update_row((byte*) table->record[1], (byte*) table->record[0]))) { updated++; + thd->no_trans_update= !transactional_table; + + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_AFTER, TRUE)) + { + error= 1; + break; + } } - else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) + else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) { - thd->fatal_error(); // Force error message + /* + If (ignore && error == HA_ERR_FOUND_DUPP_KEY) we don't have to + do anything; otherwise... + */ + if (error != HA_ERR_FOUND_DUPP_KEY) + thd->fatal_error(); /* Other handler errors are fatal */ table->file->print_error(error,MYF(0)); error= 1; break; } } + if (!--limit && using_limit) { error= -1; // Simulate end of file @@ -365,26 +521,34 @@ int mysql_update(THD *thd, This must be before binlog writing and ha_autocommit_... */ if (updated) + { query_cache_invalidate3(thd, table_list, 1); + } - transactional_table= table->file->has_transactions(); - log_delayed= (transactional_table || table->tmp_table); - if ((updated || (error < 0)) && (error <= 0 || !transactional_table)) + /* + error < 0 means really no error at all: we processed all rows until the + last one without error. error > 0 means an error (e.g. unique key + violation and no IGNORE or REPLACE). 
error == 0 is also an error (if + preparing the record or invoking before triggers fails). See + ha_autocommit_or_rollback(error>=0) and DBUG_RETURN(error>=0) below. + Sometimes we want to binlog even if we updated no rows, in case user used + it to be sure master and slave are in same state. + */ + if ((error < 0) || (updated && !transactional_table)) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { - if (error <= 0) + if (error < 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - log_delayed, FALSE); + transactional_table, FALSE); if (mysql_bin_log.write(&qinfo) && transactional_table) error=1; // Rollback update } - if (!log_delayed) + if (!transactional_table) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } - free_underlaid_joins(thd, &thd->lex->select_lex); + free_underlaid_joins(thd, select_lex); if (transactional_table) { if (ha_autocommit_or_rollback(thd, error >= 0)) @@ -397,31 +561,32 @@ int mysql_update(THD *thd, thd->lock=0; } - if (error >= 0) - send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN : 0); /* purecov: inspected */ - else + if (error < 0) { - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated, (ulong) thd->cuted_fields); - send_ok(thd, - (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated, + thd->row_count_func= + (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated; + send_ok(thd, (ulong) thd->row_count_func, thd->insert_id_used ? thd->last_insert_id : 0L,buff); - DBUG_PRINT("info",("%d records updated",updated)); + DBUG_PRINT("info",("%ld records updated", (long) updated)); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */ + thd->abort_on_warning= 0; free_io_cache(table); - DBUG_RETURN(0); + DBUG_RETURN((error >= 0 || thd->net.report_error) ? 
1 : 0); err: delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); + free_underlaid_joins(thd, select_lex); if (table->key_read) { table->key_read=0; table->file->extra(HA_EXTRA_NO_KEYREAD); } - DBUG_RETURN(-1); + thd->abort_on_warning= 0; + DBUG_RETURN(1); } /* @@ -430,52 +595,60 @@ err: SYNOPSIS mysql_prepare_update() thd - thread handler - table_list - global table list - update_table_list - local table list of UPDATE SELECT_LEX + table_list - global/local table list conds - conditions order_num - number of ORDER BY list entries order - ORDER BY clause list RETURN VALUE - 0 - OK - 1 - error (message is sent to user) - -1 - error (message is not sent to user) + FALSE OK + TRUE error */ -int mysql_prepare_update(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *update_table_list, +bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, Item **conds, uint order_num, ORDER *order) { + Item *fake_conds= 0; TABLE *table= table_list->table; TABLE_LIST tables; List<Item> all_fields; + SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_prepare_update"); #ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); + table_list->grant.want_privilege= table->grant.want_privilege= + (SELECT_ACL & ~table->grant.privilege); + table_list->register_want_access(SELECT_ACL); #endif bzero((char*) &tables,sizeof(tables)); // For ORDER BY tables.table= table; tables.alias= table_list->alias; - thd->allow_sum_func= 0; - - if (setup_tables(update_table_list) || - setup_conds(thd, update_table_list, conds) || - thd->lex->select_lex.setup_ref_array(thd, order_num) || - setup_order(thd, thd->lex->select_lex.ref_pointer_array, - update_table_list, all_fields, all_fields, order) || - setup_ftfuncs(&thd->lex->select_lex)) - DBUG_RETURN(-1); + thd->lex->allow_sum_func= 0; + + if (setup_tables_and_check_access(thd, &select_lex->context, + &select_lex->top_join_list, + table_list, conds, + &select_lex->leaf_tables, + 
FALSE, UPDATE_ACL, SELECT_ACL) || + setup_conds(thd, table_list, select_lex->leaf_tables, conds) || + select_lex->setup_ref_array(thd, order_num) || + setup_order(thd, select_lex->ref_pointer_array, + table_list, all_fields, all_fields, order) || + setup_ftfuncs(select_lex)) + DBUG_RETURN(TRUE); /* Check that we are not using table that we are updating in a sub select */ - if (find_real_table_in_list(table_list->next, - table_list->db, table_list->real_name)) { - my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); - DBUG_RETURN(-1); + TABLE_LIST *duplicate; + if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0))) + { + update_non_unique_table_error(table_list, "UPDATE", duplicate); + my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->table_name); + DBUG_RETURN(TRUE); + } } - - DBUG_RETURN(0); + select_lex->fix_prepare_information(thd, conds, &fake_conds); + DBUG_RETURN(FALSE); } @@ -495,254 +668,278 @@ static table_map get_table_map(List<Item> *items) while ((item= (Item_field *) item_it++)) map|= item->used_tables(); - DBUG_PRINT("info",("table_map: 0x%08x", map)); + DBUG_PRINT("info", ("table_map: 0x%08lx", (long) map)); return map; } /* - Prepare tables for multi-update - Analyse which tables need specific privileges and perform locking - as required + make update specific preparation and checks after opening tables + + SYNOPSIS + mysql_multi_update_prepare() + thd thread handler + + RETURN + FALSE OK + TRUE Error */ -int mysql_multi_update_lock(THD *thd, - TABLE_LIST *table_list, - List<Item> *fields, - SELECT_LEX *select_lex) +bool mysql_multi_update_prepare(THD *thd) { - int res; - TABLE_LIST *tl; - TABLE_LIST *update_list= (TABLE_LIST*) thd->lex->select_lex.table_list.first; + LEX *lex= thd->lex; + TABLE_LIST *table_list= lex->query_tables; + TABLE_LIST *tl, *leaves; + List<Item> *fields= &lex->select_lex.item_list; + table_map tables_for_update; + bool update_view= 0; + /* + if this multi-update was converted from usual 
update, here is table + counter else junk will be assigned here, but then replaced with real + count in open_tables() + */ + uint table_count= lex->table_count; const bool using_lock_tables= thd->locked_tables != 0; - bool initialized_dervied= 0; - DBUG_ENTER("mysql_multi_update_lock"); + bool original_multiupdate= (thd->lex->sql_command == SQLCOM_UPDATE_MULTI); + bool need_reopen= FALSE; + DBUG_ENTER("mysql_multi_update_prepare"); + + /* following need for prepared statements, to run next time multi-update */ + thd->lex->sql_command= SQLCOM_UPDATE_MULTI; + +reopen_tables: + /* open tables and create derived ones, but do not lock and fill them */ + if (((original_multiupdate || need_reopen) && + open_tables(thd, &table_list, &table_count, 0)) || + mysql_handle_derived(lex, &mysql_derived_prepare)) + DBUG_RETURN(TRUE); /* - The following loop is here to to ensure that we only lock tables - that we are going to update with a write lock + setup_tables() need for VIEWs. JOIN::prepare() will call setup_tables() + second time, but this call will do nothing (there are check for second + call in setup_tables()). 
*/ - for (;;) - { - table_map update_tables, derived_tables=0; - uint tnr, table_count; - if ((res=open_tables(thd, table_list, &table_count))) - DBUG_RETURN(res); + if (setup_tables_and_check_access(thd, &lex->select_lex.context, + &lex->select_lex.top_join_list, + table_list, &lex->select_lex.where, + &lex->select_lex.leaf_tables, FALSE, + UPDATE_ACL, SELECT_ACL)) + DBUG_RETURN(TRUE); - /* Only need to call lock_tables if we are not using LOCK TABLES */ - if (!using_lock_tables && - ((res= lock_tables(thd, table_list, table_count)))) - DBUG_RETURN(res); + if (setup_fields_with_no_wrap(thd, 0, *fields, 1, 0, 0)) + DBUG_RETURN(TRUE); - if (!initialized_dervied) - { - initialized_dervied= 1; - relink_tables_for_derived(thd); - if ((res= mysql_handle_derived(thd->lex))) - DBUG_RETURN(res); - } - - /* - Ensure that we have update privilege for all tables and columns in the - SET part - While we are here, initialize the table->map field to check which - tables are updated and updatability of derived tables - */ - for (tl= update_list, tnr=0 ; tl ; tl=tl->next) + for (tl= table_list; tl ; tl= tl->next_local) + { + if (tl->view) { - TABLE *table= tl->table; - /* - Update of derived tables is checked later - We don't check privileges here, becasue then we would get error - "UPDATE command denided .. for column N" instead of - "Target table ... 
is not updatable" - */ - if (!tl->derived) - table->grant.want_privilege= (UPDATE_ACL & ~table->grant.privilege); - table->map= (table_map) 1 << (tnr++); + update_view= 1; + break; } + } - if (setup_fields(thd, 0, update_list, *fields, 1, 0, 0)) - DBUG_RETURN(-1); + if (update_view && check_fields(thd, *fields)) + { + DBUG_RETURN(TRUE); + } - update_tables= get_table_map(fields); + tables_for_update= get_table_map(fields); - /* Unlock the tables in preparation for relocking */ - if (!using_lock_tables) - { - mysql_unlock_tables(thd, thd->lock); - thd->lock= 0; - } + /* + Setup timestamp handling and locking mode + */ + leaves= lex->select_lex.leaf_tables; + for (tl= leaves; tl; tl= tl->next_leaf) + { + TABLE *table= tl->table; + /* Only set timestamp column if this is not modified */ + if (table->timestamp_field && + table->timestamp_field->query_id == thd->query_id) + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; - /* - Count tables and setup timestamp handling - Set also the table locking strategy according to the update map - */ - for (tl= update_list; tl; tl= tl->next) + /* if table will be updated then check that it is unique */ + if (table->map & tables_for_update) { - TABLE_LIST *save= tl->next; - TABLE *table= tl->table; - uint wants; - /* if table will be updated then check that it is unique */ - if (table->map & update_tables) - { - /* - Multi-update can't be constructed over-union => we always have - single SELECT on top and have to check underlaying SELECTs of it - */ - if (select_lex->check_updateable_in_subqueries(tl->db, - tl->real_name)) - { - my_error(ER_UPDATE_TABLE_USED, MYF(0), - tl->real_name); - DBUG_RETURN(-1); - } - DBUG_PRINT("info",("setting table `%s` for update", tl->alias)); - tl->lock_type= thd->lex->multi_lock_option; - tl->updating= 1; // loacal or only list - if (tl->table_list) - tl->table_list->updating= 1; // global list (if we have 2 lists) - wants= UPDATE_ACL; - } - else + if (!tl->updatable || check_key_in_view(thd, tl)) 
{ - DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias)); - // If we are using the binary log, we need TL_READ_NO_INSERT to get - // correct order of statements. Otherwise, we use a TL_READ lock to - // improve performance. - tl->lock_type= using_update_log ? TL_READ_NO_INSERT : TL_READ; - tl->updating= 0; // loacal or only list - if (tl->table_list) - tl->table_list->updating= 0; // global list (if we have 2 lists) - wants= SELECT_ACL; + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), tl->alias, "UPDATE"); + DBUG_RETURN(TRUE); } - if (tl->derived) - derived_tables|= table->map; - else - { - tl->next= 0; - if (!using_lock_tables) - tl->table->reginfo.lock_type= tl->lock_type; - if (check_access(thd, wants, tl->db, &tl->grant.privilege, 0, 0) || - (grant_option && check_grant(thd, wants, tl, 0, 1, 0))) - { - tl->next= save; - DBUG_RETURN(1); - } - tl->next= save; - } + if (table->triggers) + table->triggers->mark_fields_used(thd, TRG_EVENT_UPDATE); + + DBUG_PRINT("info",("setting table `%s` for update", tl->alias)); + /* + If table will be updated we should not downgrade lock for it and + leave it as is. + */ + } + else + { + DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias)); + /* + If we are using the binary log, we need TL_READ_NO_INSERT to get + correct order of statements. Otherwise, we use a TL_READ lock to + improve performance. + */ + tl->lock_type= using_update_log ? TL_READ_NO_INSERT : TL_READ; + tl->updating= 0; + /* Update TABLE::lock_type accordingly. */ + if (!tl->placeholder() && !using_lock_tables) + tl->table->reginfo.lock_type= tl->lock_type; } + } + for (tl= table_list; tl; tl= tl->next_local) + { + /* Check access privileges for table */ + if (!tl->derived) + { + uint want_privilege= tl->updating ? 
UPDATE_ACL : SELECT_ACL; + if (check_access(thd, want_privilege, + tl->db, &tl->grant.privilege, 0, 0, + test(tl->schema_table)) || + (grant_option && check_grant(thd, want_privilege, tl, 0, 1, 0))) + DBUG_RETURN(TRUE); + } + } - if (thd->lex->derived_tables && (update_tables & derived_tables)) + /* check single table update for view compound from several tables */ + for (tl= table_list; tl; tl= tl->next_local) + { + if (tl->effective_algorithm == VIEW_ALGORITHM_MERGE) { - // find derived table which cause error - for (tl= update_list; tl; tl= tl->next) + TABLE_LIST *for_update= 0; + if (tl->check_single_table(&for_update, tables_for_update, tl)) { - if (tl->derived && (update_tables & tl->table->map)) - { - my_printf_error(ER_NON_UPDATABLE_TABLE, ER(ER_NON_UPDATABLE_TABLE), - MYF(0), tl->alias, "UPDATE"); - DBUG_RETURN(-1); - } + my_error(ER_VIEW_MULTIUPDATE, MYF(0), + tl->view_db.str, tl->view_name.str); + DBUG_RETURN(-1); } } + } - /* Relock the tables with the correct modes */ - res= lock_tables(thd, table_list, table_count); - if (using_lock_tables) - break; // Don't have to do setup_field() + /* now lock and fill tables */ + if (lock_tables(thd, table_list, table_count, &need_reopen)) + { + if (!need_reopen) + DBUG_RETURN(TRUE); /* - We must setup fields again as the file may have been reopened - during lock_tables + We have to reopen tables since some of them were altered or dropped + during lock_tables() or something was done with their triggers. + Let us do some cleanups to be able do setup_table() and setup_fields() + once again. */ - { - List_iterator_fast<Item> field_it(*fields); - Item_field *item; + List_iterator_fast<Item> it(*fields); + Item *item; + while ((item= it++)) + item->cleanup(); - while ((item= (Item_field *) field_it++)) + /* We have to cleanup translation tables of views. 
*/ + for (TABLE_LIST *tbl= table_list; tbl; tbl= tbl->next_global) + tbl->cleanup_items(); + + close_tables_for_reopen(thd, &table_list); + goto reopen_tables; + } + + /* + Check that we are not using table that we are updating, but we should + skip all tables of UPDATE SELECT itself + */ + lex->select_lex.exclude_from_table_unique_test= TRUE; + /* We only need SELECT privilege for columns in the values list */ + for (tl= leaves; tl; tl= tl->next_leaf) + { + TABLE *table= tl->table; + TABLE_LIST *tlist; + if (!(tlist= tl->top_table())->derived) + { + tlist->grant.want_privilege= + (SELECT_ACL & ~tlist->grant.privilege); + table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); + } + DBUG_PRINT("info", ("table: %s want_privilege: %u", tl->alias, + (uint) table->grant.want_privilege)); + if (tl->lock_type != TL_READ && + tl->lock_type != TL_READ_NO_INSERT) + { + TABLE_LIST *duplicate; + if ((duplicate= unique_table(thd, tl, table_list, 0))) { - item->field->query_id= 0; - item->cleanup(); + update_non_unique_table_error(table_list, "UPDATE", duplicate); + DBUG_RETURN(TRUE); } } - if (setup_fields(thd, 0, update_list, *fields, 1, 0, 0)) - DBUG_RETURN(-1); - /* - If lock succeded and the table map didn't change since the above lock - we can continue. - */ - if (!res && update_tables == get_table_map(fields)) - break; - - /* - There was some very unexpected changes in the table definition between - open tables and lock tables. Close tables and try again. - */ - close_thread_tables(thd); } - - DBUG_RETURN(res); + /* + Set exclude_from_table_unique_test value back to FALSE. It is needed for + further check in multi_update::prepare whether to use record cache. 
+ */ + lex->select_lex.exclude_from_table_unique_test= FALSE; + + if (thd->fill_derived_tables() && + mysql_handle_derived(lex, &mysql_derived_filling)) + DBUG_RETURN(TRUE); + + DBUG_RETURN (FALSE); } + /* Setup multi-update handling and call SELECT to do the join */ -int mysql_multi_update(THD *thd, - TABLE_LIST *table_list, - List<Item> *fields, - List<Item> *values, - COND *conds, - ulong options, - enum enum_duplicates handle_duplicates, bool ignore, - SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) +bool mysql_multi_update(THD *thd, + TABLE_LIST *table_list, + List<Item> *fields, + List<Item> *values, + COND *conds, + ulonglong options, + enum enum_duplicates handle_duplicates, bool ignore, + SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) { - int res; - TABLE_LIST *tl; - TABLE_LIST *update_list= (TABLE_LIST*) thd->lex->select_lex.table_list.first; - List<Item> total_list; multi_update *result; DBUG_ENTER("mysql_multi_update"); - /* Setup timestamp handling */ - for (tl= update_list; tl; tl= tl->next) - { - TABLE *table= tl->table; - /* Only set timestamp column if this is not modified */ - if (table->timestamp_field && - table->timestamp_field->query_id == thd->query_id) - table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; - - /* We only need SELECT privilege for columns in the values list */ - table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); - } + if (!(result= new multi_update(table_list, + thd->lex->select_lex.leaf_tables, + fields, values, + handle_duplicates, ignore))) + DBUG_RETURN(TRUE); - if (!(result=new multi_update(thd, update_list, fields, values, - handle_duplicates, ignore))) - DBUG_RETURN(-1); + thd->no_trans_update= 0; + thd->abort_on_warning= test(thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)); - res= mysql_select(thd, &select_lex->ref_pointer_array, - select_lex->get_table_list(), select_lex->with_wild, - total_list, - conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL, - (ORDER 
*)NULL, - options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK, - result, unit, select_lex); + List<Item> total_list; + (void) mysql_select(thd, &select_lex->ref_pointer_array, + table_list, select_lex->with_wild, + total_list, + conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL, + (ORDER *)NULL, + options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK | + OPTION_SETUP_TABLES_DONE, + result, unit, select_lex); delete result; - DBUG_RETURN(res); + thd->abort_on_warning= 0; + DBUG_RETURN(FALSE); } -multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list, +multi_update::multi_update(TABLE_LIST *table_list, + TABLE_LIST *leaves_list, List<Item> *field_list, List<Item> *value_list, - enum enum_duplicates handle_duplicates_arg, bool ignore_arg) - :all_tables(table_list), update_tables(0), thd(thd_arg), tmp_tables(0), - updated(0), found(0), fields(field_list), values(value_list), - table_count(0), copy_field(0), handle_duplicates(handle_duplicates_arg), - do_update(1), trans_safe(0), transactional_tables(1), ignore(ignore_arg) + enum enum_duplicates handle_duplicates_arg, + bool ignore_arg) + :all_tables(table_list), leaves(leaves_list), update_tables(0), + tmp_tables(0), updated(0), found(0), fields(field_list), + values(value_list), table_count(0), copy_field(0), + handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(0), + transactional_tables(1), ignore(ignore_arg) {} @@ -770,7 +967,7 @@ int multi_update::prepare(List<Item> ¬_used_values, if (!tables_to_update) { - my_error(ER_NO_TABLES_USED, MYF(0)); + my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0)); DBUG_RETURN(1); } @@ -779,7 +976,7 @@ int multi_update::prepare(List<Item> ¬_used_values, reference tables */ - if (setup_fields(thd, 0, all_tables, *values, 1, 0, 0)) + if (setup_fields(thd, 0, *values, 1, 0, 0)) DBUG_RETURN(1); /* @@ -789,8 +986,9 @@ int multi_update::prepare(List<Item> ¬_used_values, */ update.empty(); - for (table_ref= all_tables; table_ref; table_ref=table_ref->next) 
+ for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf) { + /* TODO: add support of view of join support */ TABLE *table=table_ref->table; if (tables_to_update & table->map) { @@ -798,7 +996,7 @@ int multi_update::prepare(List<Item> ¬_used_values, sizeof(*tl)); if (!tl) DBUG_RETURN(1); - update.link_in_list((byte*) tl, (byte**) &tl->next); + update.link_in_list((byte*) tl, (byte**) &tl->next_local); tl->shared= table_count++; table->no_keyread=1; table->used_keys.clear_all(); @@ -810,7 +1008,7 @@ int multi_update::prepare(List<Item> ¬_used_values, table_count= update.elements; update_tables= (TABLE_LIST*) update.first; - tmp_tables = (TABLE **) thd->calloc(sizeof(TABLE *) * table_count); + tmp_tables = (TABLE**) thd->calloc(sizeof(TABLE *) * table_count); tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) * table_count); fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) * @@ -871,6 +1069,10 @@ int multi_update::prepare(List<Item> ¬_used_values, - Table is not joined to itself. + When checking for above cases we also should take into account that + BEFORE UPDATE trigger potentially may change value of any field in row + being updated. + WARNING This code is a bit dependent of how make_join_readinfo() works. @@ -880,33 +1082,33 @@ int multi_update::prepare(List<Item> ¬_used_values, */ static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab, - TABLE_LIST *all_tables, List<Item> *fields) + TABLE_LIST *table_ref, TABLE_LIST *all_tables, + List<Item> *fields) { TABLE *table= join_tab->table; - /* First check if a table is not joined to itself. 
*/ - if (mysql_lock_have_duplicate(thd, table, all_tables)) + if (unique_table(thd, table_ref, all_tables, 0)) return 0; switch (join_tab->type) { case JT_SYSTEM: case JT_CONST: case JT_EQ_REF: - return 1; // At most one matching row + return TRUE; // At most one matching row case JT_REF: - return !check_if_key_used(table, join_tab->ref.key, *fields); + case JT_REF_OR_NULL: + return !is_key_used(table, join_tab->ref.key, *fields); case JT_ALL: /* If range search on index */ if (join_tab->quick) - return !check_if_key_used(table, join_tab->quick->index, - *fields); + return !join_tab->quick->is_keys_used(fields); /* If scanning in clustered key */ if ((table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) && - table->primary_key < MAX_KEY) - return !check_if_key_used(table, table->primary_key, *fields); - return 1; + table->s->primary_key < MAX_KEY) + return !is_key_used(table, table->s->primary_key, *fields); + return TRUE; default: break; // Avoid compler warning } - return 0; + return FALSE; } @@ -929,11 +1131,10 @@ multi_update::initialize_tables(JOIN *join) DBUG_RETURN(1); main_table=join->join_tab->table; trans_safe= transactional_tables= main_table->file->has_transactions(); - log_delayed= trans_safe || main_table->tmp_table != NO_TMP_TABLE; table_to_update= 0; /* Create a temporary table for keys to all tables, except main table */ - for (table_ref= update_tables; table_ref; table_ref=table_ref->next) + for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local) { TABLE *table=table_ref->table; uint cnt= table_ref->shared; @@ -945,7 +1146,8 @@ multi_update::initialize_tables(JOIN *join) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); if (table == main_table) // First table in join { - if (safe_update_on_fly(thd, join->join_tab, all_tables, &temp_fields)) + if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables, + &temp_fields)) { table_to_update= main_table; // Update table on the fly continue; @@ -963,6 +1165,11 @@ 
multi_update::initialize_tables(JOIN *join) /* ok to be on stack as this is not referenced outside of this func */ Field_string offset(table->file->ref_length, 0, "offset", table, &my_charset_bin); + /* + The field will be converted to varstring when creating tmp table if + table to be updated was created by mysql 4.1. Deny this. + */ + offset.can_alter_field_type= 0; if (!(ifield= new Item_field(((Field *) &offset)))) DBUG_RETURN(1); ifield->maybe_null= 0; @@ -995,7 +1202,7 @@ multi_update::initialize_tables(JOIN *join) multi_update::~multi_update() { TABLE_LIST *table; - for (table= update_tables ; table; table= table->next) + for (table= update_tables ; table; table= table->next_local) { table->table->no_keyread= table->table->no_cache= 0; if (ignore) @@ -1026,7 +1233,7 @@ bool multi_update::send_data(List<Item> ¬_used_values) TABLE_LIST *cur_table; DBUG_ENTER("multi_update::send_data"); - for (cur_table= update_tables; cur_table ; cur_table= cur_table->next) + for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local) { TABLE *table= cur_table->table; /* @@ -1046,16 +1253,36 @@ bool multi_update::send_data(List<Item> ¬_used_values) uint offset= cur_table->shared; table->file->position(table->record[0]); + /* + We can use compare_record() to optimize away updates if + the table handler is returning all columns + */ if (table == table_to_update) { + bool can_compare_record; + can_compare_record= !(table->file->table_flags() & + HA_PARTIAL_COLUMN_READ); table->status|= STATUS_UPDATED; store_record(table,record[1]); - if (fill_record(*fields_for_table[offset], *values_for_table[offset], 0)) + if (fill_record_n_invoke_before_triggers(thd, *fields_for_table[offset], + *values_for_table[offset], 0, + table->triggers, + TRG_EVENT_UPDATE)) DBUG_RETURN(1); + found++; - if (compare_record(table, thd->query_id)) + if (!can_compare_record || compare_record(table, thd->query_id)) { int error; + if ((error= cur_table->view_check_option(thd, ignore)) != + 
VIEW_CHECK_OK) + { + found--; + if (error == VIEW_CHECK_SKIP) + continue; + else if (error == VIEW_CHECK_ERROR) + DBUG_RETURN(1); + } if (!updated++) { /* @@ -1069,20 +1296,34 @@ bool multi_update::send_data(List<Item> ¬_used_values) table->record[0]))) { updated--; - if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) + if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) { - thd->fatal_error(); // Force error message + /* + If (ignore && error == HA_ERR_FOUND_DUPP_KEY) we don't have to + do anything; otherwise... + */ + if (error != HA_ERR_FOUND_DUPP_KEY) + thd->fatal_error(); /* Other handler errors are fatal */ table->file->print_error(error,MYF(0)); DBUG_RETURN(1); } } + else + { + if (!table->file->has_transactions()) + thd->no_trans_update= 1; + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_AFTER, TRUE)) + DBUG_RETURN(1); + } } } else { int error; TABLE *tmp_table= tmp_tables[offset]; - fill_record(tmp_table->field+1, *values_for_table[offset], 1); + fill_record(thd, tmp_table->field+1, *values_for_table[offset], 1); /* Store pointer to row */ memcpy((char*) tmp_table->field[0]->ptr, (char*) table->file->ref, table->file->ref_length); @@ -1109,7 +1350,7 @@ bool multi_update::send_data(List<Item> ¬_used_values) void multi_update::send_error(uint errcode,const char *err) { /* First send error what ever it is ... 
*/ - ::send_error(thd,errcode,err); + my_error(errcode, MYF(0), err); /* If nothing updated return */ if (!updated) @@ -1144,9 +1385,10 @@ int multi_update::do_updates(bool from_send_error) do_update= 0; // Don't retry this function if (!found) DBUG_RETURN(0); - for (cur_table= update_tables; cur_table ; cur_table= cur_table->next) + for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local) { byte *ref_pos; + bool can_compare_record; table = cur_table->table; if (table == table_to_update) @@ -1173,6 +1415,9 @@ int multi_update::do_updates(bool from_send_error) if ((local_error = tmp_table->file->ha_rnd_init(1))) goto err; + can_compare_record= !(table->file->table_flags() & + HA_PARTIAL_COLUMN_READ); + ref_pos= (byte*) tmp_table->field[0]->ptr; for (;;) { @@ -1197,8 +1442,22 @@ int multi_update::do_updates(bool from_send_error) copy_field_ptr++) (*copy_field_ptr->do_copy)(copy_field_ptr); - if (compare_record(table, thd->query_id)) + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_BEFORE, TRUE)) + goto err2; + + if (!can_compare_record || compare_record(table, thd->query_id)) { + int error; + if ((error= cur_table->view_check_option(thd, ignore)) != + VIEW_CHECK_OK) + { + if (error == VIEW_CHECK_SKIP) + continue; + else if (error == VIEW_CHECK_ERROR) + goto err; + } if ((local_error=table->file->update_row(table->record[1], table->record[0]))) { @@ -1206,17 +1465,18 @@ int multi_update::do_updates(bool from_send_error) goto err; } updated++; - if (table->tmp_table != NO_TMP_TABLE) - log_delayed= 1; + + if (table->triggers && + table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, + TRG_ACTION_AFTER, TRUE)) + goto err2; } } if (updated != org_updated) { - if (table->tmp_table != NO_TMP_TABLE) - log_delayed= 1; // Tmp tables forces delay log if (table->file->has_transactions()) - log_delayed= transactional_tables= 1; + transactional_tables= 1; else trans_safe= 0; // Can't do safe rollback } @@ 
-1232,15 +1492,14 @@ err: table->file->print_error(local_error,MYF(0)); } +err2: (void) table->file->ha_rnd_end(); (void) tmp_table->file->ha_rnd_end(); if (updated != org_updated) { - if (table->tmp_table != NO_TMP_TABLE) - log_delayed= 1; if (table->file->has_transactions()) - log_delayed= transactional_tables= 1; + transactional_tables= 1; else trans_safe= 0; } @@ -1252,7 +1511,7 @@ err: bool multi_update::send_eof() { - char buff[80]; + char buff[STRING_BUFFER_USUAL_SIZE]; thd->proc_info="updating reference tables"; /* Does updates for the last n - 1 tables, returns 0 if ok */ @@ -1270,24 +1529,21 @@ bool multi_update::send_eof() /* Write the SQL statement to the binlog if we updated rows and we succeeded or if we updated some non - transacational tables. - Note that if we updated nothing we don't write to the binlog (TODO: - fix this). + transactional tables. */ - if (updated && (local_error <= 0 || !trans_safe)) + if ((local_error == 0) || (updated && !trans_safe)) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { - if (local_error <= 0) + if (local_error == 0) thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, - log_delayed, FALSE); + transactional_tables, FALSE); if (mysql_bin_log.write(&qinfo) && trans_safe) local_error= 1; // Rollback update } - if (!log_delayed) + if (!transactional_tables) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } @@ -1302,15 +1558,15 @@ bool multi_update::send_eof() /* Safety: If we haven't got an error before (should not happen) */ my_message(ER_UNKNOWN_ERROR, "An error occured in multi-table update", MYF(0)); - ::send_error(thd); - return 1; + return TRUE; } sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated, (ulong) thd->cuted_fields); - ::send_ok(thd, - (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated, + thd->row_count_func= + (thd->client_capabilities & CLIENT_FOUND_ROWS) ? 
found : updated; + ::send_ok(thd, (ulong) thd->row_count_func, thd->insert_id_used ? thd->last_insert_id : 0L,buff); - return 0; + return FALSE; } diff --git a/sql/sql_view.cc b/sql/sql_view.cc new file mode 100644 index 00000000000..83beec3d1be --- /dev/null +++ b/sql/sql_view.cc @@ -0,0 +1,1743 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#define MYSQL_LEX 1 +#include "mysql_priv.h" +#include "sql_select.h" +#include "parse_file.h" +#include "sp.h" +#include "sp_head.h" +#include "sp_cache.h" + +#define MD5_BUFF_LENGTH 33 + +const LEX_STRING view_type= { (char*) STRING_WITH_LEN("VIEW") }; + +static int mysql_register_view(THD *thd, TABLE_LIST *view, + enum_view_create_mode mode); + +const char *updatable_views_with_limit_names[]= { "NO", "YES", NullS }; +TYPELIB updatable_views_with_limit_typelib= +{ + array_elements(updatable_views_with_limit_names)-1, "", + updatable_views_with_limit_names, + 0 +}; + + +/* + Make a unique name for an anonymous view column + SYNOPSIS + target reference to the item for which a new name has to be made + item_list list of items within which we should check uniqueness of + the created name + last_element the last element of the list above + + NOTE + Unique names are generated by adding 'My_exp_' to the old name of the + column. 
In case the name that was created this way already exists, we + add a numeric postfix to its end (i.e. "1") and increase the number + until the name becomes unique. If the generated name is longer than + NAME_LEN, it is truncated. +*/ + +static void make_unique_view_field_name(Item *target, + List<Item> &item_list, + Item *last_element) +{ + char *name= (target->orig_name ? + target->orig_name : + target->name); + uint name_len, attempt; + char buff[NAME_LEN+1]; + List_iterator_fast<Item> itc(item_list); + + for (attempt= 0;; attempt++) + { + Item *check; + bool ok= TRUE; + + if (attempt) + name_len= my_snprintf(buff, NAME_LEN, "My_exp_%d_%s", attempt, name); + else + name_len= my_snprintf(buff, NAME_LEN, "My_exp_%s", name); + + do + { + check= itc++; + if (check != target && + my_strcasecmp(system_charset_info, buff, check->name) == 0) + { + ok= FALSE; + break; + } + } while (check != last_element); + if (ok) + break; + itc.rewind(); + } + + target->orig_name= target->name; + target->set_name(buff, name_len, system_charset_info); +} + + +/* + Check if items with same names are present in list and possibly + generate unique names for them. + + SYNOPSIS + item_list list of Items which should be checked for duplicates + gen_unique_view_name flag: generate unique name or return with error when + duplicate names are found. + + DESCRIPTION + This function is used on view creation and preparation of derived tables. + It checks item_list for items with duplicate names. If it founds two + items with same name and conversion to unique names isn't allowed, or + names for both items are set by user - function fails. 
+ Otherwise it generates unique name for one item with autogenerated name + using make_unique_view_field_name() + + RETURN VALUE + FALSE no duplicate names found, or they are converted to unique ones + TRUE duplicate names are found and they can't be converted or conversion + isn't allowed +*/ + +bool check_duplicate_names(List<Item> &item_list, bool gen_unique_view_name) +{ + Item *item; + List_iterator_fast<Item> it(item_list); + List_iterator_fast<Item> itc(item_list); + DBUG_ENTER("check_duplicate_names"); + + while ((item= it++)) + { + Item *check; + /* treat underlying fields like set by user names */ + if (item->real_item()->type() == Item::FIELD_ITEM) + item->is_autogenerated_name= FALSE; + itc.rewind(); + while ((check= itc++) && check != item) + { + if (my_strcasecmp(system_charset_info, item->name, check->name) == 0) + { + if (!gen_unique_view_name) + goto err; + if (item->is_autogenerated_name) + make_unique_view_field_name(item, item_list, item); + else if (check->is_autogenerated_name) + make_unique_view_field_name(check, item_list, item); + else + goto err; + } + } + } + DBUG_RETURN(FALSE); + +err: + my_error(ER_DUP_FIELDNAME, MYF(0), item->name); + DBUG_RETURN(TRUE); +} + +/* + Fill defined view parts + + SYNOPSIS + fill_defined_view_parts() + thd current thread. + view view to operate on + + DESCRIPTION + This function will initialize the parts of the view + definition that are not specified in ALTER VIEW + to their values from CREATE VIEW. + The view must be opened to get its definition. + We use a copy of the view when opening because we want + to preserve the original view instance. 
+ + RETURN VALUE + TRUE can't open table + FALSE success +*/ +static bool +fill_defined_view_parts (THD *thd, TABLE_LIST *view) +{ + LEX *lex= thd->lex; + bool not_used; + TABLE_LIST decoy; + + memcpy (&decoy, view, sizeof (TABLE_LIST)); + if (!open_table(thd, &decoy, thd->mem_root, ¬_used, OPEN_VIEW_NO_PARSE) && + !decoy.view) + { + /* It's a table */ + return TRUE; + } + + if (!lex->definer) + { + view->definer.host= decoy.definer.host; + view->definer.user= decoy.definer.user; + lex->definer= &view->definer; + } + if (lex->create_view_algorithm == VIEW_ALGORITHM_UNDEFINED) + lex->create_view_algorithm= (uint8) decoy.algorithm; + if (lex->create_view_suid == VIEW_SUID_DEFAULT) + lex->create_view_suid= decoy.view_suid ? + VIEW_SUID_DEFINER : VIEW_SUID_INVOKER; + + return FALSE; +} + + +/* + Creating/altering VIEW procedure + + SYNOPSIS + mysql_create_view() + thd - thread handler + views - views to create + mode - VIEW_CREATE_NEW, VIEW_ALTER, VIEW_CREATE_OR_REPLACE + + RETURN VALUE + FALSE OK + TRUE Error +*/ + +bool mysql_create_view(THD *thd, TABLE_LIST *views, + enum_view_create_mode mode) +{ + LEX *lex= thd->lex; + bool link_to_local; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + bool definer_check_is_needed= mode != VIEW_ALTER || lex->definer; +#endif + /* first table in list is target VIEW name => cut off it */ + TABLE_LIST *view= lex->unlink_first_table(&link_to_local); + TABLE_LIST *tables= lex->query_tables; + TABLE_LIST *tbl; + SELECT_LEX *select_lex= &lex->select_lex; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + SELECT_LEX *sl; +#endif + SELECT_LEX_UNIT *unit= &lex->unit; + bool res= FALSE; + DBUG_ENTER("mysql_create_view"); + + /* This is ensured in the parser. 
*/ + DBUG_ASSERT(!lex->proc_list.first && !lex->result && + !lex->param_list.elements && !lex->derived_tables); + + if (mode != VIEW_CREATE_NEW) + { + if (mode == VIEW_ALTER && + fill_defined_view_parts(thd, view)) + { + res= TRUE; + goto err; + } + sp_cache_invalidate(); + } + + if (!lex->definer) + { + /* + DEFINER-clause is missing; we have to create default definer in + persistent arena to be PS/SP friendly. + If this is an ALTER VIEW then the current user should be set as + the definer. + */ + Query_arena original_arena; + Query_arena *ps_arena = thd->activate_stmt_arena_if_needed(&original_arena); + + if (!(lex->definer= create_default_definer(thd))) + res= TRUE; + + if (ps_arena) + thd->restore_active_arena(ps_arena, &original_arena); + + if (res) + goto err; + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* + check definer of view: + - same as current user + - current user has SUPER_ACL + */ + if (definer_check_is_needed && + (strcmp(lex->definer->user.str, thd->security_ctx->priv_user) != 0 || + my_strcasecmp(system_charset_info, + lex->definer->host.str, + thd->security_ctx->priv_host) != 0)) + { + if (!(thd->security_ctx->master_access & SUPER_ACL)) + { + my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), "SUPER"); + res= TRUE; + goto err; + } + else + { + if (!is_acl_user(lex->definer->host.str, + lex->definer->user.str)) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_NO_SUCH_USER, + ER(ER_NO_SUCH_USER), + lex->definer->user.str, + lex->definer->host.str); + } + } + } + /* + Privilege check for view creation: + - user has CREATE VIEW privilege on view table + - user has DROP privilege in case of ALTER VIEW or CREATE OR REPLACE + VIEW + - user has some (SELECT/UPDATE/INSERT/DELETE) privileges on columns of + underlying tables used on top of SELECT list (because it can be + (theoretically) updated, so it is enough to have UPDATE privilege on + them, for example) + - user has SELECT privilege on columns used in expressions of VIEW select + - 
for columns of underly tables used on top of SELECT list also will be + checked that we have not more privileges on correspondent column of view + table (i.e. user will not get some privileges by view creation) + */ + if ((check_access(thd, CREATE_VIEW_ACL, view->db, &view->grant.privilege, + 0, 0, is_schema_db(view->db)) || + grant_option && check_grant(thd, CREATE_VIEW_ACL, view, 0, 1, 0)) || + (mode != VIEW_CREATE_NEW && + (check_access(thd, DROP_ACL, view->db, &view->grant.privilege, + 0, 0, is_schema_db(view->db)) || + grant_option && check_grant(thd, DROP_ACL, view, 0, 1, 0)))) + { + res= TRUE; + goto err; + } + for (sl= select_lex; sl; sl= sl->next_select()) + { + for (tbl= sl->get_table_list(); tbl; tbl= tbl->next_local) + { + /* + Ensure that we have some privileges on this table, more strict check + will be done on column level after preparation, + */ + if (check_some_access(thd, VIEW_ANY_ACL, tbl)) + { + my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), + "ANY", thd->security_ctx->priv_user, + thd->security_ctx->priv_host, tbl->table_name); + res= TRUE; + goto err; + } + /* + Mark this table as a table which will be checked after the prepare + phase + */ + tbl->table_in_first_from_clause= 1; + + /* + We need to check only SELECT_ACL for all normal fields, fields for + which we need "any" (SELECT/UPDATE/INSERT/DELETE) privilege will be + checked later + */ + tbl->grant.want_privilege= SELECT_ACL; + /* + Make sure that all rights are loaded to the TABLE::grant field. + + tbl->table_name will be correct name of table because VIEWs are + not opened yet. 
+ */ + fill_effective_table_privileges(thd, &tbl->grant, tbl->db, + tbl->table_name); + } + } + + if (&lex->select_lex != lex->all_selects_list) + { + /* check tables of subqueries */ + for (tbl= tables; tbl; tbl= tbl->next_global) + { + if (!tbl->table_in_first_from_clause) + { + if (check_access(thd, SELECT_ACL, tbl->db, + &tbl->grant.privilege, 0, 0, test(tbl->schema_table)) || + grant_option && check_grant(thd, SELECT_ACL, tbl, 0, 1, 0)) + { + res= TRUE; + goto err; + } + } + } + } + /* + Mark fields for special privilege check ("any" privilege) + */ + for (sl= select_lex; sl; sl= sl->next_select()) + { + List_iterator_fast<Item> it(sl->item_list); + Item *item; + while ((item= it++)) + { + Item_field *field; + if ((field= item->filed_for_view_update())) + field->any_privileges= 1; + } + } +#endif + + if (open_and_lock_tables(thd, tables)) + { + res= TRUE; + goto err; + } + + /* + check that tables are not temporary and this VIEW do not used in query + (it is possible with ALTERing VIEW). + open_and_lock_tables can change the value of tables, + e.g. it may happen if before the function call tables was equal to 0. + */ + for (tbl= lex->query_tables; tbl; tbl= tbl->next_global) + { + /* is this table view and the same view which we creates now? */ + if (tbl->view && + strcmp(tbl->view_db.str, view->db) == 0 && + strcmp(tbl->view_name.str, view->table_name) == 0) + { + my_error(ER_NO_SUCH_TABLE, MYF(0), tbl->view_db.str, tbl->view_name.str); + res= TRUE; + goto err; + } + + /* + tbl->table can be NULL when tbl is a placeholder for a view + that is indirectly referenced via a stored function from the + view being created. We don't check these indirectly + referenced views in CREATE VIEW so they don't have table + object. + */ + if (tbl->table) + { + /* is this table temporary and is not view? 
*/ + if (tbl->table->s->tmp_table != NO_TMP_TABLE && !tbl->view && + !tbl->schema_table) + { + my_error(ER_VIEW_SELECT_TMPTABLE, MYF(0), tbl->alias); + res= TRUE; + goto err; + } + /* + Copy the privileges of the underlying VIEWs which were filled by + fill_effective_table_privileges + (they were not copied at derived tables processing) + */ + tbl->table->grant.privilege= tbl->grant.privilege; + } + } + + /* prepare select to resolve all fields */ + lex->view_prepare_mode= 1; + if (unit->prepare(thd, 0, 0)) + { + /* + some errors from prepare are reported to user, if is not then + it will be checked after err: label + */ + res= TRUE; + goto err; + } + + /* view list (list of view fields names) */ + if (lex->view_list.elements) + { + List_iterator_fast<Item> it(select_lex->item_list); + List_iterator_fast<LEX_STRING> nm(lex->view_list); + Item *item; + LEX_STRING *name; + + if (lex->view_list.elements != select_lex->item_list.elements) + { + my_message(ER_VIEW_WRONG_LIST, ER(ER_VIEW_WRONG_LIST), MYF(0)); + res= TRUE; + goto err; + } + while ((item= it++, name= nm++)) + { + item->set_name(name->str, name->length, system_charset_info); + item->is_autogenerated_name= FALSE; + } + } + + if (check_duplicate_names(select_lex->item_list, 1)) + { + res= TRUE; + goto err; + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* + Compare/check grants on view with grants of underlying tables + */ + for (sl= select_lex; sl; sl= sl->next_select()) + { + DBUG_ASSERT(view->db); /* Must be set in the parser */ + List_iterator_fast<Item> it(sl->item_list); + Item *item; + fill_effective_table_privileges(thd, &view->grant, view->db, + view->table_name); + while ((item= it++)) + { + Item_field *fld; + uint priv= (get_column_grant(thd, &view->grant, view->db, + view->table_name, item->name) & + VIEW_ANY_ACL); + if ((fld= item->filed_for_view_update())) + { + /* + Do we have more privileges on view field then underlying table field? 
+ */ + if (!fld->field->table->s->tmp_table && (~fld->have_privileges & priv)) + { + /* VIEW column has more privileges */ + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), + "create view", thd->security_ctx->priv_user, + thd->security_ctx->priv_host, item->name, + view->table_name); + res= TRUE; + goto err; + } + } + } + } +#endif + + if (wait_if_global_read_lock(thd, 0, 0)) + { + res= TRUE; + goto err; + } + VOID(pthread_mutex_lock(&LOCK_open)); + res= mysql_register_view(thd, view, mode); + + if (mysql_bin_log.is_open()) + { + String buff; + const LEX_STRING command[3]= + {{(char *)STRING_WITH_LEN("CREATE ")}, + {(char *)STRING_WITH_LEN("ALTER ")}, + {(char *)STRING_WITH_LEN("CREATE OR REPLACE ")}}; + + buff.append(command[thd->lex->create_view_mode].str, + command[thd->lex->create_view_mode].length); + view_store_options(thd, views, &buff); + buff.append(STRING_WITH_LEN("VIEW ")); + + /* Test if user supplied a db (ie: we did not use thd->db) */ + if (views->db && views->db[0] && + (thd->db == NULL || strcmp(views->db, thd->db))) + { + append_identifier(thd, &buff, views->db, + views->db_length); + buff.append('.'); + } + append_identifier(thd, &buff, views->table_name, + views->table_name_length); + if (lex->view_list.elements) + { + List_iterator_fast<LEX_STRING> names(lex->view_list); + LEX_STRING *name; + int i; + + for (i= 0; (name= names++); i++) + { + buff.append(i ? 
", " : "("); + append_identifier(thd, &buff, name->str, name->length); + } + buff.append(')'); + } + buff.append(STRING_WITH_LEN(" AS ")); + buff.append(views->source.str, views->source.length); + + Query_log_event qinfo(thd, buff.ptr(), buff.length(), 0, FALSE); + mysql_bin_log.write(&qinfo); + } + + VOID(pthread_mutex_unlock(&LOCK_open)); + if (view->revision != 1) + query_cache_invalidate3(thd, view, 0); + start_waiting_global_read_lock(thd); + if (res) + goto err; + + send_ok(thd); + lex->link_first_table_back(view, link_to_local); + DBUG_RETURN(0); + +err: + thd->proc_info= "end"; + lex->link_first_table_back(view, link_to_local); + unit->cleanup(); + DBUG_RETURN(res || thd->net.report_error); +} + + +/* index of revision number in following table */ +static const int revision_number_position= 8; +/* index of last required parameter for making view */ +static const int required_view_parameters= 10; +/* number of backups */ +static const int num_view_backups= 3; + +/* + table of VIEW .frm field descriptors + + Note that one should NOT change the order for this, as it's used by + parse() +*/ +static File_option view_parameters[]= +{{{(char*) STRING_WITH_LEN("query")}, + my_offsetof(TABLE_LIST, query), + FILE_OPTIONS_ESTRING}, + {{(char*) STRING_WITH_LEN("md5")}, + my_offsetof(TABLE_LIST, md5), + FILE_OPTIONS_STRING}, + {{(char*) STRING_WITH_LEN("updatable")}, + my_offsetof(TABLE_LIST, updatable_view), + FILE_OPTIONS_ULONGLONG}, + {{(char*) STRING_WITH_LEN("algorithm")}, + my_offsetof(TABLE_LIST, algorithm), + FILE_OPTIONS_ULONGLONG}, + {{(char*) STRING_WITH_LEN("definer_user")}, + my_offsetof(TABLE_LIST, definer.user), + FILE_OPTIONS_STRING}, + {{(char*) STRING_WITH_LEN("definer_host")}, + my_offsetof(TABLE_LIST, definer.host), + FILE_OPTIONS_STRING}, + {{(char*) STRING_WITH_LEN("suid")}, + my_offsetof(TABLE_LIST, view_suid), + FILE_OPTIONS_ULONGLONG}, + {{(char*) STRING_WITH_LEN("with_check_option")}, + my_offsetof(TABLE_LIST, with_check), + 
FILE_OPTIONS_ULONGLONG}, + {{(char*) STRING_WITH_LEN("revision")}, + my_offsetof(TABLE_LIST, revision), + FILE_OPTIONS_REV}, + {{(char*) STRING_WITH_LEN("timestamp")}, + my_offsetof(TABLE_LIST, timestamp), + FILE_OPTIONS_TIMESTAMP}, + {{(char*)STRING_WITH_LEN("create-version")}, + my_offsetof(TABLE_LIST, file_version), + FILE_OPTIONS_ULONGLONG}, + {{(char*) STRING_WITH_LEN("source")}, + my_offsetof(TABLE_LIST, source), + FILE_OPTIONS_ESTRING}, + {{NullS, 0}, 0, + FILE_OPTIONS_STRING} +}; + +static LEX_STRING view_file_type[]= {{(char*) STRING_WITH_LEN("VIEW") }}; + + +/* + Register VIEW (write .frm & process .frm's history backups) + + SYNOPSIS + mysql_register_view() + thd - thread handler + view - view description + mode - VIEW_CREATE_NEW, VIEW_ALTER, VIEW_CREATE_OR_REPLACE + + RETURN + 0 OK + -1 Error + 1 Error and error message given +*/ + +static int mysql_register_view(THD *thd, TABLE_LIST *view, + enum_view_create_mode mode) +{ + LEX *lex= thd->lex; + char buff[4096]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + char md5[MD5_BUFF_LENGTH]; + bool can_be_merged; + char dir_buff[FN_REFLEN], file_buff[FN_REFLEN]; + LEX_STRING dir, file; + DBUG_ENTER("mysql_register_view"); + + /* print query */ + str.length(0); + { + ulong sql_mode= thd->variables.sql_mode & MODE_ANSI_QUOTES; + thd->variables.sql_mode&= ~MODE_ANSI_QUOTES; + lex->unit.print(&str); + thd->variables.sql_mode|= sql_mode; + } + str.append('\0'); + DBUG_PRINT("info", ("View: %s", str.ptr())); + + /* print file name */ + (void) my_snprintf(dir_buff, FN_REFLEN, "%s/%s/", + mysql_data_home, view->db); + unpack_filename(dir_buff, dir_buff); + dir.str= dir_buff; + dir.length= strlen(dir_buff); + + file.str= file_buff; + file.length= (strxnmov(file_buff, FN_REFLEN, view->table_name, reg_ext, + NullS) - file_buff); + /* init timestamp */ + if (!view->timestamp.str) + view->timestamp.str= view->timestamp_buffer; + + /* check old .frm */ + { + char path_buff[FN_REFLEN]; + LEX_STRING path; + 
File_parser *parser; + + path.str= path_buff; + fn_format(path_buff, file.str, dir.str, 0, MY_UNPACK_FILENAME); + path.length= strlen(path_buff); + + if (!access(path.str, F_OK)) + { + if (mode == VIEW_CREATE_NEW) + { + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), view->alias); + DBUG_RETURN(-1); + } + + if (!(parser= sql_parse_prepare(&path, thd->mem_root, 0))) + DBUG_RETURN(1); + + if (!parser->ok() || !is_equal(&view_type, parser->type())) + { + my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->table_name, "VIEW"); + DBUG_RETURN(-1); + } + + /* + read revision number + + TODO: read dependence list, too, to process cascade/restrict + TODO: special cascade/restrict procedure for alter? + */ + if (parser->parse((gptr)view, thd->mem_root, + view_parameters + revision_number_position, 1, + &file_parser_dummy_hook)) + { + DBUG_RETURN(thd->net.report_error? -1 : 0); + } + } + else + { + if (mode == VIEW_ALTER) + { + my_error(ER_NO_SUCH_TABLE, MYF(0), view->db, view->alias); + DBUG_RETURN(-1); + } + } + } + /* fill structure */ + view->query.str= (char*)str.ptr(); + view->query.length= str.length()-1; // we do not need last \0 + view->source.str= thd->query + thd->lex->create_view_select_start; + view->source.length= (char *)skip_rear_comments((uchar *)view->source.str, + (uchar *)thd->query + + thd->query_length) - + view->source.str; + view->file_version= 1; + view->calc_md5(md5); + view->md5.str= md5; + view->md5.length= 32; + can_be_merged= lex->can_be_merged(); + if (lex->create_view_algorithm == VIEW_ALGORITHM_MERGE && + !lex->can_be_merged()) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_VIEW_MERGE, + ER(ER_WARN_VIEW_MERGE)); + lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; + } + view->algorithm= lex->create_view_algorithm; + view->definer.user= lex->definer->user; + view->definer.host= lex->definer->host; + view->view_suid= lex->create_view_suid; + view->with_check= lex->create_view_check; + if ((view->updatable_view= (can_be_merged && + 
view->algorithm != VIEW_ALGORITHM_TMPTABLE))) + { + /* TODO: change here when we will support UNIONs */ + for (TABLE_LIST *tbl= (TABLE_LIST *)lex->select_lex.table_list.first; + tbl; + tbl= tbl->next_local) + { + if ((tbl->view && !tbl->updatable_view) || tbl->schema_table) + { + view->updatable_view= 0; + break; + } + for (TABLE_LIST *up= tbl; up; up= up->embedding) + { + if (up->outer_join) + { + view->updatable_view= 0; + goto loop_out; + } + } + } + } +loop_out: + /* + Check that table of main select do not used in subqueries. + + This test can catch only very simple cases of such non-updateable views, + all other will be detected before updating commands execution. + (it is more optimisation then real check) + + NOTE: this skip cases of using table via VIEWs, joined VIEWs, VIEWs with + UNION + */ + if (view->updatable_view && + !lex->select_lex.next_select() && + !((TABLE_LIST*)lex->select_lex.table_list.first)->next_local && + find_table_in_global_list(lex->query_tables->next_global, + lex->query_tables->db, + lex->query_tables->table_name)) + { + view->updatable_view= 0; + } + + if (view->with_check != VIEW_CHECK_NONE && + !view->updatable_view) + { + my_error(ER_VIEW_NONUPD_CHECK, MYF(0), view->db, view->table_name); + DBUG_RETURN(-1); + } + + if (sql_create_definition_file(&dir, &file, view_file_type, + (gptr)view, view_parameters, num_view_backups)) + { + DBUG_RETURN(thd->net.report_error? 
-1 : 1); + } + DBUG_RETURN(0); +} + + + +/* + read VIEW .frm and create structures + + SYNOPSIS + mysql_make_view() + thd Thread handler + parser parser object + table TABLE_LIST structure for filling + flags flags + RETURN + 0 ok + 1 error +*/ + +bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, + uint flags) +{ + SELECT_LEX *end, *view_select; + LEX *old_lex, *lex; + Query_arena *arena, backup; + TABLE_LIST *top_view= table->top_table(); + int res; + bool result, view_is_mergeable; + TABLE_LIST *view_main_select_tables; + DBUG_ENTER("mysql_make_view"); + DBUG_PRINT("info", ("table: 0x%lx (%s)", (ulong) table, table->table_name)); + + if (table->view) + { + /* + It's an execution of a PS/SP and the view has already been unfolded + into a list of used tables. Now we only need to update the information + about granted privileges in the view tables with the actual data + stored in MySQL privilege system. We don't need to restore the + required privileges (by calling register_want_access) because they has + not changed since PREPARE or the previous execution: the only case + when this information is changed is execution of UPDATE on a view, but + the original want_access is restored in its end. 
+ */ + if (!table->prelocking_placeholder && table->prepare_security(thd)) + { + DBUG_RETURN(1); + } + DBUG_PRINT("info", + ("VIEW %s.%s is already processed on previous PS/SP execution", + table->view_db.str, table->view_name.str)); + DBUG_RETURN(0); + } + + /* check loop via view definition */ + for (TABLE_LIST *precedent= table->referencing_view; + precedent; + precedent= precedent->referencing_view) + { + if (precedent->view_name.length == table->table_name_length && + precedent->view_db.length == table->db_length && + my_strcasecmp(system_charset_info, + precedent->view_name.str, table->table_name) == 0 && + my_strcasecmp(system_charset_info, + precedent->view_db.str, table->db) == 0) + { + my_error(ER_VIEW_RECURSIVE, MYF(0), + top_view->view_db.str, top_view->view_name.str); + DBUG_RETURN(TRUE); + } + } + + /* + For now we assume that tables will not be changed during PS life (it + will be TRUE as far as we make new table cache). + */ + old_lex= thd->lex; + arena= thd->stmt_arena; + if (arena->is_conventional()) + arena= 0; + else + thd->set_n_backup_active_arena(arena, &backup); + + /* init timestamp */ + if (!table->timestamp.str) + table->timestamp.str= table->timestamp_buffer; + /* prepare default values for old format */ + table->view_suid= TRUE; + table->definer.user.str= table->definer.host.str= 0; + table->definer.user.length= table->definer.host.length= 0; + + /* + TODO: when VIEWs will be stored in cache, table mem_root should + be used here + */ + if (parser->parse((gptr)table, thd->mem_root, view_parameters, + required_view_parameters, &file_parser_dummy_hook)) + goto err; + + /* + check old format view .frm + */ + if (!table->definer.user.str) + { + DBUG_ASSERT(!table->definer.host.str && + !table->definer.user.length && + !table->definer.host.length); + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_VIEW_FRM_NO_USER, ER(ER_VIEW_FRM_NO_USER), + table->db, table->table_name); + get_default_definer(thd, &table->definer); + } + if 
(flags & OPEN_VIEW_NO_PARSE) + { + DBUG_RETURN(FALSE); + } + + /* + Save VIEW parameters, which will be wiped out by derived table + processing + */ + table->view_db.str= table->db; + table->view_db.length= table->db_length; + table->view_name.str= table->table_name; + table->view_name.length= table->table_name_length; + + /*TODO: md5 test here and warning if it is differ */ + + /* + TODO: TABLE mem root should be used here when VIEW will be stored in + TABLE cache + + now Lex placed in statement memory + */ + table->view= lex= thd->lex= (LEX*) new(thd->mem_root) st_lex_local; + lex_start(thd, (uchar*)table->query.str, table->query.length); + view_select= &lex->select_lex; + view_select->select_number= ++thd->select_number; + { + ulong save_mode= thd->variables.sql_mode; + /* switch off modes which can prevent normal parsing of VIEW + - MODE_REAL_AS_FLOAT affect only CREATE TABLE parsing + + MODE_PIPES_AS_CONCAT affect expression parsing + + MODE_ANSI_QUOTES affect expression parsing + + MODE_IGNORE_SPACE affect expression parsing + - MODE_NOT_USED not used :) + * MODE_ONLY_FULL_GROUP_BY affect execution + * MODE_NO_UNSIGNED_SUBTRACTION affect execution + - MODE_NO_DIR_IN_CREATE affect table creation only + - MODE_POSTGRESQL compounded from other modes + - MODE_ORACLE compounded from other modes + - MODE_MSSQL compounded from other modes + - MODE_DB2 compounded from other modes + - MODE_MAXDB affect only CREATE TABLE parsing + - MODE_NO_KEY_OPTIONS affect only SHOW + - MODE_NO_TABLE_OPTIONS affect only SHOW + - MODE_NO_FIELD_OPTIONS affect only SHOW + - MODE_MYSQL323 affect only SHOW + - MODE_MYSQL40 affect only SHOW + - MODE_ANSI compounded from other modes + (+ transaction mode) + ? 
MODE_NO_AUTO_VALUE_ON_ZERO affect UPDATEs + + MODE_NO_BACKSLASH_ESCAPES affect expression parsing + */ + thd->variables.sql_mode&= ~(MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | + MODE_IGNORE_SPACE | MODE_NO_BACKSLASH_ESCAPES); + CHARSET_INFO *save_cs= thd->variables.character_set_client; + thd->variables.character_set_client= system_charset_info; + res= MYSQLparse((void *)thd); + thd->variables.character_set_client= save_cs; + thd->variables.sql_mode= save_mode; + } + if (!res && !thd->is_fatal_error) + { + TABLE_LIST *view_tables= lex->query_tables; + TABLE_LIST *view_tables_tail= 0; + TABLE_LIST *tbl; + + /* + Check rights to run commands (EXPLAIN SELECT & SHOW CREATE) which show + underlying tables. + Skip this step if we are opening view for prelocking only. + */ + if (!table->prelocking_placeholder && + (old_lex->sql_command == SQLCOM_SELECT && old_lex->describe)) + { + if (check_table_access(thd, SELECT_ACL, view_tables, 1) && + check_table_access(thd, SHOW_VIEW_ACL, table, 1)) + { + my_message(ER_VIEW_NO_EXPLAIN, ER(ER_VIEW_NO_EXPLAIN), MYF(0)); + goto err; + } + } + else if (!table->prelocking_placeholder && + old_lex->sql_command == SQLCOM_SHOW_CREATE && + !table->belong_to_view) + { + if (check_table_access(thd, SHOW_VIEW_ACL, table, 0)) + goto err; + } + + if (!(table->view_tables= + (List<TABLE_LIST>*) new(thd->mem_root) List<TABLE_LIST>)) + goto err; + /* + mark to avoid temporary table using and put view reference and find + last view table + */ + for (tbl= view_tables; + tbl; + tbl= (view_tables_tail= tbl)->next_global) + { + tbl->skip_temporary= 1; + tbl->belong_to_view= top_view; + tbl->referencing_view= table; + tbl->prelocking_placeholder= table->prelocking_placeholder; + /* + First we fill want_privilege with SELECT_ACL (this is needed for the + tables which belongs to view subqueries and temporary table views, + then for the merged view underlying tables we will set wanted + privileges of top_view + */ + tbl->grant.want_privilege= SELECT_ACL; + 
/* + After unfolding the view we lose the list of tables referenced in it + (we will have only a list of underlying tables in case of MERGE + algorithm, which does not include the tables referenced from + subqueries used in view definition). + Let's build a list of all tables referenced in the view. + */ + table->view_tables->push_back(tbl); + } + + /* + Put tables of VIEW after VIEW TABLE_LIST + + NOTE: It is important for UPDATE/INSERT/DELETE checks to have this + tables just after VIEW instead of tail of list, to be able check that + table is unique. Also we store old next table for the same purpose. + */ + if (view_tables) + { + if (table->next_global) + { + view_tables_tail->next_global= table->next_global; + table->next_global->prev_global= &view_tables_tail->next_global; + } + else + { + old_lex->query_tables_last= &view_tables_tail->next_global; + } + view_tables->prev_global= &table->next_global; + table->next_global= view_tables; + } + + view_is_mergeable= (table->algorithm != VIEW_ALGORITHM_TMPTABLE && + lex->can_be_merged()); + LINT_INIT(view_main_select_tables); + + if (view_is_mergeable) + { + /* + Currently 'view_main_select_tables' differs from 'view_tables' + only then view has CONVERT_TZ() function in its select list. + This may change in future, for example if we enable merging of + views with subqueries in select list. + */ + view_main_select_tables= + (TABLE_LIST*)lex->select_lex.table_list.first; + + /* + Let us set proper lock type for tables of the view's main + select since we may want to perform update or insert on + view. This won't work for view containing union. But this is + ok since we don't allow insert and update on such views + anyway. + */ + for (tbl= view_main_select_tables; tbl; tbl= tbl->next_local) + tbl->lock_type= table->lock_type; + } + + /* + If we are opening this view as part of implicit LOCK TABLES, then + this view serves as simple placeholder and we should not continue + further processing. 
+ */ + if (table->prelocking_placeholder) + goto ok2; + + old_lex->derived_tables|= (DERIVED_VIEW | lex->derived_tables); + + /* move SQL_NO_CACHE & Co to whole query */ + old_lex->safe_to_cache_query= (old_lex->safe_to_cache_query && + lex->safe_to_cache_query); + /* move SQL_CACHE to whole query */ + if (view_select->options & OPTION_TO_QUERY_CACHE) + old_lex->select_lex.options|= OPTION_TO_QUERY_CACHE; + + if (table->view_suid) + { + /* + Prepare a security context to check underlying objects of the view + */ + if (!(table->view_sctx= (Security_context *) + thd->stmt_arena->alloc(sizeof(Security_context)))) + goto err; + /* Assign the context to the tables referenced in the view */ + if (view_tables) + { + DBUG_ASSERT(view_tables_tail); + for (tbl= view_tables; tbl != view_tables_tail->next_global; + tbl= tbl->next_global) + tbl->security_ctx= table->view_sctx; + } + /* assign security context to SELECT name resolution contexts of view */ + for(SELECT_LEX *sl= lex->all_selects_list; + sl; + sl= sl->next_select_in_list()) + sl->context.security_ctx= table->view_sctx; + } + + /* + Setup an error processor to hide error messages issued by stored + routines referenced in the view + */ + for (SELECT_LEX *sl= lex->all_selects_list; + sl; + sl= sl->next_select_in_list()) + { + sl->context.error_processor= &view_error_processor; + sl->context.error_processor_data= (void *)table; + } + + /* + check MERGE algorithm ability + - algorithm is not explicit TEMPORARY TABLE + - VIEW SELECT allow merging + - VIEW used in subquery or command support MERGE algorithm + */ + if (view_is_mergeable && + (table->select_lex->master_unit() != &old_lex->unit || + old_lex->can_use_merged()) && + !old_lex->can_not_use_merged()) + { + /* lex should contain at least one table */ + DBUG_ASSERT(view_main_select_tables != 0); + + List_iterator_fast<TABLE_LIST> ti(view_select->top_join_list); + + table->effective_algorithm= VIEW_ALGORITHM_MERGE; + DBUG_PRINT("info", ("algorithm: MERGE")); + 
table->updatable= (table->updatable_view != 0); + table->effective_with_check= + old_lex->get_effective_with_check(table); + table->merge_underlying_list= view_main_select_tables; + + /* Fill correct wanted privileges. */ + for (tbl= view_main_select_tables; tbl; tbl= tbl->next_local) + tbl->grant.want_privilege= top_view->grant.orig_want_privilege; + + /* prepare view context */ + lex->select_lex.context.resolve_in_table_list_only(view_main_select_tables); + lex->select_lex.context.outer_context= 0; + lex->select_lex.context.select_lex= table->select_lex; + lex->select_lex.select_n_having_items+= + table->select_lex->select_n_having_items; + + /* + Tables of the main select of the view should be marked as belonging + to the same select as original view (again we can use LEX::select_lex + for this purprose because we don't support MERGE algorithm for views + with unions). + */ + for (tbl= lex->select_lex.get_table_list(); tbl; tbl= tbl->next_local) + tbl->select_lex= table->select_lex; + + { + if (view_main_select_tables->next_local) + { + table->multitable_view= TRUE; + if (table->belong_to_view) + table->belong_to_view->multitable_view= TRUE; + } + /* make nested join structure for view tables */ + NESTED_JOIN *nested_join; + if (!(nested_join= table->nested_join= + (NESTED_JOIN *) thd->calloc(sizeof(NESTED_JOIN)))) + goto err; + nested_join->join_list= view_select->top_join_list; + + /* re-nest tables of VIEW */ + ti.rewind(); + while ((tbl= ti++)) + { + tbl->join_list= &nested_join->join_list; + tbl->embedding= table; + } + } + + /* Store WHERE clause for post-processing in setup_underlying */ + table->where= view_select->where; + /* + Add subqueries units to SELECT into which we merging current view. + unit(->next)* chain starts with subqueries that are used by this + view and continues with subqueries that are used by other views. 
+ We must not add any subquery twice (otherwise we'll form a loop), + to do this we remember in end_unit the first subquery that has + been already added. + + NOTE: we do not support UNION here, so we take only one select + */ + SELECT_LEX_NODE *end_unit= table->select_lex->slave; + SELECT_LEX_UNIT *next_unit; + for (SELECT_LEX_UNIT *unit= lex->select_lex.first_inner_unit(); + unit; + unit= next_unit) + { + if (unit == end_unit) + break; + SELECT_LEX_NODE *save_slave= unit->slave; + next_unit= unit->next_unit(); + unit->include_down(table->select_lex); + unit->slave= save_slave; // fix include_down initialisation + } + + /* + This SELECT_LEX will be linked in global SELECT_LEX list + to make it processed by mysql_handle_derived(), + but it will not be included to SELECT_LEX tree, because it + will not be executed + */ + table->select_lex->order_list.push_back(&lex->select_lex.order_list); + goto ok; + } + + table->effective_algorithm= VIEW_ALGORITHM_TMPTABLE; + DBUG_PRINT("info", ("algorithm: TEMPORARY TABLE")); + view_select->linkage= DERIVED_TABLE_TYPE; + table->updatable= 0; + table->effective_with_check= VIEW_CHECK_NONE; + old_lex->subqueries= TRUE; + + /* SELECT tree link */ + lex->unit.include_down(table->select_lex); + lex->unit.slave= view_select; // fix include_down initialisation + + table->derived= &lex->unit; + } + else + goto err; + +ok: + /* global SELECT list linking */ + end= view_select; // primary SELECT_LEX is always last + end->link_next= old_lex->all_selects_list; + old_lex->all_selects_list->link_prev= &end->link_next; + old_lex->all_selects_list= lex->all_selects_list; + lex->all_selects_list->link_prev= + (st_select_lex_node**)&old_lex->all_selects_list; + +ok2: + if (!old_lex->time_zone_tables_used && thd->lex->time_zone_tables_used) + old_lex->time_zone_tables_used= thd->lex->time_zone_tables_used; + result= !table->prelocking_placeholder && table->prepare_security(thd); + + lex_end(thd->lex); +end: + if (arena) + 
thd->restore_active_arena(arena, &backup); + thd->lex= old_lex; + DBUG_RETURN(result); + +err: + DBUG_ASSERT(thd->lex == table->view); + lex_end(thd->lex); + delete table->view; + table->view= 0; // now it is not VIEW placeholder + result= 1; + goto end; +} + + +/* + drop view + + SYNOPSIS + mysql_drop_view() + thd - thread handler + views - views to delete + drop_mode - cascade/check + + RETURN VALUE + FALSE OK + TRUE Error +*/ + +bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) +{ + DBUG_ENTER("mysql_drop_view"); + char path[FN_REFLEN]; + TABLE_LIST *view; + frm_type_enum type; + db_type not_used; + String non_existant_views; + char *wrong_object_db= NULL, *wrong_object_name= NULL; + bool error= FALSE; + + VOID(pthread_mutex_lock(&LOCK_open)); + for (view= views; view; view= view->next_local) + { + strxnmov(path, FN_REFLEN, mysql_data_home, "/", view->db, "/", + view->table_name, reg_ext, NullS); + (void) unpack_filename(path, path); + type= FRMTYPE_ERROR; + if (access(path, F_OK) || + FRMTYPE_VIEW != (type= mysql_frm_type(thd, path, ¬_used))) + { + char name[FN_REFLEN]; + my_snprintf(name, sizeof(name), "%s.%s", view->db, view->table_name); + if (thd->lex->drop_if_exists) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), + name); + continue; + } + if (type == FRMTYPE_TABLE) + { + if (!wrong_object_name) + { + wrong_object_db= view->db; + wrong_object_name= view->table_name; + } + } + else + { + if (non_existant_views.length()) + non_existant_views.append(','); + non_existant_views.append(String(view->table_name,system_charset_info)); + } + continue; + } + if (my_delete(path, MYF(MY_WME))) + error= TRUE; + query_cache_invalidate3(thd, view, 0); + sp_cache_invalidate(); + } + if (mysql_bin_log.is_open()) + { + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + + VOID(pthread_mutex_unlock(&LOCK_open)); + + 
if (error) + { + DBUG_RETURN(TRUE); + } + if (wrong_object_name) + { + my_error(ER_WRONG_OBJECT, MYF(0), wrong_object_db, wrong_object_name, + "VIEW"); + DBUG_RETURN(TRUE); + } + if (non_existant_views.length()) + { + my_error(ER_BAD_TABLE_ERROR, MYF(0), non_existant_views.c_ptr()); + DBUG_RETURN(TRUE); + } + send_ok(thd); + DBUG_RETURN(FALSE); +} + + +/* + Check type of .frm if we are not going to parse it + + SYNOPSIS + mysql_frm_type() + path path to file + + RETURN + FRMTYPE_ERROR error + FRMTYPE_TABLE table + FRMTYPE_VIEW view +*/ + +frm_type_enum mysql_frm_type(THD *thd, char *path, db_type *dbt) +{ + File file; + uchar header[10]; //"TYPE=VIEW\n" it is 10 characters + int error; + DBUG_ENTER("mysql_frm_type"); + + *dbt= DB_TYPE_UNKNOWN; + + if ((file= my_open(path, O_RDONLY | O_SHARE, MYF(0))) < 0) + DBUG_RETURN(FRMTYPE_ERROR); + error= my_read(file, (byte*) header, sizeof(header), MYF(MY_WME | MY_NABP)); + my_close(file, MYF(MY_WME)); + + if (error) + DBUG_RETURN(FRMTYPE_ERROR); + if (!strncmp((char*) header, "TYPE=VIEW\n", sizeof(header))) + DBUG_RETURN(FRMTYPE_VIEW); + + /* + This is just a check for DB_TYPE. We'll return default unknown type + if the following test is true (arg #3). 
This should not have effect + on return value from this function (default FRMTYPE_TABLE) + */ + if (header[0] != (uchar) 254 || header[1] != 1 || + (header[2] != FRM_VER && header[2] != FRM_VER+1 && + (header[2] < FRM_VER+3 || header[2] > FRM_VER+4))) + DBUG_RETURN(FRMTYPE_TABLE); + + *dbt= ha_checktype(thd, (enum db_type) (uint) *(header + 3), 0, 0); + DBUG_RETURN(FRMTYPE_TABLE); // Is probably a .frm table +} + + +/* + check of key (primary or unique) presence in updatable view + + SYNOPSIS + check_key_in_view() + thd thread handler + view view for check with opened table + + DESCRIPTION + If it is VIEW and query have LIMIT clause then check that undertlying + table of viey contain one of following: + 1) primary key of underlying table + 2) unique key underlying table with fields for which NULL value is + impossible + 3) all fields of underlying table + + RETURN + FALSE OK + TRUE view do not contain key or all fields +*/ + +bool check_key_in_view(THD *thd, TABLE_LIST *view) +{ + TABLE *table; + Field_translator *trans, *end_of_trans; + KEY *key_info, *key_info_end; + DBUG_ENTER("check_key_in_view"); + + /* + we do not support updatable UNIONs in VIEW, so we can check just limit of + LEX::select_lex + */ + if ((!view->view && !view->belong_to_view) || + thd->lex->sql_command == SQLCOM_INSERT || + thd->lex->select_lex.select_limit == 0) + DBUG_RETURN(FALSE); /* it is normal table or query without LIMIT */ + table= view->table; + view= view->top_table(); + trans= view->field_translation; + key_info_end= (key_info= table->key_info)+ table->s->keys; + + end_of_trans= view->field_translation_end; + DBUG_ASSERT(table != 0 && view->field_translation != 0); + + { + /* + We should be sure that all fields are ready to get keys from them, but + this operation should not have influence on Field::query_id, to avoid + marking as used fields which are not used + */ + bool save_set_query_id= thd->set_query_id; + thd->set_query_id= 0; + for (Field_translator *fld= trans; fld < 
end_of_trans; fld++) + { + if (!fld->item->fixed && fld->item->fix_fields(thd, &fld->item)) + { + thd->set_query_id= save_set_query_id; + return TRUE; + } + } + thd->set_query_id= save_set_query_id; + } + /* Loop over all keys to see if a unique-not-null key is used */ + for (;key_info != key_info_end ; key_info++) + { + if ((key_info->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME) + { + KEY_PART_INFO *key_part= key_info->key_part; + KEY_PART_INFO *key_part_end= key_part + key_info->key_parts; + + /* check that all key parts are used */ + for (;;) + { + Field_translator *k; + for (k= trans; k < end_of_trans; k++) + { + Item_field *field; + if ((field= k->item->filed_for_view_update()) && + field->field == key_part->field) + break; + } + if (k == end_of_trans) + break; // Key is not possible + if (++key_part == key_part_end) + DBUG_RETURN(FALSE); // Found usable key + } + } + } + + DBUG_PRINT("info", ("checking if all fields of table are used")); + /* check all fields presence */ + { + Field **field_ptr; + Field_translator *fld; + for (field_ptr= table->field; *field_ptr; field_ptr++) + { + for (fld= trans; fld < end_of_trans; fld++) + { + Item_field *field; + if ((field= fld->item->filed_for_view_update()) && + field->field == *field_ptr) + break; + } + if (fld == end_of_trans) // If field didn't exists + { + /* + Keys or all fields of underlying tables are not found => we have + to check variable updatable_views_with_limit to decide should we + issue an error or just a warning + */ + if (thd->variables.updatable_views_with_limit) + { + /* update allowed, but issue warning */ + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_WARN_VIEW_WITHOUT_KEY, ER(ER_WARN_VIEW_WITHOUT_KEY)); + DBUG_RETURN(FALSE); + } + /* prohibit update */ + DBUG_RETURN(TRUE); + } + } + } + DBUG_RETURN(FALSE); +} + + +/* + insert fields from VIEW (MERGE algorithm) into given list + + SYNOPSIS + insert_view_fields() + thd thread handler + list list for insertion + view view for 
processing + + RETURN + FALSE OK + TRUE error (is not sent to cliet) +*/ + +bool insert_view_fields(THD *thd, List<Item> *list, TABLE_LIST *view) +{ + Field_translator *trans_end; + Field_translator *trans; + DBUG_ENTER("insert_view_fields"); + + if (!(trans= view->field_translation)) + DBUG_RETURN(FALSE); + trans_end= view->field_translation_end; + + for (Field_translator *entry= trans; entry < trans_end; entry++) + { + Item_field *fld; + if ((fld= entry->item->filed_for_view_update())) + list->push_back(fld); + else + { + my_error(ER_NON_INSERTABLE_TABLE, MYF(0), view->alias, "INSERT"); + DBUG_RETURN(TRUE); + } + } + DBUG_RETURN(FALSE); +} + +/* + checking view md5 check suum + + SINOPSYS + view_checksum() + thd threar handler + view view for check + + RETUIRN + HA_ADMIN_OK OK + HA_ADMIN_NOT_IMPLEMENTED it is not VIEW + HA_ADMIN_WRONG_CHECKSUM check sum is wrong +*/ + +int view_checksum(THD *thd, TABLE_LIST *view) +{ + char md5[MD5_BUFF_LENGTH]; + if (!view->view || view->md5.length != 32) + return HA_ADMIN_NOT_IMPLEMENTED; + view->calc_md5(md5); + return (strncmp(md5, view->md5.str, 32) ? 
+ HA_ADMIN_WRONG_CHECKSUM : + HA_ADMIN_OK); +} + +/* + rename view + + Synopsis: + renames a view + + Parameters: + thd thread handler + new_name new name of view + view view + + Return values: + FALSE Ok + TRUE Error +*/ +bool +mysql_rename_view(THD *thd, + const char *new_name, + TABLE_LIST *view) +{ + LEX_STRING pathstr, file; + File_parser *parser; + char view_path[FN_REFLEN]; + bool error= TRUE; + + DBUG_ENTER("mysql_rename_view"); + + strxnmov(view_path, FN_REFLEN, mysql_data_home, "/", view->db, "/", + view->table_name, reg_ext, NullS); + (void) unpack_filename(view_path, view_path); + + pathstr.str= (char *)view_path; + pathstr.length= strlen(view_path); + + if ((parser= sql_parse_prepare(&pathstr, thd->mem_root, 1)) && + is_equal(&view_type, parser->type())) + { + TABLE_LIST view_def; + char dir_buff[FN_REFLEN], file_buff[FN_REFLEN]; + + /* + To be PS-friendly we should either to restore state of + TABLE_LIST object pointed by 'view' after using it for + view definition parsing or use temporary 'view_def' + object for it. 
+ */ + bzero(&view_def, sizeof(view_def)); + view_def.timestamp.str= view_def.timestamp_buffer; + view_def.view_suid= TRUE; + + /* get view definition and source */ + if (parser->parse((gptr)&view_def, thd->mem_root, view_parameters, + array_elements(view_parameters)-1, + &file_parser_dummy_hook)) + goto err; + + /* rename view and it's backups */ + if (rename_in_schema_file(view->db, view->table_name, new_name, + view_def.revision - 1, num_view_backups)) + goto err; + + strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", view->db, "/", NullS); + (void) unpack_filename(dir_buff, dir_buff); + + pathstr.str= (char*)dir_buff; + pathstr.length= strlen(dir_buff); + + file.str= file_buff; + file.length= (strxnmov(file_buff, FN_REFLEN, new_name, reg_ext, NullS) + - file_buff); + + if (sql_create_definition_file(&pathstr, &file, view_file_type, + (gptr)&view_def, view_parameters, + num_view_backups)) + { + /* restore renamed view in case of error */ + rename_in_schema_file(view->db, new_name, view->table_name, + view_def.revision - 1, num_view_backups); + goto err; + } + } else + DBUG_RETURN(1); + + /* remove cache entries */ + query_cache_invalidate3(thd, view, 0); + sp_cache_invalidate(); + error= FALSE; + +err: + DBUG_RETURN(error); +} diff --git a/sql/sql_view.h b/sql/sql_view.h new file mode 100644 index 00000000000..ab0920e0bf2 --- /dev/null +++ b/sql/sql_view.h @@ -0,0 +1,40 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +bool mysql_create_view(THD *thd, TABLE_LIST *view, + enum_view_create_mode mode); + +bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table, + uint flags); + +bool mysql_drop_view(THD *thd, TABLE_LIST *view, enum_drop_mode drop_mode); + +bool check_key_in_view(THD *thd, TABLE_LIST * view); + +bool insert_view_fields(THD *thd, List<Item> *list, TABLE_LIST *view); + +frm_type_enum mysql_frm_type(THD *thd, char *path, db_type *dbt); + +int view_checksum(THD *thd, TABLE_LIST *view); + +extern TYPELIB updatable_views_with_limit_typelib; + +bool check_duplicate_names(List<Item>& item_list, bool gen_unique_view_names); +bool mysql_rename_view(THD *thd, const char *new_name, TABLE_LIST *view); + +#define VIEW_ANY_ACL (SELECT_ACL | UPDATE_ACL | INSERT_ACL | DELETE_ACL) + diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index ee57a4d611c..bbb0d11b942 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -34,22 +33,381 @@ #include "slave.h" #include "lex_symbol.h" #include "item_create.h" +#include "sp_head.h" +#include "sp_pcontext.h" +#include "sp_rcontext.h" +#include "sp.h" #include <myisam.h> #include <myisammrg.h> int yylex(void *yylval, void *yythd); -#define yyoverflow(A,B,C,D,E,F) {ulong val= *(F); if(my_yyoverflow((B), (D), &val)) { yyerror((char*) (A)); return 2; } else { *(F)= (YYSIZE_T)val; }} +const LEX_STRING null_lex_str={0,0}; -#define WARN_DEPRECATED(A,B) \ +#define yyoverflow(A,B,C,D,E,F) {ulong val= *(F); if (my_yyoverflow((B), (D), &val)) { yyerror((char*) (A)); return 2; } else { *(F)= (YYSIZE_T)val; }} + +#undef WARN_DEPRECATED /* this macro is also defined in mysql_priv.h */ +#define WARN_DEPRECATED(A,B) \ push_warning_printf(((THD *)yythd), MYSQL_ERROR::WARN_LEVEL_WARN, \ - ER_WARN_DEPRECATED_SYNTAX, \ - ER(ER_WARN_DEPRECATED_SYNTAX), (A), (B)); + ER_WARN_DEPRECATED_SYNTAX, \ + ER(ER_WARN_DEPRECATED_SYNTAX), (A), (B)); + +#define MYSQL_YYABORT \ + do \ + { \ + LEX::cleanup_lex_after_parse_error(YYTHD);\ + YYABORT; \ + } while (0) + +#define MYSQL_YYABORT_UNLESS(A) \ + if (!(A)) \ + { \ + my_parse_error(ER(ER_SYNTAX_ERROR));\ + MYSQL_YYABORT; \ + } + +#ifndef DBUG_OFF +#define YYDEBUG 1 +#else +#define YYDEBUG 0 +#endif + +/** + @brief Push an error message into MySQL error stack with line + and position information. + + This function provides semantic action implementers with a way + to push the famous "You have a syntax error near..." error + message into the error stack, which is normally produced only if + a parse error is discovered internally by the Bison generated + parser. +*/ + +void my_parse_error(const char *s) +{ + THD *thd= current_thd; + + char *yytext= (char*) thd->lex->tok_start; + /* Push an error into the error stack */ + my_printf_error(ER_PARSE_ERROR, ER(ER_PARSE_ERROR), MYF(0), s, + (yytext ? 
(char*) yytext : ""), + thd->lex->yylineno); +} + +/** + @brief Bison callback to report a syntax/OOM error + + This function is invoked by the bison-generated parser + when a syntax error, a parse error or an out-of-memory + condition occurs. This function is not invoked when the + parser is requested to abort by semantic action code + by means of YYABORT or YYACCEPT macros. This is why these + macros should not be used (use MYSQL_YYABORT/MYSQL_YYACCEPT + instead). + + The parser will abort immediately after invoking this callback. + + This function is not for use in semantic actions and is internal to + the parser, as it performs some pre-return cleanup. + In semantic actions, please use my_parse_error or my_error to + push an error into the error stack and MYSQL_YYABORT + to abort from the parser. +*/ -inline Item *or_or_concat(THD *thd, Item* A, Item* B) +void MYSQLerror(const char *s) { - return (thd->variables.sql_mode & MODE_PIPES_AS_CONCAT ? - (Item*) new Item_func_concat(A,B) : (Item*) new Item_cond_or(A,B)); + THD *thd= current_thd; + + /* + Restore the original LEX if it was replaced when parsing + a stored procedure. We must ensure that a parsing error + does not leave any side effects in the THD. + */ + LEX::cleanup_lex_after_parse_error(thd); + + /* "parse error" changed into "syntax error" between bison 1.75 and 1.875 */ + if (strcmp(s,"parse error") == 0 || strcmp(s,"syntax error") == 0) + s= ER(ER_SYNTAX_ERROR); + my_parse_error(s); +} + + +#ifndef DBUG_OFF +void turn_parser_debug_on() +{ + /* + MYSQLdebug is in sql/sql_yacc.cc, in bison generated code. + Turning this option on is **VERY** verbose, and should be + used when investigating a syntax error problem only. + + The syntax to run with bison traces is as follows : + - Starting a server manually : + mysqld --debug="d,parser_debug" ... + - Running a test : + mysql-test-run.pl --mysqld="--debug=d,parser_debug" ... 
+ + The result will be in the process stderr (var/log/master.err) + */ + + extern int yydebug; + yydebug= 1; +} +#endif + + +/** + Helper action for a case statement (entering the CASE). + This helper is used for both 'simple' and 'searched' cases. + This helper, with the other case_stmt_action_..., is executed when + the following SQL code is parsed: +<pre> +CREATE PROCEDURE proc_19194_simple(i int) +BEGIN + DECLARE str CHAR(10); + + CASE i + WHEN 1 THEN SET str="1"; + WHEN 2 THEN SET str="2"; + WHEN 3 THEN SET str="3"; + ELSE SET str="unknown"; + END CASE; + + SELECT str; +END +</pre> + The actions are used to generate the following code: +<pre> +SHOW PROCEDURE CODE proc_19194_simple; +Pos Instruction +0 set str@1 NULL +1 set_case_expr (12) 0 i@0 +2 jump_if_not 5(12) (case_expr@0 = 1) +3 set str@1 _latin1'1' +4 jump 12 +5 jump_if_not 8(12) (case_expr@0 = 2) +6 set str@1 _latin1'2' +7 jump 12 +8 jump_if_not 11(12) (case_expr@0 = 3) +9 set str@1 _latin1'3' +10 jump 12 +11 set str@1 _latin1'unknown' +12 stmt 0 "SELECT str" +</pre> + + @param lex the parser lex context +*/ + +void case_stmt_action_case(LEX *lex) +{ + lex->sphead->new_cont_backpatch(NULL); + + /* + BACKPATCH: Creating target label for the jump to + "case_stmt_action_end_case" + (Instruction 12 in the example) + */ + + lex->spcont->push_label((char *)"", lex->sphead->instructions()); +} + +/** + Helper action for a case expression statement (the expr in 'CASE expr'). + This helper is used for 'searched' cases only. 
+ @param lex the parser lex context + @param expr the parsed expression + @return 0 on success +*/ + +int case_stmt_action_expr(LEX *lex, Item* expr) +{ + sp_head *sp= lex->sphead; + sp_pcontext *parsing_ctx= lex->spcont; + int case_expr_id= parsing_ctx->register_case_expr(); + sp_instr_set_case_expr *i; + + if (parsing_ctx->push_case_expr_id(case_expr_id)) + return 1; + + i= new sp_instr_set_case_expr(sp->instructions(), + parsing_ctx, case_expr_id, expr, lex); + + sp->add_cont_backpatch(i); + sp->add_instr(i); + + return 0; +} + +/** + Helper action for a case when condition. + This helper is used for both 'simple' and 'searched' cases. + @param lex the parser lex context + @param when the parsed expression for the WHEN clause + @param simple true for simple cases, false for searched cases +*/ + +void case_stmt_action_when(LEX *lex, Item *when, bool simple) +{ + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + uint ip= sp->instructions(); + sp_instr_jump_if_not *i; + Item_case_expr *var; + Item *expr; + + if (simple) + { + var= new Item_case_expr(ctx->get_current_case_expr_id()); + +#ifndef DBUG_OFF + if (var) + { + var->m_sp= sp; + } +#endif + + expr= new Item_func_eq(var, when); + i= new sp_instr_jump_if_not(ip, ctx, expr, lex); + } + else + i= new sp_instr_jump_if_not(ip, ctx, when, lex); + + /* + BACKPATCH: Registering forward jump from + "case_stmt_action_when" to "case_stmt_action_then" + (jump_if_not from instruction 2 to 5, 5 to 8 ... in the example) + */ + + sp->push_backpatch(i, ctx->push_label((char *)"", 0)); + sp->add_cont_backpatch(i); + sp->add_instr(i); +} + +/** + Helper action for a case then statements. + This helper is used for both 'simple' and 'searched' cases. 
+ @param lex the parser lex context +*/ + +void case_stmt_action_then(LEX *lex) +{ + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + uint ip= sp->instructions(); + sp_instr_jump *i = new sp_instr_jump(ip, ctx); + sp->add_instr(i); + + /* + BACKPATCH: Resolving forward jump from + "case_stmt_action_when" to "case_stmt_action_then" + (jump_if_not from instruction 2 to 5, 5 to 8 ... in the example) + */ + + sp->backpatch(ctx->pop_label()); + + /* + BACKPATCH: Registering forward jump from + "case_stmt_action_then" to "case_stmt_action_end_case" + (jump from instruction 4 to 12, 7 to 12 ... in the example) + */ + + sp->push_backpatch(i, ctx->last_label()); +} + +/** + Helper action for an end case. + This helper is used for both 'simple' and 'searched' cases. + @param lex the parser lex context + @param simple true for simple cases, false for searched cases +*/ + +void case_stmt_action_end_case(LEX *lex, bool simple) +{ + /* + BACKPATCH: Resolving forward jump from + "case_stmt_action_then" to "case_stmt_action_end_case" + (jump from instruction 4 to 12, 7 to 12 ... in the example) + */ + lex->sphead->backpatch(lex->spcont->pop_label()); + + if (simple) + lex->spcont->pop_case_expr_id(); + + lex->sphead->do_cont_backpatch(); +} + +/** + Helper to resolve the SQL:2003 Syntax exception 1) in <in predicate>. + See SQL:2003, Part 2, section 8.4 <in predicate>, Note 184, page 383. + This function returns the proper item for the SQL expression + <code>left [NOT] IN ( expr )</code> + @param thd the current thread + @param left the in predicand + @param equal true for IN predicates, false for NOT IN predicates + @param expr first and only expression of the in value list + @return an expression representing the IN predicate. 
+*/ +Item* handle_sql2003_note184_exception(THD *thd, Item* left, bool equal, + Item *expr) +{ + /* + Relevant references for this issue: + - SQL:2003, Part 2, section 8.4 <in predicate>, page 383, + - SQL:2003, Part 2, section 7.2 <row value expression>, page 296, + - SQL:2003, Part 2, section 6.3 <value expression primary>, page 174, + - SQL:2003, Part 2, section 7.15 <subquery>, page 370, + - SQL:2003 Feature F561, "Full value expressions". + + The exception in SQL:2003 Note 184 means: + Item_singlerow_subselect, which corresponds to a <scalar subquery>, + should be re-interpreted as an Item_in_subselect, which corresponds + to a <table subquery> when used inside an <in predicate>. + + Our reading of Note 184 is reccursive, so that all: + - IN (( <subquery> )) + - IN ((( <subquery> ))) + - IN '('^N <subquery> ')'^N + - etc + should be interpreted as a <table subquery>, no matter how deep in the + expression the <subquery> is. + */ + + Item *result; + + DBUG_ENTER("handle_sql2003_note184_exception"); + + if (expr->type() == Item::SUBSELECT_ITEM) + { + Item_subselect *expr2 = (Item_subselect*) expr; + + if (expr2->substype() == Item_subselect::SINGLEROW_SUBS) + { + Item_singlerow_subselect *expr3 = (Item_singlerow_subselect*) expr2; + st_select_lex *subselect; + + /* + Implement the mandated change, by altering the semantic tree: + left IN Item_singlerow_subselect(subselect) + is modified to + left IN (subselect) + which is represented as + Item_in_subselect(left, subselect) + */ + subselect= expr3->invalidate_and_restore_select_lex(); + result= new (thd->mem_root) Item_in_subselect(left, subselect); + + if (! 
equal) + result = negate_expression(thd, result); + + DBUG_RETURN(result); + } + } + + if (equal) + result= new (thd->mem_root) Item_func_eq(left, expr); + else + result= new (thd->mem_root) Item_func_ne(left, expr); + + DBUG_RETURN(result); } %} @@ -72,6 +430,7 @@ inline Item *or_or_concat(THD *thd, Item* A, Item* B) udf_func *udf; LEX_USER *lex_user; struct sys_var_with_base variable; + enum enum_var_type var_type; Key::Keytype key_type; enum ha_key_alg key_alg; enum db_type db_type; @@ -82,10 +441,14 @@ inline Item *or_or_concat(THD *thd, Item* A, Item* B) enum Item_udftype udf_type; CHARSET_INFO *charset; thr_lock_type lock_type; - interval_type interval; + interval_type interval, interval_time_st; timestamp_type date_time_type; st_select_lex *select_lex; chooser_compare_func_creator boolfunc2creator; + struct sp_cond_type *spcondtype; + struct { int vars, conds, hndlrs, curs; } spblock; + sp_name *spname; + struct st_lex *lex; } %{ @@ -93,496 +456,562 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %} %pure_parser /* We have threads */ - -%token END_OF_INPUT - -%token CLOSE_SYM -%token HANDLER_SYM -%token LAST_SYM -%token NEXT_SYM -%token PREV_SYM - -%token DIV_SYM -%token EQ -%token EQUAL_SYM -%token SOUNDS_SYM -%token GE -%token GT_SYM -%token LE -%token LT -%token NE -%token IS -%token MOD_SYM -%token SHIFT_LEFT -%token SHIFT_RIGHT -%token SET_VAR - -%token ABORT_SYM -%token ADD -%token AFTER_SYM -%token ALTER -%token ANALYZE_SYM -%token ANY_SYM -%token AVG_SYM -%token BEGIN_SYM -%token BINLOG_SYM -%token CHANGE -%token CLIENT_SYM -%token COMMENT_SYM -%token COMMIT_SYM +/* + Currently there is 251 shift/reduce conflict. We should not introduce + new conflicts any more. 
+*/ +%expect 251 + +%token END_OF_INPUT + +%token ABORT_SYM +%token ACTION +%token ADD +%token ADDDATE_SYM +%token AFTER_SYM +%token AGAINST +%token AGGREGATE_SYM +%token ALGORITHM_SYM +%token ALL +%token ALTER +%token ANALYZE_SYM +%token AND_AND_SYM +%token AND_SYM +%token ANY_SYM +%token AS +%token ASC +%token ASCII_SYM +%token ASENSITIVE_SYM +%token ATAN +%token AUTO_INC +%token AVG_ROW_LENGTH +%token AVG_SYM +%token BACKUP_SYM +%token BEFORE_SYM +%token BEGIN_SYM +%token BENCHMARK_SYM +%token BERKELEY_DB_SYM +%token BIGINT +%token BINARY +%token BINLOG_SYM +%token BIN_NUM +%token BIT_AND +%token BIT_OR +%token BIT_SYM +%token BIT_XOR +%token BLOB_SYM +%token BOOLEAN_SYM +%token BOOL_SYM +%token BOTH +%token BTREE_SYM +%token BY +%token BYTE_SYM +%token CACHE_SYM +%token CALL_SYM +%token CASCADE +%token CASCADED +%token CAST_SYM +%token CHAIN_SYM +%token CHANGE +%token CHANGED +%token CHARSET +%token CHAR_SYM +%token CHECKSUM_SYM +%token CHECK_SYM +%token CIPHER_SYM +%token CLIENT_SYM +%token CLOSE_SYM +%token COALESCE +%token CODE_SYM +%token COLLATE_SYM +%token COLLATION_SYM +%token COLUMNS +%token COLUMN_SYM +%token COMMENT_SYM +%token COMMITTED_SYM +%token COMMIT_SYM +%token COMPACT_SYM +%token COMPRESSED_SYM +%token CONCAT +%token CONCAT_WS +%token CONCURRENT +%token CONDITION_SYM +%token CONNECTION_SYM %token CONSISTENT_SYM -%token COUNT_SYM -%token CREATE -%token CROSS +%token CONSTRAINT +%token CONTAINS_SYM +%token CONTINUE_SYM +%token CONVERT_SYM +%token CONVERT_TZ_SYM +%token COUNT_SYM +%token CREATE +%token CROSS %token CUBE_SYM -%token DELETE_SYM -%token DUAL_SYM -%token DO_SYM -%token DROP -%token EVENTS_SYM -%token EXECUTE_SYM -%token EXPANSION_SYM -%token FLUSH_SYM -%token HELP_SYM -%token INSERT -%token RELAY_THREAD -%token KILL_SYM -%token LOAD -%token LOCKS_SYM -%token LOCK_SYM -%token MASTER_SYM -%token MAX_SYM -%token MIN_SYM -%token NONE_SYM -%token OPTIMIZE -%token PURGE -%token REPAIR -%token REPLICATION -%token RESET_SYM -%token 
ROLLBACK_SYM -%token ROLLUP_SYM -%token SAVEPOINT_SYM -%token SELECT_SYM -%token SHOW -%token SLAVE -%token SNAPSHOT_SYM -%token SQL_THREAD -%token START_SYM -%token STD_SYM -%token VARIANCE_SYM -%token STOP_SYM -%token SUM_SYM -%token ADDDATE_SYM -%token SUPER_SYM -%token TRUNCATE_SYM -%token UNLOCK_SYM -%token UNTIL_SYM -%token UPDATE_SYM - -%token ACTION -%token AGGREGATE_SYM -%token ALL -%token AND_SYM -%token AS -%token ASC -%token AUTO_INC -%token AVG_ROW_LENGTH -%token BACKUP_SYM -%token BERKELEY_DB_SYM -%token BINARY -%token BIT_SYM -%token BOOL_SYM -%token BOOLEAN_SYM -%token BOTH -%token BTREE_SYM -%token BY -%token BYTE_SYM -%token CACHE_SYM -%token CASCADE -%token CAST_SYM -%token CHARSET -%token CHECKSUM_SYM -%token CHECK_SYM -%token COMMITTED_SYM -%token COLLATE_SYM -%token COLLATION_SYM -%token COLUMNS -%token COLUMN_SYM -%token CONCURRENT -%token CONSTRAINT -%token CONVERT_SYM +%token CURDATE %token CURRENT_USER -%token DATABASES -%token DATA_SYM -%token DEFAULT -%token DELAYED_SYM -%token DELAY_KEY_WRITE_SYM -%token DESC -%token DESCRIBE -%token DES_KEY_FILE -%token DISABLE_SYM -%token DISCARD -%token DISTINCT +%token CURSOR_SYM +%token CURTIME +%token DATABASE +%token DATABASES +%token DATA_SYM +%token DATETIME +%token DATE_ADD_INTERVAL +%token DATE_SUB_INTERVAL +%token DATE_SYM +%token DAY_HOUR_SYM +%token DAY_MICROSECOND_SYM +%token DAY_MINUTE_SYM +%token DAY_SECOND_SYM +%token DAY_SYM +%token DEALLOCATE_SYM +%token DECIMAL_NUM +%token DECIMAL_SYM +%token DECLARE_SYM +%token DECODE_SYM +%token DEFAULT +%token DEFINER_SYM +%token DELAYED_SYM +%token DELAY_KEY_WRITE_SYM +%token DELETE_SYM +%token DESC +%token DESCRIBE +%token DES_DECRYPT_SYM +%token DES_ENCRYPT_SYM +%token DES_KEY_FILE +%token DETERMINISTIC_SYM +%token DIRECTORY_SYM +%token DISABLE_SYM +%token DISCARD +%token DISTINCT +%token DIV_SYM +%token DOUBLE_SYM +%token DO_SYM +%token DROP +%token DUAL_SYM +%token DUMPFILE %token DUPLICATE_SYM -%token DYNAMIC_SYM -%token ENABLE_SYM -%token 
ENCLOSED -%token ESCAPED -%token DIRECTORY_SYM -%token ESCAPE_SYM -%token EXISTS -%token EXTENDED_SYM -%token FALSE_SYM -%token FILE_SYM -%token FIRST_SYM -%token FIXED_SYM -%token FLOAT_NUM -%token FORCE_SYM -%token FOREIGN -%token FROM -%token FULL -%token FULLTEXT_SYM -%token GLOBAL_SYM -%token GRANT -%token GRANTS -%token GREATEST_SYM -%token GROUP -%token HAVING -%token HASH_SYM -%token HEX_NUM -%token HIGH_PRIORITY -%token HOSTS_SYM -%token IDENT -%token IDENT_QUOTED -%token IGNORE_SYM -%token IMPORT -%token INDEX_SYM -%token INDEXES -%token INFILE -%token INNER_SYM -%token INNOBASE_SYM -%token INTO -%token IN_SYM -%token ISOLATION -%token JOIN_SYM -%token KEYS -%token KEY_SYM -%token LEADING -%token LEAST_SYM -%token LEAVES -%token LEVEL_SYM -%token LEX_HOSTNAME -%token LIKE -%token LINES -%token LOCAL_SYM -%token LOG_SYM -%token LOGS_SYM -%token LONG_NUM -%token LONG_SYM -%token LOW_PRIORITY -%token MASTER_HOST_SYM -%token MASTER_USER_SYM -%token MASTER_LOG_FILE_SYM -%token MASTER_LOG_POS_SYM -%token MASTER_PASSWORD_SYM -%token MASTER_PORT_SYM -%token MASTER_CONNECT_RETRY_SYM -%token MASTER_SERVER_ID_SYM -%token MASTER_SSL_SYM -%token MASTER_SSL_CA_SYM -%token MASTER_SSL_CAPATH_SYM -%token MASTER_SSL_CERT_SYM -%token MASTER_SSL_CIPHER_SYM -%token MASTER_SSL_KEY_SYM -%token RELAY_LOG_FILE_SYM -%token RELAY_LOG_POS_SYM -%token MATCH -%token MAX_ROWS -%token MAX_CONNECTIONS_PER_HOUR -%token MAX_QUERIES_PER_HOUR -%token MAX_UPDATES_PER_HOUR -%token MEDIUM_SYM -%token MIN_ROWS -%token NAMES_SYM -%token NATIONAL_SYM -%token NATURAL -%token NDBCLUSTER_SYM -%token NEW_SYM -%token NCHAR_SYM -%token NCHAR_STRING -%token NVARCHAR_SYM -%token NOT -%token NO_SYM -%token NULL_SYM -%token NUM -%token OFFSET_SYM -%token ON -%token ONE_SHOT_SYM -%token OPEN_SYM -%token OPTION -%token OPTIONALLY -%token OR_SYM -%token OR_OR_CONCAT -%token ORDER_SYM -%token OUTER -%token OUTFILE -%token DUMPFILE -%token PACK_KEYS_SYM -%token PARTIAL -%token PRIMARY_SYM -%token PRIVILEGES 
-%token PROCESS -%token PROCESSLIST_SYM -%token QUERY_SYM -%token RAID_0_SYM -%token RAID_STRIPED_SYM -%token RAID_TYPE -%token RAID_CHUNKS -%token RAID_CHUNKSIZE -%token READ_SYM -%token REAL_NUM -%token REFERENCES -%token REGEXP -%token RELOAD -%token RENAME -%token REPEATABLE_SYM -%token REQUIRE_SYM -%token RESOURCES -%token RESTORE_SYM -%token RESTRICT -%token REVOKE -%token ROWS_SYM -%token ROW_FORMAT_SYM -%token ROW_SYM -%token RTREE_SYM -%token SET -%token SEPARATOR_SYM -%token SERIAL_SYM -%token SERIALIZABLE_SYM -%token SESSION_SYM -%token SIMPLE_SYM -%token SHUTDOWN -%token SPATIAL_SYM -%token SSL_SYM -%token STARTING -%token STATUS_SYM -%token STORAGE_SYM -%token STRAIGHT_JOIN -%token SUBJECT_SYM -%token TABLES -%token TABLE_SYM -%token TABLESPACE -%token TEMPORARY -%token TERMINATED -%token TEXT_STRING -%token TO_SYM -%token TRAILING -%token TRANSACTION_SYM -%token TRUE_SYM -%token TYPE_SYM -%token TYPES_SYM -%token FUNC_ARG0 -%token FUNC_ARG1 -%token FUNC_ARG2 -%token FUNC_ARG3 -%token UDF_RETURNS_SYM -%token UDF_SONAME_SYM -%token UDF_SYM -%token UNCOMMITTED_SYM -%token UNDERSCORE_CHARSET -%token UNICODE_SYM -%token UNION_SYM -%token UNIQUE_SYM -%token USAGE -%token USE_FRM -%token USE_SYM -%token USING -%token VALUE_SYM -%token VALUES -%token VARIABLES -%token WHERE -%token WITH -%token WRITE_SYM -%token NO_WRITE_TO_BINLOG -%token X509_SYM -%token XOR -%token COMPRESSED_SYM - +%token DYNAMIC_SYM +%token EACH_SYM +%token ELSEIF_SYM +%token ELT_FUNC +%token ENABLE_SYM +%token ENCLOSED +%token ENCODE_SYM +%token ENCRYPT +%token END +%token ENGINES_SYM +%token ENGINE_SYM +%token ENUM +%token EQ +%token EQUAL_SYM %token ERRORS -%token WARNINGS - -%token ASCII_SYM -%token BIGINT -%token BLOB_SYM -%token CHAR_SYM -%token CHANGED -%token COALESCE -%token DATETIME -%token DATE_SYM -%token DECIMAL_SYM -%token DOUBLE_SYM -%token ENUM -%token FAST_SYM -%token FLOAT_SYM -%token GEOMETRY_SYM -%token INT_SYM -%token LIMIT -%token LONGBLOB -%token LONGTEXT -%token 
MEDIUMBLOB -%token MEDIUMINT -%token MEDIUMTEXT -%token NUMERIC_SYM -%token PRECISION -%token PREPARE_SYM -%token DEALLOCATE_SYM -%token QUICK -%token REAL -%token SIGNED_SYM -%token SMALLINT -%token STRING_SYM -%token TEXT_SYM -%token TIMESTAMP -%token TIME_SYM -%token TINYBLOB -%token TINYINT -%token TINYTEXT -%token ULONGLONG_NUM -%token UNSIGNED -%token VARBINARY -%token VARCHAR -%token VARYING -%token ZEROFILL - -%token ADDDATE_SYM -%token AGAINST -%token ATAN -%token BETWEEN_SYM -%token BIT_AND -%token BIT_OR -%token BIT_XOR -%token CASE_SYM -%token CONCAT -%token CONCAT_WS -%token CONVERT_TZ_SYM -%token CURDATE -%token CURTIME -%token DATABASE -%token DATE_ADD_INTERVAL -%token DATE_SUB_INTERVAL -%token DAY_HOUR_SYM -%token DAY_MICROSECOND_SYM -%token DAY_MINUTE_SYM -%token DAY_SECOND_SYM -%token DAY_SYM -%token DECODE_SYM -%token DES_ENCRYPT_SYM -%token DES_DECRYPT_SYM -%token ELSE -%token ELT_FUNC -%token ENCODE_SYM -%token ENGINE_SYM -%token ENGINES_SYM -%token ENCRYPT -%token EXPORT_SET -%token EXTRACT_SYM -%token FIELD_FUNC -%token FORMAT_SYM -%token FOR_SYM -%token FROM_UNIXTIME -%token GEOMCOLLFROMTEXT -%token GEOMFROMTEXT -%token GEOMFROMWKB +%token ESCAPED +%token ESCAPE_SYM +%token EVENTS_SYM +%token EXECUTE_SYM +%token EXISTS +%token EXIT_SYM +%token EXPANSION_SYM +%token EXPORT_SET +%token EXTENDED_SYM +%token EXTRACT_SYM +%token FALSE_SYM +%token FAST_SYM +%token FETCH_SYM +%token FIELD_FUNC +%token FILE_SYM +%token FIRST_SYM +%token FIXED_SYM +%token FLOAT_NUM +%token FLOAT_SYM +%token FLUSH_SYM +%token FORCE_SYM +%token FOREIGN +%token FORMAT_SYM +%token FOR_SYM +%token FOUND_SYM +%token FRAC_SECOND_SYM +%token FROM +%token FROM_UNIXTIME +%token FULL +%token FULLTEXT_SYM +%token FUNCTION_SYM +%token FUNC_ARG0 +%token FUNC_ARG1 +%token FUNC_ARG2 +%token FUNC_ARG3 +%token GE +%token GEOMCOLLFROMTEXT %token GEOMETRYCOLLECTION -%token GROUP_CONCAT_SYM -%token GROUP_UNIQUE_USERS +%token GEOMETRY_SYM +%token GEOMFROMTEXT +%token GEOMFROMWKB %token 
GET_FORMAT -%token HOUR_MICROSECOND_SYM -%token HOUR_MINUTE_SYM -%token HOUR_SECOND_SYM -%token HOUR_SYM -%token IDENTIFIED_SYM -%token IF -%token INSERT_METHOD -%token INTERVAL_SYM -%token LAST_INSERT_ID -%token LEFT -%token LINEFROMTEXT +%token GLOBAL_SYM +%token GRANT +%token GRANTS +%token GREATEST_SYM +%token GROUP +%token GROUP_CONCAT_SYM +%token GROUP_UNIQUE_USERS +%token GT_SYM +%token HANDLER_SYM +%token HASH_SYM +%token HAVING +%token HELP_SYM +%token HEX_NUM +%token HIGH_PRIORITY +%token HOSTS_SYM +%token HOUR_MICROSECOND_SYM +%token HOUR_MINUTE_SYM +%token HOUR_SECOND_SYM +%token HOUR_SYM +%token IDENT +%token IDENTIFIED_SYM +%token IDENT_QUOTED +%token IF +%token IGNORE_SYM +%token IMPORT +%token INDEXES +%token INDEX_SYM +%token INFILE +%token INNER_SYM +%token INNOBASE_SYM +%token INOUT_SYM +%token INSENSITIVE_SYM +%token INSERT +%token INSERT_METHOD +%token INTERVAL_SYM +%token INTO +%token INT_SYM +%token INVOKER_SYM +%token IN_SYM +%token IS +%token ISOLATION +%token ISSUER_SYM +%token ITERATE_SYM +%token JOIN_SYM +%token KEYS +%token KEY_SYM +%token KILL_SYM +%token LABEL_SYM +%token LANGUAGE_SYM +%token LAST_INSERT_ID +%token LAST_SYM +%token LE +%token LEADING +%token LEAST_SYM +%token LEAVES +%token LEAVE_SYM +%token LEFT +%token LEVEL_SYM +%token LEX_HOSTNAME +%token LIKE +%token LIMIT +%token LINEFROMTEXT +%token LINES %token LINESTRING -%token LOCATE -%token MAKE_SET_SYM -%token MASTER_POS_WAIT +%token LOAD +%token LOCAL_SYM +%token LOCATE +%token LOCATOR_SYM +%token LOCKS_SYM +%token LOCK_SYM +%token LOGS_SYM +%token LOG_SYM +%token LONGBLOB +%token LONGTEXT +%token LONG_NUM +%token LONG_SYM +%token LOOP_SYM +%token LOW_PRIORITY +%token LT +%token MAKE_SET_SYM +%token MASTER_CONNECT_RETRY_SYM +%token MASTER_HOST_SYM +%token MASTER_LOG_FILE_SYM +%token MASTER_LOG_POS_SYM +%token MASTER_PASSWORD_SYM +%token MASTER_PORT_SYM +%token MASTER_POS_WAIT +%token MASTER_SERVER_ID_SYM +%token MASTER_SSL_CAPATH_SYM +%token MASTER_SSL_CA_SYM +%token 
MASTER_SSL_CERT_SYM +%token MASTER_SSL_CIPHER_SYM +%token MASTER_SSL_KEY_SYM +%token MASTER_SSL_SYM +%token MASTER_SYM +%token MASTER_USER_SYM +%token MATCH +%token MAX_CONNECTIONS_PER_HOUR +%token MAX_QUERIES_PER_HOUR +%token MAX_ROWS +%token MAX_SYM +%token MAX_UPDATES_PER_HOUR +%token MAX_USER_CONNECTIONS_SYM +%token MEDIUMBLOB +%token MEDIUMINT +%token MEDIUMTEXT +%token MEDIUM_SYM +%token MERGE_SYM %token MICROSECOND_SYM -%token MINUTE_MICROSECOND_SYM -%token MINUTE_SECOND_SYM -%token MINUTE_SYM -%token MODE_SYM -%token MODIFY_SYM -%token MONTH_SYM -%token MLINEFROMTEXT -%token MPOINTFROMTEXT -%token MPOLYFROMTEXT +%token MIGRATE_SYM +%token MINUTE_MICROSECOND_SYM +%token MINUTE_SECOND_SYM +%token MINUTE_SYM +%token MIN_ROWS +%token MIN_SYM +%token MLINEFROMTEXT +%token MODE_SYM +%token MODIFIES_SYM +%token MODIFY_SYM +%token MOD_SYM +%token MONTH_SYM +%token MPOINTFROMTEXT +%token MPOLYFROMTEXT %token MULTILINESTRING %token MULTIPOINT %token MULTIPOLYGON -%token NOW_SYM -%token OLD_PASSWORD -%token PASSWORD +%token MUTEX_SYM +%token NAMES_SYM +%token NAME_SYM +%token NATIONAL_SYM +%token NATURAL +%token NCHAR_STRING +%token NCHAR_SYM +%token NDBCLUSTER_SYM +%token NE +%token NEW_SYM +%token NEXT_SYM +%token NONE_SYM +%token NOT2_SYM +%token NOT_SYM +%token NOW_SYM +%token NO_SYM +%token NO_WRITE_TO_BINLOG +%token NULL_SYM +%token NUM +%token NUMERIC_SYM +%token NVARCHAR_SYM +%token OFFSET_SYM +%token OJ_SYM +%token OLD_PASSWORD +%token ON +%token ONE_SHOT_SYM +%token ONE_SYM +%token OPEN_SYM +%token OPTIMIZE +%token OPTION +%token OPTIONALLY +%token OR2_SYM +%token ORDER_SYM +%token OR_OR_SYM +%token OR_SYM +%token OUTER +%token OUTFILE +%token OUT_SYM +%token PACK_KEYS_SYM +%token PARTIAL +%token PASSWORD %token PARAM_MARKER -%token POINTFROMTEXT -%token POINT_SYM -%token POLYFROMTEXT +%token PHASE_SYM +%token POINTFROMTEXT +%token POINT_SYM +%token POLYFROMTEXT %token POLYGON -%token POSITION_SYM -%token PROCEDURE -%token RAND -%token REPLACE -%token RIGHT 
-%token ROUND -%token SECOND_SYM -%token SECOND_MICROSECOND_SYM -%token SHARE_SYM -%token SUBDATE_SYM -%token SUBSTRING -%token SUBSTRING_INDEX -%token TRIM -%token UDA_CHAR_SUM -%token UDA_FLOAT_SUM -%token UDA_INT_SUM -%token UDF_CHAR_FUNC -%token UDF_FLOAT_FUNC -%token UDF_INT_FUNC -%token UNIQUE_USERS -%token UNIX_TIMESTAMP -%token USER -%token UTC_DATE_SYM -%token UTC_TIME_SYM -%token UTC_TIMESTAMP_SYM -%token WEEK_SYM -%token WHEN_SYM -%token WORK_SYM -%token YEAR_MONTH_SYM -%token YEAR_SYM -%token YEARWEEK -%token BENCHMARK_SYM -%token END -%token THEN_SYM - -%token SQL_BIG_RESULT -%token SQL_CACHE_SYM -%token SQL_CALC_FOUND_ROWS -%token SQL_NO_CACHE_SYM -%token SQL_SMALL_RESULT +%token POSITION_SYM +%token PRECISION +%token PREPARE_SYM +%token PREV_SYM +%token PRIMARY_SYM +%token PRIVILEGES +%token PROCEDURE +%token PROCESS +%token PROCESSLIST_SYM +%token PURGE +%token QUARTER_SYM +%token QUERY_SYM +%token QUICK +%token RAID_0_SYM +%token RAID_CHUNKS +%token RAID_CHUNKSIZE +%token RAID_STRIPED_SYM +%token RAID_TYPE +%token RAND +%token READS_SYM +%token READ_SYM +%token REAL +%token RECOVER_SYM +%token REDUNDANT_SYM +%token REFERENCES +%token REGEXP +%token RELAY_LOG_FILE_SYM +%token RELAY_LOG_POS_SYM +%token RELAY_THREAD +%token RELEASE_SYM +%token RELOAD +%token RENAME +%token REPAIR +%token REPEATABLE_SYM +%token REPEAT_SYM +%token REPLACE +%token REPLICATION +%token REQUIRE_SYM +%token RESET_SYM +%token RESOURCES +%token RESTORE_SYM +%token RESTRICT +%token RESUME_SYM +%token RETURNS_SYM +%token RETURN_SYM +%token REVOKE +%token RIGHT +%token ROLLBACK_SYM +%token ROLLUP_SYM +%token ROUND +%token ROUTINE_SYM +%token ROWS_SYM +%token ROW_COUNT_SYM +%token ROW_FORMAT_SYM +%token ROW_SYM +%token RTREE_SYM +%token SAVEPOINT_SYM +%token SECOND_MICROSECOND_SYM +%token SECOND_SYM +%token SECURITY_SYM +%token SELECT_SYM +%token SENSITIVE_SYM +%token SEPARATOR_SYM +%token SERIALIZABLE_SYM +%token SERIAL_SYM +%token SESSION_SYM +%token SET +%token SET_VAR +%token 
SHARE_SYM +%token SHIFT_LEFT +%token SHIFT_RIGHT +%token SHOW +%token SHUTDOWN +%token SIGNED_SYM +%token SIMPLE_SYM +%token SLAVE +%token SMALLINT +%token SNAPSHOT_SYM +%token SOUNDS_SYM +%token SPATIAL_SYM +%token SPECIFIC_SYM +%token SQLEXCEPTION_SYM +%token SQLSTATE_SYM +%token SQLWARNING_SYM +%token SQL_BIG_RESULT %token SQL_BUFFER_RESULT - -%token ISSUER_SYM +%token SQL_CACHE_SYM +%token SQL_CALC_FOUND_ROWS +%token SQL_NO_CACHE_SYM +%token SQL_SMALL_RESULT +%token SQL_SYM +%token SQL_THREAD +%token SSL_SYM +%token STARTING +%token START_SYM +%token STATUS_SYM +%token STD_SYM +%token STDDEV_SAMP_SYM +%token STOP_SYM +%token STORAGE_SYM +%token STRAIGHT_JOIN +%token STRING_SYM +%token SUBDATE_SYM %token SUBJECT_SYM -%token CIPHER_SYM - -%token BEFORE_SYM +%token SUBSTRING +%token SUBSTRING_INDEX +%token SUM_SYM +%token SUPER_SYM +%token SUSPEND_SYM +%token SYSDATE +%token TABLES +%token TABLESPACE +%token TABLE_SYM +%token TEMPORARY +%token TEMPTABLE_SYM +%token TERMINATED +%token TEXT_STRING +%token TEXT_SYM +%token TIMESTAMP +%token TIMESTAMP_ADD +%token TIMESTAMP_DIFF +%token TIME_SYM +%token TINYBLOB +%token TINYINT +%token TINYTEXT +%token TO_SYM +%token TRAILING +%token TRANSACTION_SYM +%token TRIGGER_SYM +%token TRIGGERS_SYM +%token TRIM +%token TRUE_SYM +%token TRUNCATE_SYM +%token TYPES_SYM +%token TYPE_SYM +%token UDF_RETURNS_SYM +%token UDF_SONAME_SYM +%token ULONGLONG_NUM +%token UNCOMMITTED_SYM +%token UNDEFINED_SYM +%token UNDERSCORE_CHARSET +%token UNDO_SYM +%token UNICODE_SYM +%token UNION_SYM +%token UNIQUE_SYM +%token UNIQUE_USERS +%token UNIX_TIMESTAMP +%token UNKNOWN_SYM +%token UNLOCK_SYM +%token UNSIGNED +%token UNTIL_SYM +%token UPDATE_SYM +%token UPGRADE_SYM +%token USAGE +%token USER +%token USE_FRM +%token USE_SYM +%token USING +%token UTC_DATE_SYM +%token UTC_TIMESTAMP_SYM +%token UTC_TIME_SYM +%token VAR_SAMP_SYM +%token VALUES +%token VALUE_SYM +%token VARBINARY +%token VARCHAR +%token VARIABLES +%token VARIANCE_SYM +%token VARYING 
+%token VIEW_SYM +%token WARNINGS +%token WEEK_SYM +%token WHEN_SYM +%token WHERE +%token WHILE_SYM +%token WITH +%token WORK_SYM +%token WRITE_SYM +%token X509_SYM +%token XA_SYM +%token XOR +%token YEARWEEK +%token YEAR_MONTH_SYM +%token YEAR_SYM +%token ZEROFILL + +%left JOIN_SYM INNER_SYM STRAIGHT_JOIN CROSS LEFT RIGHT +/* A dummy token to force the priority of table_ref production in a join. */ +%left TABLE_REF_PRIORITY %left SET_VAR -%left OR_OR_CONCAT OR_SYM XOR -%left AND_SYM +%left OR_OR_SYM OR_SYM OR2_SYM XOR +%left AND_SYM AND_AND_SYM %left BETWEEN_SYM CASE_SYM WHEN_SYM THEN_SYM ELSE %left EQ EQUAL_SYM GE GT_SYM LE LT NE IS LIKE REGEXP IN_SYM %left '|' @@ -592,20 +1021,21 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %left '*' '/' '%' DIV_SYM MOD_SYM %left '^' %left NEG '~' -%right NOT +%right NOT_SYM NOT2_SYM %right BINARY COLLATE_SYM %type <lex_str> - IDENT IDENT_QUOTED TEXT_STRING REAL_NUM FLOAT_NUM NUM LONG_NUM HEX_NUM + IDENT IDENT_QUOTED TEXT_STRING DECIMAL_NUM FLOAT_NUM NUM LONG_NUM HEX_NUM LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text UNDERSCORE_CHARSET IDENT_sys TEXT_STRING_sys TEXT_STRING_literal NCHAR_STRING opt_component key_cache_name + sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem %type <lex_str_ptr> opt_table_alias %type <table> - table_ident table_ident_nodb references + table_ident table_ident_nodb references xid %type <simple_string> remember_name remember_end opt_ident opt_db text_or_password @@ -615,15 +1045,16 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); text_string opt_gconcat_separator %type <num> - type int_type real_type order_dir opt_field_spec lock_option + type int_type real_type order_dir lock_option udf_type if_exists opt_local opt_table_options table_options - table_option opt_if_not_exists opt_no_write_to_binlog opt_var_type - opt_var_ident_type delete_option opt_temporary all_or_any opt_distinct + table_option opt_if_not_exists 
opt_no_write_to_binlog + delete_option opt_temporary all_or_any opt_distinct opt_ignore_leaves fulltext_options spatial_type union_option - start_transaction_opts + start_transaction_opts opt_chain opt_release + union_opt select_derived_init option_type2 %type <ulong_num> - ULONG_NUM raid_types merge_insert_types + ulong_num raid_types merge_insert_types %type <ulonglong_number> ulonglong_num @@ -634,18 +1065,25 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type <item> literal text_literal insert_ident order_ident simple_ident select_item2 expr opt_expr opt_else sum_expr in_sum_expr - table_wild no_in_expr expr_expr simple_expr no_and_expr - using_list expr_or_default set_expr_or_default interval_expr - param_marker singlerow_subselect singlerow_subselect_init - exists_subselect exists_subselect_init geometry_function + variable variable_aux bool_term bool_factor bool_test bool_pri + predicate bit_expr bit_term bit_factor value_expr term factor + table_wild simple_expr udf_expr + expr_or_default set_expr_or_default interval_expr + param_marker geometry_function signed_literal now_or_signed_literal opt_escape + sp_opt_default + simple_ident_nospvar simple_ident_q + field_or_var limit_option %type <item_num> NUM_literal %type <item_list> - expr_list udf_expr_list udf_sum_expr_list when_list ident_list - ident_list_arg + expr_list udf_expr_list udf_expr_list2 when_list + ident_list ident_list_arg opt_expr_list + +%type <var_type> + option_type opt_var_type opt_var_ident_type %type <key_type> key_type opt_unique_or_fulltext constraint_key_type @@ -654,21 +1092,21 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); key_alg opt_btree_or_rtree %type <string_list> - key_usage_list + key_usage_list using_list %type <key_part> key_part %type <table_list> join_table_list join_table - -%type <udf> - UDF_CHAR_FUNC UDF_FLOAT_FUNC UDF_INT_FUNC - UDA_CHAR_SUM UDA_FLOAT_SUM UDA_INT_SUM + table_factor table_ref + select_derived derived_table_list 
%type <date_time_type> date_time_type; %type <interval> interval +%type <interval_time_st> interval_time_st + %type <db_type> storage_engines %type <row_type> row_types @@ -681,7 +1119,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type <udf_type> udf_func_type -%type <symbol> FUNC_ARG0 FUNC_ARG1 FUNC_ARG2 FUNC_ARG3 keyword +%type <symbol> FUNC_ARG0 FUNC_ARG1 FUNC_ARG2 FUNC_ARG3 keyword keyword_sp %type <lex_user> user grant_user @@ -693,10 +1131,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); old_or_new_charset_name_or_default collation_name collation_name_or_default + opt_load_data_charset %type <variable> internal_variable_name -%type <select_lex> in_subselect in_subselect_init +%type <select_lex> subselect subselect_init + get_select_lex %type <boolfunc2creator> comp_op @@ -704,7 +1144,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); query verb_clause create change select do drop insert replace insert2 insert_values update delete truncate rename show describe load alter optimize keycache preload flush - reset purge begin commit rollback savepoint + reset purge begin commit rollback savepoint release slave master_def master_defs master_file_def slave_until_opts repair restore backup analyze check start checksum field_list field_list_item field_spec kill column_def key_def @@ -712,7 +1152,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); select_item_list select_item values_list no_braces opt_limit_clause delete_limit_clause fields opt_values values procedure_list procedure_list2 procedure_item - when_list2 expr_list2 handler + expr_list2 udf_expr_list3 handler opt_precision opt_ignore opt_column opt_restrict grant revoke set lock unlock string_list field_options field_option field_opt_list opt_binary table_lock_list table_lock @@ -720,9 +1160,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); opt_delete_options opt_delete_option varchar nchar nvarchar opt_outer 
table_list table_name opt_option opt_place opt_attribute opt_attribute_list attribute column_list column_list_id - opt_column_list grant_privileges opt_table user_list grant_option - grant_privilege grant_privilege_list - flush_options flush_option + opt_column_list grant_privileges grant_ident grant_list grant_option + object_privilege object_privilege_list user_list rename_list + clear_privileges flush_options flush_option equal optional_braces opt_key_definition key_usage_list2 opt_mi_check_type opt_to mi_check_types normal_join table_to_table_list table_to_table opt_table_list opt_as @@ -732,13 +1172,28 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); precision subselect_start opt_and charset subselect_end select_var_list select_var_list_init help opt_len opt_extended_describe - prepare prepare_src execute deallocate + prepare prepare_src execute deallocate + statement sp_suid + sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic xa + load_data opt_field_or_var_spec fields_or_vars opt_load_data_set_spec + definer view_replace_or_algorithm view_replace view_algorithm_opt + view_algorithm view_or_trigger_or_sp view_or_trigger_or_sp_tail + view_suid view_tail view_list_opt view_list view_select + view_check_option trigger_tail sp_tail + case_stmt_specification simple_case_stmt searched_case_stmt END_OF_INPUT +%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt +%type <num> sp_decl_idents sp_opt_inout sp_handler_type sp_hcond_list +%type <spcondtype> sp_cond sp_hcond +%type <spblock> sp_decls sp_decl +%type <lex> sp_cursor_stmt +%type <spname> sp_name + %type <NONE> '-' '+' '*' '/' '%' '(' ')' - ',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_CONCAT BETWEEN_SYM CASE_SYM - THEN_SYM WHEN_SYM DIV_SYM MOD_SYM + ',' '!' 
'{' '}' '&' '|' AND_SYM OR_SYM OR_OR_SYM BETWEEN_SYM CASE_SYM + THEN_SYM WHEN_SYM DIV_SYM MOD_SYM OR2_SYM AND_AND_SYM %% @@ -749,8 +1204,8 @@ query: if (!thd->bootstrap && (!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT))) { - send_error(thd,ER_EMPTY_QUERY); - YYABORT; + my_message(ER_EMPTY_QUERY, ER(ER_EMPTY_QUERY), MYF(0)); + MYSQL_YYABORT; } else { @@ -760,10 +1215,16 @@ query: | verb_clause END_OF_INPUT {}; verb_clause: + statement + | begin + ; + +/* Verb clauses, except begin */ +statement: alter | analyze | backup - | begin + | call | change | check | checksum @@ -788,6 +1249,7 @@ verb_clause: | preload | prepare | purge + | release | rename | repair | replace @@ -805,6 +1267,7 @@ verb_clause: | unlock | update | use + | xa ; deallocate: @@ -812,10 +1275,10 @@ deallocate: { THD *thd=YYTHD; LEX *lex= thd->lex; - if (thd->command == COM_PREPARE) + if (lex->stmt_prepare_mode) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->sql_command= SQLCOM_DEALLOCATE_PREPARE; lex->prepared_stmt_name= $3; @@ -832,10 +1295,10 @@ prepare: { THD *thd=YYTHD; LEX *lex= thd->lex; - if (thd->command == COM_PREPARE) + if (lex->stmt_prepare_mode) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->sql_command= SQLCOM_PREPARE; lex->prepared_stmt_name= $2; @@ -862,10 +1325,10 @@ execute: { THD *thd=YYTHD; LEX *lex= thd->lex; - if (thd->command == COM_PREPARE) + if (lex->stmt_prepare_mode) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } lex->sql_command= SQLCOM_EXECUTE; lex->prepared_stmt_name= $2; @@ -889,18 +1352,26 @@ execute_var_ident: '@' ident_or_text LEX *lex=Lex; LEX_STRING *lexstr= (LEX_STRING*)sql_memdup(&$2, sizeof(LEX_STRING)); if (!lexstr || lex->prepared_stmt_params.push_back(lexstr)) - YYABORT; + MYSQL_YYABORT; } ; /* help */ help: - HELP_SYM ident_or_text + HELP_SYM + { + if (Lex->sphead) + { + 
my_error(ER_SP_BADSTATEMENT, MYF(0), "HELP"); + MYSQL_YYABORT; + } + } + ident_or_text { LEX *lex= Lex; lex->sql_command= SQLCOM_HELP; - lex->help_arg= $2.str; + lex->help_arg= $3.str; }; /* change master */ @@ -936,16 +1407,16 @@ master_def: Lex->mi.password = $3.str; } | - MASTER_PORT_SYM EQ ULONG_NUM + MASTER_PORT_SYM EQ ulong_num { Lex->mi.port = $3; } | - MASTER_CONNECT_RETRY_SYM EQ ULONG_NUM + MASTER_CONNECT_RETRY_SYM EQ ulong_num { Lex->mi.connect_retry = $3; } - | MASTER_SSL_SYM EQ ULONG_NUM + | MASTER_SSL_SYM EQ ulong_num { Lex->mi.ssl= $3 ? LEX_MASTER_INFO::SSL_ENABLE : LEX_MASTER_INFO::SSL_DISABLE; @@ -999,7 +1470,7 @@ master_file_def: { Lex->mi.relay_log_name = $3.str; } - | RELAY_LOG_POS_SYM EQ ULONG_NUM + | RELAY_LOG_POS_SYM EQ ulong_num { Lex->mi.relay_log_pos = $3; /* Adjust if < BIN_LOG_HEADER_SIZE (same comment as Lex->mi.pos) */ @@ -1020,7 +1491,7 @@ create: (using_update_log ? TL_READ_NO_INSERT: TL_READ))) - YYABORT; + MYSQL_YYABORT; lex->alter_info.reset(); lex->col_list.empty(); lex->change=NullS; @@ -1038,9 +1509,8 @@ create: lex->sql_command= SQLCOM_CREATE_INDEX; if (!lex->current_select->add_table_to_list(lex->thd, $7, NULL, TL_OPTION_UPDATING)) - YYABORT; + MYSQL_YYABORT; lex->alter_info.reset(); - lex->alter_info.is_simple= 0; lex->alter_info.flags= ALTER_ADD_INDEX; lex->col_list.empty(); lex->change=NullS; @@ -1065,19 +1535,1233 @@ create: lex->name=$4.str; lex->create_info.options=$3; } - | CREATE udf_func_type UDF_SYM IDENT_sys + | CREATE + { + Lex->create_view_mode= VIEW_CREATE_NEW; + Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; + Lex->create_view_suid= TRUE; + } + view_or_trigger_or_sp + {} + | CREATE USER clear_privileges grant_list + { + Lex->sql_command = SQLCOM_CREATE_USER; + } + ; + +clear_privileges: + /* Nothing */ + { + LEX *lex=Lex; + lex->users_list.empty(); + lex->columns.empty(); + lex->grant= lex->grant_tot_col= 0; + lex->all_privileges= 0; + lex->select_lex.db= 0; + lex->ssl_type= SSL_TYPE_NOT_SPECIFIED; + 
lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0; + bzero((char *)&(lex->mqh),sizeof(lex->mqh)); + } + ; + +sp_name: + ident '.' ident + { + if (!$1.str || check_db_name($1.str)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), $1.str); + MYSQL_YYABORT; + } + if (check_routine_name($3)) + { + my_error(ER_SP_WRONG_NAME, MYF(0), $3.str); + MYSQL_YYABORT; + } + $$= new sp_name($1, $3); + $$->init_qname(YYTHD); + } + | ident + { + THD *thd= YYTHD; + LEX_STRING db; + if (check_routine_name($1)) + { + my_error(ER_SP_WRONG_NAME, MYF(0), $1.str); + MYSQL_YYABORT; + } + if (thd->copy_db_to(&db.str, &db.length)) + MYSQL_YYABORT; + $$= new sp_name(db, $1); + if ($$) + $$->init_qname(YYTHD); + } + ; + +create_function_tail: + RETURNS_SYM udf_type UDF_SONAME_SYM TEXT_STRING_sys { LEX *lex=Lex; + if (lex->definer != NULL) + { + /* + DEFINER is a concept meaningful when interpreting SQL code. + UDF functions are compiled. + Using DEFINER with UDF has therefore no semantic, + and is considered a parsing error. + */ + my_error(ER_WRONG_USAGE, MYF(0), "SONAME", "DEFINER"); + MYSQL_YYABORT; + } lex->sql_command = SQLCOM_CREATE_FUNCTION; - lex->udf.name = $4; - lex->udf.type= $2; + lex->udf.name = lex->spname->m_name; + lex->udf.returns=(Item_result) $2; + lex->udf.dl=$4.str; } - UDF_RETURNS_SYM udf_type UDF_SONAME_SYM TEXT_STRING_sys + | '(' { - LEX *lex=Lex; - lex->udf.returns=(Item_result) $7; - lex->udf.dl=$9.str; + LEX *lex= Lex; + sp_head *sp; + + /* + First check if AGGREGATE was used, in that case it's a + syntax error. 
+ */ + if (lex->udf.type == UDFTYPE_AGGREGATE) + { + my_error(ER_SP_NO_AGGREGATE, MYF(0)); + MYSQL_YYABORT; + } + + if (lex->sphead) + { + my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "FUNCTION"); + MYSQL_YYABORT; + } + /* Order is important here: new - reset - init */ + sp= new sp_head(); + sp->reset_thd_mem_root(YYTHD); + sp->init(lex); + sp->init_sp_name(YYTHD, lex->spname); + + sp->m_type= TYPE_ENUM_FUNCTION; + lex->sphead= sp; + /* + * We have to turn of CLIENT_MULTI_QUERIES while parsing a + * stored procedure, otherwise yylex will chop it into pieces + * at each ';'. + */ + sp->m_old_cmq= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES; + YYTHD->client_capabilities &= ~CLIENT_MULTI_QUERIES; + lex->sphead->m_param_begin= lex->tok_start+1; + } + sp_fdparam_list ')' + { + LEX *lex= Lex; + + lex->sphead->m_param_end= lex->tok_start; + } + RETURNS_SYM + { + LEX *lex= Lex; + lex->charset= NULL; + lex->length= lex->dec= NULL; + lex->interval_list.empty(); + lex->type= 0; + } + type + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + if (sp->fill_field_definition(YYTHD, lex, + (enum enum_field_types) $8, + &sp->m_return_field_def)) + MYSQL_YYABORT; + + bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); + } + sp_c_chistics + { + LEX *lex= Lex; + + lex->sphead->m_chistics= &lex->sp_chistics; + lex->sphead->m_body_begin= lex->tok_start; + } + sp_proc_stmt + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + if (sp->is_not_allowed_in_function("function")) + MYSQL_YYABORT; + + lex->sql_command= SQLCOM_CREATE_SPFUNCTION; + sp->init_strings(YYTHD, lex); + if (!(sp->m_flags & sp_head::HAS_RETURN)) + { + my_error(ER_SP_NORETURN, MYF(0), sp->m_qname.str); + MYSQL_YYABORT; + } + /* Restore flag if it was cleared above */ + if (sp->m_old_cmq) + YYTHD->client_capabilities |= CLIENT_MULTI_QUERIES; + sp->restore_thd_mem_root(YYTHD); + } + ; + +sp_a_chistics: + /* Empty */ {} + | sp_a_chistics sp_chistic {} + ; + +sp_c_chistics: + /* Empty */ {} + | sp_c_chistics 
sp_c_chistic {} + ; + +/* Characteristics for both create and alter */ +sp_chistic: + COMMENT_SYM TEXT_STRING_sys + { Lex->sp_chistics.comment= $2; } + | LANGUAGE_SYM SQL_SYM + { /* Just parse it, we only have one language for now. */ } + | NO_SYM SQL_SYM + { Lex->sp_chistics.daccess= SP_NO_SQL; } + | CONTAINS_SYM SQL_SYM + { Lex->sp_chistics.daccess= SP_CONTAINS_SQL; } + | READS_SYM SQL_SYM DATA_SYM + { Lex->sp_chistics.daccess= SP_READS_SQL_DATA; } + | MODIFIES_SYM SQL_SYM DATA_SYM + { Lex->sp_chistics.daccess= SP_MODIFIES_SQL_DATA; } + | sp_suid + { } + ; + +/* Create characteristics */ +sp_c_chistic: + sp_chistic { } + | DETERMINISTIC_SYM { Lex->sp_chistics.detistic= TRUE; } + | not DETERMINISTIC_SYM { Lex->sp_chistics.detistic= FALSE; } + ; + +sp_suid: + SQL_SYM SECURITY_SYM DEFINER_SYM + { + Lex->sp_chistics.suid= SP_IS_SUID; + } + | SQL_SYM SECURITY_SYM INVOKER_SYM + { + Lex->sp_chistics.suid= SP_IS_NOT_SUID; + } + ; + +call: + CALL_SYM sp_name + { + LEX *lex = Lex; + + lex->sql_command= SQLCOM_CALL; + lex->spname= $2; + lex->value_list.empty(); + sp_add_used_routine(lex, YYTHD, $2, TYPE_ENUM_PROCEDURE); + } + opt_sp_cparam_list {} + ; + +/* CALL parameters */ +opt_sp_cparam_list: + /* Empty */ + | '(' opt_sp_cparams ')' + ; + +opt_sp_cparams: + /* Empty */ + | sp_cparams + ; + +sp_cparams: + sp_cparams ',' expr + { + Lex->value_list.push_back($3); + } + | expr + { + Lex->value_list.push_back($1); + } + ; + +/* Stored FUNCTION parameter declaration list */ +sp_fdparam_list: + /* Empty */ + | sp_fdparams + ; + +sp_fdparams: + sp_fdparams ',' sp_fdparam + | sp_fdparam + ; + +sp_init_param: + /* Empty */ + { + LEX *lex= Lex; + + lex->length= 0; + lex->dec= 0; + lex->type= 0; + + lex->default_value= 0; + lex->on_update_value= 0; + + lex->comment= null_lex_str; + lex->charset= NULL; + + lex->interval_list.empty(); + lex->uint_geom_type= 0; + } + ; + +sp_fdparam: + ident sp_init_param type + { + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + + if 
(spc->find_variable(&$1, TRUE)) + { + my_error(ER_SP_DUP_PARAM, MYF(0), $1.str); + MYSQL_YYABORT; + } + sp_variable_t *spvar= spc->push_variable(&$1, + (enum enum_field_types)$3, + sp_param_in); + + if (lex->sphead->fill_field_definition(YYTHD, lex, + (enum enum_field_types) $3, + &spvar->field_def)) + { + MYSQL_YYABORT; + } + spvar->field_def.field_name= spvar->name.str; + spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL; + } + ; + +/* Stored PROCEDURE parameter declaration list */ +sp_pdparam_list: + /* Empty */ + | sp_pdparams + ; + +sp_pdparams: + sp_pdparams ',' sp_pdparam + | sp_pdparam + ; + +sp_pdparam: + sp_opt_inout sp_init_param ident type + { + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + + if (spc->find_variable(&$3, TRUE)) + { + my_error(ER_SP_DUP_PARAM, MYF(0), $3.str); + MYSQL_YYABORT; + } + sp_variable_t *spvar= spc->push_variable(&$3, + (enum enum_field_types)$4, + (sp_param_mode_t)$1); + + if (lex->sphead->fill_field_definition(YYTHD, lex, + (enum enum_field_types) $4, + &spvar->field_def)) + { + MYSQL_YYABORT; + } + spvar->field_def.field_name= spvar->name.str; + spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL; + } + ; + +sp_opt_inout: + /* Empty */ { $$= sp_param_in; } + | IN_SYM { $$= sp_param_in; } + | OUT_SYM { $$= sp_param_out; } + | INOUT_SYM { $$= sp_param_inout; } + ; + +sp_proc_stmts: + /* Empty */ {} + | sp_proc_stmts sp_proc_stmt ';' + ; + +sp_proc_stmts1: + sp_proc_stmt ';' {} + | sp_proc_stmts1 sp_proc_stmt ';' + ; + +sp_decls: + /* Empty */ + { + $$.vars= $$.conds= $$.hndlrs= $$.curs= 0; + } + | sp_decls sp_decl ';' + { + /* We check for declarations out of (standard) order this way + because letting the grammar rules reflect it caused tricky + shift/reduce conflicts with the wrong result. (And we get + better error handling this way.) 
*/ + if (($2.vars || $2.conds) && ($1.curs || $1.hndlrs)) + { /* Variable or condition following cursor or handler */ + my_message(ER_SP_VARCOND_AFTER_CURSHNDLR, + ER(ER_SP_VARCOND_AFTER_CURSHNDLR), MYF(0)); + MYSQL_YYABORT; + } + if ($2.curs && $1.hndlrs) + { /* Cursor following handler */ + my_message(ER_SP_CURSOR_AFTER_HANDLER, + ER(ER_SP_CURSOR_AFTER_HANDLER), MYF(0)); + MYSQL_YYABORT; + } + $$.vars= $1.vars + $2.vars; + $$.conds= $1.conds + $2.conds; + $$.hndlrs= $1.hndlrs + $2.hndlrs; + $$.curs= $1.curs + $2.curs; + } + ; + +sp_decl: + DECLARE_SYM sp_decl_idents + { + LEX *lex= Lex; + + lex->sphead->reset_lex(YYTHD); + lex->spcont->declare_var_boundary($2); + } + type + sp_opt_default + { + LEX *lex= Lex; + sp_pcontext *pctx= lex->spcont; + uint num_vars= pctx->context_var_count(); + enum enum_field_types var_type= (enum enum_field_types) $4; + Item *dflt_value_item= $5; + + if (!dflt_value_item) + { + dflt_value_item= new Item_null(); + /* QQ Set to the var_type with null_value? */ + } + + for (uint i = num_vars-$2 ; i < num_vars ; i++) + { + uint var_idx= pctx->var_context2runtime(i); + sp_variable_t *spvar= pctx->find_variable(var_idx); + + if (!spvar) + MYSQL_YYABORT; + + spvar->type= var_type; + spvar->dflt= dflt_value_item; + + if (lex->sphead->fill_field_definition(YYTHD, lex, var_type, + &spvar->field_def)) + { + MYSQL_YYABORT; + } + + spvar->field_def.field_name= spvar->name.str; + spvar->field_def.pack_flag |= FIELDFLAG_MAYBE_NULL; + + /* The last instruction is responsible for freeing LEX. 
*/ + + lex->sphead->add_instr( + new sp_instr_set(lex->sphead->instructions(), pctx, var_idx, + dflt_value_item, var_type, lex, + (i == num_vars - 1))); + } + + pctx->declare_var_boundary(0); + lex->sphead->restore_lex(YYTHD); + + $$.vars= $2; + $$.conds= $$.hndlrs= $$.curs= 0; + } + | DECLARE_SYM ident CONDITION_SYM FOR_SYM sp_cond + { + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + + if (spc->find_cond(&$2, TRUE)) + { + my_error(ER_SP_DUP_COND, MYF(0), $2.str); + MYSQL_YYABORT; + } + YYTHD->lex->spcont->push_cond(&$2, $5); + $$.vars= $$.hndlrs= $$.curs= 0; + $$.conds= 1; + } + | DECLARE_SYM sp_handler_type HANDLER_SYM FOR_SYM + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + lex->spcont= lex->spcont->push_context(LABEL_HANDLER_SCOPE); + + sp_pcontext *ctx= lex->spcont; + sp_instr_hpush_jump *i= + new sp_instr_hpush_jump(sp->instructions(), ctx, $2, + ctx->current_var_count()); + + sp->add_instr(i); + sp->push_backpatch(i, ctx->push_label((char *)"", 0)); + } + sp_hcond_list sp_proc_stmt + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + sp_label_t *hlab= lex->spcont->pop_label(); /* After this hdlr */ + sp_instr_hreturn *i; + + if ($2 == SP_HANDLER_CONTINUE) + { + i= new sp_instr_hreturn(sp->instructions(), ctx, + ctx->current_var_count()); + sp->add_instr(i); + } + else + { /* EXIT or UNDO handler, just jump to the end of the block */ + i= new sp_instr_hreturn(sp->instructions(), ctx, 0); + + sp->add_instr(i); + sp->push_backpatch(i, lex->spcont->last_label()); /* Block end */ + } + lex->sphead->backpatch(hlab); + + lex->spcont= ctx->pop_context(); + + $$.vars= $$.conds= $$.curs= 0; + $$.hndlrs= $6; + lex->spcont->add_handlers($6); + } + | DECLARE_SYM ident CURSOR_SYM FOR_SYM sp_cursor_stmt + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + uint offp; + sp_instr_cpush *i; + + if (ctx->find_cursor(&$2, &offp, TRUE)) + { + my_error(ER_SP_DUP_CURS, MYF(0), $2.str); + delete $5; + 
MYSQL_YYABORT; + } + i= new sp_instr_cpush(sp->instructions(), ctx, $5, + ctx->current_cursor_count()); + sp->add_instr(i); + ctx->push_cursor(&$2); + $$.vars= $$.conds= $$.hndlrs= 0; + $$.curs= 1; + } + ; + +sp_cursor_stmt: + { + Lex->sphead->reset_lex(YYTHD); + + /* We use statement here just be able to get a better + error message. Using 'select' works too, but will then + result in a generic "syntax error" if a non-select + statement is given. */ + } + statement + { + LEX *lex= Lex; + + if (lex->sql_command != SQLCOM_SELECT) + { + my_message(ER_SP_BAD_CURSOR_QUERY, ER(ER_SP_BAD_CURSOR_QUERY), + MYF(0)); + MYSQL_YYABORT; + } + if (lex->result) + { + my_message(ER_SP_BAD_CURSOR_SELECT, ER(ER_SP_BAD_CURSOR_SELECT), + MYF(0)); + MYSQL_YYABORT; + } + lex->sp_lex_in_use= TRUE; + $$= lex; + lex->sphead->restore_lex(YYTHD); + } + ; + +sp_handler_type: + EXIT_SYM { $$= SP_HANDLER_EXIT; } + | CONTINUE_SYM { $$= SP_HANDLER_CONTINUE; } +/* | UNDO_SYM { QQ No yet } */ + ; + +sp_hcond_list: + sp_hcond_element + { $$= 1; } + | sp_hcond_list ',' sp_hcond_element + { $$+= 1; } + ; + +sp_hcond_element: + sp_hcond + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont->parent_context(); + + if (ctx->find_handler($1)) + { + my_message(ER_SP_DUP_HANDLER, ER(ER_SP_DUP_HANDLER), MYF(0)); + MYSQL_YYABORT; + } + else + { + sp_instr_hpush_jump *i= + (sp_instr_hpush_jump *)sp->last_instruction(); + + i->add_condition($1); + ctx->push_handler($1); + } + } + ; + +sp_cond: + ulong_num + { /* mysql errno */ + $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$->type= sp_cond_type_t::number; + $$->mysqlerr= $1; + } + | SQLSTATE_SYM opt_value TEXT_STRING_literal + { /* SQLSTATE */ + if (!sp_cond_check(&$3)) + { + my_error(ER_SP_BAD_SQLSTATE, MYF(0), $3.str); + MYSQL_YYABORT; + } + $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$->type= sp_cond_type_t::state; + memcpy($$->sqlstate, $3.str, 5); + $$->sqlstate[5]= '\0'; + } + ; + +opt_value: 
+ /* Empty */ {} + | VALUE_SYM {} + ; + +sp_hcond: + sp_cond + { + $$= $1; + } + | ident /* CONDITION name */ + { + $$= Lex->spcont->find_cond(&$1); + if ($$ == NULL) + { + my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str); + MYSQL_YYABORT; + } + } + | SQLWARNING_SYM /* SQLSTATEs 01??? */ + { + $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$->type= sp_cond_type_t::warning; + } + | not FOUND_SYM /* SQLSTATEs 02??? */ + { + $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$->type= sp_cond_type_t::notfound; + } + | SQLEXCEPTION_SYM /* All other SQLSTATEs */ + { + $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$->type= sp_cond_type_t::exception; + } + ; + +sp_decl_idents: + ident + { + /* NOTE: field definition is filled in sp_decl section. */ + + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + + if (spc->find_variable(&$1, TRUE)) + { + my_error(ER_SP_DUP_VAR, MYF(0), $1.str); + MYSQL_YYABORT; + } + spc->push_variable(&$1, (enum_field_types)0, sp_param_in); + $$= 1; + } + | sp_decl_idents ',' ident + { + /* NOTE: field definition is filled in sp_decl section. */ + + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + + if (spc->find_variable(&$3, TRUE)) + { + my_error(ER_SP_DUP_VAR, MYF(0), $3.str); + MYSQL_YYABORT; + } + spc->push_variable(&$3, (enum_field_types)0, sp_param_in); + $$= $1 + 1; + } + ; + +sp_opt_default: + /* Empty */ { $$ = NULL; } + | DEFAULT expr { $$ = $2; } + ; + +sp_proc_stmt: + { + LEX *lex= Lex; + + lex->sphead->reset_lex(YYTHD); + lex->sphead->m_tmp_query= lex->tok_start; + } + statement + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + sp->m_flags|= sp_get_flags_for_command(lex); + if (lex->sql_command == SQLCOM_CHANGE_DB) + { /* "USE db" doesn't work in a procedure */ + my_error(ER_SP_BADSTATEMENT, MYF(0), "USE"); + MYSQL_YYABORT; + } + /* + Don't add an instruction for SET statements, since all + instructions for them were already added during processing + of "set" rule. 
+ */ + DBUG_ASSERT(lex->sql_command != SQLCOM_SET_OPTION || + lex->var_list.is_empty()); + if (lex->sql_command != SQLCOM_SET_OPTION) + { + sp_instr_stmt *i=new sp_instr_stmt(sp->instructions(), + lex->spcont, lex); + + /* + Extract the query statement from the tokenizer. The + end is either lex->ptr, if there was no lookahead, + lex->tok_end otherwise. + */ + if (yychar == YYEMPTY) + i->m_query.length= lex->ptr - sp->m_tmp_query; + else + i->m_query.length= lex->tok_end - sp->m_tmp_query; + i->m_query.str= strmake_root(YYTHD->mem_root, + (char *)sp->m_tmp_query, + i->m_query.length); + sp->add_instr(i); + } + sp->restore_lex(YYTHD); + } + | RETURN_SYM + { Lex->sphead->reset_lex(YYTHD); } + expr + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + if (sp->m_type != TYPE_ENUM_FUNCTION) + { + my_message(ER_SP_BADRETURN, ER(ER_SP_BADRETURN), MYF(0)); + MYSQL_YYABORT; + } + else + { + sp_instr_freturn *i; + + i= new sp_instr_freturn(sp->instructions(), lex->spcont, $3, + sp->m_return_field_def.sql_type, lex); + sp->add_instr(i); + sp->m_flags|= sp_head::HAS_RETURN; + } + sp->restore_lex(YYTHD); + } + | IF + { Lex->sphead->new_cont_backpatch(NULL); } + sp_if END IF + { Lex->sphead->do_cont_backpatch(); } + | case_stmt_specification + | sp_labeled_control + {} + | { /* Unlabeled controls get a secret label. */ + LEX *lex= Lex; + + lex->spcont->push_label((char *)"", lex->sphead->instructions()); + } + sp_unlabeled_control + { + LEX *lex= Lex; + + lex->sphead->backpatch(lex->spcont->pop_label()); + } + | LEAVE_SYM label_ident + { + LEX *lex= Lex; + sp_head *sp = lex->sphead; + sp_pcontext *ctx= lex->spcont; + sp_label_t *lab= ctx->find_label($2.str); + + if (! lab) + { + my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "LEAVE", $2.str); + MYSQL_YYABORT; + } + else + { + sp_instr_jump *i; + uint ip= sp->instructions(); + uint n; + + n= ctx->diff_handlers(lab->ctx, TRUE); /* Exclusive the dest. 
*/ + if (n) + sp->add_instr(new sp_instr_hpop(ip++, ctx, n)); + n= ctx->diff_cursors(lab->ctx, TRUE); /* Exclusive the dest. */ + if (n) + sp->add_instr(new sp_instr_cpop(ip++, ctx, n)); + i= new sp_instr_jump(ip, ctx); + sp->push_backpatch(i, lab); /* Jumping forward */ + sp->add_instr(i); + } + } + | ITERATE_SYM label_ident + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + sp_label_t *lab= ctx->find_label($2.str); + + if (! lab || lab->type != SP_LAB_ITER) + { + my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "ITERATE", $2.str); + MYSQL_YYABORT; + } + else + { + sp_instr_jump *i; + uint ip= sp->instructions(); + uint n; + + n= ctx->diff_handlers(lab->ctx, FALSE); /* Inclusive the dest. */ + if (n) + sp->add_instr(new sp_instr_hpop(ip++, ctx, n)); + n= ctx->diff_cursors(lab->ctx, FALSE); /* Inclusive the dest. */ + if (n) + sp->add_instr(new sp_instr_cpop(ip++, ctx, n)); + i= new sp_instr_jump(ip, ctx, lab->ip); /* Jump back */ + sp->add_instr(i); + } } + | OPEN_SYM ident + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + uint offset; + sp_instr_copen *i; + + if (! lex->spcont->find_cursor(&$2, &offset)) + { + my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str); + MYSQL_YYABORT; + } + i= new sp_instr_copen(sp->instructions(), lex->spcont, offset); + sp->add_instr(i); + } + | FETCH_SYM sp_opt_fetch_noise ident INTO + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + uint offset; + sp_instr_cfetch *i; + + if (! lex->spcont->find_cursor(&$3, &offset)) + { + my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $3.str); + MYSQL_YYABORT; + } + i= new sp_instr_cfetch(sp->instructions(), lex->spcont, offset); + sp->add_instr(i); + } + sp_fetch_list + { } + | CLOSE_SYM ident + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + uint offset; + sp_instr_cclose *i; + + if (! 
lex->spcont->find_cursor(&$2, &offset)) + { + my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str); + MYSQL_YYABORT; + } + i= new sp_instr_cclose(sp->instructions(), lex->spcont, offset); + sp->add_instr(i); + } + ; + +sp_opt_fetch_noise: + /* Empty */ + | NEXT_SYM FROM + | FROM + ; + +sp_fetch_list: + ident + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *spc= lex->spcont; + sp_variable_t *spv; + + if (!spc || !(spv = spc->find_variable(&$1))) + { + my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str); + MYSQL_YYABORT; + } + else + { + /* An SP local variable */ + sp_instr_cfetch *i= (sp_instr_cfetch *)sp->last_instruction(); + + i->add_to_varlist(spv); + } + } + | + sp_fetch_list ',' ident + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *spc= lex->spcont; + sp_variable_t *spv; + + if (!spc || !(spv = spc->find_variable(&$3))) + { + my_error(ER_SP_UNDECLARED_VAR, MYF(0), $3.str); + MYSQL_YYABORT; + } + else + { + /* An SP local variable */ + sp_instr_cfetch *i= (sp_instr_cfetch *)sp->last_instruction(); + + i->add_to_varlist(spv); + } + } + ; + +sp_if: + { Lex->sphead->reset_lex(YYTHD); } + expr THEN_SYM + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + uint ip= sp->instructions(); + sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, ctx, + $2, lex); + + sp->push_backpatch(i, ctx->push_label((char *)"", 0)); + sp->add_cont_backpatch(i); + sp->add_instr(i); + sp->restore_lex(YYTHD); + } + sp_proc_stmts1 + { + sp_head *sp= Lex->sphead; + sp_pcontext *ctx= Lex->spcont; + uint ip= sp->instructions(); + sp_instr_jump *i = new sp_instr_jump(ip, ctx); + + sp->add_instr(i); + sp->backpatch(ctx->pop_label()); + sp->push_backpatch(i, ctx->push_label((char *)"", 0)); + } + sp_elseifs + { + LEX *lex= Lex; + + lex->sphead->backpatch(lex->spcont->pop_label()); + } + ; + +sp_elseifs: + /* Empty */ + | ELSEIF_SYM sp_if + | ELSE sp_proc_stmts1 + ; + +case_stmt_specification: + simple_case_stmt + | searched_case_stmt + ; + 
+simple_case_stmt: + CASE_SYM + { + LEX *lex= Lex; + case_stmt_action_case(lex); + lex->sphead->reset_lex(YYTHD); /* For expr $3 */ + } + expr + { + LEX *lex= Lex; + if (case_stmt_action_expr(lex, $3)) + MYSQL_YYABORT; + + lex->sphead->restore_lex(YYTHD); /* For expr $3 */ + } + simple_when_clause_list + else_clause_opt + END + CASE_SYM + { + LEX *lex= Lex; + case_stmt_action_end_case(lex, true); + } + ; + +searched_case_stmt: + CASE_SYM + { + LEX *lex= Lex; + case_stmt_action_case(lex); + } + searched_when_clause_list + else_clause_opt + END + CASE_SYM + { + LEX *lex= Lex; + case_stmt_action_end_case(lex, false); + } + ; + +simple_when_clause_list: + simple_when_clause + | simple_when_clause_list simple_when_clause + ; + +searched_when_clause_list: + searched_when_clause + | searched_when_clause_list searched_when_clause + ; + +simple_when_clause: + WHEN_SYM + { + Lex->sphead->reset_lex(YYTHD); /* For expr $3 */ + } + expr + { + /* Simple case: <caseval> = <whenval> */ + + LEX *lex= Lex; + case_stmt_action_when(lex, $3, true); + lex->sphead->restore_lex(YYTHD); /* For expr $3 */ + } + THEN_SYM + sp_proc_stmts1 + { + LEX *lex= Lex; + case_stmt_action_then(lex); + } + ; + +searched_when_clause: + WHEN_SYM + { + Lex->sphead->reset_lex(YYTHD); /* For expr $3 */ + } + expr + { + LEX *lex= Lex; + case_stmt_action_when(lex, $3, false); + lex->sphead->restore_lex(YYTHD); /* For expr $3 */ + } + THEN_SYM + sp_proc_stmts1 + { + LEX *lex= Lex; + case_stmt_action_then(lex); + } + ; + +else_clause_opt: + /* empty */ + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + uint ip= sp->instructions(); + sp_instr_error *i= new sp_instr_error(ip, lex->spcont, + ER_SP_CASE_NOT_FOUND); + sp->add_instr(i); + } + | ELSE sp_proc_stmts1 + ; + +sp_labeled_control: + label_ident ':' + { + LEX *lex= Lex; + sp_pcontext *ctx= lex->spcont; + sp_label_t *lab= ctx->find_label($1.str); + + if (lab) + { + my_error(ER_SP_LABEL_REDEFINE, MYF(0), $1.str); + MYSQL_YYABORT; + } + else + { + lab= 
lex->spcont->push_label($1.str, + lex->sphead->instructions()); + lab->type= SP_LAB_ITER; + } + } + sp_unlabeled_control sp_opt_label + { + LEX *lex= Lex; + + if ($5.str) + { + sp_label_t *lab= lex->spcont->find_label($5.str); + + if (!lab || + my_strcasecmp(system_charset_info, $5.str, lab->name) != 0) + { + my_error(ER_SP_LABEL_MISMATCH, MYF(0), $5.str); + MYSQL_YYABORT; + } + } + lex->sphead->backpatch(lex->spcont->pop_label()); + } + ; + +sp_opt_label: + /* Empty */ { $$= null_lex_str; } + | label_ident { $$= $1; } + ; + +sp_unlabeled_control: + BEGIN_SYM + { /* QQ This is just a dummy for grouping declarations and statements + together. No [[NOT] ATOMIC] yet, and we need to figure out how + make it coexist with the existing BEGIN COMMIT/ROLLBACK. */ + LEX *lex= Lex; + sp_label_t *lab= lex->spcont->last_label(); + + lab->type= SP_LAB_BEGIN; + lex->spcont= lex->spcont->push_context(LABEL_DEFAULT_SCOPE); + } + sp_decls + sp_proc_stmts + END + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + + sp->backpatch(ctx->last_label()); /* We always have a label */ + if ($3.hndlrs) + sp->add_instr(new sp_instr_hpop(sp->instructions(), ctx, + $3.hndlrs)); + if ($3.curs) + sp->add_instr(new sp_instr_cpop(sp->instructions(), ctx, + $3.curs)); + lex->spcont= ctx->pop_context(); + } + | LOOP_SYM + sp_proc_stmts1 END LOOP_SYM + { + LEX *lex= Lex; + uint ip= lex->sphead->instructions(); + sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */ + sp_instr_jump *i = new sp_instr_jump(ip, lex->spcont, lab->ip); + + lex->sphead->add_instr(i); + } + | WHILE_SYM + { Lex->sphead->reset_lex(YYTHD); } + expr DO_SYM + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + uint ip= sp->instructions(); + sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, lex->spcont, + $3, lex); + + /* Jumping forward */ + sp->push_backpatch(i, lex->spcont->last_label()); + sp->new_cont_backpatch(i); + sp->add_instr(i); + sp->restore_lex(YYTHD); + } + sp_proc_stmts1 END 
WHILE_SYM + { + LEX *lex= Lex; + uint ip= lex->sphead->instructions(); + sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */ + sp_instr_jump *i = new sp_instr_jump(ip, lex->spcont, lab->ip); + + lex->sphead->add_instr(i); + lex->sphead->do_cont_backpatch(); + } + | REPEAT_SYM sp_proc_stmts1 UNTIL_SYM + { Lex->sphead->reset_lex(YYTHD); } + expr END REPEAT_SYM + { + LEX *lex= Lex; + uint ip= lex->sphead->instructions(); + sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */ + sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, lex->spcont, + $5, lab->ip, + lex); + lex->sphead->add_instr(i); + lex->sphead->restore_lex(YYTHD); + /* We can shortcut the cont_backpatch here */ + i->m_cont_dest= ip+1; + } + ; + +trg_action_time: + BEFORE_SYM + { Lex->trg_chistics.action_time= TRG_ACTION_BEFORE; } + | AFTER_SYM + { Lex->trg_chistics.action_time= TRG_ACTION_AFTER; } + ; + +trg_event: + INSERT + { Lex->trg_chistics.event= TRG_EVENT_INSERT; } + | UPDATE_SYM + { Lex->trg_chistics.event= TRG_EVENT_UPDATE; } + | DELETE_SYM + { Lex->trg_chistics.event= TRG_EVENT_DELETE; } ; create2: @@ -1086,14 +2770,26 @@ create2: | LIKE table_ident { LEX *lex=Lex; + THD *thd= lex->thd; if (!(lex->name= (char *)$2)) - YYABORT; + MYSQL_YYABORT; + if ($2->db.str == NULL && + thd->copy_db_to(&($2->db.str), &($2->db.length))) + { + MYSQL_YYABORT; + } } | '(' LIKE table_ident ')' { LEX *lex=Lex; + THD *thd= lex->thd; if (!(lex->name= (char *)$3)) - YYABORT; + MYSQL_YYABORT; + if ($3->db.str == NULL && + thd->copy_db_to(&($3->db.str), &($3->db.length))) + { + MYSQL_YYABORT; + } } ; @@ -1119,6 +2815,10 @@ create_select: lex->sql_command= SQLCOM_INSERT_SELECT; else if (lex->sql_command == SQLCOM_REPLACE) lex->sql_command= SQLCOM_REPLACE_SELECT; + /* + The following work only with the local list, the global list + is created correctly in this case + */ lex->current_select->table_list.save_and_clear(&lex->save_list); mysql_init_select(lex); lex->current_select->parsing_place= 
SELECT_LIST; @@ -1128,7 +2828,13 @@ create_select: Select->parsing_place= NO_MATTER; } opt_select_from - { Lex->current_select->table_list.push_front(&Lex->save_list); } + { + /* + The following work only with the local list, the global list + is created correctly in this case + */ + Lex->current_select->table_list.push_front(&Lex->save_list); + } ; opt_as: @@ -1160,7 +2866,7 @@ table_option: opt_if_not_exists: /* empty */ { $$= 0; } - | IF NOT EXISTS { $$=HA_LEX_CREATE_IF_NOT_EXISTS; }; + | IF not EXISTS { $$=HA_LEX_CREATE_IF_NOT_EXISTS; }; opt_create_table_options: /* empty */ @@ -1176,22 +2882,53 @@ create_table_options: | create_table_option ',' create_table_options; create_table_option: - ENGINE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; } - | TYPE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; WARN_DEPRECATED("TYPE=storage_engine","ENGINE=storage_engine"); } + ENGINE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE; } + | TYPE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; WARN_DEPRECATED("TYPE=storage_engine","ENGINE=storage_engine"); Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE; } | MAX_ROWS opt_equal ulonglong_num { Lex->create_info.max_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MAX_ROWS;} | MIN_ROWS opt_equal ulonglong_num { Lex->create_info.min_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MIN_ROWS;} - | AVG_ROW_LENGTH opt_equal ULONG_NUM { Lex->create_info.avg_row_length=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AVG_ROW_LENGTH;} - | PASSWORD opt_equal TEXT_STRING_sys { Lex->create_info.password=$3.str; } - | COMMENT_SYM opt_equal TEXT_STRING_sys { Lex->create_info.comment=$3.str; } + | AVG_ROW_LENGTH opt_equal ulong_num { Lex->create_info.avg_row_length=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AVG_ROW_LENGTH;} + | PASSWORD opt_equal TEXT_STRING_sys { Lex->create_info.password=$3.str; 
Lex->create_info.used_fields|= HA_CREATE_USED_PASSWORD; } + | COMMENT_SYM opt_equal TEXT_STRING_sys { Lex->create_info.comment=$3; Lex->create_info.used_fields|= HA_CREATE_USED_COMMENT; } | AUTO_INC opt_equal ulonglong_num { Lex->create_info.auto_increment_value=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AUTO;} - | PACK_KEYS_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_PACK_KEYS : HA_OPTION_NO_PACK_KEYS; Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;} - | PACK_KEYS_SYM opt_equal DEFAULT { Lex->create_info.table_options&= ~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS); Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;} - | CHECKSUM_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_CHECKSUM : HA_OPTION_NO_CHECKSUM; } - | DELAY_KEY_WRITE_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_DELAY_KEY_WRITE : HA_OPTION_NO_DELAY_KEY_WRITE; } - | ROW_FORMAT_SYM opt_equal row_types { Lex->create_info.row_type= $3; } - | RAID_TYPE opt_equal raid_types { Lex->create_info.raid_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} - | RAID_CHUNKS opt_equal ULONG_NUM { Lex->create_info.raid_chunks= $3; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} - | RAID_CHUNKSIZE opt_equal ULONG_NUM { Lex->create_info.raid_chunksize= $3*RAID_BLOCK_SIZE; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} + | PACK_KEYS_SYM opt_equal ulong_num + { + switch($3) { + case 0: + Lex->create_info.table_options|= HA_OPTION_NO_PACK_KEYS; + break; + case 1: + Lex->create_info.table_options|= HA_OPTION_PACK_KEYS; + break; + default: + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS; + } + | PACK_KEYS_SYM opt_equal DEFAULT + { + Lex->create_info.table_options&= + ~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS); + Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS; + } + | CHECKSUM_SYM opt_equal ulong_num { 
Lex->create_info.table_options|= $3 ? HA_OPTION_CHECKSUM : HA_OPTION_NO_CHECKSUM; Lex->create_info.used_fields|= HA_CREATE_USED_CHECKSUM; } + | DELAY_KEY_WRITE_SYM opt_equal ulong_num { Lex->create_info.table_options|= $3 ? HA_OPTION_DELAY_KEY_WRITE : HA_OPTION_NO_DELAY_KEY_WRITE; Lex->create_info.used_fields|= HA_CREATE_USED_DELAY_KEY_WRITE; } + | ROW_FORMAT_SYM opt_equal row_types { Lex->create_info.row_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_ROW_FORMAT; } + | RAID_TYPE opt_equal raid_types + { + my_error(ER_WARN_DEPRECATED_SYNTAX, MYF(0), "RAID_TYPE", "PARTITION"); + MYSQL_YYABORT; + } + | RAID_CHUNKS opt_equal ulong_num + { + my_error(ER_WARN_DEPRECATED_SYNTAX, MYF(0), "RAID_CHUNKS", "PARTITION"); + MYSQL_YYABORT; + } + | RAID_CHUNKSIZE opt_equal ulong_num + { + my_error(ER_WARN_DEPRECATED_SYNTAX, MYF(0), "RAID_CHUNKSIZE", "PARTITION"); + MYSQL_YYABORT; + } | UNION_SYM opt_equal '(' table_list ')' { /* Move the union list to the merge_list */ @@ -1199,18 +2936,21 @@ create_table_option: TABLE_LIST *table_list= lex->select_lex.get_table_list(); lex->create_info.merge_list= lex->select_lex.table_list; lex->create_info.merge_list.elements--; - lex->create_info.merge_list.first= (byte*) (table_list->next); + lex->create_info.merge_list.first= + (byte*) (table_list->next_local); lex->select_lex.table_list.elements=1; - lex->select_lex.table_list.next= (byte**) &(table_list->next); - table_list->next=0; + lex->select_lex.table_list.next= + (byte**) &(table_list->next_local); + table_list->next_local= 0; lex->create_info.used_fields|= HA_CREATE_USED_UNION; } | default_charset | default_collation | INSERT_METHOD opt_equal merge_insert_types { Lex->create_info.merge_insert_method= $3; Lex->create_info.used_fields|= HA_CREATE_USED_INSERT_METHOD;} - | DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys - { Lex->create_info.data_file_name= $4.str; } - | INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; }; + | 
DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.data_file_name= $4.str; Lex->create_info.used_fields|= HA_CREATE_USED_DATADIR; } + | INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; Lex->create_info.used_fields|= HA_CREATE_USED_INDEXDIR; } + | CONNECTION_SYM opt_equal TEXT_STRING_sys { Lex->create_info.connect_string.str= $3.str; Lex->create_info.connect_string.length= $3.length; Lex->create_info.used_fields|= HA_CREATE_USED_CONNECTION; } + ; default_charset: opt_default charset opt_equal charset_name_or_default @@ -1220,10 +2960,10 @@ default_charset: cinfo->default_table_charset && $4 && !my_charset_same(cinfo->default_table_charset,$4)) { - net_printf(YYTHD, ER_CONFLICTING_DECLARATIONS, - "CHARACTER SET ", cinfo->default_table_charset->csname, - "CHARACTER SET ", $4->csname); - YYABORT; + my_error(ER_CONFLICTING_DECLARATIONS, MYF(0), + "CHARACTER SET ", cinfo->default_table_charset->csname, + "CHARACTER SET ", $4->csname); + MYSQL_YYABORT; } Lex->create_info.default_table_charset= $4; Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; @@ -1237,9 +2977,9 @@ default_collation: cinfo->default_table_charset && $4 && !my_charset_same(cinfo->default_table_charset,$4)) { - net_printf(YYTHD,ER_COLLATION_CHARSET_MISMATCH, - $4->name, cinfo->default_table_charset->csname); - YYABORT; + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + $4->name, cinfo->default_table_charset->csname); + MYSQL_YYABORT; } Lex->create_info.default_table_charset= $4; Lex->create_info.used_fields|= HA_CREATE_USED_DEFAULT_CHARSET; @@ -1250,8 +2990,8 @@ storage_engines: { $$ = ha_resolve_by_name($1.str,$1.length); if ($$ == DB_TYPE_UNKNOWN) { - net_printf(YYTHD, ER_UNKNOWN_STORAGE_ENGINE, $1.str); - YYABORT; + my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str); + MYSQL_YYABORT; } }; @@ -1259,12 +2999,14 @@ row_types: DEFAULT { $$= ROW_TYPE_DEFAULT; } | FIXED_SYM { $$= ROW_TYPE_FIXED; } | DYNAMIC_SYM { $$= 
ROW_TYPE_DYNAMIC; } - | COMPRESSED_SYM { $$= ROW_TYPE_COMPRESSED; }; + | COMPRESSED_SYM { $$= ROW_TYPE_COMPRESSED; } + | REDUNDANT_SYM { $$= ROW_TYPE_REDUNDANT; } + | COMPACT_SYM { $$= ROW_TYPE_COMPACT; }; raid_types: RAID_STRIPED_SYM { $$= RAID_TYPE_0; } | RAID_0_SYM { $$= RAID_TYPE_0; } - | ULONG_NUM { $$=$1;}; + | ulong_num { $$=$1;}; merge_insert_types: NO_SYM { $$= MERGE_INSERT_DISABLED; } @@ -1282,6 +3024,7 @@ udf_func_type: udf_type: STRING_SYM {$$ = (int) STRING_RESULT; } | REAL {$$ = (int) REAL_RESULT; } + | DECIMAL_SYM {$$ = (int) DECIMAL_RESULT; } | INT_SYM {$$ = (int) INT_RESULT; }; field_list: @@ -1370,7 +3113,7 @@ field_spec: LEX *lex=Lex; lex->length=lex->dec=0; lex->type=0; lex->default_value= lex->on_update_value= 0; - lex->comment=0; + lex->comment=null_lex_str; lex->charset=NULL; } type opt_attribute @@ -1380,18 +3123,20 @@ field_spec: (enum enum_field_types) $3, lex->length,lex->dec,lex->type, lex->default_value, lex->on_update_value, - lex->comment, + &lex->comment, lex->change,&lex->interval_list,lex->charset, lex->uint_geom_type)) - YYABORT; + MYSQL_YYABORT; }; type: int_type opt_len field_options { $$=$1; } | real_type opt_precision field_options { $$=$1; } | FLOAT_SYM float_options field_options { $$=FIELD_TYPE_FLOAT; } - | BIT_SYM opt_len { Lex->length=(char*) "1"; - $$=FIELD_TYPE_TINY; } + | BIT_SYM { Lex->length= (char*) "1"; + $$=FIELD_TYPE_BIT; } + | BIT_SYM '(' NUM ')' { Lex->length= $3.str; + $$=FIELD_TYPE_BIT; } | BOOL_SYM { Lex->length=(char*) "1"; $$=FIELD_TYPE_TINY; } | BOOLEAN_SYM { Lex->length=(char*) "1"; @@ -1400,10 +3145,10 @@ type: $$=FIELD_TYPE_STRING; } | char opt_binary { Lex->length=(char*) "1"; $$=FIELD_TYPE_STRING; } - | nchar '(' NUM ')' { Lex->length=$3.str; + | nchar '(' NUM ')' opt_bin_mod { Lex->length=$3.str; $$=FIELD_TYPE_STRING; Lex->charset=national_charset_info; } - | nchar { Lex->length=(char*) "1"; + | nchar opt_bin_mod { Lex->length=(char*) "1"; $$=FIELD_TYPE_STRING; Lex->charset=national_charset_info; } 
| BINARY '(' NUM ')' { Lex->length=$3.str; @@ -1413,13 +3158,13 @@ type: Lex->charset=&my_charset_bin; $$=FIELD_TYPE_STRING; } | varchar '(' NUM ')' opt_binary { Lex->length=$3.str; - $$=FIELD_TYPE_VAR_STRING; } - | nvarchar '(' NUM ')' { Lex->length=$3.str; - $$=FIELD_TYPE_VAR_STRING; + $$= MYSQL_TYPE_VARCHAR; } + | nvarchar '(' NUM ')' opt_bin_mod { Lex->length=$3.str; + $$= MYSQL_TYPE_VARCHAR; Lex->charset=national_charset_info; } | VARBINARY '(' NUM ')' { Lex->length=$3.str; Lex->charset=&my_charset_bin; - $$=FIELD_TYPE_VAR_STRING; } + $$= MYSQL_TYPE_VARCHAR; } | YEAR_SYM opt_len field_options { $$=FIELD_TYPE_YEAR; } | DATE_SYM { $$=FIELD_TYPE_DATE; } | TIME_SYM { $$=FIELD_TYPE_TIME; } @@ -1441,18 +3186,18 @@ type: $$=FIELD_TYPE_TINY_BLOB; } | BLOB_SYM opt_len { Lex->charset=&my_charset_bin; $$=FIELD_TYPE_BLOB; } - | spatial_type { + | spatial_type + { #ifdef HAVE_SPATIAL - Lex->charset=&my_charset_bin; - Lex->uint_geom_type= (uint)$1; - $$=FIELD_TYPE_GEOMETRY; + Lex->charset=&my_charset_bin; + Lex->uint_geom_type= (uint)$1; + $$=FIELD_TYPE_GEOMETRY; #else - net_printf(Lex->thd, ER_FEATURE_DISABLED, - sym_group_geom.name, - sym_group_geom.needed_define); - YYABORT; + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); + MYSQL_YYABORT; #endif - } + } | MEDIUMBLOB { Lex->charset=&my_charset_bin; $$=FIELD_TYPE_MEDIUM_BLOB; } | LONGBLOB { Lex->charset=&my_charset_bin; @@ -1465,11 +3210,11 @@ type: | MEDIUMTEXT opt_binary { $$=FIELD_TYPE_MEDIUM_BLOB; } | LONGTEXT opt_binary { $$=FIELD_TYPE_LONG_BLOB; } | DECIMAL_SYM float_options field_options - { $$=FIELD_TYPE_DECIMAL;} + { $$=FIELD_TYPE_NEWDECIMAL;} | NUMERIC_SYM float_options field_options - { $$=FIELD_TYPE_DECIMAL;} + { $$=FIELD_TYPE_NEWDECIMAL;} | FIXED_SYM float_options field_options - { $$=FIELD_TYPE_DECIMAL;} + { $$=FIELD_TYPE_NEWDECIMAL;} | ENUM {Lex->interval_list.empty();} '(' string_list ')' opt_binary { $$=FIELD_TYPE_ENUM; } | SET { Lex->interval_list.empty();} 
'(' string_list ')' opt_binary @@ -1486,7 +3231,9 @@ type: spatial_type: GEOMETRY_SYM { $$= Field::GEOM_GEOMETRY; } | GEOMETRYCOLLECTION { $$= Field::GEOM_GEOMETRYCOLLECTION; } - | POINT_SYM { $$= Field::GEOM_POINT; } + | POINT_SYM { Lex->length= (char*)"21"; + $$= Field::GEOM_POINT; + } | MULTIPOINT { $$= Field::GEOM_MULTIPOINT; } | LINESTRING { $$= Field::GEOM_LINESTRING; } | MULTILINESTRING { $$= Field::GEOM_MULTILINESTRING; } @@ -1531,8 +3278,8 @@ real_type: float_options: - /* empty */ {} - | '(' NUM ')' { Lex->length=$2.str; } + /* empty */ { Lex->dec=Lex->length= (char*)0; } + | '(' NUM ')' { Lex->length=$2.str; Lex->dec= (char*)0; } | precision {}; precision: @@ -1573,7 +3320,7 @@ opt_attribute_list: attribute: NULL_SYM { Lex->type&= ~ NOT_NULL_FLAG; } - | NOT NULL_SYM { Lex->type|= NOT_NULL_FLAG; } + | not NULL_SYM { Lex->type|= NOT_NULL_FLAG; } | DEFAULT now_or_signed_literal { Lex->default_value=$2; } | ON UPDATE_SYM NOW_SYM optional_braces { Lex->on_update_value= new Item_func_now_local(); } @@ -1602,15 +3349,14 @@ attribute: lex->type|= UNIQUE_KEY_FLAG; lex->alter_info.flags|= ALTER_ADD_INDEX; } - | COMMENT_SYM TEXT_STRING_sys { Lex->comment= &$2; } - | BINARY { Lex->type|= BINCMP_FLAG; } + | COMMENT_SYM TEXT_STRING_sys { Lex->comment= $2; } | COLLATE_SYM collation_name { if (Lex->charset && !my_charset_same(Lex->charset,$2)) { - net_printf(YYTHD,ER_COLLATION_CHARSET_MISMATCH, - $2->name,Lex->charset->csname); - YYABORT; + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + $2->name,Lex->charset->csname); + MYSQL_YYABORT; } else { @@ -1634,8 +3380,8 @@ charset_name: { if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0)))) { - net_printf(YYTHD,ER_UNKNOWN_CHARACTER_SET,$1.str); - YYABORT; + my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str); + MYSQL_YYABORT; } } | BINARY { $$= &my_charset_bin; } @@ -1645,6 +3391,10 @@ charset_name_or_default: charset_name { $$=$1; } | DEFAULT { $$=NULL; } ; +opt_load_data_charset: + /* Empty */ { $$= NULL; } + | 
charset charset_name_or_default { $$= $2; } + ; old_or_new_charset_name: ident_or_text @@ -1652,8 +3402,8 @@ old_or_new_charset_name: if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0))) && !($$=get_old_charset_by_name($1.str))) { - net_printf(YYTHD,ER_UNKNOWN_CHARACTER_SET,$1.str); - YYABORT; + my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str); + MYSQL_YYABORT; } } | BINARY { $$= &my_charset_bin; } @@ -1668,8 +3418,8 @@ collation_name: { if (!($$=get_charset_by_name($1.str,MYF(0)))) { - net_printf(YYTHD,ER_UNKNOWN_COLLATION,$1.str); - YYABORT; + my_error(ER_UNKNOWN_COLLATION, MYF(0), $1.str); + MYSQL_YYABORT; } }; @@ -1688,14 +3438,34 @@ opt_default: opt_binary: /* empty */ { Lex->charset=NULL; } - | ASCII_SYM { Lex->charset=&my_charset_latin1; } + | ASCII_SYM opt_bin_mod { Lex->charset=&my_charset_latin1; } | BYTE_SYM { Lex->charset=&my_charset_bin; } + | UNICODE_SYM opt_bin_mod + { + if (!(Lex->charset=get_charset_by_csname("ucs2", + MY_CS_PRIMARY,MYF(0)))) + { + my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), "ucs2"); + MYSQL_YYABORT; + } + } + | charset charset_name opt_bin_mod { Lex->charset=$2; } + | BINARY opt_bin_charset { Lex->type|= BINCMP_FLAG; }; + +opt_bin_mod: + /* empty */ { } + | BINARY { Lex->type|= BINCMP_FLAG; }; + +opt_bin_charset: + /* empty */ { Lex->charset= NULL; } + | ASCII_SYM { Lex->charset=&my_charset_latin1; } | UNICODE_SYM { - if (!(Lex->charset=get_charset_by_csname("ucs2",MY_CS_PRIMARY,MYF(0)))) + if (!(Lex->charset=get_charset_by_csname("ucs2", + MY_CS_PRIMARY,MYF(0)))) { - net_printf(YYTHD,ER_UNKNOWN_CHARACTER_SET,"ucs2"); - YYABORT; + my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), "ucs2"); + MYSQL_YYABORT; } } | charset charset_name { Lex->charset=$2; } ; @@ -1756,9 +3526,9 @@ key_type: #ifdef HAVE_SPATIAL $$= Key::SPATIAL; #else - net_printf(Lex->thd, ER_FEATURE_DISABLED, - sym_group_geom.name, sym_group_geom.needed_define); - YYABORT; + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); 
+ MYSQL_YYABORT; #endif }; @@ -1789,9 +3559,9 @@ opt_unique_or_fulltext: #ifdef HAVE_SPATIAL $$= Key::SPATIAL; #else - net_printf(Lex->thd, ER_FEATURE_DISABLED, - sym_group_geom.name, sym_group_geom.needed_define); - YYABORT; + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); + MYSQL_YYABORT; #endif } ; @@ -1818,14 +3588,10 @@ key_part: | ident '(' NUM ')' { int key_part_len= atoi($3.str); -#if MYSQL_VERSION_ID < 50000 if (!key_part_len) { - my_printf_error(ER_UNKNOWN_ERROR, - "Key part '%s' length cannot be 0", - MYF(0), $1.str); + my_error(ER_KEY_PART_0, MYF(0), $1.str); } -#endif $$=new key_part_spec($1.str,(uint) key_part_len); }; @@ -1834,8 +3600,8 @@ opt_ident: | field_ident { $$=$1.str; }; opt_component: - /* empty */ { $$.str= 0; $$.length= 0; } - | '.' ident { $$=$2; }; + /* empty */ { $$= null_lex_str; } + | '.' ident { $$= $2; }; string_list: text_string { Lex->interval_list.push_back($1); } @@ -1855,10 +3621,12 @@ alter: lex->duplicates= DUP_ERROR; if (!lex->select_lex.add_table_to_list(thd, $4, NULL, TL_OPTION_UPDATING)) - YYABORT; + MYSQL_YYABORT; lex->col_list.empty(); lex->select_lex.init_order(); - lex->select_lex.db=lex->name=0; + lex->select_lex.db= + ((TABLE_LIST*) lex->select_lex.table_list.first)->db; + lex->name=0; bzero((char*) &lex->create_info,sizeof(lex->create_info)); lex->create_info.db_type= DB_TYPE_DEFAULT; lex->create_info.default_table_charset= NULL; @@ -1875,16 +3643,66 @@ alter: opt_create_database_options { LEX *lex=Lex; + THD *thd= Lex->thd; lex->sql_command=SQLCOM_ALTER_DB; lex->name= $3; - }; + if (lex->name == NULL && thd->copy_db_to(&lex->name, NULL)) + MYSQL_YYABORT; + } + | ALTER PROCEDURE sp_name + { + LEX *lex= Lex; + + if (lex->sphead) + { + my_error(ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE"); + MYSQL_YYABORT; + } + bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); + } + sp_a_chistics + { + LEX *lex=Lex; + + lex->sql_command= SQLCOM_ALTER_PROCEDURE; + lex->spname= $3; + } + 
| ALTER FUNCTION_SYM sp_name + { + LEX *lex= Lex; + + if (lex->sphead) + { + my_error(ER_SP_NO_DROP_SP, MYF(0), "FUNCTION"); + MYSQL_YYABORT; + } + bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); + } + sp_a_chistics + { + LEX *lex=Lex; + lex->sql_command= SQLCOM_ALTER_FUNCTION; + lex->spname= $3; + } + | ALTER view_algorithm_opt definer view_suid + VIEW_SYM table_ident + { + THD *thd= YYTHD; + LEX *lex= thd->lex; + lex->sql_command= SQLCOM_CREATE_VIEW; + lex->create_view_mode= VIEW_ALTER; + /* first table in list is target VIEW name */ + lex->select_lex.add_table_to_list(thd, $6, NULL, TL_OPTION_UPDATING); + } + view_list_opt AS view_select view_check_option + {} + ; ident_or_empty: /* empty */ { $$= 0; } | ident { $$= $1.str; }; - alter_list: | DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; } | IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; } @@ -1892,27 +3710,27 @@ alter_list: | alter_list ',' alter_list_item; add_column: - ADD opt_column + ADD opt_column { LEX *lex=Lex; - lex->change=0; - lex->alter_info.flags|= ALTER_ADD_COLUMN; + lex->change=0; + lex->alter_info.flags|= ALTER_ADD_COLUMN; }; alter_list_item: - add_column column_def opt_place { Lex->alter_info.is_simple= 0; } - | ADD key_def - { - LEX *lex=Lex; - lex->alter_info.is_simple= 0; - lex->alter_info.flags|= ALTER_ADD_INDEX; + add_column column_def opt_place { } + | ADD key_def + { + Lex->alter_info.flags|= ALTER_ADD_INDEX; } - | add_column '(' field_list ')' { Lex->alter_info.is_simple= 0; } + | add_column '(' field_list ')' + { + Lex->alter_info.flags|= ALTER_ADD_COLUMN | ALTER_ADD_INDEX; + } | CHANGE opt_column field_ident { LEX *lex=Lex; - lex->change= $3.str; - lex->alter_info.is_simple= 0; + lex->change= $3.str; lex->alter_info.flags|= ALTER_CHANGE_COLUMN; } field_spec opt_place @@ -1921,9 +3739,8 @@ alter_list_item: LEX *lex=Lex; lex->length=lex->dec=0; lex->type=0; lex->default_value= lex->on_update_value= 0; - lex->comment=0; + 
lex->comment=null_lex_str; lex->charset= NULL; - lex->alter_info.is_simple= 0; lex->alter_info.flags|= ALTER_CHANGE_COLUMN; } type opt_attribute @@ -1933,27 +3750,28 @@ alter_list_item: (enum enum_field_types) $5, lex->length,lex->dec,lex->type, lex->default_value, lex->on_update_value, - lex->comment, + &lex->comment, $3.str, &lex->interval_list, lex->charset, lex->uint_geom_type)) - YYABORT; + MYSQL_YYABORT; } opt_place | DROP opt_column field_ident opt_restrict { LEX *lex=Lex; lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::COLUMN, - $3.str)); - lex->alter_info.is_simple= 0; + $3.str)); lex->alter_info.flags|= ALTER_DROP_COLUMN; } - | DROP FOREIGN KEY_SYM opt_ident { Lex->alter_info.is_simple= 0; } + | DROP FOREIGN KEY_SYM opt_ident + { + Lex->alter_info.flags|= ALTER_DROP_INDEX; + } | DROP PRIMARY_SYM KEY_SYM { LEX *lex=Lex; lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY, primary_key_name)); - lex->alter_info.is_simple= 0; lex->alter_info.flags|= ALTER_DROP_INDEX; } | DROP key_or_index field_ident @@ -1961,37 +3779,50 @@ alter_list_item: LEX *lex=Lex; lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY, $3.str)); - lex->alter_info.is_simple= 0; lex->alter_info.flags|= ALTER_DROP_INDEX; } - | DISABLE_SYM KEYS { Lex->alter_info.keys_onoff= DISABLE; } - | ENABLE_SYM KEYS { Lex->alter_info.keys_onoff= ENABLE; } + | DISABLE_SYM KEYS + { + LEX *lex=Lex; + lex->alter_info.keys_onoff= DISABLE; + lex->alter_info.flags|= ALTER_KEYS_ONOFF; + } + | ENABLE_SYM KEYS + { + LEX *lex=Lex; + lex->alter_info.keys_onoff= ENABLE; + lex->alter_info.flags|= ALTER_KEYS_ONOFF; + } | ALTER opt_column field_ident SET DEFAULT signed_literal { LEX *lex=Lex; lex->alter_info.alter_list.push_back(new Alter_column($3.str,$6)); - lex->alter_info.is_simple= 0; - lex->alter_info.flags|= ALTER_CHANGE_COLUMN; + lex->alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT; } | ALTER opt_column field_ident DROP DEFAULT { LEX *lex=Lex; 
lex->alter_info.alter_list.push_back(new Alter_column($3.str, (Item*) 0)); - lex->alter_info.is_simple= 0; - lex->alter_info.flags|= ALTER_CHANGE_COLUMN; + lex->alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT; } | RENAME opt_to table_ident { LEX *lex=Lex; + THD *thd= lex->thd; lex->select_lex.db=$3->db.str; - lex->name= $3->table.str; + if (lex->select_lex.db == NULL && + thd->copy_db_to(&lex->select_lex.db, NULL)) + { + MYSQL_YYABORT; + } if (check_table_name($3->table.str,$3->table.length) || $3->db.str && check_db_name($3->db.str)) { - net_printf(lex->thd,ER_WRONG_TABLE_NAME,$3->table.str); - YYABORT; + my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str); + MYSQL_YYABORT; } + lex->name= $3->table.str; lex->alter_info.flags|= ALTER_RENAME; } | CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate @@ -2004,27 +3835,29 @@ alter_list_item: $5= $5 ? $5 : $4; if (!my_charset_same($4,$5)) { - net_printf(YYTHD,ER_COLLATION_CHARSET_MISMATCH, - $5->name,$4->csname); - YYABORT; + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + $5->name, $4->csname); + MYSQL_YYABORT; } LEX *lex= Lex; - lex->create_info.table_charset= + lex->create_info.table_charset= lex->create_info.default_table_charset= $5; lex->create_info.used_fields|= (HA_CREATE_USED_CHARSET | HA_CREATE_USED_DEFAULT_CHARSET); - lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_CONVERT; } - | create_table_options_space_separated + | create_table_options_space_separated { LEX *lex=Lex; - lex->alter_info.is_simple= 0; lex->alter_info.flags|= ALTER_OPTIONS; } + | FORCE_SYM + { + Lex->alter_info.flags|= ALTER_FORCE; + } | alter_order_clause { LEX *lex=Lex; - lex->alter_info.is_simple= 0; lex->alter_info.flags|= ALTER_ORDER; }; @@ -2038,9 +3871,10 @@ opt_ignore: ; opt_restrict: - /* empty */ {} - | RESTRICT {} - | CASCADE {}; + /* empty */ { Lex->drop_mode= DROP_DEFAULT; } + | RESTRICT { Lex->drop_mode= DROP_RESTRICT; } + | CASCADE { Lex->drop_mode= DROP_CASCADE; } + ; opt_place: /* empty */ {} @@ 
-2058,7 +3892,7 @@ opt_to: */ slave: - START_SYM SLAVE slave_thread_opts + START_SYM SLAVE slave_thread_opts { LEX *lex=Lex; lex->sql_command = SQLCOM_SLAVE_START; @@ -2098,8 +3932,9 @@ slave: start: START_SYM TRANSACTION_SYM start_transaction_opts { - Lex->sql_command = SQLCOM_BEGIN; - Lex->start_transaction_opt= $3; + LEX *lex= Lex; + lex->sql_command= SQLCOM_BEGIN; + lex->start_transaction_opt= $3; } ; @@ -2138,8 +3973,9 @@ slave_until: !((lex->mi.log_file_name && lex->mi.pos) || (lex->mi.relay_log_name && lex->mi.relay_log_pos))) { - send_error(lex->thd, ER_BAD_SLAVE_UNTIL_COND); - YYABORT; + my_message(ER_BAD_SLAVE_UNTIL_COND, + ER(ER_BAD_SLAVE_UNTIL_COND), MYF(0)); + MYSQL_YYABORT; } } @@ -2226,9 +4062,15 @@ analyze: check: CHECK_SYM table_or_tables { - LEX *lex=Lex; - lex->sql_command = SQLCOM_CHECK; - lex->check_opt.init(); + LEX *lex=Lex; + + if (lex->sphead) + { + my_error(ER_SP_BADSTATEMENT, MYF(0), "CHECK"); + MYSQL_YYABORT; + } + lex->sql_command = SQLCOM_CHECK; + lex->check_opt.init(); } table_list opt_mi_check_type {} @@ -2247,7 +4089,8 @@ mi_check_type: | FAST_SYM { Lex->check_opt.flags|= T_FAST; } | MEDIUM_SYM { Lex->check_opt.flags|= T_MEDIUM; } | EXTENDED_SYM { Lex->check_opt.flags|= T_EXTEND; } - | CHANGED { Lex->check_opt.flags|= T_CHECK_ONLY_CHANGED; }; + | CHANGED { Lex->check_opt.flags|= T_CHECK_ONLY_CHANGED; } + | FOR_SYM UPGRADE_SYM { Lex->check_opt.sql_flags|= TT_FOR_UPGRADE; }; optimize: OPTIMIZE opt_no_write_to_binlog table_or_tables @@ -2270,12 +4113,29 @@ opt_no_write_to_binlog: rename: RENAME table_or_tables { - Lex->sql_command=SQLCOM_RENAME_TABLE; + Lex->sql_command= SQLCOM_RENAME_TABLE; } table_to_table_list {} + | RENAME USER clear_privileges rename_list + { + Lex->sql_command = SQLCOM_RENAME_USER; + } ; +rename_list: + user TO_SYM user + { + if (Lex->users_list.push_back($1) || Lex->users_list.push_back($3)) + MYSQL_YYABORT; + } + | rename_list ',' user TO_SYM user + { + if (Lex->users_list.push_back($3) || 
Lex->users_list.push_back($5)) + MYSQL_YYABORT; + } + ; + table_to_table_list: table_to_table | table_to_table_list ',' table_to_table; @@ -2289,7 +4149,7 @@ table_to_table: TL_IGNORE) || !sl->add_table_to_list(lex->thd, $3,NULL,TL_OPTION_UPDATING, TL_IGNORE)) - YYABORT; + MYSQL_YYABORT; }; keycache: @@ -2297,7 +4157,7 @@ keycache: { LEX *lex=Lex; lex->sql_command= SQLCOM_ASSIGN_TO_KEYCACHE; - lex->name_and_length= $5; + lex->ident= $5; } ; @@ -2314,7 +4174,7 @@ assign_to_keycache: TL_READ, sel->get_use_index(), (List<String> *)0)) - YYABORT; + MYSQL_YYABORT; } ; @@ -2346,7 +4206,7 @@ preload_keys: TL_READ, sel->get_use_index(), (List<String> *)0)) - YYABORT; + MYSQL_YYABORT; } ; @@ -2385,7 +4245,6 @@ select: { LEX *lex= Lex; lex->sql_command= SQLCOM_SELECT; - lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; } ; @@ -2402,15 +4261,17 @@ select_paren: SELECT_LEX * sel= lex->current_select; if (sel->set_braces(1)) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } - if (sel->linkage == UNION_TYPE && - !sel->master_unit()->first_select()->braces) - { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; - } + if (sel->linkage == UNION_TYPE && + !sel->master_unit()->first_select()->braces && + sel->master_unit()->first_select()->linkage == + UNION_TYPE) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } /* select in braces, can't contain global parameters */ if (sel->master_unit()->fake_select_lex) sel->master_unit()->global_parameters= @@ -2425,14 +4286,14 @@ select_init2: SELECT_LEX * sel= lex->current_select; if (lex->current_select->set_braces(0)) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } if (sel->linkage == UNION_TYPE && sel->master_unit()->first_select()->braces) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } } union_clause @@ -2460,8 +4321,13 @@ select_into: | select_from into; 
select_from: - FROM join_table_list where_clause group_clause having_clause + FROM join_table_list where_clause group_clause having_clause opt_order_clause opt_limit_clause procedure_clause + { + Select->context.table_list= + Select->context.first_name_resolution_table= + (TABLE_LIST *) Select->table_list.first; + } | FROM DUAL_SYM where_clause opt_limit_clause /* oracle compatibility: oracle always requires FROM clause, and DUAL is system table without fields. @@ -2475,11 +4341,11 @@ select_options: { if (test_all_bits(Select->options, SELECT_ALL | SELECT_DISTINCT)) { - net_printf(Lex->thd, ER_WRONG_USAGE, "ALL", "DISTINCT"); - YYABORT; + my_error(ER_WRONG_USAGE, MYF(0), "ALL", "DISTINCT"); + MYSQL_YYABORT; } } - ; + ; select_option_list: select_option_list select_option @@ -2490,7 +4356,7 @@ select_option: | HIGH_PRIORITY { if (check_simple_select()) - YYABORT; + MYSQL_YYABORT; Lex->lock_option= TL_READ_HIGH_PRIORITY; } | DISTINCT { Select->options|= SELECT_DISTINCT; } @@ -2499,19 +4365,30 @@ select_option: | SQL_BUFFER_RESULT { if (check_simple_select()) - YYABORT; + MYSQL_YYABORT; Select->options|= OPTION_BUFFER_RESULT; } | SQL_CALC_FOUND_ROWS { if (check_simple_select()) - YYABORT; + MYSQL_YYABORT; Select->options|= OPTION_FOUND_ROWS; } - | SQL_NO_CACHE_SYM { Lex->safe_to_cache_query=0; } + | SQL_NO_CACHE_SYM + { + Lex->safe_to_cache_query=0; + Lex->select_lex.options&= ~OPTION_TO_QUERY_CACHE; + Lex->select_lex.sql_cache= SELECT_LEX::SQL_NO_CACHE; + } | SQL_CACHE_SYM { - Lex->select_lex.options|= OPTION_TO_QUERY_CACHE; + /* Honor this flag only if SQL_NO_CACHE wasn't specified. 
*/ + if (Lex->select_lex.sql_cache != SELECT_LEX::SQL_NO_CACHE) + { + Lex->safe_to_cache_query=1; + Lex->select_lex.options|= OPTION_TO_QUERY_CACHE; + Lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE; + } } | ALL { Select->options|= SELECT_ALL; } ; @@ -2539,8 +4416,11 @@ select_item_list: | '*' { THD *thd= YYTHD; - if (add_item_to_list(thd, new Item_field(NULL, NULL, "*"))) - YYABORT; + if (add_item_to_list(thd, + new Item_field(&thd->lex->current_select-> + context, + NULL, NULL, "*"))) + MYSQL_YYABORT; (thd->lex->current_select->with_wild)++; }; @@ -2549,11 +4429,18 @@ select_item: remember_name select_item2 remember_end select_alias { if (add_item_to_list(YYTHD, $2)) - YYABORT; + MYSQL_YYABORT; if ($4.str) - $2->set_name($4.str,$4.length,system_charset_info); - else if (!$2->name) - $2->set_name($1,(uint) ($3 - $1), YYTHD->charset()); + { + $2->is_autogenerated_name= FALSE; + $2->set_name($4.str, $4.length, system_charset_info); + } + else if (!$2->name) { + char *str = $1; + if (str[-1] == '`') + str--; + $2->set_name(str,(uint) ($3 - str), YYTHD->charset()); + } }; remember_name: @@ -2567,7 +4454,7 @@ select_item2: | expr { $$=$1; }; select_alias: - /* empty */ { $$.str=0;} + /* empty */ { $$=null_lex_str;} | AS ident { $$=$2; } | AS TEXT_STRING_sys { $$=$2; } | ident { $$=$1; } @@ -2580,206 +4467,187 @@ optional_braces: /* all possible expressions */ expr: - expr_expr { $$= $1; } - | simple_expr { $$= $1; } - ; - -comp_op: EQ { $$ = &comp_eq_creator; } - | GE { $$ = &comp_ge_creator; } - | GT_SYM { $$ = &comp_gt_creator; } - | LE { $$ = &comp_le_creator; } - | LT { $$ = &comp_lt_creator; } - | NE { $$ = &comp_ne_creator; } + bool_term { Select->expr_list.push_front(new List<Item>); } + bool_or_expr + { + List<Item> *list= Select->expr_list.pop(); + if (list->elements) + { + list->push_front($1); + $$= new Item_cond_or(*list); + /* optimize construction of logical OR to reduce + amount of objects for complex expressions */ + } + else + $$= $1; + delete list; + 
} ; -all_or_any: ALL { $$ = 1; } - | ANY_SYM { $$ = 0; } +bool_or_expr: + /* empty */ + | bool_or_expr or bool_term + { Select->expr_list.head()->push_back($3); } ; -/* expressions that begin with 'expr' */ -expr_expr: - expr IN_SYM '(' expr_list ')' - { $4->push_front($1); $$= new Item_func_in(*$4); } - | expr NOT IN_SYM '(' expr_list ')' - { - $5->push_front($1); - Item_func_in *item= new Item_func_in(*$5); - item->negate(); - $$= item; - } - | expr IN_SYM in_subselect - { $$= new Item_in_subselect($1, $3); } - | expr NOT IN_SYM in_subselect +bool_term: + bool_term XOR bool_term { $$= new Item_cond_xor($1,$3); } + | bool_factor { Select->expr_list.push_front(new List<Item>); } + bool_and_expr { - $$= new Item_func_not(new Item_in_subselect($1, $4)); + List<Item> *list= Select->expr_list.pop(); + if (list->elements) + { + list->push_front($1); + $$= new Item_cond_and(*list); + /* optimize construction of logical AND to reduce + amount of objects for complex expressions */ + } + else + $$= $1; + delete list; } - | expr BETWEEN_SYM no_and_expr AND_SYM expr - { $$= new Item_func_between($1,$3,$5); } - | expr NOT BETWEEN_SYM no_and_expr AND_SYM expr - { - Item_func_between *item= new Item_func_between($1,$4,$6); - item->negate(); - $$= item; - } - | expr OR_OR_CONCAT expr { $$= or_or_concat(YYTHD, $1,$3); } - | expr OR_SYM expr { $$= new Item_cond_or($1,$3); } - | expr XOR expr { $$= new Item_cond_xor($1,$3); } - | expr AND_SYM expr { $$= new Item_cond_and($1,$3); } - | expr SOUNDS_SYM LIKE expr - { - $$= new Item_func_eq(new Item_func_soundex($1), - new Item_func_soundex($4)); - } - | expr LIKE simple_expr opt_escape - { $$= new Item_func_like($1,$3,$4); } - | expr NOT LIKE simple_expr opt_escape - { $$= new Item_func_not(new Item_func_like($1,$4,$5));} - | expr REGEXP expr { $$= new Item_func_regex($1,$3); } - | expr NOT REGEXP expr - { $$= new Item_func_not(new Item_func_regex($1,$4)); } - | expr IS NULL_SYM { $$= new Item_func_isnull($1); } - | expr IS NOT 
NULL_SYM { $$= new Item_func_isnotnull($1); } - | expr EQUAL_SYM expr { $$= new Item_func_equal($1,$3); } - | expr comp_op expr %prec EQ { $$= (*$2)(0)->create($1,$3); } - | expr comp_op all_or_any in_subselect %prec EQ - { - $$= all_any_subquery_creator($1, $2, $3, $4); - } - | expr SHIFT_LEFT expr { $$= new Item_func_shift_left($1,$3); } - | expr SHIFT_RIGHT expr { $$= new Item_func_shift_right($1,$3); } - | expr '+' expr { $$= new Item_func_plus($1,$3); } - | expr '-' expr { $$= new Item_func_minus($1,$3); } - | expr '*' expr { $$= new Item_func_mul($1,$3); } - | expr '/' expr { $$= new Item_func_div($1,$3); } - | expr DIV_SYM expr { $$= new Item_func_int_div($1,$3); } - | expr MOD_SYM expr { $$= new Item_func_mod($1,$3); } - | expr '|' expr { $$= new Item_func_bit_or($1,$3); } - | expr '^' expr { $$= new Item_func_bit_xor($1,$3); } - | expr '&' expr { $$= new Item_func_bit_and($1,$3); } - | expr '%' expr { $$= new Item_func_mod($1,$3); } - | expr '+' interval_expr interval - { $$= new Item_date_add_interval($1,$3,$4,0); } - | expr '-' interval_expr interval - { $$= new Item_date_add_interval($1,$3,$4,1); } ; -/* expressions that begin with 'expr' that do NOT follow IN_SYM */ -no_in_expr: - no_in_expr BETWEEN_SYM no_and_expr AND_SYM expr - { $$= new Item_func_between($1,$3,$5); } - | no_in_expr NOT BETWEEN_SYM no_and_expr AND_SYM expr - { - Item_func_between *item= new Item_func_between($1,$4,$6); - item->negate(); - $$= item; - } - | no_in_expr OR_OR_CONCAT expr { $$= or_or_concat(YYTHD, $1,$3); } - | no_in_expr OR_SYM expr { $$= new Item_cond_or($1,$3); } - | no_in_expr XOR expr { $$= new Item_cond_xor($1,$3); } - | no_in_expr AND_SYM expr { $$= new Item_cond_and($1,$3); } - | no_in_expr SOUNDS_SYM LIKE expr - { - $$= new Item_func_eq(new Item_func_soundex($1), - new Item_func_soundex($4)); - } - | no_in_expr LIKE simple_expr opt_escape - { $$= new Item_func_like($1,$3,$4); } - | no_in_expr NOT LIKE simple_expr opt_escape - { $$= new Item_func_not(new 
Item_func_like($1,$4,$5)); } - | no_in_expr REGEXP expr { $$= new Item_func_regex($1,$3); } - | no_in_expr NOT REGEXP expr - { $$= new Item_func_not(new Item_func_regex($1,$4)); } - | no_in_expr IS NULL_SYM { $$= new Item_func_isnull($1); } - | no_in_expr IS NOT NULL_SYM { $$= new Item_func_isnotnull($1); } - | no_in_expr EQUAL_SYM expr { $$= new Item_func_equal($1,$3); } - | no_in_expr comp_op expr %prec EQ { $$= (*$2)(0)->create($1,$3); } - | no_in_expr comp_op all_or_any in_subselect %prec EQ - { - all_any_subquery_creator($1, $2, $3, $4); - } - | no_in_expr SHIFT_LEFT expr { $$= new Item_func_shift_left($1,$3); } - | no_in_expr SHIFT_RIGHT expr { $$= new Item_func_shift_right($1,$3); } - | no_in_expr '+' expr { $$= new Item_func_plus($1,$3); } - | no_in_expr '-' expr { $$= new Item_func_minus($1,$3); } - | no_in_expr '*' expr { $$= new Item_func_mul($1,$3); } - | no_in_expr '/' expr { $$= new Item_func_div($1,$3); } - | no_in_expr DIV_SYM expr { $$= new Item_func_int_div($1,$3); } - | no_in_expr '|' expr { $$= new Item_func_bit_or($1,$3); } - | no_in_expr '^' expr { $$= new Item_func_bit_xor($1,$3); } - | no_in_expr '&' expr { $$= new Item_func_bit_and($1,$3); } - | no_in_expr '%' expr { $$= new Item_func_mod($1,$3); } - | no_in_expr MOD_SYM expr { $$= new Item_func_mod($1,$3); } - | no_in_expr '+' interval_expr interval - { $$= new Item_date_add_interval($1,$3,$4,0); } - | no_in_expr '-' interval_expr interval - { $$= new Item_date_add_interval($1,$3,$4,1); } - | simple_expr; +bool_and_expr: + /* empty */ + | bool_and_expr and bool_factor + { Select->expr_list.head()->push_back($3); } + ; -/* expressions that begin with 'expr' that does NOT follow AND */ -no_and_expr: - no_and_expr IN_SYM '(' expr_list ')' - { $4->push_front($1); $$= new Item_func_in(*$4); } - | no_and_expr NOT IN_SYM '(' expr_list ')' - { - $5->push_front($1); - Item_func_in *item= new Item_func_in(*$5); - item->negate(); - $$= item; - } - | no_and_expr IN_SYM in_subselect - { $$= new 
Item_in_subselect($1, $3); } - | no_and_expr NOT IN_SYM in_subselect +bool_factor: + NOT_SYM bool_factor { $$= negate_expression(YYTHD, $2); } + | bool_test ; + +bool_test: + bool_pri IS TRUE_SYM + { $$= new (YYTHD->mem_root) Item_func_istrue($1); } + | bool_pri IS not TRUE_SYM + { $$= new (YYTHD->mem_root) Item_func_isnottrue($1); } + | bool_pri IS FALSE_SYM + { $$= new (YYTHD->mem_root) Item_func_isfalse($1); } + | bool_pri IS not FALSE_SYM + { $$= new (YYTHD->mem_root) Item_func_isnotfalse($1); } + | bool_pri IS UNKNOWN_SYM { $$= new Item_func_isnull($1); } + | bool_pri IS not UNKNOWN_SYM { $$= new Item_func_isnotnull($1); } + | bool_pri + ; + +bool_pri: + bool_pri IS NULL_SYM { $$= new Item_func_isnull($1); } + | bool_pri IS not NULL_SYM { $$= new Item_func_isnotnull($1); } + | bool_pri EQUAL_SYM predicate { $$= new Item_func_equal($1,$3); } + | bool_pri comp_op predicate %prec EQ + { $$= (*$2)(0)->create($1,$3); } + | bool_pri comp_op all_or_any '(' subselect ')' %prec EQ + { $$= all_any_subquery_creator($1, $2, $3, $5); } + | predicate ; + +predicate: + bit_expr IN_SYM '(' subselect ')' + { + $$= new (YYTHD->mem_root) Item_in_subselect($1, $4); + } + | bit_expr not IN_SYM '(' subselect ')' + { + THD *thd= YYTHD; + Item *item= new (thd->mem_root) Item_in_subselect($1, $5); + $$= negate_expression(thd, item); + } + | bit_expr IN_SYM '(' expr ')' + { + $$= handle_sql2003_note184_exception(YYTHD, $1, true, $4); + } + | bit_expr IN_SYM '(' expr ',' expr_list ')' + { + $6->push_front($4); + $6->push_front($1); + $$= new (YYTHD->mem_root) Item_func_in(*$6); + } + | bit_expr not IN_SYM '(' expr ')' { - $$= new Item_func_not(new Item_in_subselect($1, $4)); + $$= handle_sql2003_note184_exception(YYTHD, $1, false, $5); } - | no_and_expr BETWEEN_SYM no_and_expr AND_SYM expr + | bit_expr not IN_SYM '(' expr ',' expr_list ')' + { + $7->push_front($5); + $7->push_front($1); + Item_func_in *item = new (YYTHD->mem_root) Item_func_in(*$7); + item->negate(); + $$= item; + } + | 
bit_expr BETWEEN_SYM bit_expr AND_SYM predicate { $$= new Item_func_between($1,$3,$5); } - | no_and_expr NOT BETWEEN_SYM no_and_expr AND_SYM expr + | bit_expr not BETWEEN_SYM bit_expr AND_SYM predicate { Item_func_between *item= new Item_func_between($1,$4,$6); item->negate(); $$= item; } - | no_and_expr OR_OR_CONCAT expr { $$= or_or_concat(YYTHD, $1,$3); } - | no_and_expr OR_SYM expr { $$= new Item_cond_or($1,$3); } - | no_and_expr XOR expr { $$= new Item_cond_xor($1,$3); } - | no_and_expr SOUNDS_SYM LIKE expr - { - $$= new Item_func_eq(new Item_func_soundex($1), - new Item_func_soundex($4)); - } - | no_and_expr LIKE simple_expr opt_escape - { $$= new Item_func_like($1,$3,$4); } - | no_and_expr NOT LIKE simple_expr opt_escape - { $$= new Item_func_not(new Item_func_like($1,$4,$5)); } - | no_and_expr REGEXP expr { $$= new Item_func_regex($1,$3); } - | no_and_expr NOT REGEXP expr - { $$= new Item_func_not(new Item_func_regex($1,$4)); } - | no_and_expr IS NULL_SYM { $$= new Item_func_isnull($1); } - | no_and_expr IS NOT NULL_SYM { $$= new Item_func_isnotnull($1); } - | no_and_expr EQUAL_SYM expr { $$= new Item_func_equal($1,$3); } - | no_and_expr comp_op expr %prec EQ { $$= (*$2)(0)->create($1,$3); } - | no_and_expr comp_op all_or_any in_subselect %prec EQ - { - all_any_subquery_creator($1, $2, $3, $4); - } - | no_and_expr SHIFT_LEFT expr { $$= new Item_func_shift_left($1,$3); } - | no_and_expr SHIFT_RIGHT expr { $$= new Item_func_shift_right($1,$3); } - | no_and_expr '+' expr { $$= new Item_func_plus($1,$3); } - | no_and_expr '-' expr { $$= new Item_func_minus($1,$3); } - | no_and_expr '*' expr { $$= new Item_func_mul($1,$3); } - | no_and_expr '/' expr { $$= new Item_func_div($1,$3); } - | no_and_expr DIV_SYM expr { $$= new Item_func_int_div($1,$3); } - | no_and_expr '|' expr { $$= new Item_func_bit_or($1,$3); } - | no_and_expr '^' expr { $$= new Item_func_bit_xor($1,$3); } - | no_and_expr '&' expr { $$= new Item_func_bit_and($1,$3); } - | no_and_expr '%' expr { $$= 
new Item_func_mod($1,$3); } - | no_and_expr MOD_SYM expr { $$= new Item_func_mod($1,$3); } - | no_and_expr '+' interval_expr interval + | bit_expr SOUNDS_SYM LIKE bit_expr + { $$= new Item_func_eq(new Item_func_soundex($1), + new Item_func_soundex($4)); } + | bit_expr LIKE simple_expr opt_escape + { $$= new Item_func_like($1,$3,$4,Lex->escape_used); } + | bit_expr not LIKE simple_expr opt_escape + { $$= new Item_func_not(new Item_func_like($1,$4,$5, Lex->escape_used)); } + | bit_expr REGEXP bit_expr { $$= new Item_func_regex($1,$3); } + | bit_expr not REGEXP bit_expr + { $$= negate_expression(YYTHD, new Item_func_regex($1,$4)); } + | bit_expr ; + +bit_expr: + bit_expr '|' bit_term { $$= new Item_func_bit_or($1,$3); } + | bit_term ; + +bit_term: + bit_term '&' bit_factor { $$= new Item_func_bit_and($1,$3); } + | bit_factor ; + +bit_factor: + bit_factor SHIFT_LEFT value_expr + { $$= new Item_func_shift_left($1,$3); } + | bit_factor SHIFT_RIGHT value_expr + { $$= new Item_func_shift_right($1,$3); } + | value_expr ; + +value_expr: + value_expr '+' term { $$= new Item_func_plus($1,$3); } + | value_expr '-' term { $$= new Item_func_minus($1,$3); } + | value_expr '+' interval_expr interval { $$= new Item_date_add_interval($1,$3,$4,0); } - | no_and_expr '-' interval_expr interval + | value_expr '-' interval_expr interval { $$= new Item_date_add_interval($1,$3,$4,1); } - | simple_expr; + | term ; + +term: + term '*' factor { $$= new Item_func_mul($1,$3); } + | term '/' factor { $$= new Item_func_div($1,$3); } + | term '%' factor { $$= new Item_func_mod($1,$3); } + | term DIV_SYM factor { $$= new Item_func_int_div($1,$3); } + | term MOD_SYM factor { $$= new Item_func_mod($1,$3); } + | factor ; + +factor: + factor '^' simple_expr { $$= new Item_func_bit_xor($1,$3); } + | simple_expr ; + +or: OR_SYM | OR2_SYM; +and: AND_SYM | AND_AND_SYM; +not: NOT_SYM | NOT2_SYM; +not2: '!' 
| NOT2_SYM; + +comp_op: EQ { $$ = &comp_eq_creator; } + | GE { $$ = &comp_ge_creator; } + | GT_SYM { $$ = &comp_gt_creator; } + | LE { $$ = &comp_le_creator; } + | LT { $$ = &comp_lt_creator; } + | NE { $$ = &comp_ne_creator; } + ; + +all_or_any: ALL { $$ = 1; } + | ANY_SYM { $$ = 0; } + ; interval_expr: INTERVAL_SYM expr { $$=$2; } @@ -2796,38 +4664,17 @@ simple_expr: } | literal | param_marker - | '@' ident_or_text SET_VAR expr - { - $$= new Item_func_set_user_var($2,$4); - Lex->uncacheable(UNCACHEABLE_RAND); - } - | '@' ident_or_text - { - $$= new Item_func_get_user_var($2); - Lex->uncacheable(UNCACHEABLE_RAND); - } - | '@' '@' opt_var_ident_type ident_or_text opt_component - { - - if ($4.str && $5.str && check_reserved_words(&$4)) - { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; - } - if (!($$= get_system_var(YYTHD, (enum_var_type) $3, $4, $5))) - YYABORT; - } + | variable | sum_expr - | '+' expr %prec NEG { $$= $2; } - | '-' expr %prec NEG { $$= new Item_func_neg($2); } - | '~' expr %prec NEG { $$= new Item_func_bit_neg($2); } - | NOT expr %prec NEG - { - $$= negate_expression(YYTHD, $2); - } - | '!' 
expr %prec NEG - { - $$= negate_expression(YYTHD, $2); + | simple_expr OR_OR_SYM simple_expr + { $$= new Item_func_concat($1, $3); } + | '+' simple_expr %prec NEG { $$= $2; } + | '-' simple_expr %prec NEG { $$= new Item_func_neg($2); } + | '~' simple_expr %prec NEG { $$= new Item_func_bit_neg($2); } + | not2 simple_expr %prec NEG { $$= negate_expression(YYTHD, $2); } + | '(' subselect ')' + { + $$= new Item_singlerow_subselect($2); } | '(' expr ')' { $$= $2; } | '(' expr ',' expr_list ')' @@ -2840,46 +4687,64 @@ simple_expr: $5->push_front($3); $$= new Item_row(*$5); } - | EXISTS exists_subselect { $$= $2; } - | singlerow_subselect { $$= $1; } + | EXISTS '(' subselect ')' + { + $$= new Item_exists_subselect($3); + } | '{' ident expr '}' { $$= $3; } - | MATCH ident_list_arg AGAINST '(' expr fulltext_options ')' + | MATCH ident_list_arg AGAINST '(' bit_expr fulltext_options ')' { $2->push_front($5); Select->add_ftfunc_to_list((Item_func_match*) ($$=new Item_func_match(*$2,$6))); } | ASCII_SYM '(' expr ')' { $$= new Item_func_ascii($3); } - | BINARY expr %prec NEG + | BINARY simple_expr %prec NEG { - $$= create_func_cast($2, ITEM_CAST_CHAR, -1, &my_charset_bin); + $$= create_func_cast($2, ITEM_CAST_CHAR, -1, 0, &my_charset_bin); } | CAST_SYM '(' expr AS cast_type ')' { + LEX *lex= Lex; $$= create_func_cast($3, $5, - Lex->length ? atoi(Lex->length) : -1, - Lex->charset); - } - | CASE_SYM opt_expr WHEN_SYM when_list opt_else END - { $$= new Item_func_case(* $4, $2, $5 ); } + lex->length ? atoi(lex->length) : -1, + lex->dec ? atoi(lex->dec) : 0, + lex->charset); + if (!$$) + MYSQL_YYABORT; + } + | CASE_SYM opt_expr when_list opt_else END + { $$= new Item_func_case(* $3, $2, $4 ); } | CONVERT_SYM '(' expr ',' cast_type ')' { $$= create_func_cast($3, $5, Lex->length ? atoi(Lex->length) : -1, + Lex->dec ? 
atoi(Lex->dec) : 0, Lex->charset); + if (!$$) + MYSQL_YYABORT; } | CONVERT_SYM '(' expr USING charset_name ')' { $$= new Item_func_conv_charset($3,$5); } | DEFAULT '(' simple_ident ')' - { $$= new Item_default_value($3); } - | VALUES '(' simple_ident ')' - { $$= new Item_insert_value($3); } + { + if ($3->is_splocal()) + { + Item_splocal *il= static_cast<Item_splocal *>($3); + + my_error(ER_WRONG_COLUMN_NAME, MYF(0), il->my_name()->str); + MYSQL_YYABORT; + } + $$= new Item_default_value(Lex->current_context(), $3); + } + | VALUES '(' simple_ident_nospvar ')' + { $$= new Item_insert_value(Lex->current_context(), $3); } | FUNC_ARG0 '(' ')' { if (!$1.symbol->create_func) { - net_printf(Lex->thd, ER_FEATURE_DISABLED, - $1.symbol->group->name, - $1.symbol->group->needed_define); - YYABORT; + my_error(ER_FEATURE_DISABLED, MYF(0), + $1.symbol->group->name, + $1.symbol->group->needed_define); + MYSQL_YYABORT; } $$= ((Item*(*)(void))($1.symbol->create_func))(); } @@ -2887,10 +4752,10 @@ simple_expr: { if (!$1.symbol->create_func) { - net_printf(Lex->thd, ER_FEATURE_DISABLED, - $1.symbol->group->name, - $1.symbol->group->needed_define); - YYABORT; + my_error(ER_FEATURE_DISABLED, MYF(0), + $1.symbol->group->name, + $1.symbol->group->needed_define); + MYSQL_YYABORT; } $$= ((Item*(*)(Item*))($1.symbol->create_func))($3); } @@ -2898,10 +4763,10 @@ simple_expr: { if (!$1.symbol->create_func) { - net_printf(Lex->thd, ER_FEATURE_DISABLED, - $1.symbol->group->name, - $1.symbol->group->needed_define); - YYABORT; + my_error(ER_FEATURE_DISABLED, MYF(0), + $1.symbol->group->name, + $1.symbol->group->needed_define); + MYSQL_YYABORT; } $$= ((Item*(*)(Item*,Item*))($1.symbol->create_func))($3,$5); } @@ -2909,10 +4774,10 @@ simple_expr: { if (!$1.symbol->create_func) { - net_printf(Lex->thd, ER_FEATURE_DISABLED, - $1.symbol->group->name, - $1.symbol->group->needed_define); - YYABORT; + my_error(ER_FEATURE_DISABLED, MYF(0), + $1.symbol->group->name, + $1.symbol->group->needed_define); + 
MYSQL_YYABORT; } $$= ((Item*(*)(Item*,Item*,Item*))($1.symbol->create_func))($3,$5,$7); } @@ -2920,6 +4785,8 @@ simple_expr: { $$= new Item_date_add_interval($3, $5, INTERVAL_DAY, 0);} | ADDDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')' { $$= new Item_date_add_interval($3, $6, $7, 0); } + | REPEAT_SYM '(' expr ',' expr ')' + { $$= new Item_func_repeat($3,$5); } | ATAN '(' expr ')' { $$= new Item_func_atan($3); } | ATAN '(' expr ',' expr ')' @@ -2940,7 +4807,8 @@ simple_expr: { $5->push_front($3); $$= new Item_func_concat_ws(*$5); } | CONVERT_TZ_SYM '(' expr ',' expr ',' expr ')' { - Lex->time_zone_tables_used= &fake_time_zone_tables_list; + if (Lex->add_time_zone_tables_to_query_tables(YYTHD)) + MYSQL_YYABORT; $$= new Item_func_convert_tz($3, $5, $7); } | CURDATE optional_braces @@ -2953,7 +4821,10 @@ simple_expr: Lex->safe_to_cache_query=0; } | CURRENT_USER optional_braces - { $$= create_func_current_user(); } + { + $$= new Item_func_current_user(Lex->current_context()); + Lex->safe_to_cache_query= 0; + } | DATE_ADD_INTERVAL '(' expr ',' interval_expr interval ')' { $$= new Item_date_add_interval($3,$5,$6,0); } | DATE_SUB_INTERVAL '(' expr ',' interval_expr interval ')' @@ -3010,9 +4881,9 @@ simple_expr: #ifdef HAVE_SPATIAL $$= $1; #else - net_printf(Lex->thd, ER_FEATURE_DISABLED, - sym_group_geom.name, sym_group_geom.needed_define); - YYABORT; + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); + MYSQL_YYABORT; #endif } | GET_FORMAT '(' date_time_type ',' expr ')' @@ -3030,8 +4901,8 @@ simple_expr: { if ($1->type() != Item::ROW_ITEM) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } $$= new Item_func_interval((Item_row *)$1); } @@ -3089,8 +4960,10 @@ simple_expr: } | OLD_PASSWORD '(' expr ')' { $$= new Item_func_old_password($3); } - | POSITION_SYM '(' no_in_expr IN_SYM expr ')' + | POSITION_SYM '(' bit_expr IN_SYM expr ')' { $$ = new Item_func_locate($5,$3); } + 
| QUARTER_SYM '(' expr ')' + { $$ = new Item_func_quarter($3); } | RAND '(' expr ')' { $$= new Item_func_rand($3); Lex->uncacheable(UNCACHEABLE_RAND);} | RAND '(' ')' @@ -3102,6 +4975,11 @@ simple_expr: | ROUND '(' expr ')' { $$= new Item_func_round($3, new Item_int((char*)"0",0,1),0); } | ROUND '(' expr ',' expr ')' { $$= new Item_func_round($3,$5,0); } + | ROW_COUNT_SYM '(' ')' + { + $$= new Item_func_row_count(); + Lex->safe_to_cache_query= 0; + } | SUBDATE_SYM '(' expr ',' expr ')' { $$= new Item_date_add_interval($3, $5, INTERVAL_DAY, 1);} | SUBDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')' @@ -3118,12 +4996,30 @@ simple_expr: { $$= new Item_func_substr($3,$5); } | SUBSTRING_INDEX '(' expr ',' expr ',' expr ')' { $$= new Item_func_substr_index($3,$5,$7); } + | SYSDATE optional_braces + { + if (global_system_variables.sysdate_is_now == 0) + $$= new Item_func_sysdate_local(); + else $$= new Item_func_now_local(); + Lex->safe_to_cache_query=0; + } + | SYSDATE '(' expr ')' + { + if (global_system_variables.sysdate_is_now == 0) + $$= new Item_func_sysdate_local($3); + else $$= new Item_func_now_local($3); + Lex->safe_to_cache_query=0; + } | TIME_SYM '(' expr ')' { $$= new Item_time_typecast($3); } | TIMESTAMP '(' expr ')' { $$= new Item_datetime_typecast($3); } | TIMESTAMP '(' expr ',' expr ')' { $$= new Item_func_add_time($3, $5, 1, 0); } + | TIMESTAMP_ADD '(' interval_time_st ',' expr ',' expr ')' + { $$= new Item_date_add_interval($7,$5,$3,0); } + | TIMESTAMP_DIFF '(' interval_time_st ',' expr ',' expr ')' + { $$= new Item_func_timestamp_diff($5,$7,$3); } | TRIM '(' expr ')' { $$= new Item_func_trim($3); } | TRIM '(' LEADING expr FROM expr ')' @@ -3142,48 +5038,136 @@ simple_expr: { $$= new Item_func_trim($5,$3); } | TRUNCATE_SYM '(' expr ',' expr ')' { $$= new Item_func_round($3,$5,1); } - | UDA_CHAR_SUM '(' udf_sum_expr_list ')' - { - if ($3 != NULL) - $$ = new Item_sum_udf_str($1, *$3); - else - $$ = new Item_sum_udf_str($1); - } - | UDA_FLOAT_SUM '(' 
udf_sum_expr_list ')' - { - if ($3 != NULL) - $$ = new Item_sum_udf_float($1, *$3); - else - $$ = new Item_sum_udf_float($1); - } - | UDA_INT_SUM '(' udf_sum_expr_list ')' - { - if ($3 != NULL) - $$ = new Item_sum_udf_int($1, *$3); - else - $$ = new Item_sum_udf_int($1); - } - | UDF_CHAR_FUNC '(' udf_expr_list ')' - { - if ($3 != NULL) - $$ = new Item_func_udf_str($1, *$3); - else - $$ = new Item_func_udf_str($1); - } - | UDF_FLOAT_FUNC '(' udf_expr_list ')' - { - if ($3 != NULL) - $$ = new Item_func_udf_float($1, *$3); - else - $$ = new Item_func_udf_float($1); - } - | UDF_INT_FUNC '(' udf_expr_list ')' + | ident '.' ident '(' opt_expr_list ')' { - if ($3 != NULL) - $$ = new Item_func_udf_int($1, *$3); + LEX *lex= Lex; + sp_name *name= new sp_name($1, $3); + + name->init_qname(YYTHD); + sp_add_used_routine(lex, YYTHD, name, TYPE_ENUM_FUNCTION); + if ($5) + $$= new Item_func_sp(Lex->current_context(), name, *$5); else - $$ = new Item_func_udf_int($1); + $$= new Item_func_sp(Lex->current_context(), name); + lex->safe_to_cache_query=0; } + | IDENT_sys '(' + { +#ifdef HAVE_DLOPEN + udf_func *udf= 0; + LEX *lex= Lex; + if (using_udf_functions && + (udf= find_udf($1.str, $1.length)) && + udf->type == UDFTYPE_AGGREGATE) + { + if (lex->current_select->inc_in_sum_expr()) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + } + lex->current_select->udf_list.push_front(udf); +#endif + } + udf_expr_list ')' + { + LEX *lex= Lex; +#ifdef HAVE_DLOPEN + udf_func *udf; + + if (NULL != (udf= lex->current_select->udf_list.pop())) + { + if (udf->type == UDFTYPE_AGGREGATE) + Select->in_sum_expr--; + + switch (udf->returns) { + case STRING_RESULT: + if (udf->type == UDFTYPE_FUNCTION) + { + if ($4 != NULL) + $$ = new Item_func_udf_str(udf, *$4); + else + $$ = new Item_func_udf_str(udf); + } + else + { + if ($4 != NULL) + $$ = new Item_sum_udf_str(udf, *$4); + else + $$ = new Item_sum_udf_str(udf); + } + break; + case REAL_RESULT: + if (udf->type == UDFTYPE_FUNCTION) + { + 
if ($4 != NULL) + $$ = new Item_func_udf_float(udf, *$4); + else + $$ = new Item_func_udf_float(udf); + } + else + { + if ($4 != NULL) + $$ = new Item_sum_udf_float(udf, *$4); + else + $$ = new Item_sum_udf_float(udf); + } + break; + case INT_RESULT: + if (udf->type == UDFTYPE_FUNCTION) + { + if ($4 != NULL) + $$ = new Item_func_udf_int(udf, *$4); + else + $$ = new Item_func_udf_int(udf); + } + else + { + if ($4 != NULL) + $$ = new Item_sum_udf_int(udf, *$4); + else + $$ = new Item_sum_udf_int(udf); + } + break; + case DECIMAL_RESULT: + if (udf->type == UDFTYPE_FUNCTION) + { + if ($4 != NULL) + $$ = new Item_func_udf_decimal(udf, *$4); + else + $$ = new Item_func_udf_decimal(udf); + } + else + { + if ($4 != NULL) + $$ = new Item_sum_udf_decimal(udf, *$4); + else + $$ = new Item_sum_udf_decimal(udf); + } + break; + default: + MYSQL_YYABORT; + } + } + else +#endif /* HAVE_DLOPEN */ + { + THD *thd= lex->thd; + LEX_STRING db; + if (thd->copy_db_to(&db.str, &db.length)) + MYSQL_YYABORT; + sp_name *name= new sp_name(db, $1); + if (name) + name->init_qname(thd); + + sp_add_used_routine(lex, YYTHD, name, TYPE_ENUM_FUNCTION); + if ($4) + $$= new Item_func_sp(Lex->current_context(), name, *$4); + else + $$= new Item_func_sp(Lex->current_context(), name); + lex->safe_to_cache_query=0; + } + } | UNIQUE_USERS '(' text_literal ',' NUM ',' NUM ',' expr_list ')' { $$= new Item_func_unique_users($3,atoi($5.str),atoi($7.str), * $9); @@ -3216,7 +5200,7 @@ simple_expr: { $$= new Item_func_yearweek($3,new Item_int((char*) "0",0,1)); } | YEARWEEK '(' expr ',' expr ')' { $$= new Item_func_yearweek($3, $5); } - | BENCHMARK_SYM '(' ULONG_NUM ',' expr ')' + | BENCHMARK_SYM '(' ulong_num ',' expr ')' { $$=new Item_func_benchmark($3,$5); Lex->uncacheable(UNCACHEABLE_SIDEEFFECT); @@ -3225,7 +5209,9 @@ simple_expr: { $$=new Item_extract( $3, $5); }; geometry_function: - GEOMFROMTEXT '(' expr ')' + CONTAINS_SYM '(' expr ',' expr ')' + { $$= GEOM_NEW(Item_func_spatial_rel($3, $5, 
Item_func::SP_CONTAINS_FUNC)); } + | GEOMFROMTEXT '(' expr ')' { $$= GEOM_NEW(Item_func_geometry_from_text($3)); } | GEOMFROMTEXT '(' expr ',' expr ')' { $$= GEOM_NEW(Item_func_geometry_from_text($3, $5)); } @@ -3291,27 +5277,63 @@ fulltext_options: ; udf_expr_list: - /* empty */ { $$= NULL; } - | expr_list { $$= $1;}; + /* empty */ { $$= NULL; } + | udf_expr_list2 { $$= $1;} + ; -udf_sum_expr_list: - { - LEX *lex= Lex; - if (lex->current_select->inc_in_sum_expr()) +udf_expr_list2: + { Select->expr_list.push_front(new List<Item>); } + udf_expr_list3 + { $$= Select->expr_list.pop(); } + ; + +udf_expr_list3: + udf_expr { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + Select->expr_list.head()->push_back($1); } - } - udf_expr_list + | udf_expr_list3 ',' udf_expr + { + Select->expr_list.head()->push_back($3); + } + ; + +udf_expr: + remember_name expr remember_end select_alias { - Select->in_sum_expr--; + udf_func *udf= Select->udf_list.head(); + /* + Use Item::name as a storage for the attribute value of user + defined function argument. It is safe to use Item::name + because the syntax will not allow having an explicit name here. + See WL#1017 re. udf attributes. 
+ */ + if ($4.str) + { + if (!udf) + { + /* + Disallow using AS to specify explicit names for the arguments + of stored routine calls + */ + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + + $2->is_autogenerated_name= FALSE; + $2->set_name($4.str, $4.length, system_charset_info); + } + else if (udf) + $2->set_name($1, (uint) ($3 - $1), YYTHD->charset()); $$= $2; - }; + } + ; sum_expr: AVG_SYM '(' in_sum_expr ')' { $$=new Item_sum_avg($3); } + | AVG_SYM '(' DISTINCT in_sum_expr ')' + { $$=new Item_sum_avg_distinct($4); } | BIT_AND '(' in_sum_expr ')' { $$=new Item_sum_and($3); } | BIT_OR '(' in_sum_expr ')' @@ -3332,25 +5354,82 @@ sum_expr: { $$= new Item_sum_unique_users($3,atoi($5.str),atoi($7.str),$9); } | MIN_SYM '(' in_sum_expr ')' { $$=new Item_sum_min($3); } +/* + According to ANSI SQL, DISTINCT is allowed and has + no sence inside MIN and MAX grouping functions; so MIN|MAX(DISTINCT ...) + is processed like an ordinary MIN | MAX() + */ + | MIN_SYM '(' DISTINCT in_sum_expr ')' + { $$=new Item_sum_min($4); } | MAX_SYM '(' in_sum_expr ')' { $$=new Item_sum_max($3); } + | MAX_SYM '(' DISTINCT in_sum_expr ')' + { $$=new Item_sum_max($4); } | STD_SYM '(' in_sum_expr ')' - { $$=new Item_sum_std($3); } + { $$=new Item_sum_std($3, 0); } | VARIANCE_SYM '(' in_sum_expr ')' - { $$=new Item_sum_variance($3); } + { $$=new Item_sum_variance($3, 0); } + | STDDEV_SAMP_SYM '(' in_sum_expr ')' + { $$=new Item_sum_std($3, 1); } + | VAR_SAMP_SYM '(' in_sum_expr ')' + { $$=new Item_sum_variance($3, 1); } | SUM_SYM '(' in_sum_expr ')' { $$=new Item_sum_sum($3); } + | SUM_SYM '(' DISTINCT in_sum_expr ')' + { $$=new Item_sum_sum_distinct($4); } | GROUP_CONCAT_SYM '(' opt_distinct { Select->in_sum_expr++; } expr_list opt_gorder_clause opt_gconcat_separator ')' { - Select->in_sum_expr--; - $$=new Item_func_group_concat($3,$5,Select->gorder_list,$7); + SELECT_LEX *sel= Select; + sel->in_sum_expr--; + $$=new Item_func_group_concat(Lex->current_context(), $3, $5, + 
sel->gorder_list, $7); $5->empty(); }; +variable: + '@' + { + if (! Lex->parsing_options.allows_variable) + { + my_error(ER_VIEW_SELECT_VARIABLE, MYF(0)); + MYSQL_YYABORT; + } + } + variable_aux + { + $$= $3; + } + ; + +variable_aux: + ident_or_text SET_VAR expr + { + $$= new Item_func_set_user_var($1, $3); + LEX *lex= Lex; + lex->uncacheable(UNCACHEABLE_RAND); + } + | ident_or_text + { + $$= new Item_func_get_user_var($1); + LEX *lex= Lex; + lex->uncacheable(UNCACHEABLE_RAND); + } + | '@' opt_var_ident_type ident_or_text opt_component + { + if ($3.str && $4.str && check_reserved_words(&$3)) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + if (!($$= get_system_var(YYTHD, $2, $3, $4))) + MYSQL_YYABORT; + } + ; + opt_distinct: /* empty */ { $$ = 0; } |DISTINCT { $$ = 1; }; @@ -3381,8 +5460,8 @@ in_sum_expr: LEX *lex= Lex; if (lex->current_select->inc_in_sum_expr()) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } } expr @@ -3392,16 +5471,22 @@ in_sum_expr: }; cast_type: - BINARY opt_len { $$=ITEM_CAST_CHAR; Lex->charset= &my_charset_bin; } - | CHAR_SYM opt_len opt_binary { $$=ITEM_CAST_CHAR; } - | NCHAR_SYM opt_len { $$=ITEM_CAST_CHAR; Lex->charset= national_charset_info; } - | SIGNED_SYM { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->length= (char*)0; } - | SIGNED_SYM INT_SYM { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->length= (char*)0; } - | UNSIGNED { $$=ITEM_CAST_UNSIGNED_INT; Lex->charset= NULL; Lex->length= (char*)0; } - | UNSIGNED INT_SYM { $$=ITEM_CAST_UNSIGNED_INT; Lex->charset= NULL; Lex->length= (char*)0; } - | DATE_SYM { $$=ITEM_CAST_DATE; Lex->charset= NULL; Lex->length= (char*)0; } - | TIME_SYM { $$=ITEM_CAST_TIME; Lex->charset= NULL; Lex->length= (char*)0; } - | DATETIME { $$=ITEM_CAST_DATETIME; Lex->charset= NULL; Lex->length= (char*)0; } + BINARY opt_len { $$=ITEM_CAST_CHAR; Lex->charset= &my_charset_bin; Lex->dec= 0; } + | CHAR_SYM opt_len opt_binary { 
$$=ITEM_CAST_CHAR; Lex->dec= 0; } + | NCHAR_SYM opt_len { $$=ITEM_CAST_CHAR; Lex->charset= national_charset_info; Lex->dec=0; } + | SIGNED_SYM { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; } + | SIGNED_SYM INT_SYM { $$=ITEM_CAST_SIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; } + | UNSIGNED { $$=ITEM_CAST_UNSIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; } + | UNSIGNED INT_SYM { $$=ITEM_CAST_UNSIGNED_INT; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; } + | DATE_SYM { $$=ITEM_CAST_DATE; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; } + | TIME_SYM { $$=ITEM_CAST_TIME; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; } + | DATETIME { $$=ITEM_CAST_DATETIME; Lex->charset= NULL; Lex->dec=Lex->length= (char*)0; } + | DECIMAL_SYM float_options { $$=ITEM_CAST_DECIMAL; Lex->charset= NULL; } + ; + +opt_expr_list: + /* empty */ { $$= NULL; } + | expr_list { $$= $1;} ; expr_list: @@ -3435,76 +5520,180 @@ opt_else: | ELSE expr { $$= $2; }; when_list: - { Select->when_list.push_front(new List<Item>); } - when_list2 - { $$= Select->when_list.pop(); }; + WHEN_SYM expr THEN_SYM expr + { + $$= new List<Item>; + $$->push_back($2); + $$->push_back($4); + } + | when_list WHEN_SYM expr THEN_SYM expr + { + $1->push_back($3); + $1->push_back($5); + $$= $1; + } + ; -when_list2: - expr THEN_SYM expr - { - SELECT_LEX *sel=Select; - sel->when_list.head()->push_back($1); - sel->when_list.head()->push_back($3); - } - | when_list2 WHEN_SYM expr THEN_SYM expr - { - SELECT_LEX *sel=Select; - sel->when_list.head()->push_back($3); - sel->when_list.head()->push_back($5); - }; +/* Warning - may return NULL in case of incomplete SELECT */ +table_ref: + table_factor { $$=$1; } + | join_table + { + LEX *lex= Lex; + if (!($$= lex->current_select->nest_last_join(lex->thd))) + MYSQL_YYABORT; + } + ; join_table_list: - join_table { $$=$1; } - | join_table_list ',' join_table_list { $$=$3; } - | join_table_list normal_join 
join_table_list { $$=$3; } - | join_table_list STRAIGHT_JOIN join_table_list - { $$=$3 ; $1->next->straight=1; } - | join_table_list normal_join join_table_list ON expr - { add_join_on($3,$5); $$=$3; } - | join_table_list normal_join join_table_list + derived_table_list { MYSQL_YYABORT_UNLESS($$=$1); } + ; + +/* Warning - may return NULL in case of incomplete SELECT */ +derived_table_list: + table_ref { $$=$1; } + | derived_table_list ',' table_ref + { + MYSQL_YYABORT_UNLESS($1 && ($$=$3)); + } + ; + +/* + Notice that JOIN is a left-associative operation, and it must be parsed + as such, that is, the parser must process first the left join operand + then the right one. Such order of processing ensures that the parser + produces correct join trees which is essential for semantic analysis + and subsequent optimization phases. +*/ +join_table: +/* INNER JOIN variants */ + /* + Use %prec to evaluate production 'table_ref' before 'normal_join' + so that [INNER | CROSS] JOIN is properly nested as other + left-associative joins. + */ + table_ref %prec TABLE_REF_PRIORITY normal_join table_ref + { MYSQL_YYABORT_UNLESS($1 && ($$=$3)); } + | table_ref STRAIGHT_JOIN table_factor + { MYSQL_YYABORT_UNLESS($1 && ($$=$3)); $3->straight=1; } + | table_ref normal_join table_ref + ON + { + MYSQL_YYABORT_UNLESS($1 && $3); + /* Change the current name resolution context to a local context. */ + if (push_new_name_resolution_context(YYTHD, $1, $3)) + MYSQL_YYABORT; + Select->parsing_place= IN_ON; + } + expr + { + add_join_on($3,$6); + Lex->pop_context(); + Select->parsing_place= NO_MATTER; + } + | table_ref STRAIGHT_JOIN table_factor + ON + { + MYSQL_YYABORT_UNLESS($1 && $3); + /* Change the current name resolution context to a local context. 
*/ + if (push_new_name_resolution_context(YYTHD, $1, $3)) + MYSQL_YYABORT; + Select->parsing_place= IN_ON; + } + expr + { + $3->straight=1; + add_join_on($3,$6); + Lex->pop_context(); + Select->parsing_place= NO_MATTER; + } + | table_ref normal_join table_ref USING { - SELECT_LEX *sel= Select; - sel->db1=$1->db; sel->table1=$1->alias; - sel->db2=$3->db; sel->table2=$3->alias; + MYSQL_YYABORT_UNLESS($1 && $3); } '(' using_list ')' - { add_join_on($3,$7); $$=$3; } + { add_join_natural($1,$3,$7,Select); $$=$3; } + | table_ref NATURAL JOIN_SYM table_factor + { + MYSQL_YYABORT_UNLESS($1 && ($$=$4)); + add_join_natural($1,$4,NULL,Select); + } - | join_table_list LEFT opt_outer JOIN_SYM join_table_list ON expr - { add_join_on($5,$7); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; } - | join_table_list LEFT opt_outer JOIN_SYM join_table_list +/* LEFT JOIN variants */ + | table_ref LEFT opt_outer JOIN_SYM table_ref + ON + { + MYSQL_YYABORT_UNLESS($1 && $5); + /* Change the current name resolution context to a local context. 
*/ + if (push_new_name_resolution_context(YYTHD, $1, $5)) + MYSQL_YYABORT; + Select->parsing_place= IN_ON; + } + expr { - SELECT_LEX *sel= Select; - sel->db1=$1->db; sel->table1=$1->alias; - sel->db2=$5->db; sel->table2=$5->alias; + add_join_on($5,$8); + Lex->pop_context(); + $5->outer_join|=JOIN_TYPE_LEFT; + $$=$5; + Select->parsing_place= NO_MATTER; + } + | table_ref LEFT opt_outer JOIN_SYM table_factor + { + MYSQL_YYABORT_UNLESS($1 && $5); } USING '(' using_list ')' - { add_join_on($5,$9); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; } - | join_table_list NATURAL LEFT opt_outer JOIN_SYM join_table_list + { + add_join_natural($1,$5,$9,Select); + $5->outer_join|=JOIN_TYPE_LEFT; + $$=$5; + } + | table_ref NATURAL LEFT opt_outer JOIN_SYM table_factor { - add_join_natural($1,$1->next); - $1->next->outer_join|=JOIN_TYPE_LEFT; + MYSQL_YYABORT_UNLESS($1 && $6); + add_join_natural($1,$6,NULL,Select); + $6->outer_join|=JOIN_TYPE_LEFT; $$=$6; } - | join_table_list RIGHT opt_outer JOIN_SYM join_table_list ON expr - { add_join_on($1,$7); $1->outer_join|=JOIN_TYPE_RIGHT; $$=$5; } - | join_table_list RIGHT opt_outer JOIN_SYM join_table_list + +/* RIGHT JOIN variants */ + | table_ref RIGHT opt_outer JOIN_SYM table_ref + ON + { + MYSQL_YYABORT_UNLESS($1 && $5); + /* Change the current name resolution context to a local context. 
*/ + if (push_new_name_resolution_context(YYTHD, $1, $5)) + MYSQL_YYABORT; + Select->parsing_place= IN_ON; + } + expr + { + LEX *lex= Lex; + if (!($$= lex->current_select->convert_right_join())) + MYSQL_YYABORT; + add_join_on($$, $8); + Lex->pop_context(); + Select->parsing_place= NO_MATTER; + } + | table_ref RIGHT opt_outer JOIN_SYM table_factor { - SELECT_LEX *sel= Select; - sel->db1=$1->db; sel->table1=$1->alias; - sel->db2=$5->db; sel->table2=$5->alias; + MYSQL_YYABORT_UNLESS($1 && $5); } USING '(' using_list ')' - { add_join_on($1,$9); $1->outer_join|=JOIN_TYPE_RIGHT; $$=$5; } - | join_table_list NATURAL RIGHT opt_outer JOIN_SYM join_table_list + { + LEX *lex= Lex; + if (!($$= lex->current_select->convert_right_join())) + MYSQL_YYABORT; + add_join_natural($$,$5,$9,Select); + } + | table_ref NATURAL RIGHT opt_outer JOIN_SYM table_factor { - add_join_natural($1->next,$1); - $1->outer_join|=JOIN_TYPE_RIGHT; - $$=$6; - } - | join_table_list NATURAL JOIN_SYM join_table_list - { add_join_natural($1,$1->next); $$=$4; }; + MYSQL_YYABORT_UNLESS($1 && $6); + add_join_natural($6,$1,NULL,Select); + LEX *lex= Lex; + if (!($$= lex->current_select->convert_right_join())) + MYSQL_YYABORT; + }; normal_join: JOIN_SYM {} @@ -3512,7 +5701,8 @@ normal_join: | CROSS JOIN_SYM {} ; -join_table: +/* Warning - may return NULL in case of incomplete SELECT */ +table_factor: { SELECT_LEX *sel= Select; sel->use_index_ptr=sel->ignore_index_ptr=0; @@ -3527,54 +5717,132 @@ join_table: lex->lock_option, sel->get_use_index(), sel->get_ignore_index()))) - YYABORT; + MYSQL_YYABORT; + sel->add_joined_table($$); } - | '{' ident join_table LEFT OUTER JOIN_SYM join_table ON expr '}' - { add_join_on($7,$9); $7->outer_join|=JOIN_TYPE_LEFT; $$=$7; } - | '(' select_derived union_opt ')' opt_table_alias + | '{' ident table_ref LEFT OUTER JOIN_SYM table_ref + ON + { + /* Change the current name resolution context to a local context. 
*/ + if (push_new_name_resolution_context(YYTHD, $3, $7)) + MYSQL_YYABORT; + + } + expr '}' + { + LEX *lex= Lex; + MYSQL_YYABORT_UNLESS($3 && $7); + add_join_on($7,$10); + Lex->pop_context(); + $7->outer_join|=JOIN_TYPE_LEFT; + $$=$7; + if (!($$= lex->current_select->nest_last_join(lex->thd))) + MYSQL_YYABORT; + } + | select_derived_init get_select_lex select_derived2 + { + LEX *lex= Lex; + SELECT_LEX *sel= lex->current_select; + if ($1) + { + if (sel->set_braces(1)) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + /* select in braces, can't contain global parameters */ + if (sel->master_unit()->fake_select_lex) + sel->master_unit()->global_parameters= + sel->master_unit()->fake_select_lex; + } + if ($2->init_nested_join(lex->thd)) + MYSQL_YYABORT; + $$= 0; + /* incomplete derived tables return NULL, we must be + nested in select_derived rule to be here. */ + } + | '(' get_select_lex select_derived union_opt ')' opt_table_alias { - LEX *lex=Lex; - SELECT_LEX_UNIT *unit= lex->current_select->master_unit(); - lex->current_select= unit->outer_select(); - if (!($$= lex->current_select-> - add_table_to_list(lex->thd, new Table_ident(unit), $5, 0, - TL_READ,(List<String> *)0, - (List<String> *)0))) - - YYABORT; + /* Use $2 instead of Lex->current_select as derived table will + alter value of Lex->current_select. 
*/ + + if (!($3 || $6) && $2->embedding && + !$2->embedding->nested_join->join_list.elements) + { + /* we have a derived table ($3 == NULL) but no alias, + Since we are nested in further parentheses so we + can pass NULL to the outer level parentheses + Permits parsing of "((((select ...))) as xyz)" */ + $$= 0; + } + else + if (!$3) + { + /* Handle case of derived table, alias may be NULL if there + are no outer parentheses, add_table_to_list() will throw + error in this case */ + LEX *lex=Lex; + SELECT_LEX *sel= lex->current_select; + SELECT_LEX_UNIT *unit= sel->master_unit(); + lex->current_select= sel= unit->outer_select(); + if (!($$= sel-> + add_table_to_list(lex->thd, new Table_ident(unit), $6, 0, + TL_READ,(List<String> *)0, + (List<String> *)0))) + + MYSQL_YYABORT; + sel->add_joined_table($$); + lex->pop_context(); + } + else + if ($4 || $6) + { + /* simple nested joins cannot have aliases or unions */ + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + else + $$= $3; } - | '(' join_table_list ')' { $$=$2; }; + ; +/* handle contents of parentheses in join expression */ select_derived: - SELECT_SYM select_derived2 - | '(' select_derived ')' + get_select_lex { - LEX *lex= Lex; - SELECT_LEX * sel= lex->current_select; - if (sel->set_braces(1)) - { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; - } - /* select in braces, can't contain global parameters */ - if (sel->master_unit()->fake_select_lex) - sel->master_unit()->global_parameters= - sel->master_unit()->fake_select_lex; - }; + LEX *lex= Lex; + if ($1->init_nested_join(lex->thd)) + MYSQL_YYABORT; + } + derived_table_list + { + LEX *lex= Lex; + /* for normal joins, $3 != NULL and end_nested_join() != NULL, + for derived tables, both must equal NULL */ + + if (!($$= $1->end_nested_join(lex->thd)) && $3) + MYSQL_YYABORT; + if (!$3 && $$) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + } + ; select_derived2: { LEX *lex= Lex; - lex->derived_tables= 1; + lex->derived_tables|= 
DERIVED_SUBQUERY; if (lex->sql_command == (int)SQLCOM_HA_READ || lex->sql_command == (int)SQLCOM_KILL) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE || mysql_new_select(lex, 1)) - YYABORT; + MYSQL_YYABORT; mysql_init_select(lex); lex->current_select->linkage= DERIVED_TABLE_TYPE; lex->current_select->parsing_place= SELECT_LIST; @@ -3586,10 +5854,44 @@ select_derived2: opt_select_from ; +get_select_lex: + /* Empty */ { $$= Select; } + ; + +select_derived_init: + SELECT_SYM + { + LEX *lex= Lex; + + if (! lex->parsing_options.allows_derived) + { + my_error(ER_VIEW_SELECT_DERIVED, MYF(0)); + MYSQL_YYABORT; + } + + SELECT_LEX *sel= lex->current_select; + TABLE_LIST *embedding; + if (!sel->embedding || sel->end_nested_join(lex->thd)) + { + /* we are not in parentheses */ + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + embedding= Select->embedding; + $$= embedding && + !embedding->nested_join->join_list.elements; + /* return true if we are deeply nested */ + } + ; + opt_outer: /* empty */ {} | OUTER {}; +opt_for_join: + /* empty */ + | FOR_SYM JOIN_SYM; + opt_key_definition: /* empty */ {} | USE_SYM key_usage_list @@ -3613,7 +5915,8 @@ opt_key_definition: }; key_usage_list: - key_or_index { Select->interval_list.empty(); } + key_or_index opt_for_join + { Select->interval_list.empty(); } '(' key_list_or_empty ')' { $$= &Select->interval_list; } ; @@ -3640,38 +5943,44 @@ key_usage_list2: using_list: ident { - SELECT_LEX *sel= Select; - if (!($$= new Item_func_eq(new Item_field(sel->db1, sel->table1, - $1.str), - new Item_field(sel->db2, sel->table2, - $1.str)))) - YYABORT; + if (!($$= new List<String>)) + MYSQL_YYABORT; + $$->push_back(new (YYTHD->mem_root) + String((const char *) $1.str, $1.length, + system_charset_info)); } | using_list ',' ident { - SELECT_LEX *sel= Select; - if (!($$= new Item_cond_and(new Item_func_eq(new 
Item_field(sel->db1,sel->table1,$3.str), new Item_field(sel->db2,sel->table2,$3.str)), $1))) - YYABORT; + $1->push_back(new (YYTHD->mem_root) + String((const char *) $3.str, $3.length, + system_charset_info)); + $$= $1; }; interval: - DAY_HOUR_SYM { $$=INTERVAL_DAY_HOUR; } + interval_time_st {} + | DAY_HOUR_SYM { $$=INTERVAL_DAY_HOUR; } | DAY_MICROSECOND_SYM { $$=INTERVAL_DAY_MICROSECOND; } | DAY_MINUTE_SYM { $$=INTERVAL_DAY_MINUTE; } | DAY_SECOND_SYM { $$=INTERVAL_DAY_SECOND; } - | DAY_SYM { $$=INTERVAL_DAY; } | HOUR_MICROSECOND_SYM { $$=INTERVAL_HOUR_MICROSECOND; } | HOUR_MINUTE_SYM { $$=INTERVAL_HOUR_MINUTE; } | HOUR_SECOND_SYM { $$=INTERVAL_HOUR_SECOND; } - | HOUR_SYM { $$=INTERVAL_HOUR; } | MICROSECOND_SYM { $$=INTERVAL_MICROSECOND; } | MINUTE_MICROSECOND_SYM { $$=INTERVAL_MINUTE_MICROSECOND; } | MINUTE_SECOND_SYM { $$=INTERVAL_MINUTE_SECOND; } + | SECOND_MICROSECOND_SYM { $$=INTERVAL_SECOND_MICROSECOND; } + | YEAR_MONTH_SYM { $$=INTERVAL_YEAR_MONTH; }; + +interval_time_st: + DAY_SYM { $$=INTERVAL_DAY; } + | WEEK_SYM { $$=INTERVAL_WEEK; } + | HOUR_SYM { $$=INTERVAL_HOUR; } + | FRAC_SECOND_SYM { $$=INTERVAL_MICROSECOND; } | MINUTE_SYM { $$=INTERVAL_MINUTE; } | MONTH_SYM { $$=INTERVAL_MONTH; } - | SECOND_MICROSECOND_SYM { $$=INTERVAL_SECOND_MICROSECOND; } + | QUARTER_SYM { $$=INTERVAL_QUARTER; } | SECOND_SYM { $$=INTERVAL_SECOND; } - | YEAR_MONTH_SYM { $$=INTERVAL_YEAR_MONTH; } | YEAR_SYM { $$=INTERVAL_YEAR; } ; @@ -3730,10 +6039,17 @@ having_clause: ; opt_escape: - ESCAPE_SYM simple_expr { $$= $2; } + ESCAPE_SYM simple_expr + { + Lex->escape_used= TRUE; + $$= $2; + } | /* empty */ - { - $$= new Item_string("\\", 1, &my_charset_latin1); + { + Lex->escape_used= FALSE; + $$= ((YYTHD->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) ? 
+ new Item_string("", 0, &my_charset_latin1) : + new Item_string("\\", 1, &my_charset_latin1)); } ; @@ -3748,9 +6064,9 @@ group_clause: group_list: group_list ',' order_ident order_dir - { if (add_group_to_list(YYTHD, $3,(bool) $4)) YYABORT; } + { if (add_group_to_list(YYTHD, $3,(bool) $4)) MYSQL_YYABORT; } | order_ident order_dir - { if (add_group_to_list(YYTHD, $1,(bool) $2)) YYABORT; }; + { if (add_group_to_list(YYTHD, $1,(bool) $2)) MYSQL_YYABORT; }; olap_opt: /* empty */ {} @@ -3759,22 +6075,22 @@ olap_opt: LEX *lex=Lex; if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) { - net_printf(lex->thd, ER_WRONG_USAGE, "WITH CUBE", + my_error(ER_WRONG_USAGE, MYF(0), "WITH CUBE", "global union parameters"); - YYABORT; + MYSQL_YYABORT; } lex->current_select->olap= CUBE_TYPE; - net_printf(lex->thd, ER_NOT_SUPPORTED_YET, "CUBE"); - YYABORT; /* To be deleted in 5.1 */ + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "CUBE"); + MYSQL_YYABORT; /* To be deleted in 5.1 */ } | WITH ROLLUP_SYM { LEX *lex= Lex; if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) { - net_printf(lex->thd, ER_WRONG_USAGE, "WITH ROLLUP", + my_error(ER_WRONG_USAGE, MYF(0), "WITH ROLLUP", "global union parameters"); - YYABORT; + MYSQL_YYABORT; } lex->current_select->olap= ROLLUP_TYPE; } @@ -3794,12 +6110,12 @@ alter_order_list: ; alter_order_item: - simple_ident order_dir + simple_ident_nospvar order_dir { THD *thd= YYTHD; bool ascending= ($2 == 1) ? 
true : false; if (add_order_to_list(thd, $1, ascending)) - YYABORT; + MYSQL_YYABORT; } ; @@ -3820,10 +6136,9 @@ order_clause: if (sel->linkage != GLOBAL_OPTIONS_TYPE && sel->olap != UNSPECIFIED_OLAP_TYPE) { - net_printf(lex->thd, ER_WRONG_USAGE, - "CUBE/ROLLUP", - "ORDER BY"); - YYABORT; + my_error(ER_WRONG_USAGE, MYF(0), + "CUBE/ROLLUP", "ORDER BY"); + MYSQL_YYABORT; } if (lex->sql_command != SQLCOM_ALTER_TABLE && !unit->fake_select_lex) { @@ -3838,17 +6153,17 @@ order_clause: SELECT_LEX *first_sl= unit->first_select(); if (!first_sl->next_select() && (first_sl->order_list.elements || - first_sl->select_limit != HA_POS_ERROR) && + first_sl->select_limit) && unit->add_fake_select_lex(lex->thd)) - YYABORT; + MYSQL_YYABORT; } } order_list; order_list: order_list ',' order_ident order_dir - { if (add_order_to_list(YYTHD, $3,(bool) $4)) YYABORT; } + { if (add_order_to_list(YYTHD, $3,(bool) $4)) MYSQL_YYABORT; } | order_ident order_dir - { if (add_order_to_list(YYTHD, $1,(bool) $2)) YYABORT; }; + { if (add_order_to_list(YYTHD, $1,(bool) $2)) MYSQL_YYABORT; }; order_dir: /* empty */ { $$ = 1; } @@ -3861,8 +6176,8 @@ opt_limit_clause_init: { LEX *lex= Lex; SELECT_LEX *sel= lex->current_select; - sel->offset_limit= 0L; - sel->select_limit= HA_POS_ERROR; + sel->offset_limit= 0; + sel->select_limit= 0; } | limit_clause {} ; @@ -3877,21 +6192,21 @@ limit_clause: ; limit_options: - ULONG_NUM + limit_option { SELECT_LEX *sel= Select; sel->select_limit= $1; - sel->offset_limit= 0L; + sel->offset_limit= 0; sel->explicit_limit= 1; } - | ULONG_NUM ',' ULONG_NUM + | limit_option ',' limit_option { SELECT_LEX *sel= Select; sel->select_limit= $3; sel->offset_limit= $1; sel->explicit_limit= 1; } - | ULONG_NUM OFFSET_SYM ULONG_NUM + | limit_option OFFSET_SYM limit_option { SELECT_LEX *sel= Select; sel->select_limit= $1; @@ -3899,26 +6214,32 @@ limit_options: sel->explicit_limit= 1; } ; - +limit_option: + param_marker + | ULONGLONG_NUM { $$= new Item_uint($1.str, $1.length); } + | 
LONG_NUM { $$= new Item_uint($1.str, $1.length); } + | NUM { $$= new Item_uint($1.str, $1.length); } + ; delete_limit_clause: /* empty */ { LEX *lex=Lex; - lex->current_select->select_limit= HA_POS_ERROR; + lex->current_select->select_limit= 0; } - | LIMIT ulonglong_num + | LIMIT limit_option { SELECT_LEX *sel= Select; - sel->select_limit= (ha_rows) $2; + sel->select_limit= $2; sel->explicit_limit= 1; }; -ULONG_NUM: - NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } +ulong_num: + NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + | HEX_NUM { $$= (ulong) strtol($1.str, (char**) 0, 16); } | LONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } | ULONGLONG_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } - | REAL_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } + | DECIMAL_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } | FLOAT_NUM { int error; $$= (ulong) my_strtoll10($1.str, (char**) 0, &error); } ; @@ -3926,7 +6247,7 @@ ulonglong_num: NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } | ULONGLONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } | LONG_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } - | REAL_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } + | DECIMAL_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } | FLOAT_NUM { int error; $$= (ulonglong) my_strtoll10($1.str, (char**) 0, &error); } ; @@ -3935,18 +6256,26 @@ procedure_clause: | PROCEDURE ident /* Procedure name */ { LEX *lex=Lex; + + if (! 
lex->parsing_options.allows_select_procedure) + { + my_error(ER_VIEW_SELECT_CLAUSE, MYF(0), "PROCEDURE"); + MYSQL_YYABORT; + } + if (&lex->select_lex != lex->current_select) { - net_printf(lex->thd, ER_WRONG_USAGE, - "PROCEDURE", - "subquery"); - YYABORT; + my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "subquery"); + MYSQL_YYABORT; } lex->proc_list.elements=0; lex->proc_list.first=0; lex->proc_list.next= (byte**) &lex->proc_list.first; - if (add_proc_to_list(lex->thd, new Item_field(NULL,NULL,$2.str))) - YYABORT; + if (add_proc_to_list(lex->thd, new Item_field(&lex-> + current_select-> + context, + NULL,NULL,$2.str))) + MYSQL_YYABORT; Lex->uncacheable(UNCACHEABLE_SIDEEFFECT); } '(' procedure_list ')'; @@ -3965,9 +6294,10 @@ procedure_item: { LEX *lex= Lex; if (add_proc_to_list(lex->thd, $2)) - YYABORT; + MYSQL_YYABORT; if (!$2->name) - $2->set_name($1,(uint) ((char*) lex->tok_end - $1), YYTHD->charset()); + $2->set_name($1,(uint) ((char*) lex->tok_end - $1), + YYTHD->charset()); } ; @@ -3976,7 +6306,7 @@ select_var_list_init: { LEX *lex=Lex; if (!lex->describe && (!(lex->result= new select_dumpvar()))) - YYABORT; + MYSQL_YYABORT; } select_var_list {} @@ -3987,37 +6317,85 @@ select_var_list: | select_var_ident {} ; -select_var_ident: '@' ident_or_text +select_var_ident: + '@' ident_or_text + { + LEX *lex=Lex; + if (lex->result) + ((select_dumpvar *)lex->result)->var_list.push_back( new my_var($2,0,0,(enum_field_types)0)); + else + /* + The parser won't create select_result instance only + if it's an EXPLAIN. 
+ */ + DBUG_ASSERT(lex->describe); + } + | ident_or_text { LEX *lex=Lex; - if (lex->result && ((select_dumpvar *)lex->result)->var_list.push_back((LEX_STRING*) sql_memdup(&$2,sizeof(LEX_STRING)))) - YYABORT; + sp_variable_t *t; + + if (!lex->spcont || !(t=lex->spcont->find_variable(&$1))) + { + my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str); + MYSQL_YYABORT; + } + if (lex->result) + { + my_var *var; + ((select_dumpvar *)lex->result)-> + var_list.push_back(var= new my_var($1,1,t->offset,t->type)); +#ifndef DBUG_OFF + if (var) + var->sp= lex->sphead; +#endif + } + else + { + /* + The parser won't create select_result instance only + if it's an EXPLAIN. + */ + DBUG_ASSERT(lex->describe); + } } ; into: - INTO OUTFILE TEXT_STRING_sys + INTO + { + if (! Lex->parsing_options.allows_select_into) + { + my_error(ER_VIEW_SELECT_CLAUSE, MYF(0), "INTO"); + MYSQL_YYABORT; + } + } + into_destination + ; + +into_destination: + OUTFILE TEXT_STRING_filesystem { LEX *lex= Lex; lex->uncacheable(UNCACHEABLE_SIDEEFFECT); - if (!(lex->exchange= new sql_exchange($3.str, 0)) || + if (!(lex->exchange= new sql_exchange($2.str, 0)) || !(lex->result= new select_export(lex->exchange))) - YYABORT; + MYSQL_YYABORT; } opt_field_term opt_line_term - | INTO DUMPFILE TEXT_STRING_sys + | DUMPFILE TEXT_STRING_filesystem { LEX *lex=Lex; if (!lex->describe) { lex->uncacheable(UNCACHEABLE_SIDEEFFECT); - if (!(lex->exchange= new sql_exchange($3.str,1))) - YYABORT; + if (!(lex->exchange= new sql_exchange($2.str,1))) + MYSQL_YYABORT; if (!(lex->result= new select_dump(lex->exchange))) - YYABORT; + MYSQL_YYABORT; } } - | INTO select_var_list_init + | select_var_list_init { Lex->uncacheable(UNCACHEABLE_SIDEEFFECT); } @@ -4056,13 +6434,12 @@ drop: LEX *lex=Lex; lex->sql_command= SQLCOM_DROP_INDEX; lex->alter_info.reset(); - lex->alter_info.is_simple= 0; lex->alter_info.flags= ALTER_DROP_INDEX; lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY, $3.str)); if 
(!lex->current_select->add_table_to_list(lex->thd, $5, NULL, TL_OPTION_UPDATING)) - YYABORT; + MYSQL_YYABORT; } | DROP DATABASE if_exists ident { @@ -4071,22 +6448,48 @@ drop: lex->drop_if_exists=$3; lex->name=$4.str; } - | DROP UDF_SYM IDENT_sys + | DROP FUNCTION_SYM if_exists sp_name { LEX *lex=Lex; + if (lex->sphead) + { + my_error(ER_SP_NO_DROP_SP, MYF(0), "FUNCTION"); + MYSQL_YYABORT; + } lex->sql_command = SQLCOM_DROP_FUNCTION; - lex->udf.name = $3; + lex->drop_if_exists= $3; + lex->spname= $4; } - | DROP USER + | DROP PROCEDURE if_exists sp_name { LEX *lex=Lex; - lex->sql_command = SQLCOM_DROP_USER; - lex->users_list.empty(); + if (lex->sphead) + { + my_error(ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE"); + MYSQL_YYABORT; + } + lex->sql_command = SQLCOM_DROP_PROCEDURE; + lex->drop_if_exists= $3; + lex->spname= $4; } - user_list - {} - ; - + | DROP USER clear_privileges user_list + { + Lex->sql_command = SQLCOM_DROP_USER; + } + | DROP VIEW_SYM if_exists table_list opt_restrict + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_DROP_VIEW; + lex->drop_if_exists= $3; + } + | DROP TRIGGER_SYM if_exists sp_name + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_DROP_TRIGGER; + lex->drop_if_exists= $3; + lex->spname= $4; + } + ; table_list: table_name @@ -4096,7 +6499,7 @@ table_name: table_ident { if (!Select->add_table_to_list(YYTHD, $1, NULL, TL_OPTION_UPDATING)) - YYABORT; + MYSQL_YYABORT; } ; @@ -4118,11 +6521,10 @@ insert: { LEX *lex= Lex; lex->sql_command= SQLCOM_INSERT; - lex->duplicates= DUP_ERROR; - mysql_init_select(lex); + lex->duplicates= DUP_ERROR; + mysql_init_select(lex); /* for subselects */ lex->lock_option= (using_update_log) ? 
TL_READ_NO_INSERT : TL_READ; - lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; } insert_lock_option opt_ignore insert2 { @@ -4139,8 +6541,7 @@ replace: LEX *lex=Lex; lex->sql_command = SQLCOM_REPLACE; lex->duplicates= DUP_REPLACE; - mysql_init_select(lex); - lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; + mysql_init_select(lex); } replace_lock_option insert2 { @@ -4152,7 +6553,19 @@ replace: ; insert_lock_option: - /* empty */ { $$= TL_WRITE_CONCURRENT_INSERT; } + /* empty */ + { +#ifdef HAVE_QUERY_CACHE + /* + If it is SP we do not allow insert optimisation whan result of + insert visible only after the table unlocking but everyone can + read table. + */ + $$= (Lex->sphead ? TL_WRITE :TL_WRITE_CONCURRENT_INSERT); +#else + $$= TL_WRITE_CONCURRENT_INSERT; +#endif + } | LOW_PRIORITY { $$= TL_WRITE_LOW_PRIORITY; } | DELAYED_SYM { $$= TL_WRITE_DELAYED; } | HIGH_PRIORITY { $$= TL_WRITE; } @@ -4184,15 +6597,10 @@ insert_field_spec: LEX *lex=Lex; if (!(lex->insert_list = new List_item) || lex->many_values.push_back(lex->insert_list)) - YYABORT; + MYSQL_YYABORT; } ident_eq_list; -opt_field_spec: - /* empty */ { } - | '(' fields ')' { } - | '(' ')' { }; - fields: fields ',' insert_ident { Lex->field_list.push_back($3); } | insert_ident { Lex->field_list.push_back($1); }; @@ -4214,12 +6622,12 @@ ident_eq_list: ident_eq_value; ident_eq_value: - simple_ident equal expr_or_default + simple_ident_nospvar equal expr_or_default { LEX *lex=Lex; if (lex->field_list.push_back($1) || lex->insert_list->push_back($3)) - YYABORT; + MYSQL_YYABORT; }; equal: EQ {} @@ -4235,13 +6643,13 @@ no_braces: '(' { if (!(Lex->insert_list = new List_item)) - YYABORT; + MYSQL_YYABORT; } opt_values ')' { LEX *lex=Lex; if (lex->many_values.push_back(lex->insert_list)) - YYABORT; + MYSQL_YYABORT; }; opt_values: @@ -4252,18 +6660,18 @@ values: values ',' expr_or_default { if (Lex->insert_list->push_back($3)) - YYABORT; + MYSQL_YYABORT; } | expr_or_default { if 
(Lex->insert_list->push_back($1)) - YYABORT; + MYSQL_YYABORT; } ; expr_or_default: expr { $$= $1;} - | DEFAULT {$$= new Item_default_value(); } + | DEFAULT {$$= new Item_default_value(Lex->current_context()); } ; opt_insert_update: @@ -4288,19 +6696,20 @@ update: { LEX *lex= Lex; if (lex->select_lex.table_list.elements > 1) - { lex->sql_command= SQLCOM_UPDATE_MULTI; - lex->multi_lock_option= $3; - } else if (lex->select_lex.get_table_list()->derived) { /* it is single table update and it is update of derived table */ - net_printf(lex->thd, ER_NON_UPDATABLE_TABLE, - lex->select_lex.get_table_list()->alias, "UPDATE"); - YYABORT; + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), + lex->select_lex.get_table_list()->alias, "UPDATE"); + MYSQL_YYABORT; } - else - Select->set_lock_for_tables($3); + /* + In case of multi-update setting write lock for all tables may + be too pessimistic. We will decrease lock level if possible in + mysql_multi_update(). + */ + Select->set_lock_for_tables($3); } where_clause opt_order_clause delete_limit_clause {} ; @@ -4310,10 +6719,10 @@ update_list: | update_elem; update_elem: - simple_ident equal expr_or_default + simple_ident_nospvar equal expr_or_default { if (add_item_to_list(YYTHD, $1) || add_value_to_list(YYTHD, $3)) - YYABORT; + MYSQL_YYABORT; }; insert_update_list: @@ -4321,13 +6730,13 @@ insert_update_list: | insert_update_elem; insert_update_elem: - simple_ident equal expr_or_default - { + simple_ident_nospvar equal expr_or_default + { LEX *lex= Lex; if (lex->update_list.push_back($1) || lex->value_list.push_back($3)) - YYABORT; - }; + MYSQL_YYABORT; + }; opt_low_priority: /* empty */ { $$= YYTHD->update_lock_default; } @@ -4340,7 +6749,7 @@ delete: { LEX *lex= Lex; lex->sql_command= SQLCOM_DELETE; - mysql_init_select(lex); + mysql_init_select(lex); lex->lock_option= lex->thd->update_lock_default; lex->ignore= 0; lex->select_lex.init_order(); @@ -4353,15 +6762,24 @@ single_multi: { if (!Select->add_table_to_list(YYTHD, $2, NULL, 
TL_OPTION_UPDATING, Lex->lock_option)) - YYABORT; + MYSQL_YYABORT; } where_clause opt_order_clause delete_limit_clause {} - | table_wild_list {mysql_init_multi_delete(Lex);} - FROM join_table_list {fix_multi_delete_lex(Lex);} where_clause - | FROM table_wild_list { mysql_init_multi_delete(Lex);} - USING join_table_list {fix_multi_delete_lex(Lex);} where_clause - {} + | table_wild_list + { mysql_init_multi_delete(Lex); } + FROM join_table_list where_clause + { + if (multi_delete_set_locks_and_link_aux_tables(Lex)) + MYSQL_YYABORT; + } + | FROM table_wild_list + { mysql_init_multi_delete(Lex); } + USING join_table_list where_clause + { + if (multi_delete_set_locks_and_link_aux_tables(Lex)) + MYSQL_YYABORT; + } ; table_wild_list: @@ -4374,7 +6792,7 @@ table_wild_one: if (!Select->add_table_to_list(YYTHD, new Table_ident($1), $3, TL_OPTION_UPDATING | TL_OPTION_ALIAS, Lex->lock_option)) - YYABORT; + MYSQL_YYABORT; } | ident '.' ident opt_wild opt_table_alias { @@ -4384,7 +6802,7 @@ table_wild_one: TL_OPTION_UPDATING | TL_OPTION_ALIAS, Lex->lock_option)) - YYABORT; + MYSQL_YYABORT; } ; @@ -4408,6 +6826,7 @@ truncate: LEX* lex= Lex; lex->sql_command= SQLCOM_TRUNCATE; lex->select_lex.options= 0; + lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED; lex->select_lex.init_order(); } ; @@ -4422,6 +6841,9 @@ show: SHOW { LEX *lex=Lex; lex->wild=0; + lex->lock_option= TL_READ; + mysql_init_select(lex); + lex->current_select->parsing_place= SELECT_LIST; bzero((char*) &lex->create_info,sizeof(lex->create_info)); } show_param @@ -4429,42 +6851,67 @@ show: SHOW ; show_param: - DATABASES wild - { Lex->sql_command= SQLCOM_SHOW_DATABASES; } - | TABLES opt_db wild - { - LEX *lex= Lex; - lex->sql_command= SQLCOM_SHOW_TABLES; - lex->select_lex.db= $2; - } - | TABLE_SYM STATUS_SYM opt_db wild - { - LEX *lex= Lex; - lex->sql_command= SQLCOM_SHOW_TABLES; - lex->describe= DESCRIBE_EXTENDED; - lex->select_lex.db= $3; - } - | OPEN_SYM TABLES opt_db wild + DATABASES wild_and_where + { 
+ LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_DATABASES; + if (prepare_schema_table(YYTHD, lex, 0, SCH_SCHEMATA)) + MYSQL_YYABORT; + } + | opt_full TABLES opt_db wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_TABLES; + lex->select_lex.db= $3; + if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLE_NAMES)) + MYSQL_YYABORT; + } + | opt_full TRIGGERS_SYM opt_db wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_TRIGGERS; + lex->select_lex.db= $3; + if (prepare_schema_table(YYTHD, lex, 0, SCH_TRIGGERS)) + MYSQL_YYABORT; + } + | TABLE_SYM STATUS_SYM opt_db wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_TABLE_STATUS; + lex->select_lex.db= $3; + if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLES)) + MYSQL_YYABORT; + } + | OPEN_SYM TABLES opt_db wild_and_where { LEX *lex= Lex; - lex->sql_command= SQLCOM_SHOW_OPEN_TABLES; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_OPEN_TABLES; lex->select_lex.db= $3; + if (prepare_schema_table(YYTHD, lex, 0, SCH_OPEN_TABLES)) + MYSQL_YYABORT; } | ENGINE_SYM storage_engines { Lex->create_info.db_type= $2; } show_engine_param - | opt_full COLUMNS from_or_in table_ident opt_db wild + | opt_full COLUMNS from_or_in table_ident opt_db wild_and_where { - Lex->sql_command= SQLCOM_SHOW_FIELDS; + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_FIELDS; if ($5) $4->change_db($5); - if (!Select->add_table_to_list(YYTHD, $4, NULL, 0)) - YYABORT; + if (prepare_schema_table(YYTHD, lex, $4, SCH_COLUMNS)) + MYSQL_YYABORT; } | NEW_SYM MASTER_SYM FOR_SYM SLAVE WITH MASTER_LOG_FILE_SYM EQ TEXT_STRING_sys AND_SYM MASTER_LOG_POS_SYM EQ ulonglong_num AND_SYM MASTER_SERVER_ID_SYM EQ - ULONG_NUM + ulong_num { Lex->sql_command = SQLCOM_SHOW_NEW_MASTER; Lex->mi.log_file_name = $8.str; @@ 
-4484,13 +6931,15 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_BINLOG_EVENTS; } opt_limit_clause_init - | keys_or_index from_or_in table_ident opt_db - { - Lex->sql_command= SQLCOM_SHOW_KEYS; + | keys_or_index from_or_in table_ident opt_db where_clause + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_KEYS; if ($4) $3->change_db($4); - if (!Select->add_table_to_list(YYTHD, $3, NULL, 0)) - YYABORT; + if (prepare_schema_table(YYTHD, lex, $3, SCH_STATISTICS)) + MYSQL_YYABORT; } | COLUMN_SYM TYPES_SYM { @@ -4521,22 +6970,46 @@ show_param: { Lex->sql_command = SQLCOM_SHOW_WARNS;} | ERRORS opt_limit_clause_init { Lex->sql_command = SQLCOM_SHOW_ERRORS;} - | STATUS_SYM wild - { Lex->sql_command= SQLCOM_SHOW_STATUS; } + | opt_var_type STATUS_SYM wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_STATUS; + lex->option_type= $1; + if (prepare_schema_table(YYTHD, lex, 0, SCH_STATUS)) + MYSQL_YYABORT; + } | INNOBASE_SYM STATUS_SYM { Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS; WARN_DEPRECATED("SHOW INNODB STATUS", "SHOW ENGINE INNODB STATUS"); } + | MUTEX_SYM STATUS_SYM + { Lex->sql_command = SQLCOM_SHOW_MUTEX_STATUS; } | opt_full PROCESSLIST_SYM { Lex->sql_command= SQLCOM_SHOW_PROCESSLIST;} - | opt_var_type VARIABLES wild + | opt_var_type VARIABLES wild_and_where { - THD *thd= YYTHD; - thd->lex->sql_command= SQLCOM_SHOW_VARIABLES; - thd->lex->option_type= (enum_var_type) $1; + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_VARIABLES; + lex->option_type= $1; + if (prepare_schema_table(YYTHD, lex, 0, SCH_VARIABLES)) + MYSQL_YYABORT; } - | charset wild - { Lex->sql_command= SQLCOM_SHOW_CHARSETS; } - | COLLATION_SYM wild - { Lex->sql_command= SQLCOM_SHOW_COLLATIONS; } + | charset wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_CHARSETS; + if 
(prepare_schema_table(YYTHD, lex, 0, SCH_CHARSETS)) + MYSQL_YYABORT; + } + | COLLATION_SYM wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_COLLATIONS; + if (prepare_schema_table(YYTHD, lex, 0, SCH_COLLATIONS)) + MYSQL_YYABORT; + } | BERKELEY_DB_SYM LOGS_SYM { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW BDB LOGS", "SHOW ENGINE BDB LOGS"); } | LOGS_SYM @@ -4545,23 +7018,10 @@ show_param: { LEX *lex=Lex; lex->sql_command= SQLCOM_SHOW_GRANTS; - THD *thd= lex->thd; LEX_USER *curr_user; - if (!(curr_user= (LEX_USER*) thd->alloc(sizeof(st_lex_user)))) - YYABORT; - curr_user->user.str= thd->priv_user; - curr_user->user.length= strlen(thd->priv_user); - if (*thd->priv_host != 0) - { - curr_user->host.str= thd->priv_host; - curr_user->host.length= strlen(thd->priv_host); - } - else - { - curr_user->host.str= (char *) "%"; - curr_user->host.length= 1; - } - curr_user->password.str=NullS; + if (!(curr_user= (LEX_USER*) lex->thd->alloc(sizeof(st_lex_user)))) + MYSQL_YYABORT; + bzero(curr_user, sizeof(st_lex_user)); lex->grant_user= curr_user; } | GRANTS FOR_SYM user @@ -4569,7 +7029,7 @@ show_param: LEX *lex=Lex; lex->sql_command= SQLCOM_SHOW_GRANTS; lex->grant_user=$3; - lex->grant_user->password.str=NullS; + lex->grant_user->password=null_lex_str; } | CREATE DATABASE opt_if_not_exists ident { @@ -4579,9 +7039,19 @@ show_param: } | CREATE TABLE_SYM table_ident { - Lex->sql_command = SQLCOM_SHOW_CREATE; - if (!Select->add_table_to_list(YYTHD, $3, NULL,0)) - YYABORT; + LEX *lex= Lex; + lex->sql_command = SQLCOM_SHOW_CREATE; + if (!lex->select_lex.add_table_to_list(YYTHD, $3, NULL,0)) + MYSQL_YYABORT; + lex->only_view= 0; + } + | CREATE VIEW_SYM table_ident + { + LEX *lex= Lex; + lex->sql_command = SQLCOM_SHOW_CREATE; + if (!lex->select_lex.add_table_to_list(YYTHD, $3, NULL, 0)) + MYSQL_YYABORT; + lex->only_view= 1; } | MASTER_SYM STATUS_SYM { @@ -4590,7 +7060,62 @@ show_param: | SLAVE STATUS_SYM { 
Lex->sql_command = SQLCOM_SHOW_SLAVE_STAT; - }; + } + | CREATE PROCEDURE sp_name + { + LEX *lex= Lex; + + lex->sql_command = SQLCOM_SHOW_CREATE_PROC; + lex->spname= $3; + } + | CREATE FUNCTION_SYM sp_name + { + LEX *lex= Lex; + + lex->sql_command = SQLCOM_SHOW_CREATE_FUNC; + lex->spname= $3; + } + | PROCEDURE STATUS_SYM wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_STATUS_PROC; + if (!sp_add_to_query_tables(YYTHD, lex, "mysql", "proc", TL_READ)) + MYSQL_YYABORT; + if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES)) + MYSQL_YYABORT; + } + | FUNCTION_SYM STATUS_SYM wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_STATUS_FUNC; + if (!sp_add_to_query_tables(YYTHD, lex, "mysql", "proc", TL_READ)) + MYSQL_YYABORT; + if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES)) + MYSQL_YYABORT; + } + | PROCEDURE CODE_SYM sp_name + { +#ifdef DBUG_OFF + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; +#else + Lex->sql_command= SQLCOM_SHOW_PROC_CODE; + Lex->spname= $3; +#endif + } + | FUNCTION_SYM CODE_SYM sp_name + { +#ifdef DBUG_OFF + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; +#else + Lex->sql_command= SQLCOM_SHOW_FUNC_CODE; + Lex->spname= $3; +#endif + } + ; show_engine_param: STATUS_SYM @@ -4603,8 +7128,8 @@ show_engine_param: Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS; break; default: - net_printf(YYTHD, ER_NOT_SUPPORTED_YET, "STATUS"); - YYABORT; + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "STATUS"); + MYSQL_YYABORT; } } | LOGS_SYM @@ -4614,8 +7139,8 @@ show_engine_param: Lex->sql_command = SQLCOM_SHOW_LOGS; break; default: - net_printf(YYTHD, ER_NOT_SUPPORTED_YET, "LOGS"); - YYABORT; + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "LOGS"); + MYSQL_YYABORT; } }; @@ -4631,12 +7156,6 @@ opt_db: /* empty */ { $$= 0; } | from_or_in ident { $$= $2.str; }; -wild: - /* empty */ - | LIKE TEXT_STRING_sys - { Lex->wild= new (YYTHD->mem_root) 
String($2.str, $2.length, - system_charset_info); }; - opt_full: /* empty */ { Lex->verbose=0; } | FULL { Lex->verbose=1; }; @@ -4653,17 +7172,34 @@ binlog_from: /* empty */ { Lex->mi.pos = 4; /* skip magic number */ } | FROM ulonglong_num { Lex->mi.pos = $2; }; +wild_and_where: + /* empty */ + | LIKE TEXT_STRING_sys + { Lex->wild= new (YYTHD->mem_root) String($2.str, $2.length, + system_charset_info); } + | WHERE expr + { + Select->where= $2; + if ($2) + $2->top_level_item(); + } + ; + /* A Oracle compatible synonym for show */ describe: describe_command table_ident { - LEX *lex=Lex; - lex->wild=0; - lex->verbose=0; - lex->sql_command=SQLCOM_SHOW_FIELDS; - if (!Select->add_table_to_list(lex->thd, $2, NULL,0)) - YYABORT; + LEX *lex= Lex; + lex->lock_option= TL_READ; + mysql_init_select(lex); + lex->current_select->parsing_place= SELECT_LIST; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_FIELDS; + lex->select_lex.db= 0; + lex->verbose= 0; + if (prepare_schema_table(YYTHD, lex, $2, SCH_COLUMNS)) + MYSQL_YYABORT; } opt_describe_column {} | describe_command opt_extended_describe @@ -4697,7 +7233,8 @@ flush: FLUSH_SYM opt_no_write_to_binlog { LEX *lex=Lex; - lex->sql_command= SQLCOM_FLUSH; lex->type=0; + lex->sql_command= SQLCOM_FLUSH; + lex->type= 0; lex->no_write_to_binlog= $2; } flush_options @@ -4764,39 +7301,29 @@ purge_option: } | BEFORE_SYM expr { - if (!$2) - /* Can only be an out of memory situation, no need for a message */ - YYABORT; - if ($2->fix_fields(Lex->thd, 0, &$2) || $2->check_cols(1)) - { - net_printf(Lex->thd, ER_WRONG_ARGUMENTS, "PURGE LOGS BEFORE"); - YYABORT; - } - Item *tmp= new Item_func_unix_timestamp($2); - /* - it is OK only emulate fix_fieds, because we need only - value of constant - */ - tmp->quick_fix_field(); - Lex->sql_command = SQLCOM_PURGE_BEFORE; - Lex->purge_time= (ulong) tmp->val_int(); + LEX *lex= Lex; + lex->value_list.empty(); + lex->value_list.push_front($2); + lex->sql_command= SQLCOM_PURGE_BEFORE; } 
; /* kill threads */ kill: - KILL_SYM { Lex->sql_command= SQLCOM_KILL; } expr + KILL_SYM { Lex->sql_command= SQLCOM_KILL; } kill_option expr { LEX *lex=Lex; - if ($3->fix_fields(lex->thd, 0, &$3) || $3->check_cols(1)) - { - send_error(lex->thd, ER_SET_CONSTANTS_ONLY); - YYABORT; - } - lex->thread_id= (ulong) $3->val_int(); + lex->value_list.empty(); + lex->value_list.push_front($4); }; +kill_option: + /* empty */ { Lex->type= 0; } + | CONNECTION_SYM { Lex->type= 0; } + | QUERY_SYM { Lex->type= ONLY_KILL_QUERY; } + ; + /* change database */ use: USE_SYM ident @@ -4808,38 +7335,74 @@ use: USE_SYM ident /* import, export of files */ -load: LOAD DATA_SYM load_data_lock opt_local INFILE TEXT_STRING_sys +load: LOAD DATA_SYM + { + LEX *lex=Lex; + if (lex->sphead) + { + my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD DATA"); + MYSQL_YYABORT; + } + lex->fname_start= lex->ptr; + } + load_data + {} + | + LOAD TABLE_SYM table_ident FROM MASTER_SYM + { + LEX *lex=Lex; + if (lex->sphead) + { + my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD TABLE"); + MYSQL_YYABORT; + } + lex->sql_command = SQLCOM_LOAD_MASTER_TABLE; + WARN_DEPRECATED("LOAD TABLE FROM MASTER", + "mysqldump or future " + "BACKUP/RESTORE DATABASE facility"); + if (!Select->add_table_to_list(YYTHD, $3, NULL, TL_OPTION_UPDATING)) + MYSQL_YYABORT; + }; + +load_data: + load_data_lock opt_local INFILE TEXT_STRING_filesystem { LEX *lex=Lex; lex->sql_command= SQLCOM_LOAD; - lex->lock_option= $3; - lex->local_file= $4; + lex->lock_option= $1; + lex->local_file= $2; lex->duplicates= DUP_ERROR; lex->ignore= 0; - if (!(lex->exchange= new sql_exchange($6.str,0))) - YYABORT; - lex->field_list.empty(); - } - opt_duplicate INTO TABLE_SYM table_ident opt_field_term opt_line_term - opt_ignore_lines opt_field_spec - { - if (!Select->add_table_to_list(YYTHD, $11, NULL, TL_OPTION_UPDATING)) - YYABORT; + if (!(lex->exchange= new sql_exchange($4.str, 0))) + MYSQL_YYABORT; + } + opt_duplicate INTO + { + LEX *lex=Lex; + lex->fname_end= lex->ptr; } 
- | - LOAD TABLE_SYM table_ident FROM MASTER_SYM + TABLE_SYM table_ident { - Lex->sql_command = SQLCOM_LOAD_MASTER_TABLE; - WARN_DEPRECATED("LOAD TABLE FROM MASTER", "mysqldump or future BACKUP/RESTORE DATABASE facility"); - if (!Select->add_table_to_list(YYTHD, $3, NULL, TL_OPTION_UPDATING)) - YYABORT; - + LEX *lex=Lex; + if (!Select->add_table_to_list(YYTHD, $10, NULL, TL_OPTION_UPDATING, + lex->lock_option)) + MYSQL_YYABORT; + lex->field_list.empty(); + lex->update_list.empty(); + lex->value_list.empty(); } + opt_load_data_charset + { Lex->exchange->cs= $12; } + opt_field_term opt_line_term opt_ignore_lines opt_field_or_var_spec + opt_load_data_set_spec + {} | - LOAD DATA_SYM FROM MASTER_SYM + FROM MASTER_SYM { Lex->sql_command = SQLCOM_LOAD_MASTER_DATA; - WARN_DEPRECATED("LOAD DATA FROM MASTER", "mysqldump or future BACKUP/RESTORE DATABASE facility"); + WARN_DEPRECATED("LOAD DATA FROM MASTER", + "mysqldump or future " + "BACKUP/RESTORE DATABASE facility"); }; opt_local: @@ -4848,7 +7411,18 @@ opt_local: load_data_lock: /* empty */ { $$= YYTHD->update_lock_default; } - | CONCURRENT { $$= TL_WRITE_CONCURRENT_INSERT ; } + | CONCURRENT + { +#ifdef HAVE_QUERY_CACHE + /* + Ignore this option in SP to avoid problem with query cache + */ + if (Lex->sphead != 0) + $$= YYTHD->update_lock_default; + else +#endif + $$= TL_WRITE_CONCURRENT_INSERT; + } | LOW_PRIORITY { $$= TL_WRITE_LOW_PRIORITY; }; @@ -4868,24 +7442,24 @@ field_term_list: field_term: TERMINATED BY text_string { - DBUG_ASSERT(Lex->exchange); + DBUG_ASSERT(Lex->exchange != 0); Lex->exchange->field_term= $3; } | OPTIONALLY ENCLOSED BY text_string { LEX *lex= Lex; - DBUG_ASSERT(lex->exchange); + DBUG_ASSERT(lex->exchange != 0); lex->exchange->enclosed= $4; lex->exchange->opt_enclosed= 1; } | ENCLOSED BY text_string { - DBUG_ASSERT(Lex->exchange); + DBUG_ASSERT(Lex->exchange != 0); Lex->exchange->enclosed= $3; } | ESCAPED BY text_string { - DBUG_ASSERT(Lex->exchange); + DBUG_ASSERT(Lex->exchange != 0); 
Lex->exchange->escaped= $3; }; @@ -4900,12 +7474,12 @@ line_term_list: line_term: TERMINATED BY text_string { - DBUG_ASSERT(Lex->exchange); + DBUG_ASSERT(Lex->exchange != 0); Lex->exchange->line_term= $3; } | STARTING BY text_string { - DBUG_ASSERT(Lex->exchange); + DBUG_ASSERT(Lex->exchange != 0); Lex->exchange->line_start= $3; }; @@ -4913,10 +7487,33 @@ opt_ignore_lines: /* empty */ | IGNORE_SYM NUM LINES { - DBUG_ASSERT(Lex->exchange); + DBUG_ASSERT(Lex->exchange != 0); Lex->exchange->skip_lines= atol($2.str); }; +opt_field_or_var_spec: + /* empty */ { } + | '(' fields_or_vars ')' { } + | '(' ')' { }; + +fields_or_vars: + fields_or_vars ',' field_or_var + { Lex->field_list.push_back($3); } + | field_or_var + { Lex->field_list.push_back($1); } + ; + +field_or_var: + simple_ident_nospvar {$$= $1;} + | '@' ident_or_text + { $$= new Item_user_var_as_out_param($2); } + ; + +opt_load_data_set_spec: + /* empty */ { } + | SET insert_update_list { }; + + /* Common definitions */ text_literal: @@ -4938,15 +7535,25 @@ text_string: { $$= new (YYTHD->mem_root) String($1.str,$1.length,YYTHD->variables.collation_connection); } | HEX_NUM { - Item *tmp = new Item_varbinary($1.str,$1.length); + Item *tmp= new Item_hex_string($1.str, $1.length); /* - it is OK only emulate fix_fieds, because we need only + it is OK only emulate fix_fields, because we need only value of constant */ $$= tmp ? tmp->quick_fix_field(), tmp->val_str((String*) 0) : (String*) 0; } + | BIN_NUM + { + Item *tmp= new Item_bin_string($1.str, $1.length); + /* + it is OK only emulate fix_fields, because we need only + value of constant + */ + $$= tmp ? tmp->quick_fix_field(), tmp->val_str((String*) 0) : + (String*) 0; + } ; param_marker: @@ -4954,12 +7561,17 @@ param_marker: { THD *thd=YYTHD; LEX *lex= thd->lex; - Item_param *item= new Item_param((uint) (lex->tok_start - - (uchar *) thd->query)); + Item_param *item; + if (! 
lex->parsing_options.allows_variable) + { + my_error(ER_VIEW_SELECT_VARIABLE, MYF(0)); + MYSQL_YYABORT; + } + item= new Item_param((uint) (lex->tok_start - (uchar *) thd->query)); if (!($$= item) || lex->param_list.push_back(item)) { - send_error(thd, ER_OUT_OF_RESOURCES); - YYABORT; + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + MYSQL_YYABORT; } } ; @@ -4982,10 +7594,11 @@ literal: Lex->next_state=MY_LEX_OPERATOR_OR_IDENT;} | FALSE_SYM { $$= new Item_int((char*) "FALSE",0,1); } | TRUE_SYM { $$= new Item_int((char*) "TRUE",1,1); } - | HEX_NUM { $$ = new Item_varbinary($1.str,$1.length);} + | HEX_NUM { $$ = new Item_hex_string($1.str, $1.length);} + | BIN_NUM { $$= new Item_bin_string($1.str, $1.length); } | UNDERSCORE_CHARSET HEX_NUM { - Item *tmp= new Item_varbinary($2.str,$2.length); + Item *tmp= new Item_hex_string($2.str, $2.length); /* it is OK only emulate fix_fieds, because we need only value of constant @@ -4997,6 +7610,20 @@ literal: str ? str->length() : 0, Lex->underscore_charset); } + | UNDERSCORE_CHARSET BIN_NUM + { + Item *tmp= new Item_bin_string($2.str, $2.length); + /* + it is OK only emulate fix_fieds, because we need only + value of constant + */ + String *str= tmp ? + tmp->quick_fix_field(), tmp->val_str((String*) 0) : + (String*) 0; + $$= new Item_string(str ? str->ptr() : "", + str ? 
str->length() : 0, + Lex->charset); + } | DATE_SYM text_literal { $$ = $2; } | TIME_SYM text_literal { $$ = $2; } | TIMESTAMP text_literal { $$ = $2; }; @@ -5005,30 +7632,46 @@ NUM_literal: NUM { int error; $$ = new Item_int($1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); } | LONG_NUM { int error; $$ = new Item_int($1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); } | ULONGLONG_NUM { $$ = new Item_uint($1.str, $1.length); } - | REAL_NUM { $$ = new Item_real($1.str, $1.length); } - | FLOAT_NUM { $$ = new Item_float($1.str, $1.length); } + | DECIMAL_NUM + { + $$= new Item_decimal($1.str, $1.length, YYTHD->charset()); + if (YYTHD->net.report_error) + { + MYSQL_YYABORT; + } + } + | FLOAT_NUM + { + $$ = new Item_float($1.str, $1.length); + if (YYTHD->net.report_error) + { + MYSQL_YYABORT; + } + } ; - + /********************************************************************** -** Createing different items. +** Creating different items. **********************************************************************/ insert_ident: - simple_ident { $$=$1; } + simple_ident_nospvar { $$=$1; } | table_wild { $$=$1; }; table_wild: ident '.' '*' { - $$ = new Item_field(NullS,$1.str,"*"); - Lex->current_select->with_wild++; + SELECT_LEX *sel= Select; + $$ = new Item_field(Lex->current_context(), NullS, $1.str, "*"); + sel->with_wild++; } | ident '.' ident '.' '*' { - $$ = new Item_field((YYTHD->client_capabilities & - CLIENT_NO_SCHEMA ? NullS : $1.str), - $3.str,"*"); - Lex->current_select->with_wild++; + SELECT_LEX *sel= Select; + $$ = new Item_field(Lex->current_context(), (YYTHD->client_capabilities & + CLIENT_NO_SCHEMA ? NullS : $1.str), + $3.str,"*"); + sel->with_wild++; } ; @@ -5038,28 +7681,122 @@ order_ident: simple_ident: ident { + sp_variable_t *spv; + LEX *lex = Lex; + sp_pcontext *spc = lex->spcont; + if (spc && (spv = spc->find_variable(&$1))) + { + /* We're compiling a stored procedure and found a variable */ + if (! 
lex->parsing_options.allows_variable) + { + my_error(ER_VIEW_SELECT_VARIABLE, MYF(0)); + MYSQL_YYABORT; + } + + Item_splocal *splocal; + splocal= new Item_splocal($1, spv->offset, spv->type, + lex->tok_start_prev - + lex->sphead->m_tmp_query); +#ifndef DBUG_OFF + if (splocal) + splocal->m_sp= lex->sphead; +#endif + $$ = (Item*) splocal; + lex->safe_to_cache_query=0; + } + else + { + SELECT_LEX *sel=Select; + $$= (sel->parsing_place != IN_HAVING || + sel->get_in_sum_expr() > 0) ? + (Item*) new Item_field(Lex->current_context(), NullS, NullS, $1.str) : + (Item*) new Item_ref(Lex->current_context(), NullS, NullS, $1.str); + } + } + | simple_ident_q { $$= $1; } + ; + +simple_ident_nospvar: + ident + { SELECT_LEX *sel=Select; $$= (sel->parsing_place != IN_HAVING || sel->get_in_sum_expr() > 0) ? - (Item*) new Item_field(NullS,NullS,$1.str) : - (Item*) new Item_ref(NullS, NullS, $1.str); + (Item*) new Item_field(Lex->current_context(), NullS, NullS, $1.str) : + (Item*) new Item_ref(Lex->current_context(), NullS, NullS, $1.str); } - | ident '.' ident + | simple_ident_q { $$= $1; } + ; + +simple_ident_q: + ident '.' ident { THD *thd= YYTHD; LEX *lex= thd->lex; - SELECT_LEX *sel= lex->current_select; - if (sel->no_table_names_allowed) - { - my_printf_error(ER_TABLENAME_NOT_ALLOWED_HERE, - ER(ER_TABLENAME_NOT_ALLOWED_HERE), - MYF(0), $1.str, thd->where); - } - $$= (sel->parsing_place != IN_HAVING || - sel->get_in_sum_expr() > 0) ? - (Item*) new Item_field(NullS,$1.str,$3.str) : - (Item*) new Item_ref(NullS, $1.str, $3.str); - } + + /* + FIXME This will work ok in simple_ident_nospvar case because + we can't meet simple_ident_nospvar in trigger now. But it + should be changed in future. 
+ */ + if (lex->sphead && lex->sphead->m_type == TYPE_ENUM_TRIGGER && + (!my_strcasecmp(system_charset_info, $1.str, "NEW") || + !my_strcasecmp(system_charset_info, $1.str, "OLD"))) + { + Item_trigger_field *trg_fld; + bool new_row= ($1.str[0]=='N' || $1.str[0]=='n'); + + if (lex->trg_chistics.event == TRG_EVENT_INSERT && + !new_row) + { + my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "OLD", "on INSERT"); + MYSQL_YYABORT; + } + + if (lex->trg_chistics.event == TRG_EVENT_DELETE && + new_row) + { + my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "NEW", "on DELETE"); + MYSQL_YYABORT; + } + + DBUG_ASSERT(!new_row || + (lex->trg_chistics.event == TRG_EVENT_INSERT || + lex->trg_chistics.event == TRG_EVENT_UPDATE)); + const bool read_only= + !(new_row && lex->trg_chistics.action_time == TRG_ACTION_BEFORE); + if (!(trg_fld= new Item_trigger_field(Lex->current_context(), + new_row ? + Item_trigger_field::NEW_ROW: + Item_trigger_field::OLD_ROW, + $3.str, + SELECT_ACL, + read_only))) + MYSQL_YYABORT; + + /* + Let us add this item to list of all Item_trigger_field objects + in trigger. + */ + lex->trg_table_fields.link_in_list((byte *)trg_fld, + (byte**)&trg_fld->next_trg_field); + + $$= (Item *)trg_fld; + } + else + { + SELECT_LEX *sel= lex->current_select; + if (sel->no_table_names_allowed) + { + my_error(ER_TABLENAME_NOT_ALLOWED_HERE, + MYF(0), $1.str, thd->where); + } + $$= (sel->parsing_place != IN_HAVING || + sel->get_in_sum_expr() > 0) ? + (Item*) new Item_field(Lex->current_context(), NullS, $1.str, $3.str) : + (Item*) new Item_ref(Lex->current_context(), NullS, $1.str, $3.str); + } + } | '.' ident '.' 
ident { THD *thd= YYTHD; @@ -5067,14 +7804,13 @@ simple_ident: SELECT_LEX *sel= lex->current_select; if (sel->no_table_names_allowed) { - my_printf_error(ER_TABLENAME_NOT_ALLOWED_HERE, - ER(ER_TABLENAME_NOT_ALLOWED_HERE), - MYF(0), $2.str, thd->where); + my_error(ER_TABLENAME_NOT_ALLOWED_HERE, + MYF(0), $2.str, thd->where); } $$= (sel->parsing_place != IN_HAVING || sel->get_in_sum_expr() > 0) ? - (Item*) new Item_field(NullS,$2.str,$4.str) : - (Item*) new Item_ref(NullS, $2.str, $4.str); + (Item*) new Item_field(Lex->current_context(), NullS, $2.str, $4.str) : + (Item*) new Item_ref(Lex->current_context(), NullS, $2.str, $4.str); } | ident '.' ident '.' ident { @@ -5083,16 +7819,17 @@ simple_ident: SELECT_LEX *sel= lex->current_select; if (sel->no_table_names_allowed) { - my_printf_error(ER_TABLENAME_NOT_ALLOWED_HERE, - ER(ER_TABLENAME_NOT_ALLOWED_HERE), - MYF(0), $3.str, thd->where); + my_error(ER_TABLENAME_NOT_ALLOWED_HERE, + MYF(0), $3.str, thd->where); } $$= (sel->parsing_place != IN_HAVING || sel->get_in_sum_expr() > 0) ? - (Item*) new Item_field((YYTHD->client_capabilities & + (Item*) new Item_field(Lex->current_context(), + (YYTHD->client_capabilities & CLIENT_NO_SCHEMA ? NullS : $1.str), $3.str, $5.str) : - (Item*) new Item_ref((YYTHD->client_capabilities & + (Item*) new Item_ref(Lex->current_context(), + (YYTHD->client_capabilities & CLIENT_NO_SCHEMA ? 
NullS : $1.str), $3.str, $5.str); }; @@ -5105,13 +7842,14 @@ field_ident: TABLE_LIST *table= (TABLE_LIST*) Select->table_list.first; if (my_strcasecmp(table_alias_charset, $1.str, table->db)) { - net_printf(YYTHD, ER_WRONG_DB_NAME, $1.str); - YYABORT; + my_error(ER_WRONG_DB_NAME, MYF(0), $1.str); + MYSQL_YYABORT; } - if (my_strcasecmp(table_alias_charset, $3.str, table->real_name)) + if (my_strcasecmp(table_alias_charset, $3.str, + table->table_name)) { - net_printf(YYTHD, ER_WRONG_TABLE_NAME, $3.str); - YYABORT; + my_error(ER_WRONG_TABLE_NAME, MYF(0), $3.str); + MYSQL_YYABORT; } $$=$5; } @@ -5120,8 +7858,8 @@ field_ident: TABLE_LIST *table= (TABLE_LIST*) Select->table_list.first; if (my_strcasecmp(table_alias_charset, $1.str, table->alias)) { - net_printf(YYTHD, ER_WRONG_TABLE_NAME, $1.str); - YYABORT; + my_error(ER_WRONG_TABLE_NAME, MYF(0), $1.str); + MYSQL_YYABORT; } $$=$3; } @@ -5151,9 +7889,9 @@ IDENT_sys: $1.length, &dummy_error); if (wlen < $1.length) { - net_printf(YYTHD, ER_INVALID_CHARACTER_STRING, cs->csname, - $1.str + wlen); - YYABORT; + my_error(ER_INVALID_CHARACTER_STRING, MYF(0), + cs->csname, $1.str + wlen); + MYSQL_YYABORT; } $$= $1; } @@ -5188,6 +7926,18 @@ TEXT_STRING_literal: ; +TEXT_STRING_filesystem: + TEXT_STRING + { + THD *thd= YYTHD; + if (thd->charset_is_character_set_filesystem) + $$= $1; + else + thd->convert_string(&$$, thd->variables.character_set_filesystem, + $1.str, $1.length, thd->charset()); + } + ; + ident: IDENT_sys { $$=$1; } | keyword @@ -5198,8 +7948,18 @@ ident: } ; +label_ident: + IDENT_sys { $$=$1; } + | keyword_sp + { + THD *thd= YYTHD; + $$.str= thd->strmake($1.str, $1.length); + $$.length= $1.length; + } + ; + ident_or_text: - ident { $$=$1;} + ident { $$=$1;} | TEXT_STRING_sys { $$=$1;} | LEX_HOSTNAME { $$=$1;}; @@ -5208,116 +7968,158 @@ user: { THD *thd= YYTHD; if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) - YYABORT; + MYSQL_YYABORT; $$->user = $1; $$->host.str= (char *) "%"; $$->host.length= 1; + + if 
(check_string_length(&$$->user, + ER(ER_USERNAME), USERNAME_LENGTH)) + MYSQL_YYABORT; } | ident_or_text '@' ident_or_text { THD *thd= YYTHD; if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) - YYABORT; + MYSQL_YYABORT; $$->user = $1; $$->host=$3; + + if (check_string_length(&$$->user, + ER(ER_USERNAME), USERNAME_LENGTH) || + check_string_length(&$$->host, + ER(ER_HOSTNAME), HOSTNAME_LENGTH)) + MYSQL_YYABORT; } | CURRENT_USER optional_braces { - THD *thd= YYTHD; - if (!($$=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) - YYABORT; - $$->user.str= thd->priv_user; - $$->user.length= strlen(thd->priv_user); - if (*thd->priv_host != 0) - { - $$->host.str= thd->priv_host; - $$->host.length= strlen(thd->priv_host); - } - else - { - $$->host.str= (char *) "%"; - $$->host.length= 1; - } + if (!($$=(LEX_USER*) YYTHD->alloc(sizeof(st_lex_user)))) + MYSQL_YYABORT; + /* + empty LEX_USER means current_user and + will be handled in the get_current_user() function + later + */ + bzero($$, sizeof(LEX_USER)); }; -/* Keyword that we allow for identifiers */ - +/* Keyword that we allow for identifiers (except SP labels) */ keyword: + keyword_sp {} + | ASCII_SYM {} + | BACKUP_SYM {} + | BEGIN_SYM {} + | BYTE_SYM {} + | CACHE_SYM {} + | CHARSET {} + | CHECKSUM_SYM {} + | CLOSE_SYM {} + | COMMENT_SYM {} + | COMMIT_SYM {} + | CONTAINS_SYM {} + | DEALLOCATE_SYM {} + | DO_SYM {} + | END {} + | EXECUTE_SYM {} + | FLUSH_SYM {} + | HANDLER_SYM {} + | HELP_SYM {} + | LANGUAGE_SYM {} + | NO_SYM {} + | OPEN_SYM {} + | PREPARE_SYM {} + | REPAIR {} + | RESET_SYM {} + | RESTORE_SYM {} + | ROLLBACK_SYM {} + | SAVEPOINT_SYM {} + | SECURITY_SYM {} + | SIGNED_SYM {} + | SLAVE {} + | START_SYM {} + | STOP_SYM {} + | TRUNCATE_SYM {} + | UNICODE_SYM {} + | XA_SYM {} + | UPGRADE_SYM {} + ; + +/* + * Keywords that we allow for labels in SPs. + * Anything that's the beginning of a statement or characteristics + * must be in keyword above, otherwise we get (harmful) shift/reduce + * conflicts. 
+ */ +keyword_sp: ACTION {} | ADDDATE_SYM {} | AFTER_SYM {} | AGAINST {} | AGGREGATE_SYM {} + | ALGORITHM_SYM {} | ANY_SYM {} - | ASCII_SYM {} | AUTO_INC {} | AVG_ROW_LENGTH {} | AVG_SYM {} - | BACKUP_SYM {} - | BEGIN_SYM {} | BERKELEY_DB_SYM {} | BINLOG_SYM {} | BIT_SYM {} | BOOL_SYM {} | BOOLEAN_SYM {} - | BYTE_SYM {} | BTREE_SYM {} - | CACHE_SYM {} + | CASCADED {} + | CHAIN_SYM {} | CHANGED {} - | CHARSET {} - | CHECKSUM_SYM {} | CIPHER_SYM {} | CLIENT_SYM {} - | CLOSE_SYM {} + | CODE_SYM {} | COLLATION_SYM {} - | COMMENT_SYM {} + | COLUMNS {} | COMMITTED_SYM {} - | COMMIT_SYM {} + | COMPACT_SYM {} | COMPRESSED_SYM {} | CONCURRENT {} + | CONNECTION_SYM {} | CONSISTENT_SYM {} | CUBE_SYM {} | DATA_SYM {} | DATETIME {} | DATE_SYM {} | DAY_SYM {} - | DEALLOCATE_SYM {} + | DEFINER_SYM {} | DELAY_KEY_WRITE_SYM {} | DES_KEY_FILE {} | DIRECTORY_SYM {} | DISCARD {} - | DO_SYM {} | DUMPFILE {} | DUPLICATE_SYM {} | DYNAMIC_SYM {} - | END {} | ENUM {} | ENGINE_SYM {} | ENGINES_SYM {} | ERRORS {} | ESCAPE_SYM {} | EVENTS_SYM {} - | EXECUTE_SYM {} | EXPANSION_SYM {} | EXTENDED_SYM {} | FAST_SYM {} + | FOUND_SYM {} | DISABLE_SYM {} | ENABLE_SYM {} | FULL {} | FILE_SYM {} | FIRST_SYM {} | FIXED_SYM {} - | FLUSH_SYM {} + | FRAC_SECOND_SYM {} | GEOMETRY_SYM {} | GEOMETRYCOLLECTION {} | GET_FORMAT {} | GRANTS {} | GLOBAL_SYM {} - | HANDLER_SYM {} | HASH_SYM {} - | HELP_SYM {} | HOSTS_SYM {} | HOUR_SYM {} | IDENTIFIED_SYM {} + | INVOKER_SYM {} | IMPORT {} | INDEXES {} | ISOLATION {} @@ -5351,8 +8153,11 @@ keyword: | MAX_CONNECTIONS_PER_HOUR {} | MAX_QUERIES_PER_HOUR {} | MAX_UPDATES_PER_HOUR {} + | MAX_USER_CONNECTIONS_SYM {} | MEDIUM_SYM {} + | MERGE_SYM {} | MICROSECOND_SYM {} + | MIGRATE_SYM {} | MINUTE_SYM {} | MIN_ROWS {} | MODIFY_SYM {} @@ -5361,28 +8166,31 @@ keyword: | MULTILINESTRING {} | MULTIPOINT {} | MULTIPOLYGON {} + | MUTEX_SYM {} + | NAME_SYM {} | NAMES_SYM {} | NATIONAL_SYM {} | NCHAR_SYM {} | NDBCLUSTER_SYM {} | NEXT_SYM {} | NEW_SYM {} - | NO_SYM {} | NONE_SYM {} 
| NVARCHAR_SYM {} | OFFSET_SYM {} | OLD_PASSWORD {} | ONE_SHOT_SYM {} - | OPEN_SYM {} + | ONE_SYM {} | PACK_KEYS_SYM {} | PARTIAL {} | PASSWORD {} + | PHASE_SYM {} | POINT_SYM {} | POLYGON {} - | PREPARE_SYM {} | PREV_SYM {} + | PRIVILEGES {} | PROCESS {} | PROCESSLIST_SYM {} + | QUARTER_SYM {} | QUERY_SYM {} | QUICK {} | RAID_0_SYM {} @@ -5390,64 +8198,68 @@ keyword: | RAID_CHUNKSIZE {} | RAID_STRIPED_SYM {} | RAID_TYPE {} + | RECOVER_SYM {} + | REDUNDANT_SYM {} | RELAY_LOG_FILE_SYM {} | RELAY_LOG_POS_SYM {} | RELOAD {} - | REPAIR {} | REPEATABLE_SYM {} | REPLICATION {} - | RESET_SYM {} | RESOURCES {} - | RESTORE_SYM {} - | ROLLBACK_SYM {} + | RESUME_SYM {} + | RETURNS_SYM {} | ROLLUP_SYM {} + | ROUTINE_SYM {} | ROWS_SYM {} | ROW_FORMAT_SYM {} | ROW_SYM {} | RTREE_SYM {} - | SAVEPOINT_SYM {} | SECOND_SYM {} | SERIAL_SYM {} | SERIALIZABLE_SYM {} | SESSION_SYM {} - | SIGNED_SYM {} | SIMPLE_SYM {} | SHARE_SYM {} | SHUTDOWN {} - | SLAVE {} | SNAPSHOT_SYM {} | SOUNDS_SYM {} | SQL_CACHE_SYM {} | SQL_BUFFER_RESULT {} | SQL_NO_CACHE_SYM {} | SQL_THREAD {} - | START_SYM {} | STATUS_SYM {} - | STOP_SYM {} | STORAGE_SYM {} | STRING_SYM {} | SUBDATE_SYM {} | SUBJECT_SYM {} | SUPER_SYM {} + | SUSPEND_SYM {} + | TABLES {} | TABLESPACE {} | TEMPORARY {} + | TEMPTABLE_SYM {} | TEXT_SYM {} | TRANSACTION_SYM {} - | TRUNCATE_SYM {} + | TRIGGERS_SYM {} | TIMESTAMP {} + | TIMESTAMP_ADD {} + | TIMESTAMP_DIFF {} | TIME_SYM {} - | TYPE_SYM {} | TYPES_SYM {} + | TYPE_SYM {} | UDF_RETURNS_SYM {} - | UDF_SYM {} + | FUNCTION_SYM {} | UNCOMMITTED_SYM {} - | UNICODE_SYM {} + | UNDEFINED_SYM {} + | UNKNOWN_SYM {} | UNTIL_SYM {} | USER {} | USE_FRM {} | VARIABLES {} + | VIEW_SYM {} | VALUE_SYM {} | WARNINGS {} + | WEEK_SYM {} | WORK_SYM {} | X509_SYM {} | YEAR_SYM {} @@ -5460,7 +8272,7 @@ set: { LEX *lex=Lex; lex->sql_command= SQLCOM_SET_OPTION; - mysql_init_select(lex); + mysql_init_select(lex); lex->option_type=OPT_SESSION; lex->var_list.empty(); lex->one_shot_set= 0; @@ -5474,25 +8286,92 @@ 
opt_option: | OPTION {}; option_value_list: - option_value_ext - | option_value_list ',' option_value_ext; + option_type_value + | option_value_list ',' option_type_value; -option_value_ext: - option_type_ext sys_option_value {} - | option_type option_value {} - ; +option_type_value: + { + if (Lex->sphead) + { + /* + If we are in SP we want have own LEX for each assignment. + This is mostly because it is hard for several sp_instr_set + and sp_instr_set_trigger instructions share one LEX. + (Well, it is theoretically possible but adds some extra + overhead on preparation for execution stage and IMO less + robust). + + QQ: May be we should simply prohibit group assignments in SP? + */ + LEX *lex; + Lex->sphead->reset_lex(YYTHD); + lex= Lex; -option_type_ext: - option_type {} - | GLOBAL_SYM { Lex->option_type= OPT_GLOBAL; } - | LOCAL_SYM { Lex->option_type= OPT_SESSION; } - | SESSION_SYM { Lex->option_type= OPT_SESSION; } - ; + /* Set new LEX as if we at start of set rule. */ + lex->sql_command= SQLCOM_SET_OPTION; + mysql_init_select(lex); + lex->option_type=OPT_SESSION; + lex->var_list.empty(); + lex->one_shot_set= 0; + lex->sphead->m_tmp_query= lex->tok_start; + } + } + ext_option_value + { + LEX *lex= Lex; + + if (lex->sphead) + { + sp_head *sp= lex->sphead; + + if (!lex->var_list.is_empty()) + { + /* + We have assignment to user or system variable or + option setting, so we should construct sp_instr_stmt + for it. + */ + LEX_STRING qbuff; + sp_instr_stmt *i; + + if (!(i= new sp_instr_stmt(sp->instructions(), lex->spcont, + lex))) + MYSQL_YYABORT; + + /* + Extract the query statement from the tokenizer. The + end is either lex->ptr, if there was no lookahead, + lex->tok_end otherwise. 
+ */ + if (yychar == YYEMPTY) + qbuff.length= lex->ptr - sp->m_tmp_query; + else + qbuff.length= lex->tok_end - sp->m_tmp_query; + + if (!(qbuff.str= alloc_root(YYTHD->mem_root, qbuff.length + 5))) + MYSQL_YYABORT; + + strmake(strmake(qbuff.str, "SET ", 4), (char *)sp->m_tmp_query, + qbuff.length); + qbuff.length+= 4; + i->m_query= qbuff; + sp->add_instr(i); + } + lex->sphead->restore_lex(YYTHD); + } + }; option_type: - /* empty */ {} - | ONE_SHOT_SYM { Lex->option_type= OPT_SESSION; Lex->one_shot_set= 1; } - ; + option_type2 {} + | GLOBAL_SYM { $$=OPT_GLOBAL; } + | LOCAL_SYM { $$=OPT_SESSION; } + | SESSION_SYM { $$=OPT_SESSION; } + ; + +option_type2: + /* empty */ { $$= OPT_DEFAULT; } + | ONE_SHOT_SYM { Lex->one_shot_set= 1; $$= OPT_SESSION; } + ; opt_var_type: /* empty */ { $$=OPT_SESSION; } @@ -5508,37 +8387,113 @@ opt_var_ident_type: | SESSION_SYM '.' { $$=OPT_SESSION; } ; +ext_option_value: + sys_option_value + | option_type2 option_value; + sys_option_value: - internal_variable_name equal set_expr_or_default + option_type internal_variable_name equal set_expr_or_default { LEX *lex=Lex; - lex->var_list.push_back(new set_var(lex->option_type, $1.var, - &$1.base_name, $3)); + + if ($2.var == &trg_new_row_fake_var) + { + /* We are in trigger and assigning value to field of new row */ + Item *it; + Item_trigger_field *trg_fld; + sp_instr_set_trigger_field *sp_fld; + LINT_INIT(sp_fld); + if ($1) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + if ($4) + it= $4; + else + { + /* QQ: Shouldn't this be field's default value ? 
*/ + it= new Item_null(); + } + + DBUG_ASSERT(lex->trg_chistics.action_time == TRG_ACTION_BEFORE && + (lex->trg_chistics.event == TRG_EVENT_INSERT || + lex->trg_chistics.event == TRG_EVENT_UPDATE)); + if (!(trg_fld= new Item_trigger_field(Lex->current_context(), + Item_trigger_field::NEW_ROW, + $2.base_name.str, + UPDATE_ACL, FALSE)) || + !(sp_fld= new sp_instr_set_trigger_field(lex->sphead-> + instructions(), + lex->spcont, + trg_fld, + it, lex))) + MYSQL_YYABORT; + + /* + Let us add this item to list of all Item_trigger_field + objects in trigger. + */ + lex->trg_table_fields.link_in_list((byte *)trg_fld, + (byte **)&trg_fld->next_trg_field); + + lex->sphead->add_instr(sp_fld); + } + else if ($2.var) + { /* System variable */ + if ($1) + lex->option_type= $1; + lex->var_list.push_back(new set_var(lex->option_type, $2.var, + &$2.base_name, $4)); + } + else + { + /* An SP local variable */ + sp_pcontext *ctx= lex->spcont; + sp_variable_t *spv; + sp_instr_set *sp_set; + Item *it; + if ($1) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + + spv= ctx->find_variable(&$2.base_name); + + if ($4) + it= $4; + else if (spv->dflt) + it= spv->dflt; + else + it= new Item_null(); + sp_set= new sp_instr_set(lex->sphead->instructions(), ctx, + spv->offset, it, spv->type, lex, TRUE); + lex->sphead->add_instr(sp_set); + } } - | TRANSACTION_SYM ISOLATION LEVEL_SYM isolation_types - { - LEX *lex=Lex; - LEX_STRING tmp; - tmp.str=0; - tmp.length=0; - lex->var_list.push_back(new set_var(lex->option_type, + | option_type TRANSACTION_SYM ISOLATION LEVEL_SYM isolation_types + { + LEX *lex=Lex; + if ($1) + lex->option_type= $1; + lex->var_list.push_back(new set_var(lex->option_type, find_sys_var("tx_isolation"), - &tmp, - new Item_int((int32) $4))); - } + &null_lex_str, + new Item_int((int32) $5))); + } ; option_value: '@' ident_or_text equal expr { - Lex->var_list.push_back(new set_var_user(new Item_func_set_user_var($2,$4))); + Lex->var_list.push_back(new set_var_user(new 
Item_func_set_user_var($2,$4))); } | '@' '@' opt_var_ident_type internal_variable_name equal set_expr_or_default - { - LEX *lex=Lex; - lex->var_list.push_back(new set_var((enum_var_type) $3, $4.var, - &$4.base_name, $6)); - } + { + LEX *lex=Lex; + lex->var_list.push_back(new set_var($3, $4.var, &$4.base_name, $6)); + } | charset old_or_new_charset_name_or_default { THD *thd= YYTHD; @@ -5546,16 +8501,31 @@ option_value: $2= $2 ? $2: global_system_variables.character_set_client; lex->var_list.push_back(new set_var_collation_client($2,thd->variables.collation_database,$2)); } + | NAMES_SYM equal expr + { + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + LEX_STRING names; + + names.str= (char *)"names"; + names.length= 5; + if (spc && spc->find_variable(&names)) + my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), names.str); + else + my_parse_error(ER(ER_SYNTAX_ERROR)); + + MYSQL_YYABORT; + } | NAMES_SYM charset_name_or_default opt_collate { - THD *thd= YYTHD; LEX *lex= Lex; $2= $2 ? $2 : global_system_variables.character_set_client; $3= $3 ? 
$3 : $2; if (!my_charset_same($2,$3)) { - net_printf(thd,ER_COLLATION_CHARSET_MISMATCH,$3->name,$2->csname); - YYABORT; + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + $3->name, $2->csname); + MYSQL_YYABORT; } lex->var_list.push_back(new set_var_collation_client($3,$3,$3)); } @@ -5563,10 +8533,21 @@ option_value: { THD *thd=YYTHD; LEX_USER *user; + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + LEX_STRING pw; + + pw.str= (char *)"password"; + pw.length= 8; + if (spc && spc->find_variable(&pw)) + { + my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), pw.str); + MYSQL_YYABORT; + } if (!(user=(LEX_USER*) thd->alloc(sizeof(LEX_USER)))) - YYABORT; - user->host.str=0; - user->user.str=thd->priv_user; + MYSQL_YYABORT; + user->host=null_lex_str; + user->user.str=thd->security_ctx->priv_user; thd->lex->var_list.push_back(new set_var_password(user, $3)); } | PASSWORD FOR_SYM user equal text_or_password @@ -5578,41 +8559,92 @@ option_value: internal_variable_name: ident { - sys_var *tmp=find_sys_var($1.str, $1.length); - if (!tmp) - YYABORT; - $$.var= tmp; - $$.base_name.str=0; - $$.base_name.length=0; - /* - If this is time_zone variable we should open time zone - describing tables - */ - if (tmp == &sys_time_zone) - Lex->time_zone_tables_used= &fake_time_zone_tables_list; + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + sp_variable_t *spv; + + /* We have to lookup here since local vars can shadow sysvars */ + if (!spc || !(spv = spc->find_variable(&$1))) + { + /* Not an SP local variable */ + sys_var *tmp=find_sys_var($1.str, $1.length); + if (!tmp) + MYSQL_YYABORT; + $$.var= tmp; + $$.base_name= null_lex_str; + /* + If this is time_zone variable we should open time zone + describing tables + */ + if (tmp == &sys_time_zone && + lex->add_time_zone_tables_to_query_tables(YYTHD)) + MYSQL_YYABORT; + else if (spc && tmp == &sys_autocommit) + { + /* + We don't allow setting AUTOCOMMIT from a stored function + or trigger. 
+ */ + lex->sphead->m_flags|= sp_head::HAS_SET_AUTOCOMMIT_STMT; + } + } + else + { + /* An SP local variable */ + $$.var= NULL; + $$.base_name= $1; + } } | ident '.' ident { + LEX *lex= Lex; if (check_reserved_words(&$1)) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + if (lex->sphead && lex->sphead->m_type == TYPE_ENUM_TRIGGER && + (!my_strcasecmp(system_charset_info, $1.str, "NEW") || + !my_strcasecmp(system_charset_info, $1.str, "OLD"))) + { + if ($1.str[0]=='O' || $1.str[0]=='o') + { + my_error(ER_TRG_CANT_CHANGE_ROW, MYF(0), "OLD", ""); + MYSQL_YYABORT; + } + if (lex->trg_chistics.event == TRG_EVENT_DELETE) + { + my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), + "NEW", "on DELETE"); + MYSQL_YYABORT; + } + if (lex->trg_chistics.action_time == TRG_ACTION_AFTER) + { + my_error(ER_TRG_CANT_CHANGE_ROW, MYF(0), "NEW", "after "); + MYSQL_YYABORT; + } + /* This special combination will denote field of NEW row */ + $$.var= &trg_new_row_fake_var; + $$.base_name= $3; + } + else + { + sys_var *tmp=find_sys_var($3.str, $3.length); + if (!tmp) + MYSQL_YYABORT; + if (!tmp->is_struct()) + my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), $3.str); + $$.var= tmp; + $$.base_name= $1; } - sys_var *tmp=find_sys_var($3.str, $3.length); - if (!tmp) - YYABORT; - if (!tmp->is_struct()) - net_printf(YYTHD, ER_VARIABLE_IS_NOT_STRUCT, $3.str); - $$.var= tmp; - $$.base_name= $1; } | DEFAULT '.' 
ident { sys_var *tmp=find_sys_var($3.str, $3.length); if (!tmp) - YYABORT; + MYSQL_YYABORT; if (!tmp->is_struct()) - net_printf(YYTHD, ER_VARIABLE_IS_NOT_STRUCT, $3.str); + my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), $3.str); $$.var= tmp; $$.base_name.str= (char*) "default"; $$.base_name.length= 7; @@ -5657,7 +8689,14 @@ set_expr_or_default: lock: LOCK_SYM table_or_tables { - Lex->sql_command=SQLCOM_LOCK_TABLES; + LEX *lex= Lex; + + if (lex->sphead) + { + my_error(ER_SP_BADSTATEMENT, MYF(0), "LOCK"); + MYSQL_YYABORT; + } + lex->sql_command= SQLCOM_LOCK_TABLES; } table_lock_list {} @@ -5675,7 +8714,7 @@ table_lock: table_ident opt_table_alias lock_option { if (!Select->add_table_to_list(YYTHD, $1, $2, 0, (thr_lock_type) $3)) - YYABORT; + MYSQL_YYABORT; } ; @@ -5687,7 +8726,19 @@ lock_option: ; unlock: - UNLOCK_SYM table_or_tables { Lex->sql_command=SQLCOM_UNLOCK_TABLES; } + UNLOCK_SYM + { + LEX *lex= Lex; + + if (lex->sphead) + { + my_error(ER_SP_BADSTATEMENT, MYF(0), "UNLOCK"); + MYSQL_YYABORT; + } + lex->sql_command= SQLCOM_UNLOCK_TABLES; + } + table_or_tables + {} ; @@ -5699,33 +8750,48 @@ handler: HANDLER_SYM table_ident OPEN_SYM opt_table_alias { LEX *lex= Lex; + if (lex->sphead) + { + my_error(ER_SP_BADSTATEMENT, MYF(0), "HANDLER"); + MYSQL_YYABORT; + } lex->sql_command = SQLCOM_HA_OPEN; if (!lex->current_select->add_table_to_list(lex->thd, $2, $4, 0)) - YYABORT; + MYSQL_YYABORT; } | HANDLER_SYM table_ident_nodb CLOSE_SYM { LEX *lex= Lex; + if (lex->sphead) + { + my_error(ER_SP_BADSTATEMENT, MYF(0), "HANDLER"); + MYSQL_YYABORT; + } lex->sql_command = SQLCOM_HA_CLOSE; if (!lex->current_select->add_table_to_list(lex->thd, $2, 0, 0)) - YYABORT; + MYSQL_YYABORT; } | HANDLER_SYM table_ident_nodb READ_SYM { LEX *lex=Lex; + if (lex->sphead) + { + my_error(ER_SP_BADSTATEMENT, MYF(0), "HANDLER"); + MYSQL_YYABORT; + } lex->sql_command = SQLCOM_HA_READ; lex->ha_rkey_mode= HA_READ_KEY_EXACT; /* Avoid purify warnings */ - lex->current_select->select_limit= 1; - 
lex->current_select->offset_limit= 0L; + lex->current_select->select_limit= new Item_int((int32) 1); + lex->current_select->offset_limit= 0; if (!lex->current_select->add_table_to_list(lex->thd, $2, 0, 0)) - YYABORT; + MYSQL_YYABORT; } handler_read_or_scan where_clause opt_limit_clause {} ; handler_read_or_scan: - handler_scan_function { Lex->backup_dir= 0; } - | ident handler_rkey_function { Lex->backup_dir= $1.str; } + handler_scan_function { Lex->ident= null_lex_str; } + | ident handler_rkey_function { Lex->ident= $1; } ; handler_scan_function: @@ -5744,7 +8810,7 @@ handler_rkey_function: lex->ha_read_mode = RKEY; lex->ha_rkey_mode=$1; if (!(lex->insert_list = new List_item)) - YYABORT; + MYSQL_YYABORT; } '(' values ')' { } ; @@ -5759,53 +8825,101 @@ handler_rkey_mode: /* GRANT / REVOKE */ revoke: - REVOKE - { - LEX *lex=Lex; - lex->sql_command = SQLCOM_REVOKE; - lex->users_list.empty(); - lex->columns.empty(); - lex->grant= lex->grant_tot_col=0; - lex->select_lex.db=0; - lex->ssl_type= SSL_TYPE_NOT_SPECIFIED; - lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0; - bzero((char*) &lex->mqh, sizeof(lex->mqh)); - } - revoke_command + REVOKE clear_privileges revoke_command {} ; revoke_command: - grant_privileges ON opt_table FROM user_list - {} + grant_privileges ON opt_table grant_ident FROM grant_list + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_REVOKE; + lex->type= 0; + } + | + grant_privileges ON FUNCTION_SYM grant_ident FROM grant_list + { + LEX *lex= Lex; + if (lex->columns.elements) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + lex->sql_command= SQLCOM_REVOKE; + lex->type= TYPE_ENUM_FUNCTION; + + } + | + grant_privileges ON PROCEDURE grant_ident FROM grant_list + { + LEX *lex= Lex; + if (lex->columns.elements) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + lex->sql_command= SQLCOM_REVOKE; + lex->type= TYPE_ENUM_PROCEDURE; + } | - ALL opt_privileges ',' GRANT OPTION FROM user_list + ALL opt_privileges ',' GRANT 
OPTION FROM grant_list { Lex->sql_command = SQLCOM_REVOKE_ALL; } ; grant: - GRANT + GRANT clear_privileges grant_command + {} + ; + +grant_command: + grant_privileges ON opt_table grant_ident TO_SYM grant_list + require_clause grant_options { - LEX *lex=Lex; - lex->users_list.empty(); - lex->columns.empty(); - lex->sql_command = SQLCOM_GRANT; - lex->grant= lex->grant_tot_col= 0; - lex->select_lex.db= 0; - lex->ssl_type= SSL_TYPE_NOT_SPECIFIED; - lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0; - bzero((char *)&(lex->mqh),sizeof(lex->mqh)); - } - grant_privileges ON opt_table TO_SYM user_list + LEX *lex= Lex; + lex->sql_command= SQLCOM_GRANT; + lex->type= 0; + } + | + grant_privileges ON FUNCTION_SYM grant_ident TO_SYM grant_list require_clause grant_options - {} - ; + { + LEX *lex= Lex; + if (lex->columns.elements) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + lex->sql_command= SQLCOM_GRANT; + lex->type= TYPE_ENUM_FUNCTION; + } + | + grant_privileges ON PROCEDURE grant_ident TO_SYM grant_list + require_clause grant_options + { + LEX *lex= Lex; + if (lex->columns.elements) + { + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; + } + lex->sql_command= SQLCOM_GRANT; + lex->type= TYPE_ENUM_PROCEDURE; + } + ; +opt_table: + /* Empty */ + | TABLE_SYM ; + grant_privileges: - grant_privilege_list {} - | ALL opt_privileges { Lex->grant = GLOBAL_ACLS;} + object_privilege_list { } + | ALL opt_privileges + { + Lex->all_privileges= 1; + Lex->grant= GLOBAL_ACLS; + } ; opt_privileges: @@ -5813,11 +8927,11 @@ opt_privileges: | PRIVILEGES ; -grant_privilege_list: - grant_privilege - | grant_privilege_list ',' grant_privilege; +object_privilege_list: + object_privilege + | object_privilege_list ',' object_privilege; -grant_privilege: +object_privilege: SELECT_SYM { Lex->which_columns = SELECT_ACL;} opt_column_list {} | INSERT { Lex->which_columns = INSERT_ACL;} opt_column_list {} | UPDATE_SYM { Lex->which_columns = UPDATE_ACL; } opt_column_list {} @@ 
-5838,8 +8952,13 @@ grant_privilege: | SUPER_SYM { Lex->grant |= SUPER_ACL;} | CREATE TEMPORARY TABLES { Lex->grant |= CREATE_TMP_ACL;} | LOCK_SYM TABLES { Lex->grant |= LOCK_TABLES_ACL; } - | REPLICATION SLAVE { Lex->grant |= REPL_SLAVE_ACL;} - | REPLICATION CLIENT_SYM { Lex->grant |= REPL_CLIENT_ACL;} + | REPLICATION SLAVE { Lex->grant |= REPL_SLAVE_ACL; } + | REPLICATION CLIENT_SYM { Lex->grant |= REPL_CLIENT_ACL; } + | CREATE VIEW_SYM { Lex->grant |= CREATE_VIEW_ACL; } + | SHOW VIEW_SYM { Lex->grant |= SHOW_VIEW_ACL; } + | CREATE ROUTINE_SYM { Lex->grant |= CREATE_PROC_ACL; } + | ALTER ROUTINE_SYM { Lex->grant |= ALTER_PROC_ACL; } + | CREATE USER { Lex->grant |= CREATE_USER_ACL; } ; @@ -5859,8 +8978,8 @@ require_list_element: LEX *lex=Lex; if (lex->x509_subject) { - net_printf(lex->thd,ER_DUP_ARGUMENT, "SUBJECT"); - YYABORT; + my_error(ER_DUP_ARGUMENT, MYF(0), "SUBJECT"); + MYSQL_YYABORT; } lex->x509_subject=$2.str; } @@ -5869,8 +8988,8 @@ require_list_element: LEX *lex=Lex; if (lex->x509_issuer) { - net_printf(lex->thd,ER_DUP_ARGUMENT, "ISSUER"); - YYABORT; + my_error(ER_DUP_ARGUMENT, MYF(0), "ISSUER"); + MYSQL_YYABORT; } lex->x509_issuer=$2.str; } @@ -5879,24 +8998,27 @@ require_list_element: LEX *lex=Lex; if (lex->ssl_cipher) { - net_printf(lex->thd,ER_DUP_ARGUMENT, "CIPHER"); - YYABORT; + my_error(ER_DUP_ARGUMENT, MYF(0), "CIPHER"); + MYSQL_YYABORT; } lex->ssl_cipher=$2.str; } ; -opt_table: +grant_ident: '*' { LEX *lex= Lex; - lex->current_select->db= lex->thd->db; + THD *thd= lex->thd; + if (thd->copy_db_to(&lex->current_select->db, NULL)) + MYSQL_YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; else if (lex->columns.elements) { - send_error(lex->thd,ER_ILLEGAL_GRANT_FOR_TABLE); - YYABORT; + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, + ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0)); + MYSQL_YYABORT; } } | ident '.' 
'*' @@ -5907,8 +9029,9 @@ opt_table: lex->grant = DB_ACLS & ~GRANT_ACL; else if (lex->columns.elements) { - send_error(lex->thd,ER_ILLEGAL_GRANT_FOR_TABLE); - YYABORT; + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, + ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0)); + MYSQL_YYABORT; } } | '*' '.' '*' @@ -5919,15 +9042,16 @@ opt_table: lex->grant= GLOBAL_ACLS & ~GRANT_ACL; else if (lex->columns.elements) { - send_error(lex->thd,ER_ILLEGAL_GRANT_FOR_TABLE); - YYABORT; + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, + ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0)); + MYSQL_YYABORT; } } | table_ident { LEX *lex=Lex; if (!lex->current_select->add_table_to_list(lex->thd, $1,NULL,0)) - YYABORT; + MYSQL_YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = TABLE_ACLS & ~GRANT_ACL; } @@ -5935,11 +9059,21 @@ opt_table: user_list: - grant_user { if (Lex->users_list.push_back($1)) YYABORT;} - | user_list ',' grant_user + user { if (Lex->users_list.push_back($1)) MYSQL_YYABORT;} + | user_list ',' user + { + if (Lex->users_list.push_back($3)) + MYSQL_YYABORT; + } + ; + + +grant_list: + grant_user { if (Lex->users_list.push_back($1)) MYSQL_YYABORT;} + | grant_list ',' grant_user { if (Lex->users_list.push_back($3)) - YYABORT; + MYSQL_YYABORT; } ; @@ -5971,9 +9105,9 @@ grant_user: } } | user IDENTIFIED_SYM BY PASSWORD TEXT_STRING - { $$=$1; $1->password=$5 ; } + { $$= $1; $1->password= $5; } | user - { $$=$1; $1->password.str=NullS; } + { $$= $1; $1->password= null_lex_str; } ; @@ -6041,52 +9175,109 @@ grant_option_list: grant_option: GRANT OPTION { Lex->grant |= GRANT_ACL;} - | MAX_QUERIES_PER_HOUR ULONG_NUM + | MAX_QUERIES_PER_HOUR ulong_num { - Lex->mqh.questions=$2; - Lex->mqh.bits |= 1; + LEX *lex=Lex; + lex->mqh.questions=$2; + lex->mqh.specified_limits|= USER_RESOURCES::QUERIES_PER_HOUR; } - | MAX_UPDATES_PER_HOUR ULONG_NUM + | MAX_UPDATES_PER_HOUR ulong_num { - Lex->mqh.updates=$2; - Lex->mqh.bits |= 2; + LEX *lex=Lex; + lex->mqh.updates=$2; + lex->mqh.specified_limits|= 
USER_RESOURCES::UPDATES_PER_HOUR; } - | MAX_CONNECTIONS_PER_HOUR ULONG_NUM + | MAX_CONNECTIONS_PER_HOUR ulong_num { - Lex->mqh.connections=$2; - Lex->mqh.bits |= 4; + LEX *lex=Lex; + lex->mqh.conn_per_hour= $2; + lex->mqh.specified_limits|= USER_RESOURCES::CONNECTIONS_PER_HOUR; + } + | MAX_USER_CONNECTIONS_SYM ulong_num + { + LEX *lex=Lex; + lex->mqh.user_conn= $2; + lex->mqh.specified_limits|= USER_RESOURCES::USER_CONNECTIONS; } ; begin: - BEGIN_SYM { Lex->sql_command = SQLCOM_BEGIN; Lex->start_transaction_opt= 0;} opt_work {} + BEGIN_SYM + { + LEX *lex=Lex; + lex->sql_command = SQLCOM_BEGIN; + lex->start_transaction_opt= 0; + } + opt_work {} ; opt_work: /* empty */ {} - | WORK_SYM {;} + | WORK_SYM {} ; +opt_chain: + /* empty */ { $$= (YYTHD->variables.completion_type == 1); } + | AND_SYM NO_SYM CHAIN_SYM { $$=0; } + | AND_SYM CHAIN_SYM { $$=1; } + ; + +opt_release: + /* empty */ { $$= (YYTHD->variables.completion_type == 2); } + | RELEASE_SYM { $$=1; } + | NO_SYM RELEASE_SYM { $$=0; } + ; + +opt_savepoint: + /* empty */ {} + | SAVEPOINT_SYM {} + ; + commit: - COMMIT_SYM { Lex->sql_command = SQLCOM_COMMIT;}; + COMMIT_SYM opt_work opt_chain opt_release + { + LEX *lex=Lex; + lex->sql_command= SQLCOM_COMMIT; + lex->tx_chain= $3; + lex->tx_release= $4; + } + ; rollback: - ROLLBACK_SYM + ROLLBACK_SYM opt_work opt_chain opt_release { - Lex->sql_command = SQLCOM_ROLLBACK; + LEX *lex=Lex; + lex->sql_command= SQLCOM_ROLLBACK; + lex->tx_chain= $3; + lex->tx_release= $4; } - | ROLLBACK_SYM TO_SYM SAVEPOINT_SYM ident + | ROLLBACK_SYM opt_work + TO_SYM opt_savepoint ident { - Lex->sql_command = SQLCOM_ROLLBACK_TO_SAVEPOINT; - Lex->savepoint_name = $4.str; - }; + LEX *lex=Lex; + lex->sql_command= SQLCOM_ROLLBACK_TO_SAVEPOINT; + lex->ident= $5; + } + ; + savepoint: SAVEPOINT_SYM ident { - Lex->sql_command = SQLCOM_SAVEPOINT; - Lex->savepoint_name = $2.str; - }; + LEX *lex=Lex; + lex->sql_command= SQLCOM_SAVEPOINT; + lex->ident= $2; + } + ; +release: + RELEASE_SYM SAVEPOINT_SYM 
ident + { + LEX *lex=Lex; + lex->sql_command= SQLCOM_RELEASE_SAVEPOINT; + lex->ident= $3; + } + ; + /* UNIONS : glue selects together */ @@ -6101,36 +9292,44 @@ union_list: UNION_SYM union_option { LEX *lex=Lex; - if (lex->exchange) + if (lex->result) { /* Only the last SELECT can have INTO...... */ - net_printf(lex->thd, ER_WRONG_USAGE, "UNION", "INTO"); - YYABORT; + my_error(ER_WRONG_USAGE, MYF(0), "UNION", "INTO"); + MYSQL_YYABORT; } if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } + /* This counter shouldn't be incremented for UNION parts */ + Lex->nest_level--; if (mysql_new_select(lex, 0)) - YYABORT; + MYSQL_YYABORT; mysql_init_select(lex); lex->current_select->linkage=UNION_TYPE; if ($2) /* UNION DISTINCT - remember position */ lex->current_select->master_unit()->union_distinct= lex->current_select; } - select_init {} + select_init + { + /* + Remove from the name resolution context stack the context of the + last select in the union. 
+ */ + Lex->pop_context(); + } ; union_opt: - union_list {} - | optional_order_or_limit {} + /* Empty */ { $$= 0; } + | union_list { $$= 1; } + | union_order_or_limit { $$= 1; } ; -optional_order_or_limit: - /* Empty */ {} - | +union_order_or_limit: { THD *thd= YYTHD; LEX *lex= thd->lex; @@ -6165,65 +9364,475 @@ union_option: | ALL { $$=0; } ; -singlerow_subselect: - subselect_start singlerow_subselect_init - subselect_end - { - $$= $2; - }; - -singlerow_subselect_init: - select_init2 - { - $$= new Item_singlerow_subselect(Lex->current_select-> - master_unit()->first_select()); - }; - -exists_subselect: - subselect_start exists_subselect_init - subselect_end - { - $$= $2; - }; - -exists_subselect_init: - select_init2 - { - $$= new Item_exists_subselect(Lex->current_select->master_unit()-> - first_select()); - }; - -in_subselect: - subselect_start in_subselect_init - subselect_end - { - $$= $2; - }; +subselect: + SELECT_SYM subselect_start subselect_init subselect_end + { + $$= $3; + } + | '(' subselect_start subselect ')' + { + THD *thd= YYTHD; + /* + note that a local variable can't be used for + $3 as it's used in local variable construction + and some compilers can't guarnatee the order + in which the local variables are initialized. + */ + List_iterator<Item> it($3->item_list); + Item *item; + /* + we must fill the items list for the "derived table". + */ + while ((item= it++)) + add_item_to_list(thd, item); + } + union_clause subselect_end { $$= $3; }; -in_subselect_init: +subselect_init: select_init2 { $$= Lex->current_select->master_unit()->first_select(); }; subselect_start: - '(' SELECT_SYM { LEX *lex=Lex; if (lex->sql_command == (int)SQLCOM_HA_READ || lex->sql_command == (int)SQLCOM_KILL) { - yyerror(ER(ER_SYNTAX_ERROR)); - YYABORT; + my_parse_error(ER(ER_SYNTAX_ERROR)); + MYSQL_YYABORT; } + /* + we are making a "derived table" for the parenthesis + as we need to have a lex level to fit the union + after the parenthesis, e.g. + (SELECT .. ) UNION ... 
becomes + SELECT * FROM ((SELECT ...) UNION ...) + */ if (mysql_new_select(Lex, 1)) - YYABORT; + MYSQL_YYABORT; }; subselect_end: - ')' { LEX *lex=Lex; + lex->pop_context(); + SELECT_LEX *child= lex->current_select; lex->current_select = lex->current_select->return_after_parsing(); + lex->nest_level--; + lex->current_select->n_child_sum_items += child->n_sum_items; + /* + A subselect can add fields to an outer select. Reserve space for + them. + */ + lex->current_select->select_n_where_fields+= + child->select_n_where_fields; }; +/************************************************************************** + + CREATE VIEW | TRIGGER | PROCEDURE statements. + +**************************************************************************/ + +view_or_trigger_or_sp: + definer view_or_trigger_or_sp_tail + {} + | view_replace_or_algorithm definer view_tail + {} + ; + +view_or_trigger_or_sp_tail: + view_tail + {} + | trigger_tail + {} + | sp_tail + {} + ; + +/************************************************************************** + + DEFINER clause support. + +**************************************************************************/ + +definer: + /* empty */ + { + /* + We have to distinguish missing DEFINER-clause from case when + CURRENT_USER specified as definer explicitly in order to properly + handle CREATE TRIGGER statements which come to replication thread + from older master servers (i.e. to create non-suid trigger in this + case). + */ + YYTHD->lex->definer= 0; + } + | DEFINER_SYM EQ user + { + YYTHD->lex->definer= get_current_user(YYTHD, $3); + } + ; + +/************************************************************************** + + CREATE VIEW statement parts. 
+ +**************************************************************************/ + +view_replace_or_algorithm: + view_replace + {} + | view_replace view_algorithm + {} + | view_algorithm + {} + ; + +view_replace: + OR_SYM REPLACE + { Lex->create_view_mode= VIEW_CREATE_OR_REPLACE; } + ; + +view_algorithm: + ALGORITHM_SYM EQ UNDEFINED_SYM + { Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; } + | ALGORITHM_SYM EQ MERGE_SYM + { Lex->create_view_algorithm= VIEW_ALGORITHM_MERGE; } + | ALGORITHM_SYM EQ TEMPTABLE_SYM + { Lex->create_view_algorithm= VIEW_ALGORITHM_TMPTABLE; } + ; + +view_algorithm_opt: + /* empty */ + { Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; } + | view_algorithm + {} + ; + +view_suid: + /* empty */ + { Lex->create_view_suid= VIEW_SUID_DEFAULT; } + | SQL_SYM SECURITY_SYM DEFINER_SYM + { Lex->create_view_suid= VIEW_SUID_DEFINER; } + | SQL_SYM SECURITY_SYM INVOKER_SYM + { Lex->create_view_suid= VIEW_SUID_INVOKER; } + ; + +view_tail: + view_suid VIEW_SYM table_ident + { + THD *thd= YYTHD; + LEX *lex= thd->lex; + lex->sql_command= SQLCOM_CREATE_VIEW; + /* first table in list is target VIEW name */ + if (!lex->select_lex.add_table_to_list(thd, $3, NULL, TL_OPTION_UPDATING)) + MYSQL_YYABORT; + } + view_list_opt AS view_select view_check_option + {} + ; + +view_list_opt: + /* empty */ + {} + | '(' view_list ')' + ; + +view_list: + ident + { + Lex->view_list.push_back((LEX_STRING*) + sql_memdup(&$1, sizeof(LEX_STRING))); + } + | view_list ',' ident + { + Lex->view_list.push_back((LEX_STRING*) + sql_memdup(&$3, sizeof(LEX_STRING))); + } + ; + +view_select: + { + LEX *lex= Lex; + lex->parsing_options.allows_variable= FALSE; + lex->parsing_options.allows_select_into= FALSE; + lex->parsing_options.allows_select_procedure= FALSE; + lex->parsing_options.allows_derived= FALSE; + } + view_select_aux + { + LEX *lex= Lex; + lex->parsing_options.allows_variable= TRUE; + lex->parsing_options.allows_select_into= TRUE; + 
lex->parsing_options.allows_select_procedure= TRUE; + lex->parsing_options.allows_derived= TRUE; + } + ; + +view_select_aux: + SELECT_SYM remember_name select_init2 + { + THD *thd=YYTHD; + LEX *lex= thd->lex; + char *stmt_beg= (lex->sphead ? + (char *)lex->sphead->m_tmp_query : + thd->query); + lex->create_view_select_start= $2 - stmt_beg; + } + | '(' remember_name select_paren ')' union_opt + { + THD *thd=YYTHD; + LEX *lex= thd->lex; + char *stmt_beg= (lex->sphead ? + (char *)lex->sphead->m_tmp_query : + thd->query); + lex->create_view_select_start= $2 - stmt_beg; + } + ; + +view_check_option: + /* empty */ + { Lex->create_view_check= VIEW_CHECK_NONE; } + | WITH CHECK_SYM OPTION + { Lex->create_view_check= VIEW_CHECK_CASCADED; } + | WITH CASCADED CHECK_SYM OPTION + { Lex->create_view_check= VIEW_CHECK_CASCADED; } + | WITH LOCAL_SYM CHECK_SYM OPTION + { Lex->create_view_check= VIEW_CHECK_LOCAL; } + ; + +/************************************************************************** + + CREATE TRIGGER statement parts. + +**************************************************************************/ + +trigger_tail: + TRIGGER_SYM remember_name sp_name trg_action_time trg_event + ON remember_name table_ident FOR_SYM remember_name EACH_SYM ROW_SYM + { + LEX *lex= Lex; + sp_head *sp; + + if (lex->sphead) + { + my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "TRIGGER"); + MYSQL_YYABORT; + } + + if (!(sp= new sp_head())) + MYSQL_YYABORT; + sp->reset_thd_mem_root(YYTHD); + sp->init(lex); + sp->init_sp_name(YYTHD, $3); + + lex->stmt_definition_begin= $2; + lex->ident.str= $7; + lex->ident.length= $10 - $7; + + sp->m_type= TYPE_ENUM_TRIGGER; + lex->sphead= sp; + lex->spname= $3; + /* + We have to turn of CLIENT_MULTI_QUERIES while parsing a + stored procedure, otherwise yylex will chop it into pieces + at each ';'. 
+ */ + sp->m_old_cmq= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES; + YYTHD->client_capabilities &= ~CLIENT_MULTI_QUERIES; + + bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); + lex->sphead->m_chistics= &lex->sp_chistics; + lex->sphead->m_body_begin= lex->ptr; + while (my_isspace(system_charset_info, lex->sphead->m_body_begin[0])) + ++lex->sphead->m_body_begin; + } + sp_proc_stmt + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + lex->sql_command= SQLCOM_CREATE_TRIGGER; + sp->init_strings(YYTHD, lex); + /* Restore flag if it was cleared above */ + if (sp->m_old_cmq) + YYTHD->client_capabilities |= CLIENT_MULTI_QUERIES; + sp->restore_thd_mem_root(YYTHD); + + if (sp->is_not_allowed_in_function("trigger")) + MYSQL_YYABORT; + + /* + We have to do it after parsing trigger body, because some of + sp_proc_stmt alternatives are not saving/restoring LEX, so + lex->query_tables can be wiped out. + */ + if (!lex->select_lex.add_table_to_list(YYTHD, $8, + (LEX_STRING*) 0, + TL_OPTION_UPDATING, + TL_IGNORE)) + MYSQL_YYABORT; + } + ; + +/************************************************************************** + + CREATE FUNCTION | PROCEDURE statements parts. 
+ +**************************************************************************/ + +sp_tail: + udf_func_type remember_name FUNCTION_SYM sp_name + { + LEX *lex=Lex; + lex->udf.type= $1; + lex->stmt_definition_begin= $2; + lex->spname= $4; + } + create_function_tail + {} + | PROCEDURE remember_name sp_name + { + LEX *lex= Lex; + sp_head *sp; + + if (lex->sphead) + { + my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "PROCEDURE"); + MYSQL_YYABORT; + } + + lex->stmt_definition_begin= $2; + + /* Order is important here: new - reset - init */ + sp= new sp_head(); + sp->reset_thd_mem_root(YYTHD); + sp->init(lex); + sp->init_sp_name(YYTHD, $3); + + sp->m_type= TYPE_ENUM_PROCEDURE; + lex->sphead= sp; + /* + * We have to turn of CLIENT_MULTI_QUERIES while parsing a + * stored procedure, otherwise yylex will chop it into pieces + * at each ';'. + */ + sp->m_old_cmq= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES; + YYTHD->client_capabilities &= (~CLIENT_MULTI_QUERIES); + } + '(' + { + LEX *lex= Lex; + + lex->sphead->m_param_begin= lex->tok_start+1; + } + sp_pdparam_list + ')' + { + LEX *lex= Lex; + + lex->sphead->m_param_end= lex->tok_start; + bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); + } + sp_c_chistics + { + LEX *lex= Lex; + + lex->sphead->m_chistics= &lex->sp_chistics; + lex->sphead->m_body_begin= lex->tok_start; + } + sp_proc_stmt + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + sp->init_strings(YYTHD, lex); + lex->sql_command= SQLCOM_CREATE_PROCEDURE; + /* Restore flag if it was cleared above */ + if (sp->m_old_cmq) + YYTHD->client_capabilities |= CLIENT_MULTI_QUERIES; + sp->restore_thd_mem_root(YYTHD); + } + ; + +/*************************************************************************/ + +xa: XA_SYM begin_or_start xid opt_join_or_resume + { + Lex->sql_command = SQLCOM_XA_START; + } + | XA_SYM END xid opt_suspend + { + Lex->sql_command = SQLCOM_XA_END; + } + | XA_SYM PREPARE_SYM xid + { + Lex->sql_command = SQLCOM_XA_PREPARE; + } + | XA_SYM COMMIT_SYM 
xid opt_one_phase + { + Lex->sql_command = SQLCOM_XA_COMMIT; + } + | XA_SYM ROLLBACK_SYM xid + { + Lex->sql_command = SQLCOM_XA_ROLLBACK; + } + | XA_SYM RECOVER_SYM + { + Lex->sql_command = SQLCOM_XA_RECOVER; + } + ; + +xid: text_string + { + MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE); + if (!(Lex->xid=(XID *)YYTHD->alloc(sizeof(XID)))) + MYSQL_YYABORT; + Lex->xid->set(1L, $1->ptr(), $1->length(), 0, 0); + } + | text_string ',' text_string + { + MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE); + if (!(Lex->xid=(XID *)YYTHD->alloc(sizeof(XID)))) + MYSQL_YYABORT; + Lex->xid->set(1L, $1->ptr(), $1->length(), $3->ptr(), $3->length()); + } + | text_string ',' text_string ',' ulong_num + { + MYSQL_YYABORT_UNLESS($1->length() <= MAXGTRIDSIZE && $3->length() <= MAXBQUALSIZE); + if (!(Lex->xid=(XID *)YYTHD->alloc(sizeof(XID)))) + MYSQL_YYABORT; + Lex->xid->set($5, $1->ptr(), $1->length(), $3->ptr(), $3->length()); + } + ; + +begin_or_start: BEGIN_SYM {} + | START_SYM {} + ; + +opt_join_or_resume: + /* nothing */ { Lex->xa_opt=XA_NONE; } + | JOIN_SYM { Lex->xa_opt=XA_JOIN; } + | RESUME_SYM { Lex->xa_opt=XA_RESUME; } + ; + +opt_one_phase: + /* nothing */ { Lex->xa_opt=XA_NONE; } + | ONE_SYM PHASE_SYM { Lex->xa_opt=XA_ONE_PHASE; } + ; + +opt_suspend: + /* nothing */ { Lex->xa_opt=XA_NONE; } + | SUSPEND_SYM { Lex->xa_opt=XA_SUSPEND; } + opt_migrate + ; + +opt_migrate: + /* nothing */ { } + | FOR_SYM MIGRATE_SYM { Lex->xa_opt=XA_FOR_MIGRATE; } + ; + + diff --git a/sql/stacktrace.c b/sql/stacktrace.c index 1b8267763a0..d8e9b7fd883 100644 --- a/sql/stacktrace.c +++ b/sql/stacktrace.c @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -222,7 +221,7 @@ terribly wrong...\n"); fprintf(stderr, "Stack trace seems successful - bottom reached\n"); end: - fprintf(stderr, "Please read http://dev.mysql.com/doc/mysql/en/Using_stack_trace.html and follow instructions on how to resolve the stack trace. Resolved\n\ + fprintf(stderr, "Please read http://dev.mysql.com/doc/mysql/en/using-stack-trace.html and follow instructions on how to resolve the stack trace. Resolved\n\ stack trace is much more helpful in diagnosing the problem, so please do \n\ resolve it\n"); } diff --git a/sql/stacktrace.h b/sql/stacktrace.h index 527d10d70a2..f5c92e54e1c 100644 --- a/sql/stacktrace.h +++ b/sql/stacktrace.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/strfunc.cc b/sql/strfunc.cc index 81aca092cec..308e6fd3dcd 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -147,10 +146,10 @@ uint find_type(TYPELIB *lib, const char *find, uint length, bool part_match) uint find_type2(TYPELIB *typelib, const char *x, uint length, CHARSET_INFO *cs) { - int find,pos; + int pos; const char *j; DBUG_ENTER("find_type2"); - DBUG_PRINT("enter",("x: '%s' lib: 0x%lx",x,typelib)); + DBUG_PRINT("enter",("x: '%.*s' lib: 0x%lx", length, x, (long) typelib)); if (!typelib->count) { @@ -158,7 +157,7 @@ uint find_type2(TYPELIB *typelib, const char *x, uint length, CHARSET_INFO *cs) DBUG_RETURN(0); } - for (find=0, pos=0 ; (j=typelib->type_names[pos]) ; pos++) + for (pos=0 ; (j=typelib->type_names[pos]) ; pos++) { if (!my_strnncoll(cs, (const uchar*) x, length, (const uchar*) j, typelib->type_lengths[pos])) diff --git a/sql/structs.h b/sql/structs.h index 2037496635a..2dcafdef615 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -80,7 +79,7 @@ typedef struct st_key_part_info { /* Info about a key part */ uint16 store_length; uint16 key_type; uint16 fieldnr; /* Fieldnum in UNIREG */ - uint8 key_part_flag; /* 0 or HA_REVERSE_SORT */ + uint16 key_part_flag; /* 0 or HA_REVERSE_SORT */ uint8 type; uint8 null_bit; /* Position to null_bit */ } KEY_PART_INFO ; @@ -104,6 +103,7 @@ typedef struct st_key { union { int bdb_return_if_eq; } handler; + struct st_table *table; } KEY; @@ -169,9 +169,11 @@ typedef struct st_known_date_time_format { enum SHOW_TYPE { SHOW_UNDEF, - SHOW_LONG, SHOW_LONGLONG, SHOW_INT, SHOW_CHAR, SHOW_CHAR_PTR, SHOW_BOOL, - SHOW_MY_BOOL, SHOW_OPENTABLES, SHOW_STARTTIME, SHOW_QUESTION, + SHOW_LONG, SHOW_LONGLONG, SHOW_INT, SHOW_CHAR, SHOW_CHAR_PTR, + SHOW_DOUBLE_STATUS, + SHOW_BOOL, SHOW_MY_BOOL, SHOW_OPENTABLES, SHOW_STARTTIME, SHOW_QUESTION, SHOW_LONG_CONST, SHOW_INT_CONST, SHOW_HAVE, SHOW_SYS, SHOW_HA_ROWS, + SHOW_VARS, #ifdef HAVE_OPENSSL SHOW_SSL_CTX_SESS_ACCEPT, SHOW_SSL_CTX_SESS_ACCEPT_GOOD, SHOW_SSL_GET_VERSION, SHOW_SSL_CTX_GET_SESSION_CACHE_MODE, @@ -186,8 +188,10 @@ enum SHOW_TYPE SHOW_SSL_CTX_SESS_TIMEOUTS, SHOW_SSL_CTX_SESS_CACHE_FULL, SHOW_SSL_GET_CIPHER_LIST, #endif /* HAVE_OPENSSL */ + SHOW_NET_COMPRESSION, SHOW_RPL_STATUS, SHOW_SLAVE_RUNNING, SHOW_SLAVE_RETRIED_TRANS, - SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_CONST_LONG, SHOW_KEY_CACHE_LONGLONG + SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_CONST_LONG, SHOW_KEY_CACHE_LONGLONG, + SHOW_LONG_STATUS, SHOW_LONG_CONST_STATUS, SHOW_SLAVE_SKIP_ERRORS }; enum SHOW_COMP_OPTION { SHOW_OPTION_YES, SHOW_OPTION_NO, SHOW_OPTION_DISABLED}; @@ -209,16 +213,65 @@ typedef struct st_lex_user { } LEX_USER; +/* + This structure specifies the maximum amount of resources which + can be consumed by each account. Zero value of a member means + there is no limit. 
+*/ typedef struct user_resources { - uint questions, updates, connections, bits; + /* Maximum number of queries/statements per hour. */ + uint questions; + /* + Maximum number of updating statements per hour (which statements are + updating is defined by uc_update_queries array). + */ + uint updates; + /* Maximum number of connections established per hour. */ + uint conn_per_hour; + /* Maximum number of concurrent connections. */ + uint user_conn; + /* + Values of this enum and specified_limits member are used by the + parser to store which user limits were specified in GRANT statement. + */ + enum {QUERIES_PER_HOUR= 1, UPDATES_PER_HOUR= 2, CONNECTIONS_PER_HOUR= 4, + USER_CONNECTIONS= 8}; + uint specified_limits; } USER_RESOURCES; + +/* + This structure is used for counting resources consumed and for checking + them against specified user limits. +*/ typedef struct user_conn { - char *user, *host; - uint len, connections, conn_per_hour, updates, questions, user_len; + /* + Pointer to user+host key (pair separated by '\0') defining the entity + for which resources are counted (By default it is user account thus + priv_user/priv_host pair is used. If --old-style-user-limits option + is enabled, resources are counted for each user+host separately). + */ + char *user; + /* Pointer to host part of the key. */ + char *host; + /* Total length of the key. */ + uint len; + /* Current amount of concurrent connections for this account. */ + uint connections; + /* + Current number of connections per hour, number of updating statements + per hour and total number of statements per hour for this account. + */ + uint conn_per_hour, updates, questions; + /* Maximum amount of resources which account is allowed to consume. */ USER_RESOURCES user_resources; + /* + The moment of time when per hour counters were reset last time + (i.e. start of "hour" for conn_per_hour, updates, questions counters). 
+ */ time_t intime; } USER_CONN; + /* Bits in form->update */ #define REG_MAKE_DUPP 1 /* Make a copy of record when read */ #define REG_NEW_RECORD 2 /* Write a new record if not found */ diff --git a/sql/table.cc b/sql/table.cc index a85da8395e7..960534bf7d4 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -20,7 +19,7 @@ #include "mysql_priv.h" #include <errno.h> #include <m_ctype.h> - +#include "md5.h" /* Functions defined in this file */ @@ -62,8 +61,8 @@ static byte* get_field_name(Field **buff,uint *length, 6 Unknown .frm version */ -int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, - uint ha_open_flags, TABLE *outparam) +int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, + uint prgflag, uint ha_open_flags, TABLE *outparam) { reg1 uint i; reg2 uchar *strpos; @@ -71,7 +70,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, uint rec_buff_length,n_length,int_length,records,key_parts,keys, interval_count,interval_parts,read_length,db_create_options; uint key_info_length, com_length; - ulong pos; + ulong pos, record_offset; char index_file[FN_REFLEN], *names, *keynames, *comment_pos; uchar head[288],*disk_buff,new_field_pack_flag; my_string record; @@ -83,49 +82,68 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, KEY *keyinfo; KEY_PART_INFO *key_part; uchar *null_pos; - uint null_bit, new_frm_ver, field_pack_length; 
+ uint null_bit_pos, new_frm_ver, field_pack_length; SQL_CRYPT *crypted=0; MEM_ROOT **root_ptr, *old_root; + TABLE_SHARE *share; DBUG_ENTER("openfrm"); - DBUG_PRINT("enter",("name: '%s' form: %lx",name,outparam)); - - bzero((char*) outparam,sizeof(*outparam)); - outparam->blob_ptr_size=sizeof(char*); - disk_buff=NULL; record= NULL; keynames=NullS; - outparam->db_stat = db_stat; - error=1; + DBUG_PRINT("enter",("name: '%s' form: 0x%lx", name, (long) outparam)); - init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0); + error= 1; + disk_buff= NULL; root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); old_root= *root_ptr; - *root_ptr= &outparam->mem_root; - outparam->real_name=strdup_root(&outparam->mem_root, - name+dirname_length(name)); - outparam->table_name=my_strdup(alias,MYF(MY_WME)); - if (!outparam->real_name || !outparam->table_name) - goto err_end; - *fn_ext(outparam->real_name)='\0'; // Remove extension + bzero((char*) outparam,sizeof(*outparam)); + outparam->in_use= thd; + outparam->s= share= &outparam->share_not_to_be_used; - if ((file=my_open(fn_format(index_file,name,"",reg_ext,MY_UNPACK_FILENAME), + if ((file=my_open(fn_format(index_file, name, "", reg_ext, + MY_UNPACK_FILENAME), O_RDONLY | O_SHARE, MYF(0))) < 0) + goto err; + + error= 4; + if (my_read(file,(byte*) head,64,MYF(MY_NABP))) + goto err; + + if (memcmp(head, STRING_WITH_LEN("TYPE=")) == 0) { - goto err_end; /* purecov: inspected */ + // new .frm + my_close(file,MYF(MY_WME)); + + if (db_stat & NO_ERR_ON_NEW_FRM) + DBUG_RETURN(5); + file= -1; + // caller can't process new .frm + goto err; } - error=4; - if (!(outparam->path= strdup_root(&outparam->mem_root,name))) - goto err_not_open; - *fn_ext(outparam->path)='\0'; // Remove extension + if (prgflag & OPEN_VIEW_NO_PARSE) + goto err; + + share->blob_ptr_size= sizeof(char*); + outparam->db_stat= db_stat; + init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0); + *root_ptr= &outparam->mem_root; + + share->table_name= 
strdup_root(&outparam->mem_root, + name+dirname_length(name)); + share->path= strdup_root(&outparam->mem_root, name); + outparam->alias= my_strdup(alias, MYF(MY_WME)); + if (!share->table_name || !share->path || !outparam->alias) + goto err; + *fn_ext(share->table_name)='\0'; // Remove extension + *fn_ext(share->path)='\0'; // Remove extension - if (my_read(file,(byte*) head,64,MYF(MY_NABP))) goto err_not_open; if (head[0] != (uchar) 254 || head[1] != 1) - goto err_not_open; /* purecov: inspected */ - if (head[2] != FRM_VER && head[2] != FRM_VER+1 && head[2] != FRM_VER+3) + goto err; /* purecov: inspected */ + if (head[2] != FRM_VER && head[2] != FRM_VER+1 && + ! (head[2] >= FRM_VER+3 && head[2] <= FRM_VER+4)) { error= 6; - goto err_not_open; /* purecov: inspected */ + goto err; /* purecov: inspected */ } new_field_pack_flag=head[27]; new_frm_ver= (head[2] - FRM_VER); @@ -133,25 +151,35 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, error=3; if (!(pos=get_form_pos(file,head,(TYPELIB*) 0))) - goto err_not_open; /* purecov: inspected */ + goto err; /* purecov: inspected */ *fn_ext(index_file)='\0'; // Remove .frm extension - outparam->frm_version= head[2]; - outparam->db_type=ha_checktype((enum db_type) (uint) *(head+3)); - outparam->db_create_options=db_create_options=uint2korr(head+30); - outparam->db_options_in_use=outparam->db_create_options; - null_field_first=0; + share->frm_version= head[2]; + /* + Check if .frm file created by MySQL 5.0. In this case we want to + display CHAR fields as CHAR and not as VARCHAR. + We do it this way as we want to keep the old frm version to enable + MySQL 4.1 to read these files. 
+ */ + if (share->frm_version == FRM_VER_TRUE_VARCHAR -1 && head[33] == 5) + share->frm_version= FRM_VER_TRUE_VARCHAR; + + share->db_type= ha_checktype(thd,(enum db_type) (uint) *(head+3),0,0); + share->db_create_options= db_create_options=uint2korr(head+30); + share->db_options_in_use= share->db_create_options; + share->mysql_version= uint4korr(head+51); + null_field_first= 0; if (!head[32]) // New frm file in 3.23 { - outparam->avg_row_length=uint4korr(head+34); - outparam->row_type=(row_type) head[40]; - outparam->raid_type= head[41]; - outparam->raid_chunks= head[42]; - outparam->raid_chunksize= uint4korr(head+43); - outparam->table_charset=get_charset((uint) head[38],MYF(0)); - null_field_first=1; + share->avg_row_length= uint4korr(head+34); + share-> row_type= (row_type) head[40]; + share->raid_type= head[41]; + share->raid_chunks= head[42]; + share->raid_chunksize= uint4korr(head+43); + share->table_charset= get_charset((uint) head[38],MYF(0)); + null_field_first= 1; } - if (!outparam->table_charset) + if (!share->table_charset) { /* unknown charset in head[38] or pre-3.23 frm */ if (use_mb(default_charset_info)) @@ -162,35 +190,34 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, "so character column sizes may have changed", name); } - outparam->table_charset=default_charset_info; + share->table_charset= default_charset_info; } - outparam->db_record_offset=1; + share->db_record_offset= 1; if (db_create_options & HA_OPTION_LONG_BLOB_PTR) - outparam->blob_ptr_size=portable_sizeof_char_ptr; - /* Set temporaryly a good value for db_low_byte_first */ - outparam->db_low_byte_first=test(outparam->db_type != DB_TYPE_ISAM); + share->blob_ptr_size= portable_sizeof_char_ptr; + /* Set temporarily a good value for db_low_byte_first */ + share->db_low_byte_first= test(share->db_type != DB_TYPE_ISAM); error=4; - outparam->max_rows=uint4korr(head+18); - outparam->min_rows=uint4korr(head+22); + share->max_rows= uint4korr(head+18); + 
share->min_rows= uint4korr(head+22); /* Read keyinformation */ key_info_length= (uint) uint2korr(head+28); VOID(my_seek(file,(ulong) uint2korr(head+6),MY_SEEK_SET,MYF(0))); if (read_string(file,(gptr*) &disk_buff,key_info_length)) - goto err_not_open; /* purecov: inspected */ + goto err; /* purecov: inspected */ if (disk_buff[0] & 0x80) { - outparam->keys= keys= (disk_buff[1] << 7) | (disk_buff[0] & 0x7f); - outparam->key_parts= key_parts= uint2korr(disk_buff+2); + share->keys= keys= (disk_buff[1] << 7) | (disk_buff[0] & 0x7f); + share->key_parts= key_parts= uint2korr(disk_buff+2); } else { - outparam->keys= keys= disk_buff[0]; - outparam->key_parts= key_parts= disk_buff[1]; + share->keys= keys= disk_buff[0]; + share->key_parts= key_parts= disk_buff[1]; } - outparam->keys_for_keyread.init(0); - outparam->keys_in_use.init(keys); - outparam->read_only_keys.init(keys); + share->keys_for_keyread.init(0); + share->keys_in_use.init(keys); outparam->quick_keys.init(); outparam->used_keys.init(); outparam->keys_in_use_for_query.init(); @@ -198,7 +225,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, n_length=keys*sizeof(KEY)+key_parts*sizeof(KEY_PART_INFO); if (!(keyinfo = (KEY*) alloc_root(&outparam->mem_root, n_length+uint2korr(disk_buff+4)))) - goto err_not_open; /* purecov: inspected */ + goto err; /* purecov: inspected */ bzero((char*) keyinfo,n_length); outparam->key_info=keyinfo; key_part= my_reinterpret_cast(KEY_PART_INFO*) (keyinfo+keys); @@ -207,11 +234,12 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, ulong *rec_per_key; if (!(rec_per_key= (ulong*) alloc_root(&outparam->mem_root, sizeof(ulong*)*key_parts))) - goto err_not_open; + goto err; for (i=0 ; i < keys ; i++, keyinfo++) { - if (new_frm_ver == 3) + keyinfo->table= outparam; + if (new_frm_ver >= 3) { keyinfo->flags= (uint) uint2korr(strpos) ^ HA_NOSAME; keyinfo->key_length= (uint) uint2korr(strpos+2); @@ -260,9 +288,9 @@ int openfrm(const char 
*name, const char *alias, uint db_stat, uint prgflag, keynames=(char*) key_part; strpos+= (strmov(keynames, (char *) strpos) - keynames)+1; - outparam->reclength = uint2korr((head+16)); + share->reclength = uint2korr((head+16)); if (*(head+26) == 1) - outparam->system=1; /* one-record-database */ + share->system= 1; /* one-record-database */ #ifdef HAVE_CRYPTED_FRM else if (*(head+26) == 2) { @@ -273,85 +301,136 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, } #endif + record_offset= (ulong) (uint2korr(head+6)+ + ((uint2korr(head+14) == 0xffff ? + uint4korr(head+47) : uint2korr(head+14)))); + + if ((n_length= uint2korr(head+55))) + { + /* Read extra data segment */ + char *buff, *next_chunk, *buff_end; + if (!(next_chunk= buff= my_malloc(n_length, MYF(MY_WME)))) + goto err; + buff_end= buff + n_length; + if (my_pread(file, (byte*)buff, n_length, record_offset + share->reclength, + MYF(MY_NABP))) + { + my_free(buff, MYF(0)); + goto err; + } + share->connect_string.length= uint2korr(buff); + if (! 
(share->connect_string.str= strmake_root(&outparam->mem_root, + next_chunk + 2, share->connect_string.length))) + { + my_free(buff, MYF(0)); + goto err; + } + next_chunk+= share->connect_string.length + 2; + if (next_chunk + 2 < buff_end) + { + uint str_db_type_length= uint2korr(next_chunk); + share->db_type= ha_resolve_by_name(next_chunk + 2, str_db_type_length); + DBUG_PRINT("enter", ("Setting dbtype to: %d - %d - '%.*s'\n", + share->db_type, + str_db_type_length, str_db_type_length, + next_chunk + 2)); + next_chunk+= str_db_type_length + 2; + } + my_free(buff, MYF(0)); + } /* Allocate handler */ - if (!(outparam->file= get_new_handler(outparam,outparam->db_type))) - goto err_not_open; + if (!(outparam->file= get_new_handler(outparam, &outparam->mem_root, + share->db_type))) + goto err; error=4; outparam->reginfo.lock_type= TL_UNLOCK; outparam->current_lock=F_UNLCK; - if ((db_stat & HA_OPEN_KEYFILE) || (prgflag & DELAYED_OPEN)) records=2; - else records=1; - if (prgflag & (READ_ALL+EXTRA_RECORD)) records++; + if ((db_stat & HA_OPEN_KEYFILE) || (prgflag & DELAYED_OPEN)) + records=2; + else + records=1; + if (prgflag & (READ_ALL+EXTRA_RECORD)) + records++; /* QQ: TODO, remove the +1 from below */ - rec_buff_length=ALIGN_SIZE(outparam->reclength+1+ - outparam->file->extra_rec_buf_length()); - if (!(outparam->record[0]= (byte*) - (record = (char *) alloc_root(&outparam->mem_root, - rec_buff_length * records)))) - goto err_not_open; /* purecov: inspected */ - record[outparam->reclength]=0; // For purify and ->c_ptr() - outparam->rec_buff_length=rec_buff_length; - if (my_pread(file,(byte*) record,(uint) outparam->reclength, - (ulong) (uint2korr(head+6)+ - ((uint2korr(head+14) == 0xffff ? 
- uint4korr(head+47) : uint2korr(head+14)))), - MYF(MY_NABP))) - goto err_not_open; /* purecov: inspected */ - /* HACK: table->record[2] is used instead of table->default_values here */ - for (i=0 ; i < records ; i++, record+=rec_buff_length) - { - outparam->record[i]=(byte*) record; - if (i) - memcpy(record,record-rec_buff_length,(uint) outparam->reclength); - } - - if (records == 2) - { /* fix for select */ - outparam->default_values=outparam->record[1]; - if (db_stat & HA_READ_ONLY) - outparam->record[1]=outparam->record[0]; /* purecov: inspected */ - } - outparam->insert_values=0; /* for INSERT ... UPDATE */ - + rec_buff_length= ALIGN_SIZE(share->reclength + 1 + + outparam->file->extra_rec_buf_length()); + share->rec_buff_length= rec_buff_length; + if (!(record= (char *) alloc_root(&outparam->mem_root, + rec_buff_length * records))) + goto err; /* purecov: inspected */ + share->default_values= (byte *) record; + + if (my_pread(file,(byte*) record, (uint) share->reclength, + record_offset, MYF(MY_NABP))) + goto err; /* purecov: inspected */ + + if (records == 1) + { + /* We are probably in hard repair, and the buffers should not be used */ + outparam->record[0]= outparam->record[1]= share->default_values; + } + else + { + outparam->record[0]= (byte *) record+ rec_buff_length; + if (records > 2) + outparam->record[1]= (byte *) record+ rec_buff_length*2; + else + outparam->record[1]= outparam->record[0]; // Safety + } + +#ifdef HAVE_purify + /* + We need this because when we read var-length rows, we are not updating + bytes after end of varchar + */ + if (records > 1) + { + memcpy(outparam->record[0], share->default_values, rec_buff_length); + if (records > 2) + memcpy(outparam->record[1], share->default_values, rec_buff_length); + } +#endif VOID(my_seek(file,pos,MY_SEEK_SET,MYF(0))); - if (my_read(file,(byte*) head,288,MYF(MY_NABP))) goto err_not_open; + if (my_read(file,(byte*) head,288,MYF(MY_NABP))) + goto err; #ifdef HAVE_CRYPTED_FRM if (crypted) { 
crypted->decode((char*) head+256,288-256); if (sint2korr(head+284) != 0) // Should be 0 - goto err_not_open; // Wrong password + goto err; // Wrong password } #endif - outparam->fields= uint2korr(head+258); - pos=uint2korr(head+260); /* Length of all screens */ - n_length=uint2korr(head+268); - interval_count=uint2korr(head+270); - interval_parts=uint2korr(head+272); - int_length=uint2korr(head+274); - outparam->null_fields=uint2korr(head+282); - com_length=uint2korr(head+284); - outparam->comment=strdup_root(&outparam->mem_root, - (char*) head+47); + share->fields= uint2korr(head+258); + pos= uint2korr(head+260); /* Length of all screens */ + n_length= uint2korr(head+268); + interval_count= uint2korr(head+270); + interval_parts= uint2korr(head+272); + int_length= uint2korr(head+274); + share->null_fields= uint2korr(head+282); + com_length= uint2korr(head+284); + share->comment.length= (int) (head[46]); + share->comment.str= strmake_root(&outparam->mem_root, (char*) head+47, + share->comment.length); - DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d", interval_count,interval_parts, outparam->keys,n_length,int_length, com_length)); + DBUG_PRINT("info",("i_count: %d i_parts: %d index: %d n_length: %d int_length: %d com_length: %d", interval_count,interval_parts, share->keys,n_length,int_length, com_length)); if (!(field_ptr = (Field **) alloc_root(&outparam->mem_root, - (uint) ((outparam->fields+1)*sizeof(Field*)+ + (uint) ((share->fields+1)*sizeof(Field*)+ interval_count*sizeof(TYPELIB)+ - (outparam->fields+interval_parts+ + (share->fields+interval_parts+ keys+3)*sizeof(my_string)+ (n_length+int_length+com_length))))) - goto err_not_open; /* purecov: inspected */ + goto err; /* purecov: inspected */ outparam->field=field_ptr; - read_length=(uint) (outparam->fields * field_pack_length + + read_length=(uint) (share->fields * field_pack_length + pos+ (uint) (n_length+int_length+com_length)); if (read_string(file,(gptr*) 
&disk_buff,read_length)) - goto err_not_open; /* purecov: inspected */ + goto err; /* purecov: inspected */ #ifdef HAVE_CRYPTED_FRM if (crypted) { @@ -362,33 +441,33 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, #endif strpos= disk_buff+pos; - outparam->intervals= (TYPELIB*) (field_ptr+outparam->fields+1); - int_array= (const char **) (outparam->intervals+interval_count); - names= (char*) (int_array+outparam->fields+interval_parts+keys+3); + share->intervals= (TYPELIB*) (field_ptr+share->fields+1); + int_array= (const char **) (share->intervals+interval_count); + names= (char*) (int_array+share->fields+interval_parts+keys+3); if (!interval_count) - outparam->intervals=0; // For better debugging - memcpy((char*) names, strpos+(outparam->fields*field_pack_length), + share->intervals= 0; // For better debugging + memcpy((char*) names, strpos+(share->fields*field_pack_length), (uint) (n_length+int_length)); - comment_pos=names+(n_length+int_length); + comment_pos= names+(n_length+int_length); memcpy(comment_pos, disk_buff+read_length-com_length, com_length); - fix_type_pointers(&int_array,&outparam->fieldnames,1,&names); - if (outparam->fieldnames.count != outparam->fields) - goto err_not_open; - fix_type_pointers(&int_array,outparam->intervals,interval_count, + fix_type_pointers(&int_array, &share->fieldnames, 1, &names); + if (share->fieldnames.count != share->fields) + goto err; + fix_type_pointers(&int_array, share->intervals, interval_count, &names); { /* Set ENUM and SET lengths */ TYPELIB *interval; - for (interval= outparam->intervals; - interval < outparam->intervals + interval_count; + for (interval= share->intervals; + interval < share->intervals + interval_count; interval++) { uint count= (uint) (interval->count + 1) * sizeof(uint); if (!(interval->type_lengths= (uint *) alloc_root(&outparam->mem_root, count))) - goto err_not_open; + goto err; for (count= 0; count < interval->count; count++) { char *val= (char*) 
interval->type_names[count]; @@ -399,33 +478,38 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, } if (keynames) - fix_type_pointers(&int_array,&outparam->keynames,1,&keynames); + fix_type_pointers(&int_array, &share->keynames, 1, &keynames); VOID(my_close(file,MYF(MY_WME))); file= -1; - record=(char*) outparam->record[0]-1; /* Fieldstart = 1 */ + record= (char*) outparam->record[0]-1; /* Fieldstart = 1 */ if (null_field_first) { outparam->null_flags=null_pos=(uchar*) record+1; - null_bit= (db_create_options & HA_OPTION_PACK_RECORD) ? 1 : 2; - outparam->null_bytes=(outparam->null_fields+null_bit+6)/8; + null_bit_pos= (db_create_options & HA_OPTION_PACK_RECORD) ? 0 : 1; + /* + null_bytes below is only correct under the condition that + there are no bit fields. Correct values is set below after the + table struct is initialized + */ + share->null_bytes= (share->null_fields + null_bit_pos + 7) / 8; } else { - outparam->null_bytes=(outparam->null_fields+7)/8; - outparam->null_flags=null_pos= - (uchar*) (record+1+outparam->reclength-outparam->null_bytes); - null_bit=1; + share->null_bytes= (share->null_fields+7)/8; + outparam->null_flags= null_pos= + (uchar*) (record+1+share->reclength-share->null_bytes); + null_bit_pos= 0; } - use_hash= outparam->fields >= MAX_FIELDS_BEFORE_HASH; + use_hash= share->fields >= MAX_FIELDS_BEFORE_HASH; if (use_hash) - use_hash= !hash_init(&outparam->name_hash, + use_hash= !hash_init(&share->name_hash, system_charset_info, - outparam->fields,0,0, + share->fields,0,0, (hash_get_key) get_field_name,0,0); - for (i=0 ; i < outparam->fields; i++, strpos+=field_pack_length, field_ptr++) + for (i=0 ; i < share->fields; i++, strpos+=field_pack_length, field_ptr++) { uint pack_flag, interval_nr, unireg_type, recpos, field_length; enum_field_types field_type; @@ -433,7 +517,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, Field::geometry_type geom_type= Field::GEOM_GEOMETRY; LEX_STRING 
comment; - if (new_frm_ver == 3) + if (new_frm_ver >= 3) { /* new frm file in 4.1 */ field_length= uint2korr(strpos+3); @@ -441,11 +525,10 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, pack_flag= uint2korr(strpos+8); unireg_type= (uint) strpos[10]; interval_nr= (uint) strpos[12]; - uint comment_length=uint2korr(strpos+15); field_type=(enum_field_types) (uint) strpos[13]; - // charset and geometry_type share the same byte in frm + /* charset and geometry_type share the same byte in frm */ if (field_type == FIELD_TYPE_GEOMETRY) { #ifdef HAVE_SPATIAL @@ -453,7 +536,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, charset= &my_charset_bin; #else error= 4; // unsupported field type - goto err_not_open; + goto err; #endif } else @@ -464,7 +547,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, { error= 5; // Unknown or unavailable charset errarg= (int) strpos[14]; - goto err_not_open; + goto err; } } if (!comment_length) @@ -484,6 +567,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, field_length= (uint) strpos[3]; recpos= uint2korr(strpos+4), pack_flag= uint2korr(strpos+6); + pack_flag&= ~FIELDFLAG_NO_DEFAULT; // Safety for old files unireg_type= (uint) strpos[8]; interval_nr= (uint) strpos[10]; @@ -500,7 +584,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (!f_is_blob(pack_flag)) { // 3.23 or 4.0 string - if (!(charset= get_charset_by_csname(outparam->table_charset->csname, + if (!(charset= get_charset_by_csname(share->table_charset->csname, MY_CS_BINSORT, MYF(0)))) charset= &my_charset_bin; } @@ -508,51 +592,80 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, charset= &my_charset_bin; } else - charset= outparam->table_charset; + charset= share->table_charset; bzero((char*) &comment, sizeof(comment)); } if (interval_nr && charset->mbminlen > 1) { /* Unescape UCS2 intervals 
from HEX notation */ - TYPELIB *interval= outparam->intervals + interval_nr - 1; + TYPELIB *interval= share->intervals + interval_nr - 1; unhex_type2(interval); } +#ifndef TO_BE_DELETED_ON_PRODUCTION + if (field_type == FIELD_TYPE_NEWDECIMAL && !share->mysql_version) + { + /* + Fix pack length of old decimal values from 5.0.3 -> 5.0.4 + The difference is that in the old version we stored precision + in the .frm table while we now store the display_length + */ + uint decimals= f_decimals(pack_flag); + field_length= my_decimal_precision_to_length(field_length, + decimals, + f_is_dec(pack_flag) == 0); + sql_print_error("Found incompatible DECIMAL field '%s' in %s; Please do \"ALTER TABLE '%s' FORCE\" to fix it!", share->fieldnames.type_names[i], name, share->table_name); + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_CRASHED_ON_USAGE, + "Found incompatible DECIMAL field '%s' in %s; Please do \"ALTER TABLE '%s' FORCE\" to fix it!", share->fieldnames.type_names[i], name, share->table_name); + share->crashed= 1; // Marker for CHECK TABLE + } +#endif + *field_ptr=reg_field= make_field(record+recpos, (uint32) field_length, - null_pos,null_bit, + null_pos, null_bit_pos, pack_flag, field_type, charset, geom_type, (Field::utype) MTYP_TYPENR(unireg_type), (interval_nr ? 
- outparam->intervals+interval_nr-1 : + share->intervals+interval_nr-1 : (TYPELIB*) 0), - outparam->fieldnames.type_names[i], + share->fieldnames.type_names[i], outparam); if (!reg_field) // Not supported field type { error= 4; - goto err_not_open; /* purecov: inspected */ + goto err; /* purecov: inspected */ } + + reg_field->field_index= i; reg_field->comment=comment; - if (!(reg_field->flags & NOT_NULL_FLAG)) + if (field_type == FIELD_TYPE_BIT && !f_bit_as_char(pack_flag)) { - if ((null_bit<<=1) == 256) + if ((null_bit_pos+= field_length & 7) > 7) { - null_pos++; - null_bit=1; + null_pos++; + null_bit_pos-= 8; } } + if (!(reg_field->flags & NOT_NULL_FLAG)) + { + if (!(null_bit_pos= (null_bit_pos + 1) & 7)) + null_pos++; + } + if (f_no_default(pack_flag)) + reg_field->flags|= NO_DEFAULT_VALUE_FLAG; if (reg_field->unireg_check == Field::NEXT_NUMBER) outparam->found_next_number_field= reg_field; if (outparam->timestamp_field == reg_field) - outparam->timestamp_field_offset=i; + share->timestamp_field_offset= i; if (use_hash) - (void) my_hash_insert(&outparam->name_hash,(byte*) field_ptr); // Will never fail + (void) my_hash_insert(&share->name_hash,(byte*) field_ptr); // never fail } *field_ptr=0; // End marker @@ -560,15 +673,15 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (key_parts) { uint primary_key=(uint) (find_type((char*) primary_key_name, - &outparam->keynames, 3) - 1); + &share->keynames, 3) - 1); uint ha_option=outparam->file->table_flags(); keyinfo=outparam->key_info; key_part=keyinfo->key_part; - for (uint key=0 ; key < outparam->keys ; key++,keyinfo++) + for (uint key=0 ; key < share->keys ; key++,keyinfo++) { uint usable_parts=0; - keyinfo->name=(char*) outparam->keynames.type_names[key]; + keyinfo->name=(char*) share->keynames.type_names[key]; /* Fix fulltext keys for old .frm files */ if (outparam->key_info[key].flags & HA_FULLTEXT) outparam->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT; @@ -601,12 +714,13 @@ 
int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, (uint) key_part->offset, (uint) key_part->length); #ifdef EXTRA_DEBUG - if (key_part->fieldnr > outparam->fields) - goto err_not_open; // sanity check + if (key_part->fieldnr > share->fields) + goto err; // sanity check #endif if (key_part->fieldnr) { // Should always be true ! Field *field=key_part->field=outparam->field[key_part->fieldnr-1]; + key_part->type= field->key_type(); if (field->null_ptr) { key_part->null_offset=(uint) ((byte*) field->null_ptr - @@ -618,10 +732,12 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, keyinfo->key_length+= HA_KEY_NULL_LENGTH; } if (field->type() == FIELD_TYPE_BLOB || - field->real_type() == FIELD_TYPE_VAR_STRING) + field->real_type() == MYSQL_TYPE_VARCHAR) { if (field->type() == FIELD_TYPE_BLOB) key_part->key_part_flag|= HA_BLOB_PART; + else + key_part->key_part_flag|= HA_VAR_LENGTH_PART; keyinfo->extra_length+=HA_KEY_BLOB_LENGTH; key_part->store_length+=HA_KEY_BLOB_LENGTH; keyinfo->key_length+= HA_KEY_BLOB_LENGTH; @@ -632,11 +748,13 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (!(field->flags & BINARY_FLAG)) keyinfo->flags|= HA_END_SPACE_KEY; } + if (field->type() == MYSQL_TYPE_BIT) + key_part->key_part_flag|= HA_BIT_PART; + if (i == 0 && key != primary_key) - field->flags |= - ((keyinfo->flags & HA_NOSAME) && - field->key_length() == - keyinfo->key_length ? UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG); + field->flags |= ((keyinfo->flags & HA_NOSAME) && + (keyinfo->key_parts == 1)) ? 
+ UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG; if (i == 0) field->key_start.set_bit(key); if (field->key_length() == key_part->length && @@ -644,8 +762,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, { if (outparam->file->index_flags(key, i, 0) & HA_KEYREAD_ONLY) { - outparam->read_only_keys.clear_bit(key); - outparam->keys_for_keyread.set_bit(key); + share->keys_for_keyread.set_bit(key); field->part_of_key.set_bit(key); } if (outparam->file->index_flags(key, i, 1) & HA_READ_ORDER) @@ -663,18 +780,45 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, the primary key, then we can use any key to find this column */ if (ha_option & HA_PRIMARY_KEY_IN_READ_INDEX) - field->part_of_key= outparam->keys_in_use; + field->part_of_key= share->keys_in_use; } if (field->key_length() != key_part->length) { +#ifndef TO_BE_DELETED_ON_PRODUCTION + if (field->type() == FIELD_TYPE_NEWDECIMAL) + { + /* + Fix a fatal error in decimal key handling that causes crashes + on Innodb. We fix it by reducing the key length so that + InnoDB never gets a too big key when searching. + This allows the end user to do an ALTER TABLE to fix the + error. 
+ */ + keyinfo->key_length-= (key_part->length - field->key_length()); + key_part->store_length-= (uint16)(key_part->length - + field->key_length()); + key_part->length= (uint16)field->key_length(); + sql_print_error("Found wrong key definition in %s; Please do \"ALTER TABLE '%s' FORCE \" to fix it!", name, share->table_name); + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_CRASHED_ON_USAGE, + "Found wrong key definition in %s; Please do \"ALTER TABLE '%s' FORCE\" to fix it!", name, share->table_name); + + share->crashed= 1; // Marker for CHECK TABLE + goto to_be_deleted; + } +#endif key_part->key_part_flag|= HA_PART_KEY_SEG; if (!(field->flags & BLOB_FLAG)) { // Create a new field field=key_part->field=field->new_field(&outparam->mem_root, - outparam); + outparam, + outparam == field->table); field->field_length=key_part->length; } } + + to_be_deleted: + /* If the field can be NULL, don't optimize away the test key_part_column = expression from the WHERE clause @@ -691,21 +835,21 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, } keyinfo->usable_key_parts=usable_parts; // Filesort - set_if_bigger(outparam->max_key_length,keyinfo->key_length+ - keyinfo->key_parts); - outparam->total_key_length+= keyinfo->key_length; + set_if_bigger(share->max_key_length,keyinfo->key_length+ + keyinfo->key_parts); + share->total_key_length+= keyinfo->key_length; /* MERGE tables do not have unique indexes. But every key could be an unique index on the underlying MyISAM table. 
(Bug #10400) */ if ((keyinfo->flags & HA_NOSAME) || (ha_option & HA_ANY_INDEX_MAY_BE_UNIQUE)) - set_if_bigger(outparam->max_unique_length,keyinfo->key_length); + set_if_bigger(share->max_unique_length,keyinfo->key_length); } if (primary_key < MAX_KEY && - (outparam->keys_in_use.is_set(primary_key))) + (share->keys_in_use.is_set(primary_key))) { - outparam->primary_key=primary_key; + share->primary_key= primary_key; /* If we are using an integer as the primary key then allow the user to refer to it as '_rowid' @@ -718,27 +862,25 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, } } else - outparam->primary_key = MAX_KEY; // we do not have a primary key + share->primary_key = MAX_KEY; // we do not have a primary key } else - outparam->primary_key= MAX_KEY; + share->primary_key= MAX_KEY; x_free((gptr) disk_buff); disk_buff=0; if (new_field_pack_flag <= 1) - { /* Old file format with default null */ - uint null_length=(outparam->null_fields+7)/8; - bfill(outparam->null_flags,null_length,255); - bfill(outparam->null_flags+outparam->rec_buff_length,null_length,255); - if (records > 2) - bfill(outparam->null_flags+outparam->rec_buff_length*2,null_length,255); + { + /* Old file format with default as not null */ + uint null_length= (share->null_fields+7)/8; + bfill(share->default_values + (outparam->null_flags - (uchar*) record), + null_length, 255); } - if ((reg_field=outparam->found_next_number_field)) { - if ((int) (outparam->next_number_index= (uint) + if ((int) (share->next_number_index= (uint) find_ref_key(outparam,reg_field, - &outparam->next_number_key_offset)) < 0) + &share->next_number_key_offset)) < 0) { reg_field->unireg_check=Field::NONE; /* purecov: inspected */ outparam->found_next_number_field=0; @@ -747,50 +889,54 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, reg_field->flags|=AUTO_INCREMENT_FLAG; } - if (outparam->blob_fields) + if (share->blob_fields) { Field **ptr; - Field_blob **save; - - 
if (!(outparam->blob_field=save= - (Field_blob**) alloc_root(&outparam->mem_root, - (uint) (outparam->blob_fields+1)* - sizeof(Field_blob*)))) - goto err_not_open; - for (ptr=outparam->field ; *ptr ; ptr++) + uint k, *save; + + /* Store offsets to blob fields to find them fast */ + if (!(share->blob_field= save= + (uint*) alloc_root(&outparam->mem_root, + (uint) (share->blob_fields* sizeof(uint))))) + goto err; + for (k=0, ptr= outparam->field ; *ptr ; ptr++, k++) { if ((*ptr)->flags & BLOB_FLAG) - (*save++)= (Field_blob*) *ptr; + (*save++)= k; } - *save=0; // End marker } - else - outparam->blob_field= - (Field_blob**) (outparam->field+outparam->fields); // Point at null ptr - /* The table struct is now initialzed; Open the table */ + /* + the correct null_bytes can now be set, since bitfields have been taken + into account + */ + share->null_bytes= (null_pos - (uchar*) outparam->null_flags + + (null_bit_pos + 7) / 8); + share->last_null_bit_pos= null_bit_pos; + + /* The table struct is now initialized; Open the table */ error=2; if (db_stat) { - int err; + int ha_err; unpack_filename(index_file,index_file); - if ((err=(outparam->file-> - ha_open(index_file, - (db_stat & HA_READ_ONLY ? O_RDONLY : O_RDWR), - (db_stat & HA_OPEN_TEMPORARY ? HA_OPEN_TMP_TABLE : - ((db_stat & HA_WAIT_IF_LOCKED) || - (specialflag & SPECIAL_WAIT_IF_LOCKED)) ? - HA_OPEN_WAIT_IF_LOCKED : - (db_stat & (HA_ABORT_IF_LOCKED | HA_GET_INFO)) ? - HA_OPEN_ABORT_IF_LOCKED : - HA_OPEN_IGNORE_IF_LOCKED) | ha_open_flags)))) + if ((ha_err= (outparam->file-> + ha_open(index_file, + (db_stat & HA_READ_ONLY ? O_RDONLY : O_RDWR), + (db_stat & HA_OPEN_TEMPORARY ? HA_OPEN_TMP_TABLE : + ((db_stat & HA_WAIT_IF_LOCKED) || + (specialflag & SPECIAL_WAIT_IF_LOCKED)) ? + HA_OPEN_WAIT_IF_LOCKED : + (db_stat & (HA_ABORT_IF_LOCKED | HA_GET_INFO)) ? + HA_OPEN_ABORT_IF_LOCKED : + HA_OPEN_IGNORE_IF_LOCKED) | ha_open_flags)))) { /* Set a flag if the table is crashed and it can be auto. 
repaired */ - outparam->crashed=((err == HA_ERR_CRASHED_ON_USAGE) && - outparam->file->auto_repair() && - !(ha_open_flags & HA_OPEN_FOR_REPAIR)); + share->crashed= ((ha_err == HA_ERR_CRASHED_ON_USAGE) && + outparam->file->auto_repair() && + !(ha_open_flags & HA_OPEN_FOR_REPAIR)); - if (err==HA_ERR_NO_SUCH_TABLE) + if (ha_err == HA_ERR_NO_SUCH_TABLE) { /* The table did not exists in storage engine, use same error message as if the .frm file didn't exist */ @@ -799,38 +945,37 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, } else { - outparam->file->print_error(err, MYF(0)); + outparam->file->print_error(ha_err, MYF(0)); error_reported= TRUE; } - goto err_not_open; /* purecov: inspected */ + goto err; /* purecov: inspected */ } } - outparam->db_low_byte_first=outparam->file->low_byte_first(); + share->db_low_byte_first= outparam->file->low_byte_first(); *root_ptr= old_root; - opened_tables++; + thd->status_var.opened_tables++; #ifndef DBUG_OFF if (use_hash) - (void) hash_check(&outparam->name_hash); + (void) hash_check(&share->name_hash); #endif DBUG_RETURN (0); - err_not_open: + err: x_free((gptr) disk_buff); if (file > 0) VOID(my_close(file,MYF(MY_WME))); - err_end: /* Here when no file */ delete crypted; *root_ptr= old_root; - if (!error_reported) - frm_error(error, outparam, name, ME_ERROR + ME_WAITTANG, errarg); + if (! 
error_reported) + frm_error(error,outparam,name,ME_ERROR+ME_WAITTANG, errarg); delete outparam->file; - outparam->file=0; // For easyer errorchecking + outparam->file=0; // For easier errorchecking outparam->db_stat=0; - hash_free(&outparam->name_hash); - free_root(&outparam->mem_root,MYF(0)); - my_free(outparam->table_name,MYF(MY_ALLOW_ZERO_PTR)); + hash_free(&share->name_hash); + free_root(&outparam->mem_root, MYF(0)); // Safe to call on bzero'd root + my_free((char*) outparam->alias, MYF(MY_ALLOW_ZERO_PTR)); DBUG_RETURN (error); } /* openfrm */ @@ -843,21 +988,18 @@ int closefrm(register TABLE *table) DBUG_ENTER("closefrm"); if (table->db_stat) error=table->file->close(); - if (table->table_name) - { - my_free(table->table_name,MYF(0)); - table->table_name=0; - } - if (table->fields) + my_free((char*) table->alias, MYF(MY_ALLOW_ZERO_PTR)); + table->alias= 0; + if (table->field) { for (Field **ptr=table->field ; *ptr ; ptr++) delete *ptr; - table->fields=0; + table->field= 0; } delete table->file; - table->file=0; /* For easyer errorchecking */ - hash_free(&table->name_hash); - free_root(&table->mem_root,MYF(0)); + table->file= 0; /* For easier errorchecking */ + hash_free(&table->s->name_hash); + free_root(&table->mem_root, MYF(0)); DBUG_RETURN(error); } @@ -866,8 +1008,11 @@ int closefrm(register TABLE *table) void free_blobs(register TABLE *table) { - for (Field_blob **ptr=table->blob_field ; *ptr ; ptr++) - (*ptr)->free(); + uint *ptr, *end; + for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; + ptr != end ; + ptr++) + ((Field_blob*) table->field[*ptr])->free(); } @@ -905,7 +1050,10 @@ ulong get_form_pos(File file, uchar *head, TYPELIB *save_names) ret_value=uint4korr(pos); } if (! 
save_names) - my_free((gptr) buf,MYF(0)); + { + if (names) + my_free((gptr) buf,MYF(0)); + } else if (!names) bzero((char*) save_names,sizeof(save_names)); else @@ -1015,6 +1163,7 @@ static void frm_error(int error, TABLE *form, const char *name, int err_no; char buff[FN_REFLEN]; const char *form_dev="",*datext; + const char *real_name= (char*) name+dirname_length(name); DBUG_ENTER("frm_error"); switch (error) { @@ -1025,11 +1174,12 @@ static void frm_error(int error, TABLE *form, const char *name, uint length=dirname_part(buff,name); buff[length-1]=0; db=buff+dirname_length(buff); - my_error(ER_NO_SUCH_TABLE,MYF(0),db,form->real_name); + my_error(ER_NO_SUCH_TABLE, MYF(0), db, real_name); } else - my_error(ER_FILE_NOT_FOUND,errortype, - fn_format(buff,name,form_dev,reg_ext,0),my_errno); + my_error((my_errno == EMFILE) ? ER_CANT_OPEN_FILE : ER_FILE_NOT_FOUND, + errortype, + fn_format(buff, name, form_dev, reg_ext, 0), my_errno); break; case 2: { @@ -1038,7 +1188,7 @@ static void frm_error(int error, TABLE *form, const char *name, err_no= (my_errno == ENOENT) ? ER_FILE_NOT_FOUND : (my_errno == EAGAIN) ? 
ER_FILE_USED : ER_CANT_OPEN_FILE; my_error(err_no,errortype, - fn_format(buff,form->real_name,form_dev,datext,2),my_errno); + fn_format(buff,real_name,form_dev,datext,2),my_errno); break; } case 5: @@ -1052,7 +1202,7 @@ static void frm_error(int error, TABLE *form, const char *name, } my_printf_error(ER_UNKNOWN_COLLATION, "Unknown collation '%s' in table '%-.64s' definition", - MYF(0), csname, form->real_name); + MYF(0), csname, real_name); break; } case 6: @@ -1063,8 +1213,8 @@ static void frm_error(int error, TABLE *form, const char *name, break; default: /* Better wrong error than none */ case 4: - my_error(ER_NOT_FORM_FILE,errortype, - fn_format(buff,name,form_dev,reg_ext,0)); + my_error(ER_NOT_FORM_FILE, errortype, + fn_format(buff, name, form_dev, reg_ext, 0)); break; } DBUG_VOID_RETURN; @@ -1073,7 +1223,7 @@ static void frm_error(int error, TABLE *form, const char *name, /* ** fix a str_type to a array type - ** typeparts sepearated with some char. differents types are separated + ** typeparts separated with some char. differents types are separated ** with a '\0' */ @@ -1135,22 +1285,27 @@ TYPELIB *typelib(MEM_ROOT *mem_root, List<String> &strings) } - /* - ** Search after a field with given start & length - ** If an exact field isn't found, return longest field with starts - ** at right position. - ** Return 0 on error, else field number+1 - ** This is needed because in some .frm fields 'fieldnr' was saved wrong - */ +/* + Search after a field with given start & length + If an exact field isn't found, return longest field with starts + at right position. 
+ + NOTES + This is needed because in some .frm fields 'fieldnr' was saved wrong + + RETURN + 0 error + # field number +1 +*/ static uint find_field(TABLE *form,uint start,uint length) { Field **field; - uint i,pos; + uint i, pos, fields; pos=0; - - for (field=form->field, i=1 ; i<= form->fields ; i++,field++) + fields= form->s->fields; + for (field=form->field, i=1 ; i<= fields ; i++,field++) { if ((*field)->offset() == start) { @@ -1165,7 +1320,7 @@ static uint find_field(TABLE *form,uint start,uint length) } - /* Check that the integer is in the internvall */ + /* Check that the integer is in the internal */ int set_zone(register int nr, int min_zone, int max_zone) { @@ -1229,7 +1384,7 @@ void append_unescaped(String *res, const char *pos, uint length) res->append('n'); break; case '\r': - res->append('\\'); /* This gives better readbility */ + res->append('\\'); /* This gives better readability */ res->append('r'); break; case '\\': @@ -1250,12 +1405,11 @@ void append_unescaped(String *res, const char *pos, uint length) /* Create a .frm file */ -File create_frm(register my_string name, const char *db, const char *table, - uint reclength, uchar *fileinfo, +File create_frm(THD *thd, my_string name, const char *db, + const char *table, uint reclength, uchar *fileinfo, HA_CREATE_INFO *create_info, uint keys) { register File file; - uint key_length; ulong length; char fill[IO_SIZE]; int create_flags= O_RDWR | O_TRUNC; @@ -1263,13 +1417,12 @@ File create_frm(register my_string name, const char *db, const char *table, if (create_info->options & HA_LEX_CREATE_TMP_TABLE) create_flags|= O_EXCL | O_NOFOLLOW; -#if SIZEOF_OFF_T > 4 /* Fix this when we have new .frm files; Current limit is 4G rows (QQ) */ - if (create_info->max_rows > ~(ulong) 0) - create_info->max_rows= ~(ulong) 0; - if (create_info->min_rows > ~(ulong) 0) - create_info->min_rows= ~(ulong) 0; -#endif + if (create_info->max_rows > UINT_MAX32) + create_info->max_rows= UINT_MAX32; + if (create_info->min_rows > 
UINT_MAX32) + create_info->min_rows= UINT_MAX32; + /* Ensure that raid_chunks can't be larger than 255, as this would cause problems with drop database @@ -1278,16 +1431,23 @@ File create_frm(register my_string name, const char *db, const char *table, if ((file= my_create(name, CREATE_MODE, create_flags, MYF(0))) >= 0) { + uint key_length, tmp_key_length; + uint tmp; bzero((char*) fileinfo,64); - fileinfo[0]=(uchar) 254; fileinfo[1]= 1; fileinfo[2]= FRM_VER+3; // Header - fileinfo[3]= (uchar) ha_checktype(create_info->db_type); + /* header */ + fileinfo[0]=(uchar) 254; + fileinfo[1]= 1; + fileinfo[2]= FRM_VER+3+ test(create_info->varchar); + + fileinfo[3]= (uchar) ha_checktype(thd,create_info->db_type,0,0); fileinfo[4]=1; int2store(fileinfo+6,IO_SIZE); /* Next block starts here */ key_length=keys*(7+NAME_LEN+MAX_REF_PARTS*9)+16; - length=(ulong) next_io_size((ulong) (IO_SIZE+key_length+reclength)); + length= next_io_size((ulong) (IO_SIZE+key_length+reclength+ + create_info->extra_size)); int4store(fileinfo+10,length); - if (key_length > 0xffff) key_length=0xffff; - int2store(fileinfo+14,key_length); + tmp_key_length= (key_length < 0xffff) ? key_length : 0xffff; + int2store(fileinfo+14,tmp_key_length); int2store(fileinfo+16,reclength); int4store(fileinfo+18,create_info->max_rows); int4store(fileinfo+22,create_info->min_rows); @@ -1295,6 +1455,7 @@ File create_frm(register my_string name, const char *db, const char *table, create_info->table_options|=HA_OPTION_LONG_BLOB_PTR; // Use portable blob pointers int2store(fileinfo+30,create_info->table_options); fileinfo[32]=0; // No filename anymore + fileinfo[33]=5; // Mark for 5.0 frm file int4store(fileinfo+34,create_info->avg_row_length); fileinfo[38]= (create_info->default_table_charset ? 
create_info->default_table_charset->number : 0); @@ -1302,6 +1463,10 @@ File create_frm(register my_string name, const char *db, const char *table, fileinfo[41]= (uchar) create_info->raid_type; fileinfo[42]= (uchar) create_info->raid_chunks; int4store(fileinfo+43,create_info->raid_chunksize); + int4store(fileinfo+47, key_length); + tmp= MYSQL_VERSION_ID; // Store to avoid warning from int4store + int4store(fileinfo+51, tmp); + int2store(fileinfo+55, create_info->extra_size); bzero(fill,IO_SIZE); for (; length > IO_SIZE ; length-= IO_SIZE) { @@ -1326,17 +1491,20 @@ File create_frm(register my_string name, const char *db, const char *table, void update_create_info_from_table(HA_CREATE_INFO *create_info, TABLE *table) { + TABLE_SHARE *share= table->s; DBUG_ENTER("update_create_info_from_table"); - create_info->max_rows=table->max_rows; - create_info->min_rows=table->min_rows; - create_info->table_options=table->db_create_options; - create_info->avg_row_length=table->avg_row_length; - create_info->row_type=table->row_type; - create_info->raid_type=table->raid_type; - create_info->raid_chunks=table->raid_chunks; - create_info->raid_chunksize=table->raid_chunksize; - create_info->default_table_charset=table->table_charset; + + create_info->max_rows= share->max_rows; + create_info->min_rows= share->min_rows; + create_info->table_options= share->db_create_options; + create_info->avg_row_length= share->avg_row_length; + create_info->row_type= share->row_type; + create_info->raid_type= share->raid_type; + create_info->raid_chunks= share->raid_chunks; + create_info->raid_chunksize= share->raid_chunksize; + create_info->default_table_charset= share->table_charset; create_info->table_charset= 0; + DBUG_VOID_RETURN; } @@ -1372,8 +1540,12 @@ bool get_field(MEM_ROOT *mem, Field *field, String *res) field->val_str(&str); if (!(length= str.length())) + { + res->length(0); return 1; - to= strmake_root(mem, str.ptr(), length); + } + if (!(to= strmake_root(mem, str.ptr(), length))) + 
length= 0; // Safety fix res->set(to, length, ((Field_str*)field)->charset()); return 0; } @@ -1439,7 +1611,7 @@ bool check_db_name(char *name) if (use_mb(system_charset_info)) { int len=my_ismbchar(system_charset_info, name, - name+system_charset_info->mbmaxlen); + name+system_charset_info->mbmaxlen); if (len) { name += len; @@ -1508,7 +1680,7 @@ bool check_column_name(const char *name) { const char *start= name; bool last_char_is_space= TRUE; - + while (*name) { #if defined(USE_MB) && defined(USE_MB_IDENT) @@ -1516,7 +1688,7 @@ bool check_column_name(const char *name) if (use_mb(system_charset_info)) { int len=my_ismbchar(system_charset_info, name, - name+system_charset_info->mbmaxlen); + name+system_charset_info->mbmaxlen); if (len) { name += len; @@ -1535,25 +1707,1305 @@ bool check_column_name(const char *name) } /* -** Get type of table from .frm file + Create Item_field for each column in the table. + + SYNPOSIS + st_table::fill_item_list() + item_list a pointer to an empty list used to store items + + DESCRIPTION + Create Item_field object for each column in the table and + initialize it with the corresponding Field. New items are + created in the current THD memory root. + + RETURN VALUE + 0 success + 1 out of memory */ -db_type get_table_type(const char *name) +bool st_table::fill_item_list(List<Item> *item_list) const { - File file; - uchar head[4]; - int error; - DBUG_ENTER("get_table_type"); - DBUG_PRINT("enter",("name: '%s'",name)); - - if ((file=my_open(name,O_RDONLY, MYF(0))) < 0) - DBUG_RETURN(DB_TYPE_UNKNOWN); - error=my_read(file,(byte*) head,4,MYF(MY_NABP)); - my_close(file,MYF(0)); - if (error || head[0] != (uchar) 254 || head[1] != 1 || - (head[2] != FRM_VER && head[2] != FRM_VER+1 && head[2] != FRM_VER+3)) - DBUG_RETURN(DB_TYPE_UNKNOWN); - DBUG_RETURN(ha_checktype((enum db_type) (uint) *(head+3))); + /* + All Item_field's created using a direct pointer to a field + are fixed in Item_field constructor. 
+ */ + for (Field **ptr= field; *ptr; ptr++) + { + Item_field *item= new Item_field(*ptr); + if (!item || item_list->push_back(item)) + return TRUE; + } + return FALSE; +} + +/* + Reset an existing list of Item_field items to point to the + Fields of this table. + + SYNPOSIS + st_table::fill_item_list() + item_list a non-empty list with Item_fields + + DESCRIPTION + This is a counterpart of fill_item_list used to redirect + Item_fields to the fields of a newly created table. + The caller must ensure that number of items in the item_list + is the same as the number of columns in the table. +*/ + +void st_table::reset_item_list(List<Item> *item_list) const +{ + List_iterator_fast<Item> it(*item_list); + for (Field **ptr= field; *ptr; ptr++) + { + Item_field *item_field= (Item_field*) it++; + DBUG_ASSERT(item_field != 0); + item_field->reset_field(*ptr); + } +} + +/* + calculate md5 of query + + SYNOPSIS + st_table_list::calc_md5() + buffer buffer for md5 writing +*/ + +void st_table_list::calc_md5(char *buffer) +{ + my_MD5_CTX context; + uchar digest[16]; + my_MD5Init(&context); + my_MD5Update(&context,(uchar *) query.str, query.length); + my_MD5Final(digest, &context); + sprintf((char *) buffer, + "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", + digest[0], digest[1], digest[2], digest[3], + digest[4], digest[5], digest[6], digest[7], + digest[8], digest[9], digest[10], digest[11], + digest[12], digest[13], digest[14], digest[15]); +} + + +/* + set underlying TABLE for table place holder of VIEW + + DESCRIPTION + Replace all views that only uses one table with the table itself. + This allows us to treat the view as a simple table and even update + it (it is a kind of optimisation) + + SYNOPSIS + st_table_list::set_underlying_merge() +*/ + +void st_table_list::set_underlying_merge() +{ + TABLE_LIST *tbl; + + if ((tbl= merge_underlying_list)) + { + /* This is a view. 
Process all tables of view */ + DBUG_ASSERT(view && effective_algorithm == VIEW_ALGORITHM_MERGE); + do + { + if (tbl->merge_underlying_list) // This is a view + { + DBUG_ASSERT(tbl->view && + tbl->effective_algorithm == VIEW_ALGORITHM_MERGE); + /* + This is the only case where set_ancestor is called on an object + that may not be a view (in which case ancestor is 0) + */ + tbl->merge_underlying_list->set_underlying_merge(); + } + } while ((tbl= tbl->next_local)); + + if (!multitable_view) + { + table= merge_underlying_list->table; + schema_table= merge_underlying_list->schema_table; + } + } +} + + +/* + setup fields of placeholder of merged VIEW + + SYNOPSIS + st_table_list::setup_underlying() + thd - thread handler + + DESCRIPTION + It is: + - preparing translation table for view columns + If there are underlying view(s) procedure first will be called for them. + + RETURN + FALSE - OK + TRUE - error +*/ + +bool st_table_list::setup_underlying(THD *thd) +{ + DBUG_ENTER("st_table_list::setup_underlying"); + + if (!field_translation && merge_underlying_list) + { + Field_translator *transl; + SELECT_LEX *select= &view->select_lex; + Item *item; + TABLE_LIST *tbl; + List_iterator_fast<Item> it(select->item_list); + uint field_count= 0; + + if (check_stack_overrun(thd, STACK_MIN_SIZE, (char *)&field_count)) + { + DBUG_RETURN(TRUE); + } + + for (tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + { + if (tbl->merge_underlying_list && + tbl->setup_underlying(thd)) + { + DBUG_RETURN(TRUE); + } + } + + /* Create view fields translation table */ + + if (!(transl= + (Field_translator*)(thd->stmt_arena-> + alloc(select->item_list.elements * + sizeof(Field_translator))))) + { + DBUG_RETURN(TRUE); + } + + while ((item= it++)) + { + transl[field_count].name= item->name; + transl[field_count++].item= item; + } + field_translation= transl; + field_translation_end= transl + field_count; + /* TODO: use hash for big number of fields */ + + /* full text function moving to current 
select */ + if (view->select_lex.ftfunc_list->elements) + { + Item_func_match *ifm; + SELECT_LEX *current_select= thd->lex->current_select; + List_iterator_fast<Item_func_match> + li(*(view->select_lex.ftfunc_list)); + while ((ifm= li++)) + current_select->ftfunc_list->push_front(ifm); + } + } + DBUG_RETURN(FALSE); +} + + +/* + Prepare where expression of view + + SYNOPSIS + st_table_list::prep_where() + thd - thread handler + conds - condition of this JOIN + no_where_clause - do not build WHERE or ON outer qwery do not need it + (it is INSERT), we do not need conds if this flag is set + + NOTE: have to be called befor CHECK OPTION preparation, because it makes + fix_fields for view WHERE clause + + RETURN + FALSE - OK + TRUE - error +*/ + +bool st_table_list::prep_where(THD *thd, Item **conds, + bool no_where_clause) +{ + DBUG_ENTER("st_table_list::prep_where"); + + for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + { + if (tbl->view && tbl->prep_where(thd, conds, no_where_clause)) + { + DBUG_RETURN(TRUE); + } + } + + if (where) + { + if (!where->fixed && where->fix_fields(thd, &where)) + { + DBUG_RETURN(TRUE); + } + + /* + check that it is not VIEW in which we insert with INSERT SELECT + (in this case we can't add view WHERE condition to main SELECT_LEX) + */ + if (!no_where_clause && !where_processed) + { + TABLE_LIST *tbl= this; + Query_arena *arena= thd->stmt_arena, backup; + arena= thd->activate_stmt_arena_if_needed(&backup); // For easier test + + /* Go up to join tree and try to find left join */ + for (; tbl; tbl= tbl->embedding) + { + if (tbl->outer_join) + { + /* + Store WHERE condition to ON expression for outer join, because + we can't use WHERE to correctly execute left joins on VIEWs and + this expression will not be moved to WHERE condition (i.e. 
will + be clean correctly for PS/SP) + */ + tbl->on_expr= and_conds(tbl->on_expr, + where->copy_andor_structure(thd)); + break; + } + } + if (tbl == 0) + *conds= and_conds(*conds, where->copy_andor_structure(thd)); + if (arena) + thd->restore_active_arena(arena, &backup); + where_processed= TRUE; + } + } + + DBUG_RETURN(FALSE); +} + + +/* + Prepare check option expression of table + + SYNOPSIS + st_table_list::prep_check_option() + thd - thread handler + check_opt_type - WITH CHECK OPTION type (VIEW_CHECK_NONE, + VIEW_CHECK_LOCAL, VIEW_CHECK_CASCADED) + we use this parameter instead of direct check of + effective_with_check to change type of underlying + views to VIEW_CHECK_CASCADED if outer view have + such option and prevent processing of underlying + view check options if outer view have just + VIEW_CHECK_LOCAL option. + + NOTE + This method build check options for every call + (usual execution or every SP/PS call) + This method have to be called after WHERE preparation + (st_table_list::prep_where) + + RETURN + FALSE - OK + TRUE - error +*/ + +bool st_table_list::prep_check_option(THD *thd, uint8 check_opt_type) +{ + DBUG_ENTER("st_table_list::prep_check_option"); + + for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + { + /* see comment of check_opt_type parameter */ + if (tbl->view && + tbl->prep_check_option(thd, + ((check_opt_type == VIEW_CHECK_CASCADED) ? 
+ VIEW_CHECK_CASCADED : + VIEW_CHECK_NONE))) + { + DBUG_RETURN(TRUE); + } + } + + if (check_opt_type) + { + Item *item= 0; + if (where) + { + DBUG_ASSERT(where->fixed); + item= where->copy_andor_structure(thd); + } + if (check_opt_type == VIEW_CHECK_CASCADED) + { + for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + { + if (tbl->check_option) + item= and_conds(item, tbl->check_option); + } + } + if (item) + thd->change_item_tree(&check_option, item); + } + + if (check_option) + { + const char *save_where= thd->where; + thd->where= "check option"; + if (!check_option->fixed && + check_option->fix_fields(thd, &check_option) || + check_option->check_cols(1)) + { + DBUG_RETURN(TRUE); + } + thd->where= save_where; + } + DBUG_RETURN(FALSE); +} + + +/* + Hide errors which show view underlying table information + + SYNOPSIS + st_table_list::hide_view_error() + thd thread handler + +*/ + +void st_table_list::hide_view_error(THD *thd) +{ + /* Hide "Unknown column" or "Unknown function" error */ + if (thd->net.last_errno == ER_BAD_FIELD_ERROR || + thd->net.last_errno == ER_SP_DOES_NOT_EXIST || + thd->net.last_errno == ER_PROCACCESS_DENIED_ERROR || + thd->net.last_errno == ER_COLUMNACCESS_DENIED_ERROR || + thd->net.last_errno == ER_TABLEACCESS_DENIED_ERROR || + thd->net.last_errno == ER_TABLE_NOT_LOCKED || + thd->net.last_errno == ER_NO_SUCH_TABLE) + { + TABLE_LIST *top= top_table(); + thd->clear_error(); + my_error(ER_VIEW_INVALID, MYF(0), top->view_db.str, top->view_name.str); + } + else if (thd->net.last_errno == ER_NO_DEFAULT_FOR_FIELD) + { + TABLE_LIST *top= top_table(); + thd->clear_error(); + // TODO: make correct error message + my_error(ER_NO_DEFAULT_FOR_VIEW_FIELD, MYF(0), + top->view_db.str, top->view_name.str); + } +} + + +/* + Find underlying base tables (TABLE_LIST) which represent given + table_to_find (TABLE) + + SYNOPSIS + st_table_list::find_underlying_table() + table_to_find table to find + + RETURN + 0 table is not found + found table 
reference +*/ + +st_table_list *st_table_list::find_underlying_table(TABLE *table_to_find) +{ + /* is this real table and table which we are looking for? */ + if (table == table_to_find && merge_underlying_list == 0) + return this; + + for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + { + TABLE_LIST *result; + if ((result= tbl->find_underlying_table(table_to_find))) + return result; + } + return 0; +} + +/* + cleunup items belonged to view fields translation table + + SYNOPSIS + st_table_list::cleanup_items() +*/ + +void st_table_list::cleanup_items() +{ + if (!field_translation) + return; + + for (Field_translator *transl= field_translation; + transl < field_translation_end; + transl++) + transl->item->walk(&Item::cleanup_processor, 0); +} + + +/* + check CHECK OPTION condition + + SYNOPSIS + check_option() + ignore_failure ignore check option fail + + RETURN + VIEW_CHECK_OK OK + VIEW_CHECK_ERROR FAILED + VIEW_CHECK_SKIP FAILED, but continue +*/ + +int st_table_list::view_check_option(THD *thd, bool ignore_failure) +{ + if (check_option && check_option->val_int() == 0) + { + TABLE_LIST *main_view= top_table(); + if (ignore_failure) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_VIEW_CHECK_FAILED, ER(ER_VIEW_CHECK_FAILED), + main_view->view_db.str, main_view->view_name.str); + return(VIEW_CHECK_SKIP); + } + my_error(ER_VIEW_CHECK_FAILED, MYF(0), main_view->view_db.str, + main_view->view_name.str); + return(VIEW_CHECK_ERROR); + } + return(VIEW_CHECK_OK); +} + + +/* + Find table in underlying tables by mask and check that only this + table belong to given mask + + SYNOPSIS + st_table_list::check_single_table() + table_arg reference on variable where to store found table + (should be 0 on call, to find table, or point to table for + unique test) + map bit mask of tables + view_arg view for which we are looking table + + RETURN + FALSE table not found or found only one + TRUE found several tables +*/ + +bool 
st_table_list::check_single_table(st_table_list **table_arg, + table_map map, + st_table_list *view_arg) +{ + for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + { + if (tbl->table) + { + if (tbl->table->map & map) + { + if (*table_arg) + return TRUE; + *table_arg= tbl; + tbl->check_option= view_arg->check_option; + } + } + else if (tbl->check_single_table(table_arg, map, view_arg)) + return TRUE; + } + return FALSE; +} + + +/* + Set insert_values buffer + + SYNOPSIS + set_insert_values() + mem_root memory pool for allocating + + RETURN + FALSE - OK + TRUE - out of memory +*/ + +bool st_table_list::set_insert_values(MEM_ROOT *mem_root) +{ + if (table) + { + if (!table->insert_values && + !(table->insert_values= (byte *)alloc_root(mem_root, + table->s->rec_buff_length))) + return TRUE; + } + else + { + DBUG_ASSERT(view && merge_underlying_list); + for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + if (tbl->set_insert_values(mem_root)) + return TRUE; + } + return FALSE; +} + + +/* + Test if this is a leaf with respect to name resolution. + + SYNOPSIS + st_table_list::is_leaf_for_name_resolution() + + DESCRIPTION + A table reference is a leaf with respect to name resolution if + it is either a leaf node in a nested join tree (table, view, + schema table, subquery), or an inner node that represents a + NATURAL/USING join, or a nested join with materialized join + columns. + + RETURN + TRUE if a leaf, FALSE otherwise. +*/ +bool st_table_list::is_leaf_for_name_resolution() +{ + return (view || is_natural_join || is_join_columns_complete || + !nested_join); +} + + +/* + Retrieve the first (left-most) leaf in a nested join tree with + respect to name resolution. + + SYNOPSIS + st_table_list::first_leaf_for_name_resolution() + + DESCRIPTION + Given that 'this' is a nested table reference, recursively walk + down the left-most children of 'this' until we reach a leaf + table reference with respect to name resolution. 
+ + IMPLEMENTATION + The left-most child of a nested table reference is the last element + in the list of children because the children are inserted in + reverse order. + + RETURN + If 'this' is a nested table reference - the left-most child of + the tree rooted in 'this', + else return 'this' +*/ + +TABLE_LIST *st_table_list::first_leaf_for_name_resolution() +{ + TABLE_LIST *cur_table_ref; + NESTED_JOIN *cur_nested_join; + LINT_INIT(cur_table_ref); + + if (is_leaf_for_name_resolution()) + return this; + DBUG_ASSERT(nested_join); + + for (cur_nested_join= nested_join; + cur_nested_join; + cur_nested_join= cur_table_ref->nested_join) + { + List_iterator_fast<TABLE_LIST> it(cur_nested_join->join_list); + cur_table_ref= it++; + /* + If the current nested join is a RIGHT JOIN, the operands in + 'join_list' are in reverse order, thus the first operand is + already at the front of the list. Otherwise the first operand + is in the end of the list of join operands. + */ + if (!(cur_table_ref->outer_join & JOIN_TYPE_RIGHT)) + { + TABLE_LIST *next; + while ((next= it++)) + cur_table_ref= next; + } + if (cur_table_ref->is_leaf_for_name_resolution()) + break; + } + return cur_table_ref; +} + + +/* + Retrieve the last (right-most) leaf in a nested join tree with + respect to name resolution. + + SYNOPSIS + st_table_list::last_leaf_for_name_resolution() + + DESCRIPTION + Given that 'this' is a nested table reference, recursively walk + down the right-most children of 'this' until we reach a leaf + table reference with respect to name resolution. + + IMPLEMENTATION + The right-most child of a nested table reference is the first + element in the list of children because the children are inserted + in reverse order. 
+ + RETURN + - If 'this' is a nested table reference - the right-most child of + the tree rooted in 'this', + - else - 'this' +*/ + +TABLE_LIST *st_table_list::last_leaf_for_name_resolution() +{ + TABLE_LIST *cur_table_ref= this; + NESTED_JOIN *cur_nested_join; + + if (is_leaf_for_name_resolution()) + return this; + DBUG_ASSERT(nested_join); + + for (cur_nested_join= nested_join; + cur_nested_join; + cur_nested_join= cur_table_ref->nested_join) + { + cur_table_ref= cur_nested_join->join_list.head(); + /* + If the current nested is a RIGHT JOIN, the operands in + 'join_list' are in reverse order, thus the last operand is in the + end of the list. + */ + if ((cur_table_ref->outer_join & JOIN_TYPE_RIGHT)) + { + List_iterator_fast<TABLE_LIST> it(cur_nested_join->join_list); + TABLE_LIST *next; + cur_table_ref= it++; + while ((next= it++)) + cur_table_ref= next; + } + if (cur_table_ref->is_leaf_for_name_resolution()) + break; + } + return cur_table_ref; +} + + +/* + Register access mode which we need for underlying tables + + SYNOPSIS + register_want_access() + want_access Acess which we require +*/ + +void st_table_list::register_want_access(ulong want_access) +{ + /* Remove SHOW_VIEW_ACL, because it will be checked during making view */ + want_access&= ~SHOW_VIEW_ACL; + if (belong_to_view) + { + grant.want_privilege= want_access; + if (table) + table->grant.want_privilege= want_access; + } + for (TABLE_LIST *tbl= merge_underlying_list; tbl; tbl= tbl->next_local) + tbl->register_want_access(want_access); +} + + +/* + Load security context information for this view + + SYNOPSIS + st_table_list::prepare_view_securety_context() + thd [in] thread handler + + RETURN + FALSE OK + TRUE Error +*/ + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +bool st_table_list::prepare_view_securety_context(THD *thd) +{ + DBUG_ENTER("st_table_list::prepare_view_securety_context"); + DBUG_PRINT("enter", ("table: %s", alias)); + + DBUG_ASSERT(!prelocking_placeholder && view); + if (view_suid) + { + 
DBUG_PRINT("info", ("This table is suid view => load contest")); + DBUG_ASSERT(view && view_sctx); + if (acl_getroot_no_password(view_sctx, + definer.user.str, + definer.host.str, + definer.host.str, + thd->db)) + { + if (thd->lex->sql_command == SQLCOM_SHOW_CREATE) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_NO_SUCH_USER, + ER(ER_NO_SUCH_USER), + definer.user.str, definer.host.str); + } + else + { + if (thd->security_ctx->master_access & SUPER_ACL) + { + my_error(ER_NO_SUCH_USER, MYF(0), definer.user.str, definer.host.str); + + } + else + { + my_error(ER_ACCESS_DENIED_ERROR, MYF(0), + thd->security_ctx->priv_user, + thd->security_ctx->priv_host, + (thd->password ? ER(ER_YES) : ER(ER_NO))); + } + DBUG_RETURN(TRUE); + } + } + } + DBUG_RETURN(FALSE); +} +#endif + + +/* + Find security context of current view + + SYNOPSIS + st_table_list::find_view_security_context() + thd [in] thread handler + +*/ + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +Security_context *st_table_list::find_view_security_context(THD *thd) +{ + Security_context *sctx; + TABLE_LIST *upper_view= this; + DBUG_ENTER("st_table_list::find_view_security_context"); + + DBUG_ASSERT(view); + while (upper_view && !upper_view->view_suid) + { + DBUG_ASSERT(!upper_view->prelocking_placeholder); + upper_view= upper_view->referencing_view; + } + if (upper_view) + { + DBUG_PRINT("info", ("Securety context of view %s will be used", + upper_view->alias)); + sctx= upper_view->view_sctx; + DBUG_ASSERT(sctx); + } + else + { + DBUG_PRINT("info", ("Current global context will be used")); + sctx= thd->security_ctx; + } + DBUG_RETURN(sctx); +} +#endif + + +/* + Prepare security context and load underlying tables priveleges for view + + SYNOPSIS + st_table_list::prepare_security() + thd [in] thread handler + + RETURN + FALSE OK + TRUE Error +*/ + +bool st_table_list::prepare_security(THD *thd) +{ + List_iterator_fast<TABLE_LIST> tb(*view_tables); + TABLE_LIST *tbl; + 
DBUG_ENTER("st_table_list::prepare_security"); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *save_security_ctx= thd->security_ctx; + + DBUG_ASSERT(!prelocking_placeholder); + if (prepare_view_securety_context(thd)) + DBUG_RETURN(TRUE); + thd->security_ctx= find_view_security_context(thd); + while ((tbl= tb++)) + { + DBUG_ASSERT(tbl->referencing_view); + char *local_db, *local_table_name; + if (tbl->view) + { + local_db= tbl->view_db.str; + local_table_name= tbl->view_name.str; + } + else + { + local_db= tbl->db; + local_table_name= tbl->table_name; + } + fill_effective_table_privileges(thd, &tbl->grant, local_db, + local_table_name); + if (tbl->table) + tbl->table->grant= grant; + } + thd->security_ctx= save_security_ctx; +#else + while ((tbl= tb++)) + tbl->grant.privilege= ~NO_ACCESS; +#endif + DBUG_RETURN(FALSE); +} + + +Natural_join_column::Natural_join_column(Field_translator *field_param, + TABLE_LIST *tab) +{ + DBUG_ASSERT(tab->field_translation); + view_field= field_param; + table_field= NULL; + table_ref= tab; + is_common= FALSE; +} + + +Natural_join_column::Natural_join_column(Field *field_param, + TABLE_LIST *tab) +{ + DBUG_ASSERT(tab->table == field_param->table); + table_field= field_param; + view_field= NULL; + table_ref= tab; + is_common= FALSE; +} + + +const char *Natural_join_column::name() +{ + if (view_field) + { + DBUG_ASSERT(table_field == NULL); + return view_field->name; + } + + return table_field->field_name; +} + + +Item *Natural_join_column::create_item(THD *thd) +{ + if (view_field) + { + DBUG_ASSERT(table_field == NULL); + return create_view_field(thd, table_ref, &view_field->item, + view_field->name); + } + return new Item_field(thd, &thd->lex->current_select->context, table_field); +} + + +Field *Natural_join_column::field() +{ + if (view_field) + { + DBUG_ASSERT(table_field == NULL); + return NULL; + } + return table_field; +} + + +const char *Natural_join_column::table_name() +{ + DBUG_ASSERT(table_ref); + return 
table_ref->alias; +} + + +const char *Natural_join_column::db_name() +{ + if (view_field) + return table_ref->view_db.str; + + /* + Test that TABLE_LIST::db is the same as st_table_share::db to + ensure consistency. An exception are I_S schema tables, which + are inconsistent in this respect. + */ + DBUG_ASSERT(!strcmp(table_ref->db, + table_ref->table->s->db) || + (table_ref->schema_table && + table_ref->table->s->db[0] == 0)); + return table_ref->db; +} + + +GRANT_INFO *Natural_join_column::grant() +{ + if (view_field) + return &(table_ref->grant); + return &(table_ref->table->grant); +} + + +void Field_iterator_view::set(TABLE_LIST *table) +{ + DBUG_ASSERT(table->field_translation); + view= table; + ptr= table->field_translation; + array_end= table->field_translation_end; +} + + +const char *Field_iterator_table::name() +{ + return (*ptr)->field_name; +} + + +Item *Field_iterator_table::create_item(THD *thd) +{ + return new Item_field(thd, &thd->lex->current_select->context, *ptr); +} + + +const char *Field_iterator_view::name() +{ + return ptr->name; +} + + +Item *Field_iterator_view::create_item(THD *thd) +{ + return create_view_field(thd, view, &ptr->item, ptr->name); +} + +Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref, + const char *name) +{ + bool save_wrapper= thd->lex->select_lex.no_wrap_view_item; + Item *field= *field_ref; + DBUG_ENTER("create_view_field"); + + if (view->schema_table_reformed) + { + /* + Translation table items are always Item_fields and already fixed + ('mysql_schema_table' function). So we can return directly the + field. This case happens only for 'show & where' commands. 
+ */ + DBUG_ASSERT(field && field->fixed); + DBUG_RETURN(field); + } + + DBUG_ASSERT(field); + thd->lex->current_select->no_wrap_view_item= TRUE; + if (!field->fixed) + { + if (field->fix_fields(thd, field_ref)) + { + thd->lex->current_select->no_wrap_view_item= save_wrapper; + DBUG_RETURN(0); + } + field= *field_ref; + } + thd->lex->current_select->no_wrap_view_item= save_wrapper; + if (thd->lex->current_select->no_wrap_view_item) + { + DBUG_RETURN(field); + } + Item *item= new Item_direct_view_ref(&view->view->select_lex.context, + field_ref, view->alias, + name); + DBUG_RETURN(item); +} + + +void Field_iterator_natural_join::set(TABLE_LIST *table_ref) +{ + DBUG_ASSERT(table_ref->join_columns); + column_ref_it.init(*(table_ref->join_columns)); + cur_column_ref= column_ref_it++; +} + + +void Field_iterator_natural_join::next() +{ + cur_column_ref= column_ref_it++; + DBUG_ASSERT(!cur_column_ref || ! cur_column_ref->table_field || + cur_column_ref->table_ref->table == + cur_column_ref->table_field->table); +} + + +void Field_iterator_table_ref::set_field_iterator() +{ + DBUG_ENTER("Field_iterator_table_ref::set_field_iterator"); + /* + If the table reference we are iterating over is a natural join, or it is + an operand of a natural join, and TABLE_LIST::join_columns contains all + the columns of the join operand, then we pick the columns from + TABLE_LIST::join_columns, instead of the orginial container of the + columns of the join operator. + */ + if (table_ref->is_join_columns_complete) + { + /* Necesary, but insufficient conditions. */ + DBUG_ASSERT(table_ref->is_natural_join || + table_ref->nested_join || + table_ref->join_columns && + /* This is a merge view. */ + ((table_ref->field_translation && + table_ref->join_columns->elements == + (ulong)(table_ref->field_translation_end - + table_ref->field_translation)) || + /* This is stored table or a tmptable view. 
*/ + (!table_ref->field_translation && + table_ref->join_columns->elements == + table_ref->table->s->fields))); + field_it= &natural_join_it; + DBUG_PRINT("info",("field_it for '%s' is Field_iterator_natural_join", + table_ref->alias)); + } + /* This is a merge view, so use field_translation. */ + else if (table_ref->field_translation) + { + DBUG_ASSERT(table_ref->view && + table_ref->effective_algorithm == VIEW_ALGORITHM_MERGE); + field_it= &view_field_it; + DBUG_PRINT("info", ("field_it for '%s' is Field_iterator_view", + table_ref->alias)); + } + /* This is a base table or stored view. */ + else + { + DBUG_ASSERT(table_ref->table || table_ref->view); + field_it= &table_field_it; + DBUG_PRINT("info", ("field_it for '%s' is Field_iterator_table", + table_ref->alias)); + } + field_it->set(table_ref); + DBUG_VOID_RETURN; +} + + +void Field_iterator_table_ref::set(TABLE_LIST *table) +{ + DBUG_ASSERT(table); + first_leaf= table->first_leaf_for_name_resolution(); + last_leaf= table->last_leaf_for_name_resolution(); + DBUG_ASSERT(first_leaf && last_leaf); + table_ref= first_leaf; + set_field_iterator(); +} + + +void Field_iterator_table_ref::next() +{ + /* Move to the next field in the current table reference. */ + field_it->next(); + /* + If all fields of the current table reference are exhausted, move to + the next leaf table reference. 
+ */ + if (field_it->end_of_fields() && table_ref != last_leaf) + { + table_ref= table_ref->next_name_resolution_table; + DBUG_ASSERT(table_ref); + set_field_iterator(); + } +} + + +const char *Field_iterator_table_ref::table_name() +{ + if (table_ref->view) + return table_ref->view_name.str; + else if (table_ref->is_natural_join) + return natural_join_it.column_ref()->table_name(); + + DBUG_ASSERT(!strcmp(table_ref->table_name, + table_ref->table->s->table_name)); + return table_ref->table_name; +} + + +const char *Field_iterator_table_ref::db_name() +{ + if (table_ref->view) + return table_ref->view_db.str; + else if (table_ref->is_natural_join) + return natural_join_it.column_ref()->db_name(); + + /* + Test that TABLE_LIST::db is the same as st_table_share::db to + ensure consistency. An exception are I_S schema tables, which + are inconsistent in this respect. + */ + DBUG_ASSERT(!strcmp(table_ref->db, table_ref->table->s->db) || + (table_ref->schema_table && + table_ref->table->s->db[0] == 0)); + + return table_ref->db; +} + + +GRANT_INFO *Field_iterator_table_ref::grant() +{ + if (table_ref->view) + return &(table_ref->grant); + else if (table_ref->is_natural_join) + return natural_join_it.column_ref()->grant(); + return &(table_ref->table->grant); +} + + +/* + Create new or return existing column reference to a column of a + natural/using join. + + SYNOPSIS + Field_iterator_table_ref::get_or_create_column_ref() + parent_table_ref the parent table reference over which the + iterator is iterating + + DESCRIPTION + Create a new natural join column for the current field of the + iterator if no such column was created, or return an already + created natural join column. The former happens for base tables or + views, and the latter for natural/using joins. If a new field is + created, then the field is added to 'parent_table_ref' if it is + given, or to the original table referene of the field if + parent_table_ref == NULL. 
+ + NOTES + This method is designed so that when a Field_iterator_table_ref + walks through the fields of a table reference, all its fields + are created and stored as follows: + - If the table reference being iterated is a stored table, view or + natural/using join, store all natural join columns in a list + attached to that table reference. + - If the table reference being iterated is a nested join that is + not natural/using join, then do not materialize its result + fields. This is OK because for such table references + Field_iterator_table_ref iterates over the fields of the nested + table references (recursively). In this way we avoid the storage + of unnecessay copies of result columns of nested joins. + + RETURN + # Pointer to a column of a natural join (or its operand) + NULL No memory to allocate the column +*/ + +Natural_join_column * +Field_iterator_table_ref::get_or_create_column_ref(TABLE_LIST *parent_table_ref) +{ + Natural_join_column *nj_col; + bool is_created= TRUE; + uint field_count; + TABLE_LIST *add_table_ref= parent_table_ref ? + parent_table_ref : table_ref; + LINT_INIT(field_count); + + if (field_it == &table_field_it) + { + /* The field belongs to a stored table. */ + Field *tmp_field= table_field_it.field(); + nj_col= new Natural_join_column(tmp_field, table_ref); + field_count= table_ref->table->s->fields; + } + else if (field_it == &view_field_it) + { + /* The field belongs to a merge view or information schema table. */ + Field_translator *translated_field= view_field_it.field_translator(); + nj_col= new Natural_join_column(translated_field, table_ref); + field_count= table_ref->field_translation_end - + table_ref->field_translation; + } + else + { + /* + The field belongs to a NATURAL join, therefore the column reference was + already created via one of the two constructor calls above. In this case + we just return the already created column reference. 
+ */ + DBUG_ASSERT(table_ref->is_join_columns_complete); + is_created= FALSE; + nj_col= natural_join_it.column_ref(); + DBUG_ASSERT(nj_col); + } + DBUG_ASSERT(!nj_col->table_field || + nj_col->table_ref->table == nj_col->table_field->table); + + /* + If the natural join column was just created add it to the list of + natural join columns of either 'parent_table_ref' or to the table + reference that directly contains the original field. + */ + if (is_created) + { + /* Make sure not all columns were materialized. */ + DBUG_ASSERT(!add_table_ref->is_join_columns_complete); + if (!add_table_ref->join_columns) + { + /* Create a list of natural join columns on demand. */ + if (!(add_table_ref->join_columns= new List<Natural_join_column>)) + return NULL; + add_table_ref->is_join_columns_complete= FALSE; + } + add_table_ref->join_columns->push_back(nj_col); + /* + If new fields are added to their original table reference, mark if + all fields were added. We do it here as the caller has no easy way + of knowing when to do it. + If the fields are being added to parent_table_ref, then the caller + must take care to mark when all fields are created/added. + */ + if (!parent_table_ref && + add_table_ref->join_columns->elements == field_count) + add_table_ref->is_join_columns_complete= TRUE; + } + + return nj_col; +} + + +/* + Return an existing reference to a column of a natural/using join. + + SYNOPSIS + Field_iterator_table_ref::get_natural_column_ref() + + DESCRIPTION + The method should be called in contexts where it is expected that + all natural join columns are already created, and that the column + being retrieved is a Natural_join_column. 
+ + RETURN + # Pointer to a column of a natural join (or its operand) + NULL No memory to allocate the column +*/ + +Natural_join_column * +Field_iterator_table_ref::get_natural_column_ref() +{ + Natural_join_column *nj_col; + + DBUG_ASSERT(field_it == &natural_join_it); + /* + The field belongs to a NATURAL join, therefore the column reference was + already created via one of the two constructor calls above. In this case + we just return the already created column reference. + */ + nj_col= natural_join_it.column_ref(); + DBUG_ASSERT(nj_col && + (!nj_col->table_field || + nj_col->table_ref->table == nj_col->table_field->table)); + return nj_col; } /* @@ -1563,22 +3015,52 @@ db_type get_table_type(const char *name) st_table_list::reinit_before_use() */ -void st_table_list::reinit_before_use(THD * /* thd */) +void st_table_list::reinit_before_use(THD *thd) { /* Reset old pointers to TABLEs: they are not valid since the tables were closed in the end of previous prepare or execute call. */ table= 0; - table_list= 0; + /* Reset is_schema_table_processed value(needed for I_S tables */ + schema_table_state= NOT_PROCESSED; + + TABLE_LIST *embedded; /* The table at the current level of nesting. */ + TABLE_LIST *parent_embedding= this; /* The parent nested table reference. */ + do + { + embedded= parent_embedding; + if (embedded->prep_on_expr) + embedded->on_expr= embedded->prep_on_expr->copy_andor_structure(thd); + parent_embedding= embedded->embedding; + } + while (parent_embedding && + parent_embedding->nested_join->join_list.head() == embedded); } +/* + Return subselect that contains the FROM list this table is taken from + + SYNOPSIS + st_table_list::containing_subselect() + + RETURN + Subselect item for the subquery that contains the FROM list + this table is taken from if there is any + 0 - otherwise + +*/ + +Item_subselect *st_table_list::containing_subselect() +{ + return (select_lex ? 
select_lex->master_unit()->item : 0); +} /***************************************************************************** ** Instansiate templates *****************************************************************************/ -#ifdef __GNUC__ +#ifdef HAVE_EXPLICIT_TEMPLATE_INSTANTIATION template class List<String>; template class List_iterator<String>; #endif diff --git a/sql/table.h b/sql/table.h index d615623cc37..b795fa78e51 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -18,8 +17,12 @@ /* Structs that defines the TABLE */ class Item; /* Needed by ORDER */ +class Item_subselect; class GRANT_TABLE; class st_select_lex_unit; +class st_select_lex; +class COND_EQUAL; +class Security_context; /* Order clause list element */ @@ -27,12 +30,16 @@ typedef struct st_order { struct st_order *next; Item **item; /* Point at item in select fields */ Item *item_ptr; /* Storage for initial item */ + Item **item_copy; /* For SPs; the original item ptr */ + int counter; /* position in SELECT list, correct + only if counter_used is true*/ bool asc; /* true if ascending */ bool free_me; /* true if item isn't shared */ bool in_field_list; /* true if in select field list */ + bool counter_used; /* parameter was counter of columns */ Field *field; /* If tmp-table group */ char *buff; /* If tmp-table group */ - table_map used,depend_map; + table_map used, depend_map; } ORDER; typedef struct st_grant_info @@ -41,13 +48,29 @@ typedef struct 
st_grant_info uint version; ulong privilege; ulong want_privilege; + /* + Stores the requested access acl of top level tables list. Is used to + check access rights to the underlying tables of a view. + */ + ulong orig_want_privilege; } GRANT_INFO; -enum tmp_table_type {NO_TMP_TABLE=0, TMP_TABLE=1, TRANSACTIONAL_TMP_TABLE=2}; +enum tmp_table_type {NO_TMP_TABLE=0, TMP_TABLE=1, TRANSACTIONAL_TMP_TABLE=2, + SYSTEM_TMP_TABLE=3}; + +enum frm_type_enum +{ + FRMTYPE_ERROR= 0, + FRMTYPE_TABLE, + FRMTYPE_VIEW +}; typedef struct st_filesort_info { IO_CACHE *io_cache; /* If sorted through filebyte */ + uchar **sort_keys; /* Buffer for sorting keys */ + byte *buffpek; /* Buffer for buffpek structures */ + uint buffpek_len; /* Max number of buffpeks in the buffer */ byte *addon_buf; /* Pointer to a buffer if sorted with fields */ uint addon_length; /* Length of the buffer */ struct st_sort_addon_field *addon_field; /* Pointer to the fields info */ @@ -75,49 +98,133 @@ enum timestamp_auto_set_type #define clear_timestamp_auto_bits(_target_, _bits_) \ (_target_)= (enum timestamp_auto_set_type)((int)(_target_) & ~(int)(_bits_)) -/* Table cache entry struct */ - class Field_timestamp; class Field_blob; +class Table_triggers_list; -struct st_table { - handler *file; - Field **field; /* Pointer to fields */ - Field_blob **blob_field; /* Pointer to blob fields */ +/* + This structure is shared between different table objects. There is one + instance of table share per one table in the database. +*/ + +typedef struct st_table_share +{ /* hash of field names (contains pointers to elements of field array) */ - HASH name_hash; - byte *record[2]; /* Pointer to records */ - byte *default_values; /* Default values for INSERT */ - byte *insert_values; /* used by INSERT ... 
UPDATE */ - uint fields; /* field count */ - uint reclength; /* Recordlength */ - uint rec_buff_length; - uint keys,key_parts,primary_key,max_key_length,max_unique_length; - uint total_key_length; - uint uniques; - uint null_fields; /* number of null fields */ - uint blob_fields; /* number of blob fields */ - key_map keys_in_use, keys_for_keyread, read_only_keys; - key_map quick_keys; - key_map used_keys; /* keys that cover all used table fields */ - key_map keys_in_use_for_query; - KEY *key_info; /* data of keys in database */ + HASH name_hash; /* hash of field names */ + MEM_ROOT mem_root; TYPELIB keynames; /* Pointers to keynames */ - ha_rows max_rows; /* create information */ - ha_rows min_rows; /* create information */ - ulong avg_row_length; /* create information */ - ulong raid_chunksize; TYPELIB fieldnames; /* Pointer to fieldnames */ TYPELIB *intervals; /* pointer to interval info */ +#ifdef NOT_YET + pthread_mutex_t mutex; /* For locking the share */ + pthread_cond_t cond; /* To signal that share is ready */ + struct st_table *open_tables; /* link to open tables */ + struct st_table *used_next, /* Link to used tables */ + **used_prev; + /* The following is copied to each TABLE on OPEN */ + Field **field; + KEY *key_info; /* data of keys in database */ +#endif + uint *blob_field; /* Index to blobs in Field arrray*/ + byte *default_values; /* row with default values */ + LEX_STRING comment; /* Comment about table */ + CHARSET_INFO *table_charset; /* Default charset of string fields */ + + /* A pair "database_name\0table_name\0", widely used as simply a db name */ + char *table_cache_key; + const char *db; /* Pointer to db */ + const char *table_name; /* Table name (for open) */ + const char *path; /* Path to .frm file (from datadir) */ + LEX_STRING connect_string; + key_map keys_in_use; /* Keys in use for table */ + key_map keys_for_keyread; + ulong avg_row_length; /* create information */ + ulong raid_chunksize; + ulong version, flush_version, 
mysql_version; + ulong timestamp_offset; /* Set to offset+1 of record */ + ulong reclength; /* Recordlength */ + + ha_rows min_rows, max_rows; /* create information */ enum db_type db_type; /* table_type for handler */ enum row_type row_type; /* How rows are stored */ + enum tmp_table_type tmp_table; + + uint blob_ptr_size; /* 4 or 8 */ + uint null_bytes, last_null_bit_pos; + uint key_length; /* Length of table_cache_key */ + uint fields; /* Number of fields */ + uint rec_buff_length; /* Size of table->record[] buffer */ + uint keys, key_parts; + uint max_key_length, max_unique_length, total_key_length; + uint uniques; /* Number of UNIQUE index */ + uint null_fields; /* number of null fields */ + uint blob_fields; /* number of blob fields */ + uint varchar_fields; /* number of varchar fields */ uint db_create_options; /* Create options from database */ uint db_options_in_use; /* Options in use */ uint db_record_offset; /* if HA_REC_IN_SEQ */ - uint db_stat; /* mode of file as in handler.h */ - uint raid_type,raid_chunks; - uint status; /* Used by postfix.. */ - uint system; /* Set if system record */ + uint raid_type, raid_chunks; + uint open_count; /* Number of tables in open list */ + /* Index of auto-updated TIMESTAMP field in field array */ + uint primary_key; + uint timestamp_field_offset; + uint next_number_index; + uint next_number_key_offset; + uchar frm_version; + my_bool system; /* Set if system record */ + my_bool crypted; /* If .frm file is crypted */ + my_bool db_low_byte_first; /* Portable row format */ + my_bool crashed; + my_bool is_view; + my_bool name_lock, replace_with_name_lock; + /* + TRUE if this is a system table like 'mysql.proc', which we want to be + able to open and lock even when we already have some tables open and + locked. To avoid deadlocks we have to put certain restrictions on + locking of this table for writing. FALSE - otherwise. 
+ */ + my_bool system_table; +} TABLE_SHARE; + + +/* Information for one open table */ + +struct st_table { + st_table() {} /* Remove gcc warning */ + + TABLE_SHARE *s; + handler *file; +#ifdef NOT_YET + struct st_table *used_next, **used_prev; /* Link to used tables */ + struct st_table *open_next, **open_prev; /* Link to open tables */ +#endif + struct st_table *next, *prev; + + THD *in_use; /* Which thread uses this */ + Field **field; /* Pointer to fields */ + + byte *record[2]; /* Pointer to records */ + byte *insert_values; /* used by INSERT ... UPDATE */ + key_map quick_keys, used_keys, keys_in_use_for_query; + KEY *key_info; /* data of keys in database */ + + Field *next_number_field, /* Set if next_number is activated */ + *found_next_number_field, /* Set on open */ + *rowid_field; + Field_timestamp *timestamp_field; + + /* Table's triggers, 0 if there are no of them */ + Table_triggers_list *triggers; + struct st_table_list *pos_in_table_list;/* Element referring to this table */ + ORDER *group; + const char *alias; /* alias or table name */ + uchar *null_flags; + query_id_t query_id; + + ha_rows quick_rows[MAX_KEY]; + key_part_map const_key_parts[MAX_KEY]; + uint quick_key_parts[MAX_KEY]; + uint quick_n_ranges[MAX_KEY]; /* If this table has TIMESTAMP field with auto-set property (pointed by @@ -132,119 +239,611 @@ struct st_table { as example). */ timestamp_auto_set_type timestamp_field_type; - /* Index of auto-updated TIMESTAMP field in field array */ - uint timestamp_field_offset; - - uint next_number_index; - uint blob_ptr_size; /* 4 or 8 */ - uint next_number_key_offset; - uint lock_position; /* Position in MYSQL_LOCK.table */ - uint lock_data_start; /* Start pos. in MYSQL_LOCK.locks */ - uint lock_count; /* Number of locks */ - int current_lock; /* Type of lock on table */ - enum tmp_table_type tmp_table; + table_map map; /* ID bit of table (1,2,4,8,16...) 
*/ + + uint lock_position; /* Position in MYSQL_LOCK.table */ + uint lock_data_start; /* Start pos. in MYSQL_LOCK.locks */ + uint lock_count; /* Number of locks */ + uint tablenr,used_fields; + uint temp_pool_slot; /* Used by intern temp tables */ + uint status; /* What's in record[0] */ + uint db_stat; /* mode of file as in handler.h */ + /* number of select if it is derived table */ + uint derived_select_number; + int current_lock; /* Type of lock on table */ my_bool copy_blobs; /* copy_blobs when storing */ + + /* + 0 or JOIN_TYPE_{LEFT|RIGHT}. Currently this is only compared to 0. + If maybe_null !=0, this table is inner w.r.t. some outer join operation, + and null_row may be true. + */ + uint maybe_null; /* - Used in outer joins: if true, all columns are considered to have NULL - values, including columns declared as "not null". + If true, the current table row is considered to have all columns set to + NULL, including columns declared as "not null" (see maybe_null). */ my_bool null_row; - /* 0 or JOIN_TYPE_{LEFT|RIGHT}, same as TABLE_LIST::outer_join */ - my_bool outer_join; - my_bool maybe_null; /* true if (outer_join != 0) */ my_bool force_index; my_bool distinct,const_table,no_rows; - my_bool key_read; - my_bool crypted; - my_bool db_low_byte_first; /* Portable row format */ + my_bool key_read, no_keyread; my_bool locked_by_flush; my_bool locked_by_name; my_bool fulltext_searched; - my_bool crashed; - my_bool is_view; - my_bool no_keyread, no_cache; - my_bool clear_query_id; /* To reset query_id for tables and cols */ - my_bool auto_increment_field_not_null; - Field *next_number_field, /* Set if next_number is activated */ - *found_next_number_field, /* Set on open */ - *rowid_field; - Field_timestamp *timestamp_field; -#if MYSQL_VERSION_ID < 40100 + my_bool no_cache; + /* To signal that we should reset query_id for tables and cols */ + my_bool clear_query_id; /* - Indicates whenever we have to set field_length members of all TIMESTAMP - fields to 19 (to 
honour 'new_mode' variable) or to original - field_length values. + To indicate that a non-null value of the auto_increment field + was provided by the user or retrieved from the current record. + Used only in the MODE_NO_AUTO_VALUE_ON_ZERO mode. */ - my_bool timestamp_mode; -#endif - my_string comment; /* Comment about table */ - CHARSET_INFO *table_charset; /* Default charset of string fields */ + my_bool auto_increment_field_not_null; + my_bool insert_or_update; /* Can be used by the handler */ + my_bool alias_name_used; /* true if table_name is alias */ + REGINFO reginfo; /* field connections */ MEM_ROOT mem_root; GRANT_INFO grant; - - /* A pair "database_name\0table_name\0", widely used as simply a db name */ - char *table_cache_key; - char *table_name,*real_name,*path; - uint key_length; /* Length of key */ - uint tablenr,used_fields,null_bytes; - table_map map; /* ID bit of table (1,2,4,8,16...) */ - ulong version,flush_version; - uchar *null_flags; FILESORT_INFO sort; - ORDER *group; - ha_rows quick_rows[MAX_KEY]; - uint quick_key_parts[MAX_KEY]; - key_part_map const_key_parts[MAX_KEY]; - ulong query_id; - uchar frm_version; + TABLE_SHARE share_not_to_be_used; /* To be deleted when true shares */ - union /* Temporary variables */ - { - uint temp_pool_slot; /* Used by intern temp tables */ - struct st_table_list *pos_in_table_list; - }; - /* number of select if it is derived table */ - uint derived_select_number; - THD *in_use; /* Which thread uses this */ - struct st_table *next,*prev; + bool fill_item_list(List<Item> *item_list) const; + void reset_item_list(List<Item> *item_list) const; +}; + +enum enum_schema_table_state +{ + NOT_PROCESSED= 0, + PROCESSED_BY_CREATE_SORT_INDEX, + PROCESSED_BY_JOIN_EXEC +}; + +typedef struct st_foreign_key_info +{ + LEX_STRING *forein_id; + LEX_STRING *referenced_db; + LEX_STRING *referenced_table; + LEX_STRING *constraint_method; + List<LEX_STRING> foreign_fields; + List<LEX_STRING> referenced_fields; +} FOREIGN_KEY_INFO; 
+ + +enum enum_schema_tables +{ + SCH_CHARSETS= 0, + SCH_COLLATIONS, + SCH_COLLATION_CHARACTER_SET_APPLICABILITY, + SCH_COLUMNS, + SCH_COLUMN_PRIVILEGES, + SCH_KEY_COLUMN_USAGE, + SCH_OPEN_TABLES, + SCH_PROCEDURES, + SCH_SCHEMATA, + SCH_SCHEMA_PRIVILEGES, + SCH_STATISTICS, + SCH_STATUS, + SCH_TABLES, + SCH_TABLE_CONSTRAINTS, + SCH_TABLE_NAMES, + SCH_TABLE_PRIVILEGES, + SCH_TRIGGERS, + SCH_USER_PRIVILEGES, + SCH_VARIABLES, + SCH_VIEWS }; +typedef struct st_field_info +{ + const char* field_name; + uint field_length; + enum enum_field_types field_type; + int value; + bool maybe_null; + const char* old_name; +} ST_FIELD_INFO; + + +struct st_table_list; +typedef class Item COND; + +typedef struct st_schema_table +{ + const char* table_name; + ST_FIELD_INFO *fields_info; + /* Create information_schema table */ + TABLE *(*create_table) (THD *thd, struct st_table_list *table_list); + /* Fill table with data */ + int (*fill_table) (THD *thd, struct st_table_list *tables, COND *cond); + /* Handle fileds for old SHOW */ + int (*old_format) (THD *thd, struct st_schema_table *schema_table); + int (*process_table) (THD *thd, struct st_table_list *tables, + TABLE *table, bool res, const char *base_name, + const char *file_name); + int idx_field1, idx_field2; + bool hidden; +} ST_SCHEMA_TABLE; + + #define JOIN_TYPE_LEFT 1 #define JOIN_TYPE_RIGHT 2 +#define VIEW_ALGORITHM_UNDEFINED 0 +#define VIEW_ALGORITHM_TMPTABLE 1 +#define VIEW_ALGORITHM_MERGE 2 + +#define VIEW_SUID_INVOKER 0 +#define VIEW_SUID_DEFINER 1 +#define VIEW_SUID_DEFAULT 2 + +/* view WITH CHECK OPTION parameter options */ +#define VIEW_CHECK_NONE 0 +#define VIEW_CHECK_LOCAL 1 +#define VIEW_CHECK_CASCADED 2 + +/* result of view WITH CHECK OPTION parameter check */ +#define VIEW_CHECK_OK 0 +#define VIEW_CHECK_ERROR 1 +#define VIEW_CHECK_SKIP 2 + +struct st_lex; +class select_union; +class TMP_TABLE_PARAM; + +Item *create_view_field(THD *thd, st_table_list *view, Item **field_ref, + const char *name); + +struct 
Field_translator +{ + Item *item; + const char *name; +}; + + +/* + Column reference of a NATURAL/USING join. Since column references in + joins can be both from views and stored tables, may point to either a + Field (for tables), or a Field_translator (for views). +*/ + +class Natural_join_column: public Sql_alloc +{ +public: + Field_translator *view_field; /* Column reference of merge view. */ + Field *table_field; /* Column reference of table or temp view. */ + st_table_list *table_ref; /* Original base table/view reference. */ + /* + True if a common join column of two NATURAL/USING join operands. Notice + that when we have a hierarchy of nested NATURAL/USING joins, a column can + be common at some level of nesting but it may not be common at higher + levels of nesting. Thus this flag may change depending on at which level + we are looking at some column. + */ + bool is_common; +public: + Natural_join_column(Field_translator *field_param, st_table_list *tab); + Natural_join_column(Field *field_param, st_table_list *tab); + const char *name(); + Item *create_item(THD *thd); + Field *field(); + const char *table_name(); + const char *db_name(); + GRANT_INFO *grant(); +}; + + +/* + Table reference in the FROM clause. + + These table references can be of several types that correspond to + different SQL elements. Below we list all types of TABLE_LISTs with + the necessary conditions to determine when a TABLE_LIST instance + belongs to a certain type. 
+ + 1) table (TABLE_LIST::view == NULL) + - base table + (TABLE_LIST::derived == NULL) + - subquery - TABLE_LIST::table is a temp table + (TABLE_LIST::derived != NULL) + - information schema table + (TABLE_LIST::schema_table != NULL) + NOTICE: for schema tables TABLE_LIST::field_translation may be != NULL + 2) view (TABLE_LIST::view != NULL) + - merge (TABLE_LIST::effective_algorithm == VIEW_ALGORITHM_MERGE) + also (TABLE_LIST::field_translation != NULL) + - tmptable (TABLE_LIST::effective_algorithm == VIEW_ALGORITHM_TMPTABLE) + also (TABLE_LIST::field_translation == NULL) + 3) nested table reference (TABLE_LIST::nested_join != NULL) + - table sequence - e.g. (t1, t2, t3) + TODO: how to distinguish from a JOIN? + - general JOIN + TODO: how to distinguish from a table sequence? + - NATURAL JOIN + (TABLE_LIST::natural_join != NULL) + - JOIN ... USING + (TABLE_LIST::join_using_fields != NULL) +*/ + typedef struct st_table_list { - struct st_table_list *next; - char *db, *alias, *real_name; - char *option; /* Used by cache index */ + st_table_list() {} /* Remove gcc warning */ + /* + List of tables local to a subquery (used by SQL_LIST). Considers + views as leaves (unlike 'next_leaf' below). Created at parse time + in st_select_lex::add_table_to_list() -> table_list.link_in_list(). + */ + struct st_table_list *next_local; + /* link in a global list of all queries tables */ + struct st_table_list *next_global, **prev_global; + char *db, *alias, *table_name, *schema_table_name; + char *option; /* Used by cache index */ Item *on_expr; /* Used with outer join */ - struct st_table_list *natural_join; /* natural join on this table*/ - /* ... join ... USE INDEX ... 
IGNORE INDEX */ - List<String> *use_index, *ignore_index; - TABLE *table; /* opened table */ - st_table_list *table_list; /* pointer to node of list of all tables */ - class st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ - GRANT_INFO grant; + /* + The structure of ON expression presented in the member above + can be changed during certain optimizations. This member + contains a snapshot of AND-OR structure of the ON expression + made after permanent transformations of the parse tree, and is + used to restore ON clause before every reexecution of a prepared + statement or stored procedure. + */ + Item *prep_on_expr; + COND_EQUAL *cond_equal; /* Used with outer join */ + /* + During parsing - left operand of NATURAL/USING join where 'this' is + the right operand. After parsing (this->natural_join == this) iff + 'this' represents a NATURAL or USING join operation. Thus after + parsing 'this' is a NATURAL/USING join iff (natural_join != NULL). + */ + struct st_table_list *natural_join; + /* + True if 'this' represents a nested join that is a NATURAL JOIN. + For one of the operands of 'this', the member 'natural_join' points + to the other operand of 'this'. + */ + bool is_natural_join; + /* Field names in a USING clause for JOIN ... USING. */ + List<String> *join_using_fields; + /* + Explicitly store the result columns of either a NATURAL/USING join or + an operand of such a join. + */ + List<Natural_join_column> *join_columns; + /* TRUE if join_columns contains all columns of this table reference. */ + bool is_join_columns_complete; + + /* + List of nodes in a nested join tree, that should be considered as + leaves with respect to name resolution. The leaves are: views, + top-most nodes representing NATURAL/USING joins, subqueries, and + base tables. All of these TABLE_LIST instances contain a + materialized list of columns. The list is local to a subquery. + */ + struct st_table_list *next_name_resolution_table; + /* Index names in a "... 
JOIN ... USE/IGNORE INDEX ..." clause. */ + List<String> *use_index, *ignore_index; + TABLE *table; /* opened table */ + /* + select_result for derived table to pass it from table creation to table + filling procedure + */ + select_union *derived_result; + /* + Reference from aux_tables to local list entry of main select of + multi-delete statement: + delete t1 from t2,t1 where t1.a<'B' and t2.b=t1.b; + here it will be reference of first occurrence of t1 to second (as you + can see this lists can't be merged) + */ + st_table_list *correspondent_table; + st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ + ST_SCHEMA_TABLE *schema_table; /* Information_schema table */ + st_select_lex *schema_select_lex; + /* + True when the view field translation table is used to convert + schema table fields for backwards compatibility with SHOW command. + */ + bool schema_table_reformed; + TMP_TABLE_PARAM *schema_table_param; + /* link to select_lex where this table was used */ + st_select_lex *select_lex; + st_lex *view; /* link on VIEW lex for merging */ + Field_translator *field_translation; /* array of VIEW fields */ + /* pointer to element after last one in translation table above */ + Field_translator *field_translation_end; + /* + List (based on next_local) of underlying tables of this view. I.e. it + does not include the tables of subqueries used in the view. Is set only + for merged views. + */ + st_table_list *merge_underlying_list; + /* + - 0 for base tables + - in case of the view it is the list of all (not only underlying + tables but also used in subquery ones) tables of the view. + */ + List<st_table_list> *view_tables; + /* most upper view this table belongs to */ + st_table_list *belong_to_view; + /* + The view directly referencing this table + (non-zero only for merged underlying tables of a view). 
+ */ + st_table_list *referencing_view; + /* + Security context (non-zero only for tables which belong + to view with SQL SECURITY DEFINER) + */ + Security_context *security_ctx; + /* + This view security context (non-zero only for views with + SQL SECURITY DEFINER) + */ + Security_context *view_sctx; + /* + List of all base tables local to a subquery including all view + tables. Unlike 'next_local', this in this list views are *not* + leaves. Created in setup_tables() -> make_leaves_list(). + */ + bool allowed_show; + st_table_list *next_leaf; + Item *where; /* VIEW WHERE clause condition */ + Item *check_option; /* WITH CHECK OPTION condition */ + LEX_STRING query; /* text of (CRETE/SELECT) statement */ + LEX_STRING md5; /* md5 of query text */ + LEX_STRING source; /* source of CREATE VIEW */ + LEX_STRING view_db; /* saved view database */ + LEX_STRING view_name; /* saved view name */ + LEX_STRING timestamp; /* GMT time stamp of last operation */ + st_lex_user definer; /* definer of view */ + ulonglong file_version; /* version of file's field set */ + ulonglong updatable_view; /* VIEW can be updated */ + ulonglong revision; /* revision control number */ + ulonglong algorithm; /* 0 any, 1 tmp tables , 2 merging */ + ulonglong view_suid; /* view is suid (TRUE dy default) */ + ulonglong with_check; /* WITH CHECK OPTION */ + /* + effective value of WITH CHECK OPTION (differ for temporary table + algorithm) + */ + uint8 effective_with_check; + uint8 effective_algorithm; /* which algorithm was really used */ + GRANT_INFO grant; + /* data need by some engines in query cache*/ + ulonglong engine_data; + /* call back function for asking handler about caching in query cache */ + qc_engine_callback callback_func; thr_lock_type lock_type; uint outer_join; /* Which join type */ uint shared; /* Used in multi-upd */ - uint32 db_length, real_name_length; + uint db_length; + uint32 table_name_length; + bool updatable; /* VIEW/TABLE can be updated now */ bool straight; /* optimize 
with prev table */ bool updating; /* for replicate-do/ignore table */ - bool force_index; /* Prefer index over table scan */ - bool ignore_leaves; /* Preload only non-leaf nodes */ + bool force_index; /* prefer index over table scan */ + bool ignore_leaves; /* preload only non-leaf nodes */ + table_map dep_tables; /* tables the table depends on */ + table_map on_expr_dep_tables; /* tables on expression depends on */ + struct st_nested_join *nested_join; /* if the element is a nested join */ + st_table_list *embedding; /* nested join containing the table */ + List<struct st_table_list> *join_list;/* join list the table belongs to */ bool cacheable_table; /* stop PS caching */ - /* used in multi-upd privelege check */ - bool table_in_update_from_clause; + /* used in multi-upd/views privilege check */ + bool table_in_first_from_clause; + bool skip_temporary; /* this table shouldn't be temporary */ + /* TRUE if this merged view contain auto_increment field */ + bool contain_auto_increment; + bool multitable_view; /* TRUE iff this is multitable view */ + bool compact_view_format; /* Use compact format for SHOW CREATE VIEW */ + /* view where processed */ + bool where_processed; + /* FRMTYPE_ERROR if any type is acceptable */ + enum frm_type_enum required_type; + char timestamp_buffer[20]; /* buffer for timestamp (19+1) */ + /* + This TABLE_LIST object is just placeholder for prelocking, it will be + used for implicit LOCK TABLES only and won't be used in real statement. 
+ */ + bool prelocking_placeholder; + + enum enum_schema_table_state schema_table_state; + void calc_md5(char *buffer); + void set_underlying_merge(); + int view_check_option(THD *thd, bool ignore_failure); + bool setup_underlying(THD *thd); + void cleanup_items(); + bool placeholder() {return derived || view || schema_table || !table; } + void print(THD *thd, String *str); + bool check_single_table(st_table_list **table, table_map map, + st_table_list *view); + bool set_insert_values(MEM_ROOT *mem_root); + void hide_view_error(THD *thd); + st_table_list *find_underlying_table(TABLE *table); + st_table_list *first_leaf_for_name_resolution(); + st_table_list *last_leaf_for_name_resolution(); + bool is_leaf_for_name_resolution(); + inline st_table_list *top_table() + { return belong_to_view ? belong_to_view : this; } + inline bool prepare_check_option(THD *thd) + { + bool res= FALSE; + if (effective_with_check) + res= prep_check_option(thd, effective_with_check); + return res; + } + inline bool prepare_where(THD *thd, Item **conds, + bool no_where_clause) + { + if (effective_algorithm == VIEW_ALGORITHM_MERGE) + return prep_where(thd, conds, no_where_clause); + return FALSE; + } + + void register_want_access(ulong want_access); + bool prepare_security(THD *thd); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + Security_context *find_view_security_context(THD *thd); + bool prepare_view_securety_context(THD *thd); +#endif /* Cleanup for re-execution in a prepared statement or a stored procedure. */ void reinit_before_use(THD *thd); + Item_subselect *containing_subselect(); + +private: + bool prep_check_option(THD *thd, uint8 check_opt_type); + bool prep_where(THD *thd, Item **conds, bool no_where_clause); + /* + Cleanup for re-execution in a prepared statement or a stored + procedure. + */ } TABLE_LIST; +class Item; + +/* + Iterator over the fields of a generic table reference. 
+*/ + +class Field_iterator: public Sql_alloc +{ +public: + Field_iterator() {} /* Remove gcc warning */ + virtual ~Field_iterator() {} + virtual void set(TABLE_LIST *)= 0; + virtual void next()= 0; + virtual bool end_of_fields()= 0; /* Return 1 at end of list */ + virtual const char *name()= 0; + virtual Item *create_item(THD *)= 0; + virtual Field *field()= 0; +}; + + +/* + Iterator over the fields of a base table, view with temporary + table, or subquery. +*/ + +class Field_iterator_table: public Field_iterator +{ + Field **ptr; +public: + Field_iterator_table() :ptr(0) {} + void set(TABLE_LIST *table) { ptr= table->table->field; } + void set_table(TABLE *table) { ptr= table->field; } + void next() { ptr++; } + bool end_of_fields() { return *ptr == 0; } + const char *name(); + Item *create_item(THD *thd); + Field *field() { return *ptr; } +}; + + +/* Iterator over the fields of a merge view. */ + +class Field_iterator_view: public Field_iterator +{ + Field_translator *ptr, *array_end; + TABLE_LIST *view; +public: + Field_iterator_view() :ptr(0), array_end(0) {} + void set(TABLE_LIST *table); + void next() { ptr++; } + bool end_of_fields() { return ptr == array_end; } + const char *name(); + Item *create_item(THD *thd); + Item **item_ptr() {return &ptr->item; } + Field *field() { return 0; } + inline Item *item() { return ptr->item; } + Field_translator *field_translator() { return ptr; } +}; + + +/* + Field_iterator interface to the list of materialized fields of a + NATURAL/USING join. 
+*/ + +class Field_iterator_natural_join: public Field_iterator +{ + List_iterator_fast<Natural_join_column> column_ref_it; + Natural_join_column *cur_column_ref; +public: + Field_iterator_natural_join() :cur_column_ref(NULL) {} + ~Field_iterator_natural_join() {} + void set(TABLE_LIST *table); + void next(); + bool end_of_fields() { return !cur_column_ref; } + const char *name() { return cur_column_ref->name(); } + Item *create_item(THD *thd) { return cur_column_ref->create_item(thd); } + Field *field() { return cur_column_ref->field(); } + Natural_join_column *column_ref() { return cur_column_ref; } +}; + + +/* + Generic iterator over the fields of an arbitrary table reference. + + DESCRIPTION + This class unifies the various ways of iterating over the columns + of a table reference depending on the type of SQL entity it + represents. If such an entity represents a nested table reference, + this iterator encapsulates the iteration over the columns of the + members of the table reference. + + IMPLEMENTATION + The implementation assumes that all underlying NATURAL/USING table + references already contain their result columns and are linked into + the list TABLE_LIST::next_name_resolution_table. 
+*/ + +class Field_iterator_table_ref: public Field_iterator +{ + TABLE_LIST *table_ref, *first_leaf, *last_leaf; + Field_iterator_table table_field_it; + Field_iterator_view view_field_it; + Field_iterator_natural_join natural_join_it; + Field_iterator *field_it; + void set_field_iterator(); +public: + Field_iterator_table_ref() :field_it(NULL) {} + void set(TABLE_LIST *table); + void next(); + bool end_of_fields() + { return (table_ref == last_leaf && field_it->end_of_fields()); } + const char *name() { return field_it->name(); } + const char *table_name(); + const char *db_name(); + GRANT_INFO *grant(); + Item *create_item(THD *thd) { return field_it->create_item(thd); } + Field *field() { return field_it->field(); } + Natural_join_column *get_or_create_column_ref(TABLE_LIST *parent_table_ref); + Natural_join_column *get_natural_column_ref(); +}; + + +typedef struct st_nested_join +{ + List<TABLE_LIST> join_list; /* list of elements in the nested join */ + table_map used_tables; /* bitmap of tables in the nested join */ + table_map not_null_tables; /* tables that rejects nulls */ + struct st_join_table *first_nested;/* the first nested table in the plan */ + /* + Used to count tables in the nested join in 2 isolated places: + 1. In make_outerjoin_info(). + 2. check_interleaving_with_nj/restore_prev_nj_state (these are called + by the join optimizer. + Before each use the counters are zeroed by reset_nj_counters. 
+ */ + uint counter; + nested_join_map nj_map; /* Bit used to identify this nested join*/ +} NESTED_JOIN; + + typedef struct st_changed_table_list { struct st_changed_table_list *next; @@ -252,8 +851,8 @@ typedef struct st_changed_table_list uint32 key_length; } CHANGED_TABLE_LIST; -typedef struct st_open_table_list -{ + +typedef struct st_open_table_list{ struct st_open_table_list *next; char *db,*table; uint32 in_use,locked; diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc index 3a9ca397bba..392db9224c3 100644 --- a/sql/thr_malloc.cc +++ b/sql/thr_malloc.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2001, 2003-2004 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/time.cc b/sql/time.cc index a1a27619e4b..a46f2fc237d 100644 --- a/sql/time.cc +++ b/sql/time.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -33,15 +32,6 @@ int calc_weekday(long daynr,bool sunday_first_day_of_week) DBUG_RETURN ((int) ((daynr + 5L + (sunday_first_day_of_week ? 1L : 0L)) % 7)); } - /* Calc days in one year. 
works with 0 <= year <= 99 */ - -uint calc_days_in_year(uint year) -{ - return (year & 3) == 0 && (year%100 || (year%400 == 0 && year)) ? - 366 : 365; -} - - /* The bits in week_format has the following meaning: WEEK_MONDAY_FIRST (0) If not set Sunday is first day of week @@ -195,14 +185,22 @@ ulong convert_month_to_period(ulong month) NOTE See description of str_to_datetime() for more information. */ + timestamp_type str_to_datetime_with_warn(const char *str, uint length, TIME *l_time, uint flags) { int was_cut; - timestamp_type ts_type= str_to_datetime(str, length, l_time, flags, &was_cut); - if (was_cut) - make_truncated_value_warning(current_thd, str, length, ts_type); + THD *thd= current_thd; + timestamp_type ts_type; + + ts_type= str_to_datetime(str, length, l_time, + (flags | (thd->variables.sql_mode & + (MODE_INVALID_DATES | + MODE_NO_ZERO_DATE))), + &was_cut); + if (was_cut || ts_type <= MYSQL_TIMESTAMP_ERROR) + make_truncated_value_warning(current_thd, str, length, ts_type, NullS); return ts_type; } @@ -224,7 +222,7 @@ str_to_datetime_with_warn(const char *str, uint length, TIME *l_time, 0 - t contains datetime value which is out of TIMESTAMP range. */ -my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *in_dst_time_gap) +my_time_t TIME_to_timestamp(THD *thd, const TIME *t, my_bool *in_dst_time_gap) { my_time_t timestamp; @@ -255,101 +253,13 @@ str_to_time_with_warn(const char *str, uint length, TIME *l_time) int warning; bool ret_val= str_to_time(str, length, l_time, &warning); if (ret_val || warning) - make_truncated_value_warning(current_thd, str, length, MYSQL_TIMESTAMP_TIME); + make_truncated_value_warning(current_thd, str, length, + MYSQL_TIMESTAMP_TIME, NullS); return ret_val; } /* - Convert datetime value specified as number to broken-down TIME - representation and form value of DATETIME type as side-effect. 
- - SYNOPSIS - number_to_TIME() - nr - datetime value as number - time_res - pointer for structure for broken-down representation - fuzzy_date - indicates whenever we allow fuzzy dates - was_cut - set ot 1 if there was some kind of error during - conversion or to 0 if everything was OK. - - DESCRIPTION - Convert a datetime value of formats YYMMDD, YYYYMMDD, YYMMDDHHMSS, - YYYYMMDDHHMMSS to broken-down TIME representation. Return value in - YYYYMMDDHHMMSS format as side-effect. - - This function also checks if datetime value fits in DATETIME range. - - RETURN VALUE - Datetime value in YYYYMMDDHHMMSS format. - If input value is not valid datetime value then 0 is returned. -*/ - -longlong number_to_TIME(longlong nr, TIME *time_res, bool fuzzy_date, - int *was_cut) -{ - long part1,part2; - - *was_cut= 0; - - if (nr == LL(0) || nr >= LL(10000101000000)) - goto ok; - if (nr < 101) - goto err; - if (nr <= (YY_PART_YEAR-1)*10000L+1231L) - { - nr= (nr+20000000L)*1000000L; // YYMMDD, year: 2000-2069 - goto ok; - } - if (nr < (YY_PART_YEAR)*10000L+101L) - goto err; - if (nr <= 991231L) - { - nr= (nr+19000000L)*1000000L; // YYMMDD, year: 1970-1999 - goto ok; - } - if (nr < 10000101L) - goto err; - if (nr <= 99991231L) - { - nr= nr*1000000L; - goto ok; - } - if (nr < 101000000L) - goto err; - if (nr <= (YY_PART_YEAR-1)*LL(10000000000)+LL(1231235959)) - { - nr= nr+LL(20000000000000); // YYMMDDHHMMSS, 2000-2069 - goto ok; - } - if (nr < YY_PART_YEAR*LL(10000000000)+ LL(101000000)) - goto err; - if (nr <= LL(991231235959)) - nr= nr+LL(19000000000000); // YYMMDDHHMMSS, 1970-1999 - - ok: - part1=(long) (nr/LL(1000000)); - part2=(long) (nr - (longlong) part1*LL(1000000)); - time_res->year= (int) (part1/10000L); part1%=10000L; - time_res->month= (int) part1 / 100; - time_res->day= (int) part1 % 100; - time_res->hour= (int) (part2/10000L); part2%=10000L; - time_res->minute=(int) part2 / 100; - time_res->second=(int) part2 % 100; - - if (time_res->year <= 9999 && time_res->month <= 12 
&& - time_res->day <= 31 && time_res->hour <= 23 && - time_res->minute <= 59 && time_res->second <= 59 && - (fuzzy_date || (time_res->month != 0 && time_res->day != 0) || nr==0)) - return nr; - - err: - - *was_cut= 1; - return LL(0); -} - - -/* Convert a system time structure to TIME */ @@ -769,16 +679,15 @@ void make_datetime(const DATE_TIME_FORMAT *format __attribute__((unused)), void make_truncated_value_warning(THD *thd, const char *str_val, - uint str_length, timestamp_type time_type) + uint str_length, timestamp_type time_type, + const char *field_name) { char warn_buff[MYSQL_ERRMSG_SIZE]; const char *type_str; - + CHARSET_INFO *cs= &my_charset_latin1; char buff[128]; String str(buff,(uint32) sizeof(buff), system_charset_info); - str.length(0); - str.append(str_val, str_length); - str.append('\0'); + str.copy(str_val, str_length, system_charset_info); switch (time_type) { case MYSQL_TIMESTAMP_DATE: @@ -792,84 +701,18 @@ void make_truncated_value_warning(THD *thd, const char *str_val, type_str= "datetime"; break; } - sprintf(warn_buff, ER(ER_TRUNCATED_WRONG_VALUE), - type_str, str.ptr()); + if (field_name) + cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff), + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + type_str, str.c_ptr(), field_name, + (ulong) thd->row_count); + else + cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff), + ER(ER_TRUNCATED_WRONG_VALUE), + type_str, str.c_ptr()); push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, - ER_TRUNCATED_WRONG_VALUE, warn_buff); -} - - -/* Convert time value to integer in YYYYMMDDHHMMSS format */ - -ulonglong TIME_to_ulonglong_datetime(const TIME *time) -{ - return ((ulonglong) (time->year * 10000UL + - time->month * 100UL + - time->day) * ULL(1000000) + - (ulonglong) (time->hour * 10000UL + - time->minute * 100UL + - time->second)); -} - - -/* Convert TIME value to integer in YYYYMMDD format */ - -ulonglong TIME_to_ulonglong_date(const TIME *time) -{ - return (ulonglong) (time->year * 10000UL + time->month * 100UL + 
time->day); + ER_TRUNCATED_WRONG_VALUE, warn_buff); } -/* - Convert TIME value to integer in HHMMSS format. - This function doesn't take into account time->day member: - it's assumed that days have been converted to hours already. -*/ - -ulonglong TIME_to_ulonglong_time(const TIME *time) -{ - return (ulonglong) (time->hour * 10000UL + - time->minute * 100UL + - time->second); -} - - -/* - Convert struct TIME (date and time split into year/month/day/hour/... - to a number in format YYYYMMDDHHMMSS (DATETIME), - YYYYMMDD (DATE) or HHMMSS (TIME). - - SYNOPSIS - TIME_to_ulonglong() - - DESCRIPTION - The function is used when we need to convert value of time item - to a number if it's used in numeric context, i. e.: - SELECT NOW()+1, CURDATE()+0, CURTIMIE()+0; - SELECT ?+1; - - NOTE - This function doesn't check that given TIME structure members are - in valid range. If they are not, return value won't reflect any - valid date either. -*/ - -ulonglong TIME_to_ulonglong(const TIME *time) -{ - switch (time->time_type) { - case MYSQL_TIMESTAMP_DATETIME: - return TIME_to_ulonglong_datetime(time); - case MYSQL_TIMESTAMP_DATE: - return TIME_to_ulonglong_date(time); - case MYSQL_TIMESTAMP_TIME: - return TIME_to_ulonglong_time(time); - case MYSQL_TIMESTAMP_NONE: - case MYSQL_TIMESTAMP_ERROR: - return ULL(0); - default: - DBUG_ASSERT(0); - } - return 0; -} - #endif diff --git a/sql/tzfile.h b/sql/tzfile.h index 623cddc1f12..1a57c0c5f69 100644 --- a/sql/tzfile.h +++ b/sql/tzfile.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/sql/tztime.cc b/sql/tztime.cc index 9af33526c98..70f7cc5ea86 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -781,6 +780,8 @@ gmt_sec_to_TIME(TIME *tmp, my_time_t sec_in_utc, const TIME_ZONE_INFO *sp) static my_time_t sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec) { + /* Guard against my_time_t overflow(on system with 32 bit my_time_t) */ + DBUG_ASSERT(!(year == TIMESTAMP_MAX_YEAR && mon == 1 && mday > 17)); #ifndef WE_WANT_TO_HANDLE_UNORMALIZED_DATES /* It turns out that only whenever month is normalized or unnormalized @@ -880,7 +881,8 @@ sec_since_epoch(int year, int mon, int mday, int hour, int min ,int sec) 0 in case of error. 
*/ static my_time_t -TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, bool *in_dst_time_gap) +TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, + my_bool *in_dst_time_gap) { my_time_t local_t; uint saved_seconds; @@ -948,13 +950,12 @@ TIME_to_gmt_sec(const TIME *t, const TIME_ZONE_INFO *sp, bool *in_dst_time_gap) */ if (shift) { - if (local_t > (TIMESTAMP_MAX_VALUE - shift*86400L + - sp->revtis[i].rt_offset - saved_seconds)) + if (local_t > (my_time_t) (TIMESTAMP_MAX_VALUE - shift * SECS_PER_DAY + + sp->revtis[i].rt_offset - saved_seconds)) { DBUG_RETURN(0); /* my_time_t overflow */ } - else - local_t+= shift*86400L; + local_t+= shift * SECS_PER_DAY; } if (sp->revtis[i].rt_type) @@ -1006,8 +1007,9 @@ static const String tz_SYSTEM_name("SYSTEM", 6, &my_charset_latin1); class Time_zone_system : public Time_zone { public: + Time_zone_system() {} /* Remove gcc warning */ virtual my_time_t TIME_to_gmt_sec(const TIME *t, - bool *in_dst_time_gap) const; + my_bool *in_dst_time_gap) const; virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; virtual const String * get_name() const; }; @@ -1039,7 +1041,7 @@ public: Corresponding my_time_t value or 0 in case of error */ my_time_t -Time_zone_system::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const +Time_zone_system::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const { long not_used; return my_system_gmt_sec(t, ¬_used, in_dst_time_gap); @@ -1099,8 +1101,9 @@ Time_zone_system::get_name() const class Time_zone_utc : public Time_zone { public: + Time_zone_utc() {} /* Remove gcc warning */ virtual my_time_t TIME_to_gmt_sec(const TIME *t, - bool *in_dst_time_gap) const; + my_bool *in_dst_time_gap) const; virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; virtual const String * get_name() const; }; @@ -1126,7 +1129,7 @@ public: 0 */ my_time_t -Time_zone_utc::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const +Time_zone_utc::TIME_to_gmt_sec(const TIME *t, my_bool 
*in_dst_time_gap) const { /* Should be never called */ DBUG_ASSERT(0); @@ -1189,7 +1192,7 @@ class Time_zone_db : public Time_zone public: Time_zone_db(TIME_ZONE_INFO *tz_info_arg, const String * tz_name_arg); virtual my_time_t TIME_to_gmt_sec(const TIME *t, - bool *in_dst_time_gap) const; + my_bool *in_dst_time_gap) const; virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; virtual const String * get_name() const; private: @@ -1238,7 +1241,7 @@ Time_zone_db::Time_zone_db(TIME_ZONE_INFO *tz_info_arg, Corresponding my_time_t value or 0 in case of error */ my_time_t -Time_zone_db::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const +Time_zone_db::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const { return ::TIME_to_gmt_sec(t, tz_info, in_dst_time_gap); } @@ -1285,7 +1288,7 @@ class Time_zone_offset : public Time_zone public: Time_zone_offset(long tz_offset_arg); virtual my_time_t TIME_to_gmt_sec(const TIME *t, - bool *in_dst_time_gap) const; + my_bool *in_dst_time_gap) const; virtual void gmt_sec_to_TIME(TIME *tmp, my_time_t t) const; virtual const String * get_name() const; /* @@ -1337,9 +1340,10 @@ Time_zone_offset::Time_zone_offset(long tz_offset_arg): Corresponding my_time_t value or 0 in case of error */ my_time_t -Time_zone_offset::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const +Time_zone_offset::TIME_to_gmt_sec(const TIME *t, my_bool *in_dst_time_gap) const { my_time_t local_t; + int shift= 0; /* Check timestamp range.we have to do this as calling function relies on @@ -1348,10 +1352,24 @@ Time_zone_offset::TIME_to_gmt_sec(const TIME *t, bool *in_dst_time_gap) const if (!validate_timestamp_range(t)) return 0; - local_t= sec_since_epoch(t->year, t->month, t->day, + /* + Do a temporary shift of the boundary dates to avoid + overflow of my_time_t if the time value is near it's + maximum range + */ + if ((t->year == TIMESTAMP_MAX_YEAR) && (t->month == 1) && t->day > 4) + shift= 2; + + local_t= sec_since_epoch(t->year, 
t->month, (t->day - shift), t->hour, t->minute, t->second) - offset; + if (shift) + { + /* Add back the shifted time */ + local_t+= shift * SECS_PER_DAY; + } + if (local_t >= TIMESTAMP_MIN_VALUE && local_t <= TIMESTAMP_MAX_VALUE) return local_t; @@ -1428,11 +1446,30 @@ static LS_INFO *tz_lsis= 0; static bool time_zone_tables_exist= 1; -typedef struct st_tz_names_entry: public Sql_alloc +/* + Names of tables (with their lengths) that are needed + for dynamical loading of time zone descriptions. +*/ + +static const LEX_STRING tz_tables_names[MY_TZ_TABLES_COUNT]= +{ + {(char *) STRING_WITH_LEN("time_zone_name")}, + {(char *) STRING_WITH_LEN("time_zone")}, + {(char *) STRING_WITH_LEN("time_zone_transition_type")}, + {(char *) STRING_WITH_LEN("time_zone_transition")} +}; + +/* Name of database to which those tables belong. */ + +static const LEX_STRING tz_tables_db_name= {(char *) STRING_WITH_LEN("mysql")}; + + +class Tz_names_entry: public Sql_alloc { +public: String name; Time_zone *tz; -} TZ_NAMES_ENTRY; +}; /* @@ -1440,7 +1477,7 @@ typedef struct st_tz_names_entry: public Sql_alloc they should obey C calling conventions. */ -extern "C" byte* my_tz_names_get_key(TZ_NAMES_ENTRY *entry, uint *length, +extern "C" byte* my_tz_names_get_key(Tz_names_entry *entry, uint *length, my_bool not_used __attribute__((unused))) { *length= entry->name.length(); @@ -1456,48 +1493,78 @@ extern "C" byte* my_offset_tzs_get_key(Time_zone_offset *entry, uint *length, /* - Prepare table list with time zone related tables from preallocated array. + Prepare table list with time zone related tables from preallocated array + and add to global table list. SYNOPSIS tz_init_table_list() - tz_tabs - pointer to preallocated array of 4 TABLE_LIST objects. + tz_tabs - pointer to preallocated array of MY_TZ_TABLES_COUNT + TABLE_LIST objects + global_next_ptr - pointer to variable which points to global_next member + of last element of global table list (or list root + then list is empty) (in/out). 
DESCRIPTION This function prepares list of TABLE_LIST objects which can be used - for opening of time zone tables from preallocated array. + for opening of time zone tables from preallocated array. It also links + this list to the end of global table list (it will read and update + accordingly variable pointed by global_next_ptr for this). */ -void -tz_init_table_list(TABLE_LIST *tz_tabs) +static void +tz_init_table_list(TABLE_LIST *tz_tabs, TABLE_LIST ***global_next_ptr) { - bzero(tz_tabs, sizeof(TABLE_LIST) * 4); - tz_tabs[0].alias= tz_tabs[0].real_name= (char*)"time_zone_name"; - tz_tabs[1].alias= tz_tabs[1].real_name= (char*)"time_zone"; - tz_tabs[2].alias= tz_tabs[2].real_name= (char*)"time_zone_transition_type"; - tz_tabs[3].alias= tz_tabs[3].real_name= (char*)"time_zone_transition"; - tz_tabs[0].next= tz_tabs+1; - tz_tabs[1].next= tz_tabs+2; - tz_tabs[2].next= tz_tabs+3; - tz_tabs[0].lock_type= tz_tabs[1].lock_type= tz_tabs[2].lock_type= - tz_tabs[3].lock_type= TL_READ; - tz_tabs[0].db= tz_tabs[1].db= tz_tabs[2].db= tz_tabs[3].db= (char *)"mysql"; + bzero(tz_tabs, sizeof(TABLE_LIST) * MY_TZ_TABLES_COUNT); + + for (int i= 0; i < MY_TZ_TABLES_COUNT; i++) + { + tz_tabs[i].alias= tz_tabs[i].table_name= tz_tables_names[i].str; + tz_tabs[i].table_name_length= tz_tables_names[i].length; + tz_tabs[i].db= tz_tables_db_name.str; + tz_tabs[i].db_length= tz_tables_db_name.length; + tz_tabs[i].lock_type= TL_READ; + + if (i != MY_TZ_TABLES_COUNT - 1) + tz_tabs[i].next_global= tz_tabs[i].next_local= &tz_tabs[i+1]; + if (i != 0) + tz_tabs[i].prev_global= &tz_tabs[i-1].next_global; + } + + /* Link into global list */ + tz_tabs[0].prev_global= *global_next_ptr; + **global_next_ptr= tz_tabs; + /* Update last-global-pointer to point to pointer in last table */ + *global_next_ptr= &tz_tabs[MY_TZ_TABLES_COUNT-1].next_global; } /* - Create table list with time zone related tables. 
+ Fake table list object, pointer to which is returned by + my_tz_get_tables_list() as indication of error. +*/ +TABLE_LIST fake_time_zone_tables_list; + +/* + Create table list with time zone related tables and add it to the end + of global table list. SYNOPSIS my_tz_get_table_list() - thd - current thread object + thd - current thread object + global_next_ptr - pointer to variable which points to global_next member + of last element of global table list (or list root + then list is empty) (in/out). DESCRIPTION This function creates list of TABLE_LIST objects allocated in thd's - memroot, which can be used for opening of time zone tables. + memroot, which can be used for opening of time zone tables. It will also + link this list to the end of global table list (it will read and update + accordingly variable pointed by global_next_ptr for this). NOTE my_tz_check_n_skip_implicit_tables() function depends on fact that - elements of list created are allocated as TABLE_LIST[4] array. + elements of list created are allocated as TABLE_LIST[MY_TZ_TABLES_COUNT] + array. 
RETURN VALUES Returns pointer to first TABLE_LIST object, (could be 0 if time zone @@ -1505,19 +1572,21 @@ tz_init_table_list(TABLE_LIST *tz_tabs) */ TABLE_LIST * -my_tz_get_table_list(THD *thd) +my_tz_get_table_list(THD *thd, TABLE_LIST ***global_next_ptr) { TABLE_LIST *tz_tabs; + DBUG_ENTER("my_tz_get_table_list"); if (!time_zone_tables_exist) - return 0; + DBUG_RETURN(0); - if (!(tz_tabs= (TABLE_LIST *)thd->alloc(sizeof(TABLE_LIST) * 4))) - return &fake_time_zone_tables_list; + if (!(tz_tabs= (TABLE_LIST *)thd->alloc(sizeof(TABLE_LIST) * + MY_TZ_TABLES_COUNT))) + DBUG_RETURN(&fake_time_zone_tables_list); - tz_init_table_list(tz_tabs); + tz_init_table_list(tz_tabs, global_next_ptr); - return tz_tabs; + DBUG_RETURN(tz_tabs); } @@ -1551,12 +1620,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) { THD *thd; TABLE_LIST *tables= 0; - TABLE_LIST tables_buff[5]; + TABLE_LIST tables_buff[1+MY_TZ_TABLES_COUNT], **last_global_next_ptr; TABLE *table; - TZ_NAMES_ENTRY *tmp_tzname; + Tz_names_entry *tmp_tzname; my_bool return_val= 1; + char db[]= "mysql"; int res; - uint counter; DBUG_ENTER("my_tz_init"); /* @@ -1564,6 +1633,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) */ if (!(thd= new THD)) DBUG_RETURN(1); + thd->thread_stack= (char*) &thd; thd->store_globals(); /* Init all memory structures that require explicit destruction */ @@ -1585,12 +1655,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) tz_inited= 1; /* Add 'SYSTEM' time zone to tz_names hash */ - if (!(tmp_tzname= new (&tz_storage) TZ_NAMES_ENTRY())) + if (!(tmp_tzname= new (&tz_storage) Tz_names_entry())) { sql_print_error("Fatal error: OOM while initializing time zones"); goto end_with_cleanup; } - tmp_tzname->name.set("SYSTEM", 6, &my_charset_latin1); + tmp_tzname->name.set(STRING_WITH_LEN("SYSTEM"), &my_charset_latin1); tmp_tzname->tz= my_tz_SYSTEM; if (my_hash_insert(&tz_names, (const byte *)tmp_tzname)) { @@ -1611,19 
+1681,20 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) leap seconds shared by all time zones. */ - thd->db= my_strdup("mysql",MYF(0)); - thd->db_length= 5; // Safety + thd->set_db(db, sizeof(db)-1); bzero((char*) &tables_buff, sizeof(TABLE_LIST)); - tables_buff[0].alias= tables_buff[0].real_name= + tables_buff[0].alias= tables_buff[0].table_name= (char*)"time_zone_leap_second"; tables_buff[0].lock_type= TL_READ; - tables_buff[0].db= thd->db; - tables_buff[0].next= tables_buff + 1; - /* Fill TABLE_LIST for rest of the time zone describing tables */ - tz_init_table_list(tables_buff + 1); + tables_buff[0].db= db; + /* + Fill TABLE_LIST for the rest of the time zone describing tables + and link it to first one. + */ + last_global_next_ptr= &(tables_buff[0].next_global); + tz_init_table_list(tables_buff + 1, &last_global_next_ptr); - if (open_tables(thd, tables_buff, &counter) || - lock_tables(thd, tables_buff, counter)) + if (simple_open_n_lock_tables(thd, tables_buff)) { sql_print_warning("Can't open and lock time zone table: %s " "trying to live without them", thd->net.last_error); @@ -1674,9 +1745,9 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) tz_leapcnt++; DBUG_PRINT("info", - ("time_zone_leap_second table: tz_leapcnt=%u tt_time=%lld offset=%ld", - tz_leapcnt, (longlong)tz_lsis[tz_leapcnt-1].ls_trans, - tz_lsis[tz_leapcnt-1].ls_corr)); + ("time_zone_leap_second table: tz_leapcnt: %u tt_time: %lu offset=%ld", + tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans, + tz_lsis[tz_leapcnt-1].ls_corr)); res= table->file->index_next(table->record[0]); } @@ -1701,8 +1772,8 @@ end_with_setting_default_tz: /* If we have default time zone try to load it */ if (default_tzname) { - String tmp_tzname(default_tzname, &my_charset_latin1); - if (!(global_system_variables.time_zone= my_tz_find(&tmp_tzname, tables))) + String tmp_tzname2(default_tzname, &my_charset_latin1); + if (!(global_system_variables.time_zone= 
my_tz_find(&tmp_tzname2, tables))) { sql_print_error("Fatal error: Illegal or unknown default time zone '%s'", default_tzname); @@ -1779,7 +1850,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) { TABLE *table= 0; TIME_ZONE_INFO *tz_info; - TZ_NAMES_ENTRY *tmp_tzname; + Tz_names_entry *tmp_tzname; Time_zone *return_val= 0; int res; uint tzid, ttid; @@ -1822,8 +1893,9 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) and it is specifically for this purpose). */ table= tz_tables->table; - tz_tables= tz_tables->next; - table->field[0]->store(tz_name->ptr(), tz_name->length(), &my_charset_latin1); + tz_tables= tz_tables->next_local; + table->field[0]->store(tz_name->ptr(), tz_name->length(), + &my_charset_latin1); /* It is OK to ignore ha_index_init()/ha_index_end() return values since mysql.time_zone* tables are MyISAM and these operations always succeed @@ -1854,8 +1926,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) using the only index in this table). */ table= tz_tables->table; - tz_tables= tz_tables->next; - table->field[0]->store((longlong)tzid); + tz_tables= tz_tables->next_local; + table->field[0]->store((longlong) tzid, TRUE); (void)table->file->ha_index_init(0); if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, @@ -1881,8 +1953,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) Right - using special index. */ table= tz_tables->table; - tz_tables= tz_tables->next; - table->field[0]->store((longlong)tzid); + tz_tables= tz_tables->next_local; + table->field[0]->store((longlong) tzid, TRUE); (void)table->file->ha_index_init(0); // FIXME Is there any better approach than explicitly specifying 4 ??? @@ -1954,7 +2026,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) in ascending order by index scan also satisfies us. 
*/ table= tz_tables->table; - table->field[0]->store((longlong)tzid); + table->field[0]->store((longlong) tzid, TRUE); (void)table->file->ha_index_init(0); // FIXME Is there any better approach than explicitly specifying 4 ??? @@ -1985,8 +2057,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) tz_info->timecnt++; DBUG_PRINT("info", - ("time_zone_transition table: tz_id=%u tt_time=%lld tt_id=%u", - tzid, (longlong)ttime, ttid)); + ("time_zone_transition table: tz_id: %u tt_time: %lu tt_id: %u", + tzid, (ulong) ttime, ttid)); res= table->file->index_next_same(table->record[0], (byte*)table->field[0]->ptr, 4); @@ -2053,7 +2125,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) } - if (!(tmp_tzname= new (&tz_storage) TZ_NAMES_ENTRY()) || + if (!(tmp_tzname= new (&tz_storage) Tz_names_entry()) || !(tmp_tzname->tz= new (&tz_storage) Time_zone_db(tz_info, &(tmp_tzname->name))) || (tmp_tzname->name.set(tz_name_buff, tz_name->length(), @@ -2200,7 +2272,7 @@ str_to_offset(const char *str, uint length, long *offset) Time_zone * my_tz_find(const String * name, TABLE_LIST *tz_tables) { - TZ_NAMES_ENTRY *tmp_tzname; + Tz_names_entry *tmp_tzname; Time_zone *result_tz= 0; long offset; @@ -2208,7 +2280,7 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables) DBUG_PRINT("enter", ("time zone name='%s'", name ? 
((String *)name)->c_ptr() : "NULL")); - DBUG_ASSERT(!time_zone_tables_exist || tz_tables); + DBUG_ASSERT(!time_zone_tables_exist || tz_tables || current_thd->slave_thread); if (!name) DBUG_RETURN(0); @@ -2236,11 +2308,11 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables) else { result_tz= 0; - if ((tmp_tzname= (TZ_NAMES_ENTRY *)hash_search(&tz_names, + if ((tmp_tzname= (Tz_names_entry *)hash_search(&tz_names, (const byte *)name->ptr(), name->length()))) result_tz= tmp_tzname->tz; - else if (time_zone_tables_exist) + else if (time_zone_tables_exist && tz_tables) result_tz= tz_load_from_open_tables(name, tz_tables); } @@ -2249,6 +2321,58 @@ my_tz_find(const String * name, TABLE_LIST *tz_tables) DBUG_RETURN(result_tz); } + +/* + A more standalone version of my_tz_find(): will open tz tables if needed. + This is so far only used by replication, where time zone setting does not + happen in the usual query context. + + SYNOPSIS + my_tz_find_with_opening_tz_tables() + thd - pointer to thread's THD structure + name - time zone specification + + DESCRIPTION + This function tries to find a time zone which matches the named passed in + argument. If it fails, it will open time zone tables and re-try the + search. + This function is needed for the slave SQL thread, which does not do the + addition of time zone tables which is usually done during query parsing + (as time zone setting by slave does not happen in mysql_parse() but + before). So it needs to open tz tables by itself if needed. + See notes of my_tz_find() as they also apply here. + + RETURN VALUE + Pointer to corresponding Time_zone object. 0 - in case of bad time zone + specification or other error. 
+ +*/ +Time_zone *my_tz_find_with_opening_tz_tables(THD *thd, const String *name) +{ + Time_zone *tz; + DBUG_ENTER("my_tz_find_with_opening_tables"); + DBUG_ASSERT(thd); + DBUG_ASSERT(thd->slave_thread); // intended for use with slave thread only + if (!(tz= my_tz_find(name, 0)) && time_zone_tables_exist) + { + /* + Probably we have not loaded this time zone yet so let us look it up in + our time zone tables. Note that if we don't have tz tables on this + slave, we don't even try. + */ + TABLE_LIST tables[MY_TZ_TABLES_COUNT]; + TABLE_LIST *dummy; + TABLE_LIST **dummyp= &dummy; + tz_init_table_list(tables, &dummyp); + if (simple_open_n_lock_tables(thd, tables)) + DBUG_RETURN(0); + tz= my_tz_find(name, tables); + /* We need to close tables _now_ to not pollute coming query */ + close_thread_tables(thd); + } + DBUG_RETURN(tz); +} + #endif /* !defined(TESTTIME) && !defined(TZINFO2SQL) */ @@ -2531,8 +2655,6 @@ main(int argc, char **argv) time_t t, t1, t2; char fullname[FN_REFLEN+1]; char *str_end; - long not_used; - bool not_used_2; MEM_ROOT tz_storage; MY_INIT(argv[0]); @@ -2642,14 +2764,21 @@ main(int argc, char **argv) dates. 
*/ for (time_tmp.year= 1980; time_tmp.year < 2010; time_tmp.year++) + { for (time_tmp.month= 1; time_tmp.month < 13; time_tmp.month++) + { for (time_tmp.day= 1; time_tmp.day < mon_lengths[isleap(time_tmp.year)][time_tmp.month-1]; time_tmp.day++) + { for (time_tmp.hour= 0; time_tmp.hour < 24; time_tmp.hour++) + { for (time_tmp.minute= 0; time_tmp.minute < 60; time_tmp.minute+= 5) + { for (time_tmp.second=0; time_tmp.second<60; time_tmp.second+=25) { + long not_used; + my_bool not_used_2; t= (time_t)my_system_gmt_sec(&time_tmp, ¬_used, ¬_used_2); t1= (time_t)TIME_to_gmt_sec(&time_tmp, &tz_info, ¬_used_2); if (t != t1) @@ -2681,6 +2810,11 @@ main(int argc, char **argv) return 1; } } + } + } + } + } + } printf("TIME_to_gmt_sec = my_system_gmt_sec for test range\n"); diff --git a/sql/tztime.h b/sql/tztime.h index e1ff71b6703..d1f33843810 100644 --- a/sql/tztime.h +++ b/sql/tztime.h @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -30,6 +29,7 @@ class Time_zone: public Sql_alloc { public: + Time_zone() {} /* Remove gcc warning */ /* Converts local time in broken down TIME representation to my_time_t (UTC seconds since Epoch) represenation. @@ -37,7 +37,7 @@ public: falls into spring time-gap (or lefts it untouched otherwise). */ virtual my_time_t TIME_to_gmt_sec(const TIME *t, - bool *in_dst_time_gap) const = 0; + my_bool *in_dst_time_gap) const = 0; /* Converts time in my_time_t representation to local time in broken down TIME representation. 
@@ -59,11 +59,22 @@ public: extern Time_zone * my_tz_UTC; extern Time_zone * my_tz_SYSTEM; -extern TABLE_LIST * my_tz_get_table_list(THD *thd); +extern TABLE_LIST * my_tz_get_table_list(THD *thd, TABLE_LIST ***global_next_ptr); extern Time_zone * my_tz_find(const String *name, TABLE_LIST *tz_tables); +extern Time_zone * my_tz_find_with_opening_tz_tables(THD *thd, const String *name); extern my_bool my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap); extern void my_tz_free(); +extern TABLE_LIST fake_time_zone_tables_list; + +/* + Number of elements in table list produced by my_tz_get_table_list() + (this table list contains tables which are needed for dynamical loading + of time zone descriptions). Actually it is imlementation detail that + should not be used anywhere outside of tztime.h and tztime.cc. +*/ + +static const int MY_TZ_TABLES_COUNT= 4; /* Check if we have pointer to the begining of list of implicitly used time @@ -87,18 +98,12 @@ inline bool my_tz_check_n_skip_implicit_tables(TABLE_LIST **table, { if (*table == tz_tables) { - for (int i= 0; i < 4; i++) + for (int i= 0; i < MY_TZ_TABLES_COUNT; i++) (*table)[i].grant.privilege= SELECT_ACL; - (*table)+= 3; + (*table)+= MY_TZ_TABLES_COUNT - 1; return TRUE; } return FALSE; } -/* - Maximum length of time zone name that we support - (Time zone name is char(64) in db) -*/ -#define MAX_TIME_ZONE_NAME_LENGTH 72 - #endif /* !defined(TESTTIME) && !defined(TZINFO2SQL) */ diff --git a/sql/udf_example.cc b/sql/udf_example.c index 50de0f187fe..2bb4fe92d2f 100644 --- a/sql/udf_example.cc +++ b/sql/udf_example.c @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -56,7 +55,9 @@ ** ** Function 'myfunc_int' returns summary length of all its arguments. ** -** Function 'sequence' returns an sequence starting from a certain number +** Function 'sequence' returns an sequence starting from a certain number. +** +** Function 'myfunc_argument_name' returns name of argument. ** ** On the end is a couple of functions that converts hostnames to ip and ** vice versa. @@ -82,6 +83,7 @@ ** CREATE FUNCTION lookup RETURNS STRING SONAME "udf_example.so"; ** CREATE FUNCTION reverse_lookup RETURNS STRING SONAME "udf_example.so"; ** CREATE AGGREGATE FUNCTION avgcost RETURNS REAL SONAME "udf_example.so"; +** CREATE FUNCTION myfunc_argument_name RETURNS STRING SONAME "udf_example.so"; ** ** After this the functions will work exactly like native MySQL functions. ** Functions should be created only once. @@ -94,6 +96,7 @@ ** DROP FUNCTION lookup; ** DROP FUNCTION reverse_lookup; ** DROP FUNCTION avgcost; +** DROP FUNCTION myfunc_argument_name; ** ** The CREATE FUNCTION and DROP FUNCTION update the func@mysql table. 
All ** Active function will be reloaded on every restart of server @@ -109,6 +112,8 @@ */ #ifdef STANDARD +/* STANDARD is defined, don't use any mysql functions */ +#include <stdlib.h> #include <stdio.h> #include <string.h> #ifdef __WIN__ @@ -121,10 +126,17 @@ typedef long long longlong; #else #include <my_global.h> #include <my_sys.h> +#if defined(MYSQL_SERVER) +#include <m_string.h> /* To get strmov() */ +#else +/* when compiled as standalone */ +#define strmov(a,b) strcpy(a,b) +#define bzero(a,b) memset(a,0,b) +#define memcpy_fixed(a,b,c) memcpy(a,b,c) +#endif #endif #include <mysql.h> -#include <m_ctype.h> -#include <m_string.h> // To get strmov() +#include <ctype.h> static pthread_mutex_t LOCK_hostname; @@ -132,7 +144,6 @@ static pthread_mutex_t LOCK_hostname; /* These must be right or mysqld will not find the symbol! */ -extern "C" { my_bool metaphon_init(UDF_INIT *initid, UDF_ARGS *args, char *message); void metaphon_deinit(UDF_INIT *initid); char *metaphon(UDF_INIT *initid, UDF_ARGS *args, char *result, @@ -140,6 +151,7 @@ char *metaphon(UDF_INIT *initid, UDF_ARGS *args, char *result, my_bool myfunc_double_init(UDF_INIT *, UDF_ARGS *args, char *message); double myfunc_double(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error); +my_bool myfunc_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message); longlong myfunc_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error); my_bool sequence_init(UDF_INIT *initid, UDF_ARGS *args, char *message); @@ -152,7 +164,9 @@ void avgcost_reset( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error void avgcost_clear( UDF_INIT* initid, char* is_null, char *error ); void avgcost_add( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error ); double avgcost( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char *error ); -} +my_bool is_const_init(UDF_INIT *initid, UDF_ARGS *args, char *message); +char *is_const(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long + *length, char 
*is_null, char *error); /************************************************************************* @@ -214,7 +228,7 @@ my_bool metaphon_init(UDF_INIT *initid, UDF_ARGS *args, char *message) ****************************************************************************/ -void metaphon_deinit(UDF_INIT *initid) +void metaphon_deinit(UDF_INIT *initid __attribute__((unused))) { } @@ -260,23 +274,25 @@ static char codes[26] = { #define NOGHTOF(x) (codes[(x) - 'A'] & 16) /* BDH */ -char *metaphon(UDF_INIT *initid, UDF_ARGS *args, char *result, - unsigned long *length, char *is_null, char *error) +char *metaphon(UDF_INIT *initid __attribute__((unused)), + UDF_ARGS *args, char *result, unsigned long *length, + char *is_null, char *error __attribute__((unused))) { const char *word=args->args[0]; - if (!word) // Null argument + const char *w_end; + char *org_result; + char *n, *n_start, *n_end; /* pointers to string */ + char *metaph_end; /* pointers to end of metaph */ + char ntrans[32]; /* word with uppercase letters */ + int KSflag; /* state flag for X to KS */ + + if (!word) /* Null argument */ { *is_null=1; return 0; } - const char *w_end=word+args->lengths[0]; - char *org_result=result; - - char *n, *n_start, *n_end; /* pointers to string */ - char *metaph, *metaph_end; /* pointers to metaph */ - char ntrans[32]; /* word with uppercase letters */ - char newm[8]; /* new metaph for comparison */ - int KSflag; /* state flag for X to KS */ + w_end=word+args->lengths[0]; + org_result=result; /*-------------------------------------------------------- * Copy word to internal buffer, dropping non-alphabetic @@ -285,8 +301,8 @@ char *metaphon(UDF_INIT *initid, UDF_ARGS *args, char *result, for (n = ntrans + 1, n_end = ntrans + sizeof(ntrans)-2; word != w_end && n < n_end; word++ ) - if ( my_isalpha ( &my_charset_latin1, *word )) - *n++ = my_toupper ( &my_charset_latin1, *word ); + if ( isalpha ( *word )) + *n++ = toupper ( *word ); if ( n == ntrans + 1 ) /* return empty string if 
0 bytes */ { @@ -337,7 +353,7 @@ char *metaphon(UDF_INIT *initid, UDF_ARGS *args, char *result, KSflag = 0; /* state flag for KS translation */ for (metaph_end = result + MAXMETAPH, n_start = n; - n <= n_end && result < metaph_end; n++ ) + n < n_end && result < metaph_end; n++ ) { if ( KSflag ) @@ -490,7 +506,7 @@ char *metaphon(UDF_INIT *initid, UDF_ARGS *args, char *result, } } } - *length= (ulong) (result - org_result); + *length= (unsigned long) (result - org_result); return org_result; } @@ -512,36 +528,39 @@ char *metaphon(UDF_INIT *initid, UDF_ARGS *args, char *result, my_bool myfunc_double_init(UDF_INIT *initid, UDF_ARGS *args, char *message) { + uint i; + if (!args->arg_count) { - strcpy(message,"myfunc_double must have at least on argument"); + strcpy(message,"myfunc_double must have at least one argument"); return 1; } /* ** As this function wants to have everything as strings, force all arguments ** to strings. */ - for (uint i=0 ; i < args->arg_count; i++) + for (i=0 ; i < args->arg_count; i++) args->arg_type[i]=STRING_RESULT; - initid->maybe_null=1; // The result may be null - initid->decimals=2; // We want 2 decimals in the result - initid->max_length=6; // 3 digits + . + 2 decimals + initid->maybe_null=1; /* The result may be null */ + initid->decimals=2; /* We want 2 decimals in the result */ + initid->max_length=6; /* 3 digits + . 
+ 2 decimals */ return 0; } -double myfunc_double(UDF_INIT *initid, UDF_ARGS *args, char *is_null, - char *error) +double myfunc_double(UDF_INIT *initid __attribute__((unused)), UDF_ARGS *args, + char *is_null, char *error __attribute__((unused))) { unsigned long val = 0; unsigned long v = 0; + uint i, j; - for (uint i = 0; i < args->arg_count; i++) + for (i = 0; i < args->arg_count; i++) { if (args->args[i] == NULL) continue; val += args->lengths[i]; - for (uint j=args->lengths[i] ; j-- > 0 ;) + for (j=args->lengths[i] ; j-- > 0 ;) v += args->args[i][j]; } if (val) @@ -568,22 +587,25 @@ double myfunc_double(UDF_INIT *initid, UDF_ARGS *args, char *is_null, /* This function returns the sum of all arguments */ -longlong myfunc_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null, - char *error) +longlong myfunc_int(UDF_INIT *initid __attribute__((unused)), UDF_ARGS *args, + char *is_null __attribute__((unused)), + char *error __attribute__((unused))) { longlong val = 0; - for (uint i = 0; i < args->arg_count; i++) + uint i; + + for (i = 0; i < args->arg_count; i++) { if (args->args[i] == NULL) continue; switch (args->arg_type[i]) { - case STRING_RESULT: // Add string lengths + case STRING_RESULT: /* Add string lengths */ val += args->lengths[i]; break; - case INT_RESULT: // Add numbers + case INT_RESULT: /* Add numbers */ val += *((longlong*) args->args[i]); break; - case REAL_RESULT: // Add numers as longlong + case REAL_RESULT: /* Add numers as longlong */ val += (longlong) *((double*) args->args[i]); break; default: @@ -593,6 +615,16 @@ longlong myfunc_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null, return val; } +/* + At least one of _init/_deinit is needed unless the server is started + with --allow_suspicious_udfs. 
+*/ +my_bool myfunc_int_init(UDF_INIT *initid __attribute__((unused)), + UDF_ARGS *args __attribute__((unused)), + char *message __attribute__((unused))) +{ + return 0; +} /* Simple example of how to get a sequences starting from the first argument @@ -607,7 +639,7 @@ my_bool sequence_init(UDF_INIT *initid, UDF_ARGS *args, char *message) return 1; } if (args->arg_count) - args->arg_type[0]= INT_RESULT; // Force argument to int + args->arg_type[0]= INT_RESULT; /* Force argument to int */ if (!(initid->ptr=(char*) malloc(sizeof(longlong)))) { @@ -631,8 +663,9 @@ void sequence_deinit(UDF_INIT *initid) free(initid->ptr); } -longlong sequence(UDF_INIT *initid, UDF_ARGS *args, char *is_null, - char *error) +longlong sequence(UDF_INIT *initid __attribute__((unused)), UDF_ARGS *args, + char *is_null __attribute__((unused)), + char *error __attribute__((unused))) { ulonglong val=0; if (args->arg_count) @@ -650,12 +683,15 @@ longlong sequence(UDF_INIT *initid, UDF_ARGS *args, char *is_null, ** ****************************************************************************/ +#ifdef __WIN__ +#include <winsock.h> +#else #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <netdb.h> +#endif -extern "C" { my_bool lookup_init(UDF_INIT *initid, UDF_ARGS *args, char *message); void lookup_deinit(UDF_INIT *initid); char *lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, @@ -664,7 +700,6 @@ my_bool reverse_lookup_init(UDF_INIT *initid, UDF_ARGS *args, char *message); void reverse_lookup_deinit(UDF_INIT *initid); char *reverse_lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *length, char *null_value, char *error); -} /**************************************************************************** @@ -690,20 +725,26 @@ my_bool lookup_init(UDF_INIT *initid, UDF_ARGS *args, char *message) return 0; } -void lookup_deinit(UDF_INIT *initid) +void lookup_deinit(UDF_INIT *initid __attribute__((unused))) { #if !defined(HAVE_GETHOSTBYADDR_R) || 
!defined(HAVE_SOLARIS_STYLE_GETHOST) (void) pthread_mutex_destroy(&LOCK_hostname); #endif } -char *lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, - unsigned long *res_length, char *null_value, char *error) +char *lookup(UDF_INIT *initid __attribute__((unused)), UDF_ARGS *args, + char *result, unsigned long *res_length, char *null_value, + char *error __attribute__((unused))) { uint length; + char name_buff[256]; + struct hostent *hostent; +#if defined(HAVE_GETHOSTBYADDR_R) && defined(HAVE_SOLARIS_STYLE_GETHOST) int tmp_errno; - char name_buff[256],hostname_buff[2048]; - struct hostent tmp_hostent,*hostent; + char hostname_buff[2048]; + struct hostent tmp_hostent; +#endif + struct in_addr in; if (!args->args[0] || !(length=args->lengths[0])) { @@ -731,7 +772,6 @@ char *lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, } VOID(pthread_mutex_unlock(&LOCK_hostname)); #endif - struct in_addr in; memcpy_fixed((char*) &in,(char*) *hostent->h_addr_list, sizeof(in.s_addr)); *res_length= (ulong) (strmov(result, inet_ntoa(in)) - result); return result; @@ -765,18 +805,24 @@ my_bool reverse_lookup_init(UDF_INIT *initid, UDF_ARGS *args, char *message) return 0; } -void reverse_lookup_deinit(UDF_INIT *initid) +void reverse_lookup_deinit(UDF_INIT *initid __attribute__((unused))) { #if !defined(HAVE_GETHOSTBYADDR_R) || !defined(HAVE_SOLARIS_STYLE_GETHOST) (void) pthread_mutex_destroy(&LOCK_hostname); #endif } -char *reverse_lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, - unsigned long *res_length, char *null_value, char *error) +char *reverse_lookup(UDF_INIT *initid __attribute__((unused)), UDF_ARGS *args, + char *result, unsigned long *res_length, + char *null_value, char *error __attribute__((unused))) { +#if defined(HAVE_GETHOSTBYADDR_R) && defined(HAVE_SOLARIS_STYLE_GETHOST) char name_buff[256]; struct hostent tmp_hostent; + int tmp_errno; +#endif + struct hostent *hp; + unsigned long taddr; uint length; if (args->arg_count == 4) @@ -793,8 +839,8 @@ char 
*reverse_lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, (int) *((longlong*) args->args[3])); } else - { // string argument - if (!args->args[0]) // Return NULL for NULL values + { /* string argument */ + if (!args->args[0]) /* Return NULL for NULL values */ { *null_value=1; return 0; @@ -806,15 +852,13 @@ char *reverse_lookup(UDF_INIT *initid, UDF_ARGS *args, char *result, result[length]=0; } - unsigned long taddr = inet_addr(result); + taddr = inet_addr(result); if (taddr == (unsigned long) -1L) { *null_value=1; return 0; } - struct hostent *hp; #if defined(HAVE_GETHOSTBYADDR_R) && defined(HAVE_SOLARIS_STYLE_GETHOST) - int tmp_errno; if (!(hp=gethostbyaddr_r((char*) &taddr,sizeof(taddr), AF_INET, &tmp_hostent, name_buff,sizeof(name_buff), &tmp_errno))) @@ -887,11 +931,15 @@ avgcost_init( UDF_INIT* initid, UDF_ARGS* args, char* message ) /*args->arg_type[0] = REAL_RESULT; args->arg_type[1] = REAL_RESULT;*/ - initid->maybe_null = 0; // The result may be null - initid->decimals = 4; // We want 4 decimals in the result - initid->max_length = 20; // 6 digits + . + 10 decimals + initid->maybe_null = 0; /* The result may be null */ + initid->decimals = 4; /* We want 4 decimals in the result */ + initid->max_length = 20; /* 6 digits + . 
+ 10 decimals */ - data = new struct avgcost_data; + if (!(data = (struct avgcost_data*) malloc(sizeof(struct avgcost_data)))) + { + strmov(message,"Couldn't allocate memory"); + return 1; + } data->totalquantity = 0; data->totalprice = 0.0; @@ -903,7 +951,7 @@ avgcost_init( UDF_INIT* initid, UDF_ARGS* args, char* message ) void avgcost_deinit( UDF_INIT* initid ) { - delete initid->ptr; + free(initid->ptr); } @@ -918,7 +966,8 @@ avgcost_reset(UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* message) /* This is needed to get things to work in MySQL 4.1.1 and above */ void -avgcost_clear(UDF_INIT* initid, char* is_null, char* message) +avgcost_clear(UDF_INIT* initid, char* is_null __attribute__((unused)), + char* message __attribute__((unused))) { struct avgcost_data* data = (struct avgcost_data*)initid->ptr; data->totalprice= 0.0; @@ -928,7 +977,9 @@ avgcost_clear(UDF_INIT* initid, char* is_null, char* message) void -avgcost_add(UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* message) +avgcost_add(UDF_INIT* initid, UDF_ARGS* args, + char* is_null __attribute__((unused)), + char* message __attribute__((unused))) { if (args->args[0] && args->args[1]) { @@ -948,7 +999,7 @@ avgcost_add(UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* message) if ( ((quantity < 0) && (newquantity < 0)) || ((quantity > 0) && (newquantity > 0)) ) { - data->totalprice = price * double(newquantity); + data->totalprice = price * (double)newquantity; } /* ** sub q if totalq > 0 @@ -956,15 +1007,15 @@ avgcost_add(UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* message) */ else { - price = data->totalprice / double(data->totalquantity); - data->totalprice = price * double(newquantity); + price = data->totalprice / (double)data->totalquantity; + data->totalprice = price * (double)newquantity; } data->totalquantity = newquantity; } else { data->totalquantity += quantity; - data->totalprice += price * double(quantity); + data->totalprice += price * (double)quantity; } if 
(data->totalquantity == 0) @@ -974,7 +1025,8 @@ avgcost_add(UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* message) double -avgcost( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* error ) +avgcost( UDF_INIT* initid, UDF_ARGS* args __attribute__((unused)), + char* is_null, char* error __attribute__((unused))) { struct avgcost_data* data = (struct avgcost_data*)initid->ptr; if (!data->count || !data->totalquantity) @@ -984,7 +1036,73 @@ avgcost( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* error ) } *is_null = 0; - return data->totalprice/double(data->totalquantity); + return data->totalprice/(double)data->totalquantity; } +my_bool myfunc_argument_name_init(UDF_INIT *initid, UDF_ARGS *args, + char *message); +char *myfunc_argument_name(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *length, char *null_value, + char *error); + +my_bool myfunc_argument_name_init(UDF_INIT *initid, UDF_ARGS *args, + char *message) +{ + if (args->arg_count != 1) + { + strmov(message,"myfunc_argument_name_init accepts only one argument"); + return 1; + } + initid->max_length= args->attribute_lengths[0]; + initid->maybe_null= 1; + initid->const_item= 1; + return 0; +} + +char *myfunc_argument_name(UDF_INIT *initid __attribute__((unused)), + UDF_ARGS *args, char *result, + unsigned long *length, char *null_value, + char *error __attribute__((unused))) +{ + if (!args->attributes[0]) + { + null_value= 0; + return 0; + } + (*length)--; /* space for ending \0 (for debugging purposes) */ + if (*length > args->attribute_lengths[0]) + *length= args->attribute_lengths[0]; + memcpy(result, args->attributes[0], *length); + result[*length]= 0; + return result; +} + + + +my_bool is_const_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + if (args->arg_count != 1) + { + strmov(message, "IS_CONST accepts only one argument"); + return 1; + } + initid->ptr= (char*)((args->args[0] != NULL) ? 
1UL : 0); + return 0; +} + +char * is_const(UDF_INIT *initid, UDF_ARGS *args __attribute__((unused)), + char *result, unsigned long *length, + char *is_null, char *error __attribute__((unused))) +{ + if (initid->ptr != 0) { + sprintf(result, "const"); + } else { + sprintf(result, "not const"); + } + *is_null= 0; + *length= strlen(result); + return result; +} + + #endif /* HAVE_DLOPEN */ diff --git a/sql/udf_example.def b/sql/udf_example.def new file mode 100644 index 00000000000..7a87147d7b6 --- /dev/null +++ b/sql/udf_example.def @@ -0,0 +1,25 @@ +LIBRARY udf_example +VERSION 1.0 +EXPORTS + lookup + lookup_init + reverse_lookup + reverse_lookup_init + metaphon_init + metaphon_deinit + metaphon + myfunc_double_init + myfunc_double + myfunc_int_init + myfunc_int + sequence_init + sequence_deinit + sequence + avgcost_init + avgcost_deinit + avgcost_reset + avgcost_add + avgcost_clear + avgcost + is_const + is_const_init diff --git a/sql/uniques.cc b/sql/uniques.cc index d060965aa66..9eb827f62a3 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -2,8 +2,7 @@ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -38,8 +37,8 @@ int unique_write_to_file(gptr key, element_count count, Unique *unique) { /* - Use unique->size (size of element stored in the tree) and not - unique->tree.size_of_element. The latter is different from unique->size + Use unique->size (size of element stored in the tree) and not + unique->tree.size_of_element. 
The latter is different from unique->size when tree implementation chooses to store pointer to key in TREE_ELEMENT (instead of storing the element itself there) */ @@ -55,20 +54,264 @@ int unique_write_to_ptrs(gptr key, element_count count, Unique *unique) } Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, - uint size_arg, ulong max_in_memory_size_arg) + uint size_arg, ulonglong max_in_memory_size_arg) :max_in_memory_size(max_in_memory_size_arg), size(size_arg), elements(0) { my_b_clear(&file); - init_tree(&tree, max_in_memory_size / 16, 0, size, comp_func, 0, NULL, - comp_func_fixed_arg); + init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func, 0, + NULL, comp_func_fixed_arg); /* If the following fail's the next add will also fail */ my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16); - max_elements= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+size); - open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE, - MYF(MY_WME)); + /* + If you change the following, change it in get_max_elements function, too. + */ + max_elements= (ulong) (max_in_memory_size / + ALIGN_SIZE(sizeof(TREE_ELEMENT)+size)); + VOID(open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE, + MYF(MY_WME))); +} + + +/* + Calculate log2(n!) + + NOTES + Stirling's approximate formula is used: + + n! ~= sqrt(2*M_PI*n) * (n/M_E)^n + + Derivation of formula used for calculations is as follows: + + log2(n!) = log(n!)/log(2) = log(sqrt(2*M_PI*n)*(n/M_E)^n) / log(2) = + + = (log(2*M_PI*n)/2 + n*log(n/M_E)) / log(2). +*/ + +inline double log2_n_fact(double x) +{ + return (log(2*M_PI*x)/2 + x*log(x/M_E)) / M_LN2; +} + + +/* + Calculate cost of merge_buffers function call for given sequence of + input stream lengths and store the number of rows in result stream in *last. 
+ + SYNOPSIS + get_merge_buffers_cost() + buff_elems Array of #s of elements in buffers + elem_size Size of element stored in buffer + first Pointer to first merged element size + last Pointer to last merged element size + + RETURN + Cost of merge_buffers operation in disk seeks. + + NOTES + It is assumed that no rows are eliminated during merge. + The cost is calculated as + + cost(read_and_write) + cost(merge_comparisons). + + All bytes in the sequences is read and written back during merge so cost + of disk io is 2*elem_size*total_buf_elems/IO_SIZE (2 is for read + write) + + For comparisons cost calculations we assume that all merged sequences have + the same length, so each of total_buf_size elements will be added to a sort + heap with (n_buffers-1) elements. This gives the comparison cost: + + total_buf_elems* log2(n_buffers) / TIME_FOR_COMPARE_ROWID; +*/ + +static double get_merge_buffers_cost(uint *buff_elems, uint elem_size, + uint *first, uint *last) +{ + uint total_buf_elems= 0; + for (uint *pbuf= first; pbuf <= last; pbuf++) + total_buf_elems+= *pbuf; + *last= total_buf_elems; + + int n_buffers= last - first + 1; + + /* Using log2(n)=log(n)/log(2) formula */ + return 2*((double)total_buf_elems*elem_size) / IO_SIZE + + total_buf_elems*log((double) n_buffers) / (TIME_FOR_COMPARE_ROWID * M_LN2); } +/* + Calculate cost of merging buffers into one in Unique::get, i.e. calculate + how long (in terms of disk seeks) the two calls + merge_many_buffs(...); + merge_buffers(...); + will take. + + SYNOPSIS + get_merge_many_buffs_cost() + buffer buffer space for temporary data, at least + Unique::get_cost_calc_buff_size bytes + maxbuffer # of full buffers + max_n_elems # of elements in first maxbuffer buffers + last_n_elems # of elements in last buffer + elem_size size of buffer element + + NOTES + maxbuffer+1 buffers are merged, where first maxbuffer buffers contain + max_n_elems elements each and last buffer contains last_n_elems elements. 
+ + The current implementation does a dumb simulation of merge_many_buffs + function actions. + + RETURN + Cost of merge in disk seeks. +*/ + +static double get_merge_many_buffs_cost(uint *buffer, + uint maxbuffer, uint max_n_elems, + uint last_n_elems, int elem_size) +{ + register int i; + double total_cost= 0.0; + uint *buff_elems= buffer; /* #s of elements in each of merged sequences */ + + /* + Set initial state: first maxbuffer sequences contain max_n_elems elements + each, last sequence contains last_n_elems elements. + */ + for (i = 0; i < (int)maxbuffer; i++) + buff_elems[i]= max_n_elems; + buff_elems[maxbuffer]= last_n_elems; + + /* + Do it exactly as merge_many_buff function does, calling + get_merge_buffers_cost to get cost of merge_buffers. + */ + if (maxbuffer >= MERGEBUFF2) + { + while (maxbuffer >= MERGEBUFF2) + { + uint lastbuff= 0; + for (i = 0; i <= (int) maxbuffer - MERGEBUFF*3/2; i += MERGEBUFF) + { + total_cost+=get_merge_buffers_cost(buff_elems, elem_size, + buff_elems + i, + buff_elems + i + MERGEBUFF-1); + lastbuff++; + } + total_cost+=get_merge_buffers_cost(buff_elems, elem_size, + buff_elems + i, + buff_elems + maxbuffer); + maxbuffer= lastbuff; + } + } + + /* Simulate final merge_buff call. */ + total_cost += get_merge_buffers_cost(buff_elems, elem_size, + buff_elems, buff_elems + maxbuffer); + return total_cost; +} + + +/* + Calculate cost of using Unique for processing nkeys elements of size + key_size using max_in_memory_size memory. + + SYNOPSIS + Unique::get_use_cost() + buffer space for temporary data, use Unique::get_cost_calc_buff_size + to get # bytes needed. + nkeys #of elements in Unique + key_size size of each elements in bytes + max_in_memory_size amount of memory Unique will be allowed to use + + RETURN + Cost in disk seeks. + + NOTES + cost(using_unqiue) = + cost(create_trees) + (see #1) + cost(merge) + (see #2) + cost(read_result) (see #3) + + 1. 
Cost of trees creation + For each Unique::put operation there will be 2*log2(n+1) elements + comparisons, where n runs from 1 tree_size (we assume that all added + elements are different). Together this gives: + + n_compares = 2*(log2(2) + log2(3) + ... + log2(N+1)) = 2*log2((N+1)!) + + then cost(tree_creation) = n_compares*ROWID_COMPARE_COST; + + Total cost of creating trees: + (n_trees - 1)*max_size_tree_cost + non_max_size_tree_cost. + + Approximate value of log2(N!) is calculated by log2_n_fact function. + + 2. Cost of merging. + If only one tree is created by Unique no merging will be necessary. + Otherwise, we model execution of merge_many_buff function and count + #of merges. (The reason behind this is that number of buffers is small, + while size of buffers is big and we don't want to loose precision with + O(x)-style formula) + + 3. If only one tree is created by Unique no disk io will happen. + Otherwise, ceil(key_len*n_keys) disk seeks are necessary. We assume + these will be random seeks. +*/ + +double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size, + ulonglong max_in_memory_size) +{ + ulong max_elements_in_tree; + ulong last_tree_elems; + int n_full_trees; /* number of trees in unique - 1 */ + double result; + + max_elements_in_tree= ((ulong) max_in_memory_size / + ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size)); + + n_full_trees= nkeys / max_elements_in_tree; + last_tree_elems= nkeys % max_elements_in_tree; + + /* Calculate cost of creating trees */ + result= 2*log2_n_fact(last_tree_elems + 1.0); + if (n_full_trees) + result+= n_full_trees * log2_n_fact(max_elements_in_tree + 1.0); + result /= TIME_FOR_COMPARE_ROWID; + + DBUG_PRINT("info",("unique trees sizes: %u=%u*%lu + %lu", nkeys, + n_full_trees, n_full_trees?max_elements_in_tree:0, + last_tree_elems)); + + if (!n_full_trees) + return result; + + /* + There is more then one tree and merging is necessary. 
+ First, add cost of writing all trees to disk, assuming that all disk + writes are sequential. + */ + result += DISK_SEEK_BASE_COST * n_full_trees * + ceil(((double) key_size)*max_elements_in_tree / IO_SIZE); + result += DISK_SEEK_BASE_COST * ceil(((double) key_size)*last_tree_elems / IO_SIZE); + + /* Cost of merge */ + double merge_cost= get_merge_many_buffs_cost(buffer, n_full_trees, + max_elements_in_tree, + last_tree_elems, key_size); + if (merge_cost < 0.0) + return merge_cost; + + result += merge_cost; + /* + Add cost of reading the resulting sequence, assuming there were no + duplicate elements. + */ + result += ceil((double)key_size*nkeys/IO_SIZE); + + return result; +} + Unique::~Unique() { close_cached_file(&file); @@ -77,13 +320,14 @@ Unique::~Unique() } - /* Write tree to disk; clear tree */ + /* Write tree to disk; clear tree */ bool Unique::flush() { BUFFPEK file_ptr; elements+= tree.elements_in_tree; file_ptr.count=tree.elements_in_tree; file_ptr.file_pos=my_b_tell(&file); + if (tree_walk(&tree, (tree_walk_action) unique_write_to_file, (void*) this, left_root_right) || insert_dynamic(&file_ptrs, (gptr) &file_ptr)) @@ -94,6 +338,242 @@ bool Unique::flush() /* + Clear the tree and the file. + You must call reset() if you want to reuse Unique after walk(). +*/ + +void +Unique::reset() +{ + reset_tree(&tree); + /* + If elements != 0, some trees were stored in the file (see how + flush() works). Note, that we can not count on my_b_tell(&file) == 0 + here, because it can return 0 right after walk(), and walk() does not + reset any Unique member. + */ + if (elements) + { + reset_dynamic(&file_ptrs); + reinit_io_cache(&file, WRITE_CACHE, 0L, 0, 1); + } + elements= 0; +} + +/* + The comparison function, passed to queue_init() in merge_walk() must + use comparison function of Uniques::tree, but compare members of struct + BUFFPEK. 
+*/ + +struct BUFFPEK_COMPARE_CONTEXT +{ + qsort_cmp2 key_compare; + void *key_compare_arg; +}; + +C_MODE_START + +static int buffpek_compare(void *arg, byte *key_ptr1, byte *key_ptr2) +{ + BUFFPEK_COMPARE_CONTEXT *ctx= (BUFFPEK_COMPARE_CONTEXT *) arg; + return ctx->key_compare(ctx->key_compare_arg, + *((byte **) key_ptr1), *((byte **)key_ptr2)); +} + +C_MODE_END + + +/* + DESCRIPTION + + Function is very similar to merge_buffers, but instead of writing sorted + unique keys to the output file, it invokes walk_action for each key. + This saves I/O if you need to pass through all unique keys only once. + + SYNOPSIS + merge_walk() + All params are 'IN' (but see comment for begin, end): + merge_buffer buffer to perform cached piece-by-piece loading + of trees; initially the buffer is empty + merge_buffer_size size of merge_buffer. Must be aligned with + key_length + key_length size of tree element; key_length * (end - begin) + must be less or equal than merge_buffer_size. + begin pointer to BUFFPEK struct for the first tree. + end pointer to BUFFPEK struct for the last tree; + end > begin and [begin, end) form a consecutive + range. BUFFPEKs structs in that range are used and + overwritten in merge_walk(). + walk_action element visitor. Action is called for each unique + key. + walk_action_arg argument to walk action. Passed to it on each call. + compare elements comparison function + compare_arg comparison function argument + file file with all trees dumped. Trees in the file + must contain sorted unique values. Cache must be + initialized in read mode. 
+ RETURN VALUE + 0 ok + <> 0 error +*/ + +static bool merge_walk(uchar *merge_buffer, ulong merge_buffer_size, + uint key_length, BUFFPEK *begin, BUFFPEK *end, + tree_walk_action walk_action, void *walk_action_arg, + qsort_cmp2 compare, void *compare_arg, + IO_CACHE *file) +{ + BUFFPEK_COMPARE_CONTEXT compare_context = { compare, compare_arg }; + QUEUE queue; + if (end <= begin || + merge_buffer_size < (ulong) (key_length * (end - begin + 1)) || + init_queue(&queue, (uint) (end - begin), offsetof(BUFFPEK, key), 0, + buffpek_compare, &compare_context)) + return 1; + /* we need space for one key when a piece of merge buffer is re-read */ + merge_buffer_size-= key_length; + uchar *save_key_buff= merge_buffer + merge_buffer_size; + uint max_key_count_per_piece= (uint) (merge_buffer_size/(end-begin) / + key_length); + /* if piece_size is aligned reuse_freed_buffer will always hit */ + uint piece_size= max_key_count_per_piece * key_length; + uint bytes_read; /* to hold return value of read_to_buffer */ + BUFFPEK *top; + int res= 1; + /* + Invariant: queue must contain top element from each tree, until a tree + is not completely walked through. + Here we're forcing the invariant, inserting one element from each tree + to the queue. + */ + for (top= begin; top != end; ++top) + { + top->base= merge_buffer + (top - begin) * piece_size; + top->max_keys= max_key_count_per_piece; + bytes_read= read_to_buffer(file, top, key_length); + if (bytes_read == (uint) (-1)) + goto end; + DBUG_ASSERT(bytes_read); + queue_insert(&queue, (byte *) top); + } + top= (BUFFPEK *) queue_top(&queue); + while (queue.elements > 1) + { + /* + Every iteration one element is removed from the queue, and one is + inserted by the rules of the invariant. If two adjacent elements on + the top of the queue are not equal, biggest one is unique, because all + elements in each tree are unique. Action is applied only to unique + elements. 
+ */ + void *old_key= top->key; + /* + read next key from the cache or from the file and push it to the + queue; this gives new top. + */ + top->key+= key_length; + if (--top->mem_count) + queue_replaced(&queue); + else /* next piece should be read */ + { + /* save old_key not to overwrite it in read_to_buffer */ + memcpy(save_key_buff, old_key, key_length); + old_key= save_key_buff; + bytes_read= read_to_buffer(file, top, key_length); + if (bytes_read == (uint) (-1)) + goto end; + else if (bytes_read > 0) /* top->key, top->mem_count are reset */ + queue_replaced(&queue); /* in read_to_buffer */ + else + { + /* + Tree for old 'top' element is empty: remove it from the queue and + give all its memory to the nearest tree. + */ + queue_remove(&queue, 0); + reuse_freed_buff(&queue, top, key_length); + } + } + top= (BUFFPEK *) queue_top(&queue); + /* new top has been obtained; if old top is unique, apply the action */ + if (compare(compare_arg, old_key, top->key)) + { + if (walk_action(old_key, 1, walk_action_arg)) + goto end; + } + } + /* + Applying walk_action to the tail of the last tree: this is safe because + either we had only one tree in the beginning, either we work with the + last tree in the queue. + */ + do + { + do + { + if (walk_action(top->key, 1, walk_action_arg)) + goto end; + top->key+= key_length; + } + while (--top->mem_count); + bytes_read= read_to_buffer(file, top, key_length); + if (bytes_read == (uint) (-1)) + goto end; + } + while (bytes_read); + res= 0; +end: + delete_queue(&queue); + return res; +} + + +/* + DESCRIPTION + Walks consecutively through all unique elements: + if all elements are in memory, then it simply invokes 'tree_walk', else + all flushed trees are loaded to memory piece-by-piece, pieces are + sorted, and action is called for each unique value. + Note: so as merging resets file_ptrs state, this method can change + internal Unique state to undefined: if you want to reuse Unique after + walk() you must call reset() first! 
+ SYNOPSIS + Unique:walk() + All params are 'IN': + action function-visitor, typed in include/my_tree.h + function is called for each unique element + arg argument for visitor, which is passed to it on each call + RETURN VALUE + 0 OK + <> 0 error + */ + +bool Unique::walk(tree_walk_action action, void *walk_action_arg) +{ + int res; + uchar *merge_buffer; + + if (elements == 0) /* the whole tree is in memory */ + return tree_walk(&tree, action, walk_action_arg, left_root_right); + + /* flush current tree to the file to have some memory for merge buffer */ + if (flush()) + return 1; + if (flush_io_cache(&file) || reinit_io_cache(&file, READ_CACHE, 0L, 0, 0)) + return 1; + if (!(merge_buffer= (uchar *) my_malloc((ulong) max_in_memory_size, MYF(0)))) + return 1; + res= merge_walk(merge_buffer, (ulong) max_in_memory_size, size, + (BUFFPEK *) file_ptrs.buffer, + (BUFFPEK *) file_ptrs.buffer + file_ptrs.elements, + action, walk_action_arg, + tree.compare, tree.custom_arg, &file); + my_free((char*) merge_buffer, MYF(0)); + return res; +} + +/* Modify the TABLE element so that when one calls init_records() the rows will be read in priority order. */ @@ -114,7 +594,7 @@ bool Unique::get(TABLE *table) return 0; } } - /* Not enough memory; Save the result to file */ + /* Not enough memory; Save the result to file && free memory used by tree */ if (flush()) return 1; @@ -126,7 +606,7 @@ bool Unique::get(TABLE *table) bool error=1; /* Open cached file if it isn't open */ - outfile=table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), + outfile=table->sort.io_cache=(IO_CACHE*) my_malloc(sizeof(IO_CACHE), MYF(MY_ZEROFILL)); if (!outfile || ! 
my_b_inited(outfile) && @@ -140,10 +620,10 @@ bool Unique::get(TABLE *table) sort_param.sort_form=table; sort_param.rec_length= sort_param.sort_length= sort_param.ref_length= size; - sort_param.keys= max_in_memory_size / sort_param.sort_length; + sort_param.keys= (uint) (max_in_memory_size / sort_param.sort_length); sort_param.not_killable=1; - if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) * + if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) * sort_param.sort_length, MYF(0)))) return 1; @@ -158,7 +638,7 @@ bool Unique::get(TABLE *table) goto err; if (merge_buffers(&sort_param, &file, outfile, sort_buffer, file_ptr, file_ptr, file_ptr+maxbuffer,0)) - goto err; + goto err; error=0; err: x_free((gptr) sort_buffer); diff --git a/sql/unireg.cc b/sql/unireg.cc index e5ee0222f20..06b2e0c10a2 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -43,11 +42,10 @@ static uint get_interval_id(uint *int_count,List<create_field> &create_fields, create_field *last_field); static bool pack_fields(File file, List<create_field> &create_fields, ulong data_offset); -static bool make_empty_rec(int file, enum db_type table_type, +static bool make_empty_rec(THD *thd, int file, enum db_type table_type, uint table_options, List<create_field> &create_fields, - uint reclength, uint null_fields, - ulong data_offset); + uint reclength, ulong data_offset); /* Create a frm (table definition) file @@ -76,19 +74,22 @@ bool mysql_create_frm(THD *thd, my_string file_name, uint keys, KEY *key_info, handler *db_file) { - uint reclength,info_length,screens,key_info_length,maxlength,null_fields; + LEX_STRING str_db_type; + uint reclength, info_length, screens, key_info_length, maxlength, tmp_len; + ulong key_buff_length; File file; ulong filepos, data_offset; uchar fileinfo[64],forminfo[288],*keybuff; TYPELIB formnames; uchar *screen_buff; + char buff[128]; DBUG_ENTER("mysql_create_frm"); formnames.type_names=0; if (!(screen_buff=pack_screens(create_fields,&info_length,&screens,0))) DBUG_RETURN(1); if (db_file == NULL) - db_file= get_new_handler((TABLE*) 0, create_info->db_type); + db_file= get_new_handler((TABLE*) 0, thd->mem_root, create_info->db_type); /* If fixed row records, we need one bit to check for deleted rows */ if (!(create_info->table_options & HA_OPTION_PACK_RECORD)) @@ -115,16 +116,21 @@ bool mysql_create_frm(THD *thd, my_string file_name, } } reclength=uint2korr(forminfo+266); - null_fields=uint2korr(forminfo+282); - if ((file=create_frm(file_name, db, table, reclength, fileinfo, + /* Calculate extra data segment length */ + str_db_type.str= (char *) ha_get_storage_engine(create_info->db_type); + str_db_type.length= strlen(str_db_type.str); + create_info->extra_size= (2 + str_db_type.length 
+ + 2 + create_info->connect_string.length); + + if ((file=create_frm(thd, file_name, db, table, reclength, fileinfo, create_info, keys)) < 0) { my_free((gptr) screen_buff,MYF(0)); DBUG_RETURN(1); } - uint key_buff_length=keys*(7+NAME_LEN+MAX_REF_PARTS*9)+16; + key_buff_length= uint4korr(fileinfo+47); keybuff=(uchar*) my_malloc(key_buff_length, MYF(0)); key_info_length= pack_keys(keybuff, keys, key_info, data_offset); VOID(get_form_pos(file,fileinfo,&formnames)); @@ -133,14 +139,32 @@ bool mysql_create_frm(THD *thd, my_string file_name, maxlength=(uint) next_io_size((ulong) (uint2korr(forminfo)+1000)); int2store(forminfo+2,maxlength); int4store(fileinfo+10,(ulong) (filepos+maxlength)); - int4store(fileinfo+47,key_buff_length); fileinfo[26]= (uchar) test((create_info->max_rows == 1) && (create_info->min_rows == 1) && (keys == 0)); int2store(fileinfo+28,key_info_length); - strmake((char*) forminfo+47,create_info->comment ? create_info->comment : "", - 60); - forminfo[46]=(uchar) strlen((char*)forminfo+47); // Length of comment + tmp_len= system_charset_info->cset->charpos(system_charset_info, + create_info->comment.str, + create_info->comment.str + + create_info->comment.length, 60); + if (tmp_len < create_info->comment.length) + { + (void) my_snprintf(buff, sizeof(buff), "Too long comment for table '%s'", + table); + if ((thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))) + { + my_message(ER_UNKNOWN_ERROR, buff, MYF(0)); + goto err; + } + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), buff); + create_info->comment.length= tmp_len; + } + + strmake((char*) forminfo+47, create_info->comment.str ? 
+ create_info->comment.str : "", create_info->comment.length); + forminfo[46]=(uchar) create_info->comment.length; if (my_pwrite(file,(byte*) fileinfo,64,0L,MYF_RW) || my_pwrite(file,(byte*) keybuff,key_info_length, (ulong) uint2korr(fileinfo+6),MYF_RW)) @@ -148,10 +172,22 @@ bool mysql_create_frm(THD *thd, my_string file_name, VOID(my_seek(file, (ulong) uint2korr(fileinfo+6)+ (ulong) key_buff_length, MY_SEEK_SET,MYF(0))); - if (make_empty_rec(file,create_info->db_type,create_info->table_options, - create_fields,reclength, null_fields, data_offset)) + if (make_empty_rec(thd,file,create_info->db_type,create_info->table_options, + create_fields,reclength, data_offset)) goto err; + int2store(buff, create_info->connect_string.length); + if (my_write(file, (const byte*)buff, 2, MYF(MY_NABP)) || + my_write(file, (const byte*)create_info->connect_string.str, + create_info->connect_string.length, MYF(MY_NABP))) + goto err; + + int2store(buff, str_db_type.length); + if (my_write(file, (const byte*)buff, 2, MYF(MY_NABP)) || + my_write(file, (const byte*)str_db_type.str, + str_db_type.length, MYF(MY_NABP))) + goto err; + VOID(my_seek(file,filepos,MY_SEEK_SET,MYF(0))); if (my_write(file,(byte*) forminfo,288,MYF_RW) || my_write(file,(byte*) screen_buff,info_length,MYF_RW) || @@ -232,7 +268,7 @@ err3: keys number of keys to create key_info Keys to create db_file Handler to use. 
May be zero, in which case we use - create_info->db_type + create_info->db_type RETURN 0 ok 1 error @@ -247,11 +283,11 @@ int rea_create_table(THD *thd, my_string file_name, DBUG_ENTER("rea_create_table"); if (mysql_create_frm(thd, file_name, db, table, create_info, - create_fields, keys, key_info, NULL)) + create_fields, keys, key_info, NULL)) DBUG_RETURN(1); - if (ha_create_table(file_name,create_info,0)) + if (!create_info->frm_only && ha_create_table(file_name,create_info,0)) { - my_delete(file_name,MYF(0)); + my_delete(file_name,MYF(0)); DBUG_RETURN(1); } DBUG_RETURN(0); @@ -356,16 +392,16 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo, pos[6]=pos[7]=0; // For the future pos+=8; key_parts+=key->key_parts; - DBUG_PRINT("loop",("flags: %d key_parts: %d at %lx", - key->flags,key->key_parts, - key->key_part)); + DBUG_PRINT("loop", ("flags: %d key_parts: %d at 0x%lx", + key->flags, key->key_parts, + (long) key->key_part)); for (key_part=key->key_part,key_part_end=key_part+key->key_parts ; key_part != key_part_end ; key_part++) { uint offset; - DBUG_PRINT("loop",("field: %d startpos: %lu length: %ld", + DBUG_PRINT("loop",("field: %d startpos: %lu length: %d", key_part->fieldnr, key_part->offset + data_offset, key_part->length)); int2store(pos,key_part->fieldnr+1+FIELD_NAME_USED); @@ -421,11 +457,12 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, if (create_fields.elements > MAX_FIELDS) { - my_error(ER_TOO_MANY_FIELDS,MYF(0)); + my_message(ER_TOO_MANY_FIELDS, ER(ER_TOO_MANY_FIELDS), MYF(0)); DBUG_RETURN(1); } - totlength=reclength=0L; + totlength= 0L; + reclength= data_offset; no_empty=int_count=int_parts=int_length=time_stamp_pos=null_fields= com_length=0; n_length=2L; @@ -436,6 +473,27 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, create_field *field; while ((field=it++)) { + uint tmp_len= system_charset_info->cset->charpos(system_charset_info, + field->comment.str, + field->comment.str + + 
field->comment.length, + 255); + if (tmp_len < field->comment.length) + { + char buff[128]; + (void) my_snprintf(buff,sizeof(buff), "Too long comment for field '%s'", + field->field_name); + if ((current_thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))) + { + my_message(ER_UNKNOWN_ERROR, buff, MYF(0)); + DBUG_RETURN(1); + } + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), buff); + field->comment.length= tmp_len; + } + totlength+= field->length; com_length+= field->comment.length; if (MTYP_TYPENR(field->unireg_check) == Field::NOEMPTY || @@ -454,6 +512,8 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, !time_stamp_pos) time_stamp_pos= (uint) field->offset+ (uint) data_offset + 1; length=field->pack_length; + /* Ensure we don't have any bugs when generating offsets */ + DBUG_ASSERT(reclength == field->offset + data_offset); if ((uint) field->offset+ (uint) data_offset+ length > reclength) reclength=(uint) (field->offset+ data_offset + length); n_length+= (ulong) strlen(field->field_name)+1; @@ -486,18 +546,13 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, for (uint pos= 0; pos < field->interval->count; pos++) { char *dst; - uint length= field->save_interval->type_lengths[pos], hex_length; const char *src= field->save_interval->type_names[pos]; - const char *srcend= src + length; + uint hex_length; + length= field->save_interval->type_lengths[pos]; hex_length= length * 2; field->interval->type_lengths[pos]= hex_length; field->interval->type_names[pos]= dst= sql_alloc(hex_length + 1); - for ( ; src < srcend; src++) - { - *dst++= _dig_vec_upper[((uchar) *src) >> 4]; - *dst++= _dig_vec_upper[((uchar) *src) & 15]; - } - *dst= '\0'; + octet2hex(dst, src, length); } } @@ -526,7 +581,7 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, if (info_length+(ulong) create_fields.elements*FCOMP+288+ n_length+int_length+com_length > 
65535L || int_count > 255) { - my_error(ER_TOO_MANY_FIELDS,MYF(0)); + my_message(ER_TOO_MANY_FIELDS, ER(ER_TOO_MANY_FIELDS), MYF(0)); DBUG_RETURN(1); } @@ -549,6 +604,7 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, int2store(forminfo+280,22); /* Rows needed */ int2store(forminfo+282,null_fields); int2store(forminfo+284,com_length); + /* Up to forminfo+288 is free to use for additional information */ DBUG_RETURN(0); } /* pack_header */ @@ -722,24 +778,26 @@ static bool pack_fields(File file, List<create_field> &create_fields, /* save an empty record on start of formfile */ -static bool make_empty_rec(File file,enum db_type table_type, +static bool make_empty_rec(THD *thd, File file,enum db_type table_type, uint table_options, List<create_field> &create_fields, - uint reclength, uint null_fields, + uint reclength, ulong data_offset) { int error; Field::utype type; - uint firstpos,null_count; + uint null_count; uchar *buff,*null_pos; TABLE table; create_field *field; handler *handler; + enum_check_fields old_count_cuted_fields= thd->count_cuted_fields; DBUG_ENTER("make_empty_rec"); /* We need a table to generate columns for default values */ bzero((char*) &table,sizeof(table)); - handler= get_new_handler((TABLE*) 0, table_type); + table.s= &table.share_not_to_be_used; + handler= get_new_handler((TABLE*) 0, thd->mem_root, table_type); if (!handler || !(buff=(uchar*) my_malloc((uint) reclength,MYF(MY_WME | MY_ZEROFILL)))) @@ -748,26 +806,29 @@ static bool make_empty_rec(File file,enum db_type table_type, DBUG_RETURN(1); } - table.in_use= current_thd; - table.db_low_byte_first= handler->low_byte_first(); - table.blob_ptr_size=portable_sizeof_char_ptr; + table.in_use= thd; + table.s->db_low_byte_first= handler->low_byte_first(); + table.s->blob_ptr_size= portable_sizeof_char_ptr; - firstpos=reclength; null_count=0; if (!(table_options & HA_OPTION_PACK_RECORD)) + { null_count++; // Need one bit for delete mark - DBUG_ASSERT(data_offset == 
((null_fields + null_count + 7) / 8)); - bfill(buff, (uint) data_offset, 255); - null_pos=buff; + *buff|= 1; + } + null_pos= buff; List_iterator<create_field> it(create_fields); + thd->count_cuted_fields= CHECK_FIELD_WARN; // To find wrong default values while ((field=it++)) { + /* + regfield don't have to be deleted as it's allocated with sql_alloc() + */ Field *regfield=make_field((char*) buff+field->offset + data_offset, field->length, - field->flags & NOT_NULL_FLAG ? 0: - null_pos+null_count/8, - 1 << (null_count & 7), + null_pos + null_count / 8, + null_count & 7, field->pack_flag, field->sql_type, field->charset, @@ -777,25 +838,39 @@ static bool make_empty_rec(File file,enum db_type table_type, field->interval, field->field_name, &table); + if (!regfield) + { + error= 1; + goto err; // End of memory + } if (!(field->flags & NOT_NULL_FLAG)) + { + *regfield->null_ptr|= regfield->null_bit; null_count++; + } - if ((uint) (field->offset + data_offset) < firstpos && - regfield->type() != FIELD_TYPE_NULL) - firstpos= field->offset + data_offset; + if (field->sql_type == FIELD_TYPE_BIT && !f_bit_as_char(field->pack_flag)) + null_count+= field->length & 7; type= (Field::utype) MTYP_TYPENR(field->unireg_check); if (field->def && (regfield->real_type() != FIELD_TYPE_YEAR || field->def->val_int() != 0)) - (void) field->def->save_in_field(regfield, 1); + { + if (field->def->save_in_field(regfield, 1)) + { + my_error(ER_INVALID_DEFAULT, MYF(0), regfield->field_name); + error= 1; + goto err; + } + } else if (regfield->real_type() == FIELD_TYPE_ENUM && (field->flags & NOT_NULL_FLAG)) { regfield->set_notnull(); - regfield->store((longlong) 1); + regfield->store((longlong) 1, TRUE); } else if (type == Field::YES) // Old unireg type regfield->store(ER(ER_YES),(uint) strlen(ER(ER_YES)),system_charset_info); @@ -803,13 +878,21 @@ static bool make_empty_rec(File file,enum db_type table_type, regfield->store(ER(ER_NO), (uint) strlen(ER(ER_NO)),system_charset_info); else 
regfield->reset(); - delete regfield; } + DBUG_ASSERT(data_offset == ((null_count + 7) / 8)); + + /* + We need to set the unused bits to 1. If the number of bits is a multiple + of 8 there are no unused bits. + */ + if (null_count & 7) + *(null_pos + null_count / 8)|= ~(((uchar) 1 << (null_count & 7)) - 1); - /* Fill not used startpos */ - bfill((byte*) buff+data_offset, firstpos- (uint) data_offset, 255); error=(int) my_write(file,(byte*) buff, (uint) reclength,MYF_RW); + +err: my_free((gptr) buff,MYF(MY_FAE)); delete handler; + thd->count_cuted_fields= old_count_cuted_fields; DBUG_RETURN(error); } /* make_empty_rec */ diff --git a/sql/unireg.h b/sql/unireg.h index 3fb30315c81..886b3d99212 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -1,9 +1,8 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB +/* Copyright (C) 2000-2006 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -37,20 +36,20 @@ #define SHAREDIR "share/" #endif -#define ER(X) errmesg[(X)-1000] -#define ER_SAFE(X) (((X) >= 1000 && (X) < ER_ERROR_MESSAGES + 1000) ? ER(X) : "Invalid error code") +#define ER(X) errmesg[(X) - ER_ERROR_FIRST] +#define ER_SAFE(X) (((X) >= ER_ERROR_FIRST && (X) <= ER_ERROR_LAST) ? 
ER(X) : "Invalid error code") #define ERRMAPP 1 /* Errormap f|r my_error */ #define LIBLEN FN_REFLEN-FN_LEN /* Max l{ngd p} dev */ -#define MAX_DBKEY_LENGTH (FN_LEN*2+1+1+4+4) /* extra 4+4 bytes for slave tmp - * tables */ +/* extra 4+4 bytes for slave tmp tables */ +#define MAX_DBKEY_LENGTH (NAME_LEN*2+1+1+4+4) #define MAX_ALIAS_NAME 256 #define MAX_FIELD_NAME 34 /* Max colum name length +2 */ #define MAX_SYS_VAR_LENGTH 32 -#define MAX_KEY 64 /* Max used keys */ +#define MAX_KEY MAX_INDEXES /* Max used keys */ #define MAX_REF_PARTS 16 /* Max parts used as ref */ -#define MAX_KEY_LENGTH 1024 /* max possible key */ +#define MAX_KEY_LENGTH 3072 /* max possible key */ #if SIZEOF_OFF_T > 4 #define MAX_REFLENGTH 8 /* Max length for record ref */ #else @@ -60,10 +59,14 @@ #define MAX_MBWIDTH 3 /* Max multibyte sequence */ #define MAX_FIELD_CHARLENGTH 255 -#define CONVERT_IF_BIGGER_TO_BLOB 255 +#define MAX_FIELD_VARCHARLENGTH 65535 +#define CONVERT_IF_BIGGER_TO_BLOB 512 /* Used for CREATE ... SELECT */ + /* Max column width +1 */ #define MAX_FIELD_WIDTH (MAX_FIELD_CHARLENGTH*MAX_MBWIDTH+1) +#define MAX_BIT_FIELD_LENGTH 64 /* Max length in bits for bit fields */ + #define MAX_DATE_WIDTH 10 /* YYYY-MM-DD */ #define MAX_TIME_WIDTH 23 /* -DDDDDD HH:MM:SS.###### */ #define MAX_DATETIME_FULL_WIDTH 29 /* YYYY-MM-DD HH:MM:SS.###### AM */ @@ -119,12 +122,12 @@ #define SPECIAL_LOG_QUERIES_NOT_USING_INDEXES 4096 /* Log q not using indexes */ /* Extern defines */ -#define store_record(A,B) bmove_align((A)->B,(A)->record[0],(size_t) (A)->reclength) -#define restore_record(A,B) bmove_align((A)->record[0],(A)->B,(size_t) (A)->reclength) -#define cmp_record(A,B) memcmp((A)->record[0],(A)->B,(size_t) (A)->reclength) +#define store_record(A,B) bmove_align((A)->B,(A)->record[0],(size_t) (A)->s->reclength) +#define restore_record(A,B) bmove_align((A)->record[0],(A)->B,(size_t) (A)->s->reclength) +#define cmp_record(A,B) memcmp((A)->record[0],(A)->B,(size_t) (A)->s->reclength) #define 
empty_record(A) { \ - restore_record((A),default_values); \ - bfill((A)->null_flags,(A)->null_bytes,255);\ + restore_record((A),s->default_values); \ + bfill((A)->null_flags,(A)->s->null_bytes,255);\ } /* Defines for use with openfrm, openprt and openfrd */ @@ -142,11 +145,14 @@ #define DONT_GIVE_ERROR 256 /* Don't do frm_error on openfrm */ #define READ_SCREENS 1024 /* Read screens, info and helpfile */ #define DELAYED_OPEN 4096 /* Open table later */ - +#define NO_ERR_ON_NEW_FRM 8192 /* stop error sending on new format */ +#define OPEN_VIEW_NO_PARSE 16384 /* Open frm only if it's a view, + but do not parse view itself */ #define SC_INFO_LENGTH 4 /* Form format constant */ #define TE_INFO_LENGTH 3 #define MTYP_NOEMPTY_BIT 128 +#define FRM_VER_TRUE_VARCHAR (FRM_VER+4) /* Minimum length pattern before Turbo Boyer-Moore is used for SELECT "text" LIKE "%pattern%", excluding the two |